| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
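Each row that follows is one flattened fill-in-the-middle (FIM) sample: a `file_name`, a `prefix`, a `suffix`, the held-out `middle`, and a `fim_type` label (the four classes seen below are `identifier_body`, `identifier_name`, `conditional_block`, and `random_line_split`). As a minimal sketch of how these columns relate, assuming a row were available as a plain Python dict with those five keys (the values here are shortened, hypothetical stand-ins, not verbatim rows from this table):

```python
# Minimal sketch: how one FIM row's columns fit back together.
# The dict below is a shortened, hypothetical stand-in for a row;
# real rows carry longer source spans split at the FIM boundary.
row = {
    "file_name": "ospf_sh_request_list.pb.go",
    "fim_type": "identifier_body",
    "prefix": 'func (m *OspfShRequestList) GetRequestNeighborAddress() string ',
    "middle": '{\n\tif m != nil {\n\t\treturn m.RequestNeighborAddress\n\t}\n\treturn ""\n}',
    "suffix": '\nfunc (m *OspfShRequestList) GetRequestInterfaceName() string {\n\t// ...\n}',
}

# Concatenating prefix + middle + suffix reconstructs the original source;
# a FIM model is asked to produce `middle` given `prefix` and `suffix`.
reconstructed = row["prefix"] + row["middle"] + row["suffix"]
print(f'{row["file_name"]} ({row["fim_type"]}):')
print(reconstructed)
```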
ospf_sh_request_list.pb.go
|
eighborAddress
}
return ""
}
type OspfShLsaSum struct {
HeaderLsaType string `protobuf:"bytes,1,opt,name=header_lsa_type,json=headerLsaType,proto3" json:"header_lsa_type,omitempty"`
HeaderLsaAge uint32 `protobuf:"varint,2,opt,name=header_lsa_age,json=headerLsaAge,proto3" json:"header_lsa_age,omitempty"`
HeaderLsId string `protobuf:"bytes,3,opt,name=header_ls_id,json=headerLsId,proto3" json:"header_ls_id,omitempty"`
HeaderAdvertisingRouter string `protobuf:"bytes,4,opt,name=header_advertising_router,json=headerAdvertisingRouter,proto3" json:"header_advertising_router,omitempty"`
HeaderSequenceNumber uint32 `protobuf:"varint,5,opt,name=header_sequence_number,json=headerSequenceNumber,proto3" json:"header_sequence_number,omitempty"`
HeaderLsaChecksum uint32 `protobuf:"varint,6,opt,name=header_lsa_checksum,json=headerLsaChecksum,proto3" json:"header_lsa_checksum,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OspfShLsaSum) Reset() { *m = OspfShLsaSum{} }
func (m *OspfShLsaSum) String() string { return proto.CompactTextString(m) }
func (*OspfShLsaSum) ProtoMessage() {}
func (*OspfShLsaSum) Descriptor() ([]byte, []int) {
return fileDescriptor_e4609c816fd64cee, []int{1}
}
func (m *OspfShLsaSum) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShLsaSum.Unmarshal(m, b)
}
func (m *OspfShLsaSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShLsaSum.Marshal(b, m, deterministic)
}
func (m *OspfShLsaSum) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShLsaSum.Merge(m, src)
}
func (m *OspfShLsaSum) XXX_Size() int {
return xxx_messageInfo_OspfShLsaSum.Size(m)
}
func (m *OspfShLsaSum) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShLsaSum.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShLsaSum proto.InternalMessageInfo
func (m *OspfShLsaSum) GetHeaderLsaType() string {
if m != nil {
return m.HeaderLsaType
}
return ""
}
func (m *OspfShLsaSum) GetHeaderLsaAge() uint32 {
if m != nil {
return m.HeaderLsaAge
}
return 0
}
func (m *OspfShLsaSum) GetHeaderLsId() string {
if m != nil {
return m.HeaderLsId
}
return ""
}
func (m *OspfShLsaSum) GetHeaderAdvertisingRouter() string {
if m != nil {
return m.HeaderAdvertisingRouter
}
return ""
}
func (m *OspfShLsaSum) GetHeaderSequenceNumber() uint32 {
if m != nil {
return m.HeaderSequenceNumber
}
return 0
}
func (m *OspfShLsaSum) GetHeaderLsaChecksum() uint32 {
if m != nil {
return m.HeaderLsaChecksum
}
return 0
}
type OspfShRequestList struct {
RequestNeighborId string `protobuf:"bytes,50,opt,name=request_neighbor_id,json=requestNeighborId,proto3" json:"request_neighbor_id,omitempty"`
RequestNeighborAddress string `protobuf:"bytes,51,opt,name=request_neighbor_address,json=requestNeighborAddress,proto3" json:"request_neighbor_address,omitempty"`
RequestInterfaceName string `protobuf:"bytes,52,opt,name=request_interface_name,json=requestInterfaceName,proto3" json:"request_interface_name,omitempty"`
Request []*OspfShLsaSum `protobuf:"bytes,53,rep,name=request,proto3" json:"request,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OspfShRequestList) Reset() { *m = OspfShRequestList{} }
func (m *OspfShRequestList) String() string { return proto.CompactTextString(m) }
func (*OspfShRequestList) ProtoMessage() {}
func (*OspfShRequestList) Descriptor() ([]byte, []int) {
return fileDescriptor_e4609c816fd64cee, []int{2}
}
func (m *OspfShRequestList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShRequestList.Unmarshal(m, b)
}
func (m *OspfShRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShRequestList.Marshal(b, m, deterministic)
}
func (m *OspfShRequestList) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShRequestList.Merge(m, src)
}
func (m *OspfShRequestList) XXX_Size() int {
return xxx_messageInfo_OspfShRequestList.Size(m)
}
func (m *OspfShRequestList) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShRequestList.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShRequestList proto.InternalMessageInfo
func (m *OspfShRequestList) GetRequestNeighborId() string {
if m != nil {
return m.RequestNeighborId
}
return ""
}
func (m *OspfShRequestList) GetRequestNeighborAddress() string
|
func (m *OspfShRequestList) GetRequestInterfaceName() string {
if m != nil {
return m.RequestInterfaceName
}
return ""
}
func (m *OspfShRequestList) GetRequest() []*OspfShLsaSum {
if m != nil {
return m.Request
}
return nil
}
func init() {
proto.RegisterType((*OspfShRequestList_KEYS)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list_KEYS")
proto.RegisterType((*OspfShLsaSum)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_lsa_sum")
proto.RegisterType((*OspfShRequestList)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list")
}
func init() { proto.RegisterFile("ospf_sh_request_list.proto", fileDescriptor_e4609c816fd64cee) }
var fileDescriptor_e4609c816fd64cee = []byte{
// 451 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0x4f, 0x6f, 0xd3, 0x30,
0x18, 0x87, 0xd5, 0x0e, 0x36, 0x78, 0xd7, 0xae, 0xcc, 0x54, 0x23, 0xe5, 0x54, 0x2a, 0x40, 0xe3,
0x92, 0xc3, 0x56, 0x24, 0xc4, 0xad, 0x42, 0x1c, 0x2a, 0x50, 0x0f, 0x1d, 0x17, 0x4e, 0x96, 0x1b,
0xbf, 0x69, 0x0c, 0x4b, 0x1c, 0xfc, 0x3a, 0x11, 0xfd, 0x10, 0x7c, 0x0a, 0x8e, 0x7c, 0x0f, 0x3e,
0x17, 0x8a, 0x
|
{
if m != nil {
return m.RequestNeighborAddress
}
return ""
}
|
identifier_body
|
ospf_sh_request_list.pb.go
|
() string {
if m != nil {
return m.InterfaceName
}
return ""
}
func (m *OspfShRequestList_KEYS) GetNeighborAddress() string {
if m != nil {
return m.NeighborAddress
}
return ""
}
type OspfShLsaSum struct {
HeaderLsaType string `protobuf:"bytes,1,opt,name=header_lsa_type,json=headerLsaType,proto3" json:"header_lsa_type,omitempty"`
HeaderLsaAge uint32 `protobuf:"varint,2,opt,name=header_lsa_age,json=headerLsaAge,proto3" json:"header_lsa_age,omitempty"`
HeaderLsId string `protobuf:"bytes,3,opt,name=header_ls_id,json=headerLsId,proto3" json:"header_ls_id,omitempty"`
HeaderAdvertisingRouter string `protobuf:"bytes,4,opt,name=header_advertising_router,json=headerAdvertisingRouter,proto3" json:"header_advertising_router,omitempty"`
HeaderSequenceNumber uint32 `protobuf:"varint,5,opt,name=header_sequence_number,json=headerSequenceNumber,proto3" json:"header_sequence_number,omitempty"`
HeaderLsaChecksum uint32 `protobuf:"varint,6,opt,name=header_lsa_checksum,json=headerLsaChecksum,proto3" json:"header_lsa_checksum,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OspfShLsaSum) Reset() { *m = OspfShLsaSum{} }
func (m *OspfShLsaSum) String() string { return proto.CompactTextString(m) }
func (*OspfShLsaSum) ProtoMessage() {}
func (*OspfShLsaSum) Descriptor() ([]byte, []int) {
return fileDescriptor_e4609c816fd64cee, []int{1}
}
func (m *OspfShLsaSum) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShLsaSum.Unmarshal(m, b)
}
func (m *OspfShLsaSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShLsaSum.Marshal(b, m, deterministic)
}
func (m *OspfShLsaSum) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShLsaSum.Merge(m, src)
}
func (m *OspfShLsaSum) XXX_Size() int {
return xxx_messageInfo_OspfShLsaSum.Size(m)
}
func (m *OspfShLsaSum) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShLsaSum.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShLsaSum proto.InternalMessageInfo
func (m *OspfShLsaSum) GetHeaderLsaType() string {
if m != nil {
return m.HeaderLsaType
}
return ""
}
func (m *OspfShLsaSum) GetHeaderLsaAge() uint32 {
if m != nil {
return m.HeaderLsaAge
}
return 0
}
func (m *OspfShLsaSum) GetHeaderLsId() string {
if m != nil {
return m.HeaderLsId
}
return ""
}
func (m *OspfShLsaSum) GetHeaderAdvertisingRouter() string {
if m != nil {
return m.HeaderAdvertisingRouter
}
return ""
}
func (m *OspfShLsaSum) GetHeaderSequenceNumber() uint32 {
if m != nil {
return m.HeaderSequenceNumber
}
return 0
}
func (m *OspfShLsaSum) GetHeaderLsaChecksum() uint32 {
if m != nil {
return m.HeaderLsaChecksum
}
return 0
}
type OspfShRequestList struct {
RequestNeighborId string `protobuf:"bytes,50,opt,name=request_neighbor_id,json=requestNeighborId,proto3" json:"request_neighbor_id,omitempty"`
RequestNeighborAddress string `protobuf:"bytes,51,opt,name=request_neighbor_address,json=requestNeighborAddress,proto3" json:"request_neighbor_address,omitempty"`
RequestInterfaceName string `protobuf:"bytes,52,opt,name=request_interface_name,json=requestInterfaceName,proto3" json:"request_interface_name,omitempty"`
Request []*OspfShLsaSum `protobuf:"bytes,53,rep,name=request,proto3" json:"request,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OspfShRequestList) Reset() { *m = OspfShRequestList{} }
func (m *OspfShRequestList) String() string { return proto.CompactTextString(m) }
func (*OspfShRequestList) ProtoMessage() {}
func (*OspfShRequestList) Descriptor() ([]byte, []int) {
return fileDescriptor_e4609c816fd64cee, []int{2}
}
func (m *OspfShRequestList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OspfShRequestList.Unmarshal(m, b)
}
func (m *OspfShRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OspfShRequestList.Marshal(b, m, deterministic)
}
func (m *OspfShRequestList) XXX_Merge(src proto.Message) {
xxx_messageInfo_OspfShRequestList.Merge(m, src)
}
func (m *OspfShRequestList) XXX_Size() int {
return xxx_messageInfo_OspfShRequestList.Size(m)
}
func (m *OspfShRequestList) XXX_DiscardUnknown() {
xxx_messageInfo_OspfShRequestList.DiscardUnknown(m)
}
var xxx_messageInfo_OspfShRequestList proto.InternalMessageInfo
func (m *OspfShRequestList) GetRequestNeighborId() string {
if m != nil {
return m.RequestNeighborId
}
return ""
}
func (m *OspfShRequestList) GetRequestNeighborAddress() string {
if m != nil {
return m.RequestNeighborAddress
}
return ""
}
func (m *OspfShRequestList) GetRequestInterfaceName() string {
if m != nil {
return m.RequestInterfaceName
}
return ""
}
func (m *OspfShRequestList) GetRequest() []*OspfShLsaSum {
if m != nil {
return m.Request
}
return nil
}
func init() {
proto.RegisterType((*OspfShRequestList_KEYS)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list_KEYS")
proto.RegisterType((*OspfShLsaSum)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_lsa_sum")
proto.RegisterType((*OspfShRequestList)(nil), "cisco_ios_xr_ipv4_ospf_oper.ospf.processes.process.vrfs.vrf.adjacency_information.requests.request.ospf_sh_request_list")
}
func init() { proto.RegisterFile("ospf_sh_request_list.proto", fileDescriptor_e4609c816fd64cee) }
var fileDescriptor_e4609c816fd64cee = []byte{
// 451 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0x4f, 0x6f, 0xd3, 0x30,
0x18, 0x87, 0xd5, 0x0e, 0x36, 0x78, 0xd7, 0xae, 0xcc, 0x54, 0x23, 0xe5, 0x54, 0x2a, 0x40, 0xe3,
0x92, 0xc3, 0x56, 0x24, 0xc4, 0xad, 0x42, 0x1c, 0x2a, 0x50, 0x0f, 0x1d, 0x17, 0x4e, 0x96, 0x1b,
0xbf, 0x69, 0x0c, 0x4b, 0x1c, 0xfc, 0x3a, 0x11, 0xfd, 0x10, 0x7
|
GetInterfaceName
|
identifier_name
|
|
login.go
|
WriteLogFile(err)
return err
}
if len(results.Entries) < 1 {
return errors.New("not found")
}
// attempt auth
log.Println(results.Entries)
//role = strings.Join(results.Entries[0].Attributes[3].Values, "")
return conn.Bind(results.Entries[0].DN, password)
}
// New client with the provided config
// If the configuration provided is invalid,
// or client is unable to connect with the config
// provided, an error will be returned
func New(config model.Config) (model.Client, error) {
config, err := validateConfig(config)
if err != nil {
WriteLogFile(err)
return nil, err
}
c := client{config}
conn, err := connect(c.Host) // test connection
if err != nil {
WriteLogFile(err)
return nil, err
}
if err = conn.Bind(c.ROUser.Name, c.ROUser.Password); err != nil {
WriteLogFile(err)
return nil, err
}
conn.Close()
return c, err
}
// Helper functions
// establishes a connection with an ldap host
// (the caller is expected to Close the connection when finished)
func connect(host string) (*ldap.Conn, error) {
c, err := net.DialTimeout("tcp", host, time.Second*8)
if err != nil {
WriteLogFile(err)
return nil, err
}
conn := ldap.NewConn(c, false)
conn.Start()
return conn, nil
}
func validateConfig(config model.Config) (model.Config, error) {
if config.BaseDN == "" || config.Host == "" || config.ROUser.Name == "" || config.ROUser.Password == "" {
return model.Config{}, errors.New("[CONFIG] The config provided could not be validated")
}
if config.Filter == "" {
config.Filter = "sAMAccountName"
}
return config, nil
}
func token(w http.ResponseWriter, username string) {
var error model.Error
db := database.DbConn()
defer db.Close()
if Role == "project manager" {
selDB, err := db.Query("SELECT project_manager_email from project_manager WHERE project_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else if Role == "program manager" {
selDB, err := db.Query("SELECT program_manager_email from program_manager WHERE program_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
expirationTime := time.Now().Add(3600 * time.Second).Unix()
claims := &model.Claims{
Username: username,
StandardClaims: jwt.StandardClaims{
ExpiresAt: expirationTime,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Create the JWT string
tokenString, err := token.SignedString(jwtKey)
if err != nil {
WriteLogFile(err)
// If there is an error in creating the JWT return an internal server error
w.WriteHeader(http.StatusInternalServerError)
return
}
rand.Seed(time.Now().UnixNano())
chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ_-&" +
"abcdefghijklmnopqrstuvwxyz%*" +
"0123456789")
length := 20
var b strings.Builder
for i := 0; i < length; i++ {
b.WriteRune(chars[rand.Intn(len(chars))])
}
var jwtToken model.JwtToken
jwtToken.AccessToken = tokenString
jwtToken.TokenType = "bearer"
jwtToken.Expiry = "3600"
jwtToken.RefreshToken = b.String()
json.NewEncoder(w).Encode(jwtToken)
w.WriteHeader(http.StatusCreated)
createdAt := time.Now()
var query string = "Insert into token(username,access_token,expiration,role,created_at) values (?,?,?,?,?)"
insert, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert.Exec(username, tokenString, expirationTime, Role, createdAt.Format("2006-01-02"))
defer insert.Close()
query = "Insert into refresh_token(username,access_token,refresh_token,created_at) values (?,?,?,?)"
insert1, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert1.Exec(username, tokenString, b.String(), createdAt.Format("2006-01-02"))
defer insert1.Close()
}
// SignIn : for user sign-in through LDAP
func (c *Commander) SignIn(w http.ResponseWriter, r *http.Request) {
var client model.Client
var err error
var error model.Error
db := database.DbConn()
defer db.Close()
// create a new client
if client, err = New(model.Config{
BaseDN: "DC=sls,DC=ads,DC=valuelabs,DC=net",
//BaseDN: "cn=ltest,ou=SERVICE ACCOUNTS,ou=SLS,dc=SLS,dc=ads,dc=valuelabs,dc=net",
Filter: "userPrincipalName",
ROUser: model.User{Name: "L test", Password: "Welcome@123"},
Title: "title",
Host: "10.10.52.113:389",
}); err != nil {
WriteLogFile(err)
fmt.Println(err)
return
}
var creds model.Credentials
// var pass string
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
// Get the JSON body and decode into credentials
err = json.NewDecoder(r.Body).Decode(&creds)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
// var usr = creds.Username
// var bytePassword = []byte(creds.Password)
username := creds.Username
password := creds.Password
Role = creds.Role
splitUser := strings.Split(username, "@")
print := splitUser[0]
user1 := fmt.Sprintf("%s@valuelabs.com", print)
user2 := fmt.Sprintf("%s@sls.ads.valuelabs.net", print)
err = client.Auth(user2, password)
if err == nil {
fmt.Println("Success!")
token(w, user1)
} else if err.Error() == "not found" {
fmt.Println("H2")
if errr := client.Auth(user1, password); errr != nil {
fmt.Println("H3")
WriteLogFile(errr)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
} //else {
fmt.Println("Success!")
token(w, user1)
//}
} else {
fmt.Println("H4")
WriteLogFile(err)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
}
}
// Refresh : to generate refresh tokens
func (c *Commander)
|
(w http.ResponseWriter, r *http.Request) {
fmt.Println("123")
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
claims := &model.Claims{}
type refresh struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
fmt.Println("123")
var ref refresh
err := json.NewDecoder(r.Body).Decode(&ref)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
var refToken = ref.RefreshToken
var accToken = ref.AccessToken
fmt.Println(refToken)
fmt.Println(accToken)
db := database.DbConn()
defer db.Close()
selDB, err := db.Query("SELECT username from refresh_token where access_token=? and refresh_token=? and is_active='1'", accToken, refToken)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
defer selDB.Close()
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
return
} //
|
Refresh
|
identifier_name
|
login.go
|
defer conn.Close()
// perform initial read-only bind
if err = conn.Bind(c.ROUser.Name, c.ROUser.Password); err != nil {
WriteLogFile(err)
return err
}
// find the user attempting to login
results, err := conn.Search(ldap.NewSearchRequest(
c.BaseDN, ldap.ScopeWholeSubtree,
ldap.NeverDerefAliases,
0, 0, false, fmt.Sprintf("(%v=%v)", c.Filter, username),
[]string{}, nil,
))
if err != nil {
WriteLogFile(err)
return err
}
if len(results.Entries) < 1 {
return errors.New("not found")
}
// attempt auth
log.Println(results.Entries)
//role = strings.Join(results.Entries[0].Attributes[3].Values, "")
return conn.Bind(results.Entries[0].DN, password)
}
// New client with the provided config
// If the configuration provided is invalid,
// or client is unable to connect with the config
// provided, an error will be returned
func New(config model.Config) (model.Client, error) {
config, err := validateConfig(config)
if err != nil {
WriteLogFile(err)
return nil, err
}
c := client{config}
conn, err := connect(c.Host) // test connection
if err != nil {
WriteLogFile(err)
return nil, err
}
if err = conn.Bind(c.ROUser.Name, c.ROUser.Password); err != nil {
WriteLogFile(err)
return nil, err
}
conn.Close()
return c, err
}
// Helper functions
// establishes a connection with an ldap host
// (the caller is expected to Close the connection when finished)
func connect(host string) (*ldap.Conn, error) {
c, err := net.DialTimeout("tcp", host, time.Second*8)
if err != nil {
WriteLogFile(err)
return nil, err
}
conn := ldap.NewConn(c, false)
conn.Start()
return conn, nil
}
func validateConfig(config model.Config) (model.Config, error) {
if config.BaseDN == "" || config.Host == "" || config.ROUser.Name == "" || config.ROUser.Password == "" {
return model.Config{}, errors.New("[CONFIG] The config provided could not be validated")
}
if config.Filter == "" {
config.Filter = "sAMAccountName"
}
return config, nil
}
func token(w http.ResponseWriter, username string) {
var error model.Error
db := database.DbConn()
defer db.Close()
if Role == "project manager" {
selDB, err := db.Query("SELECT project_manager_email from project_manager WHERE project_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else if Role == "program manager" {
selDB, err := db.Query("SELECT program_manager_email from program_manager WHERE program_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
expirationTime := time.Now().Add(3600 * time.Second).Unix()
claims := &model.Claims{
Username: username,
StandardClaims: jwt.StandardClaims{
ExpiresAt: expirationTime,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Create the JWT string
tokenString, err := token.SignedString(jwtKey)
if err != nil {
WriteLogFile(err)
// If there is an error in creating the JWT return an internal server error
w.WriteHeader(http.StatusInternalServerError)
return
}
rand.Seed(time.Now().UnixNano())
chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ_-&" +
"abcdefghijklmnopqrstuvwxyz%*" +
"0123456789")
length := 20
var b strings.Builder
for i := 0; i < length; i++ {
b.WriteRune(chars[rand.Intn(len(chars))])
}
var jwtToken model.JwtToken
jwtToken.AccessToken = tokenString
jwtToken.TokenType = "bearer"
jwtToken.Expiry = "3600"
jwtToken.RefreshToken = b.String()
json.NewEncoder(w).Encode(jwtToken)
w.WriteHeader(http.StatusCreated)
createdAt := time.Now()
var query string = "Insert into token(username,access_token,expiration,role,created_at) values (?,?,?,?,?)"
insert, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert.Exec(username, tokenString, expirationTime, Role, createdAt.Format("2006-01-02"))
defer insert.Close()
query = "Insert into refresh_token(username,access_token,refresh_token,created_at) values (?,?,?,?)"
insert1, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert1.Exec(username, tokenString, b.String(), createdAt.Format("2006-01-02"))
defer insert1.Close()
}
// SignIn : for user sign-in through LDAP
func (c *Commander) SignIn(w http.ResponseWriter, r *http.Request) {
var client model.Client
var err error
var error model.Error
db := database.DbConn()
defer db.Close()
// create a new client
if client, err = New(model.Config{
BaseDN: "DC=sls,DC=ads,DC=valuelabs,DC=net",
//BaseDN: "cn=ltest,ou=SERVICE ACCOUNTS,ou=SLS,dc=SLS,dc=ads,dc=valuelabs,dc=net",
Filter: "userPrincipalName",
ROUser: model.User{Name: "L test", Password: "Welcome@123"},
Title: "title",
Host: "10.10.52.113:389",
}); err != nil {
WriteLogFile(err)
fmt.Println(err)
return
}
var creds model.Credentials
// var pass string
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
// Get the JSON body and decode into credentials
err = json.NewDecoder(r.Body).Decode(&creds)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
// var usr = creds.Username
// var bytePassword = []byte(creds.Password)
username := creds.Username
password := creds.Password
Role = creds.Role
splitUser := strings.Split(username, "@")
print := splitUser[0]
user1 := fmt.Sprintf("%s@valuelabs.com", print)
user2 := fmt.Sprintf("%s@sls.ads.valuelabs.net", print)
err = client.Auth(user2, password)
if err == nil {
fmt.Println("Success!")
token(w, user1)
} else if err.Error() == "not found" {
fmt.Println("H2")
if errr := client.Auth(user1, password); errr != nil {
fmt.Println("H3")
WriteLogFile(errr)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
} //else {
fmt.Println("Success!")
token(w, user1)
//}
} else {
fmt.Println("H4")
WriteLogFile(err)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
}
}
// Refresh : to generate refresh tokens
func (c *Commander) Refresh(w http.ResponseWriter, r *http.Request) {
fmt.Println("123")
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
claims := &model.Claims{}
type refresh struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
fmt.Println("123")
var ref refresh
err := json.NewDecoder(r.Body).Decode(&ref)
if err != nil {
// If the structure of the body is wrong, return an HTTP
|
{
WriteLogFile(err)
return err
}
|
conditional_block
|
|
login.go
|
WriteLogFile(err)
return err
}
if len(results.Entries) < 1 {
return errors.New("not found")
}
// attempt auth
log.Println(results.Entries)
//role = strings.Join(results.Entries[0].Attributes[3].Values, "")
return conn.Bind(results.Entries[0].DN, password)
}
// New client with the provided config
// If the configuration provided is invalid,
// or client is unable to connect with the config
// provided, an error will be returned
func New(config model.Config) (model.Client, error) {
config, err := validateConfig(config)
if err != nil {
WriteLogFile(err)
return nil, err
}
c := client{config}
conn, err := connect(c.Host) // test connection
if err != nil {
WriteLogFile(err)
return nil, err
}
if err = conn.Bind(c.ROUser.Name, c.ROUser.Password); err != nil {
WriteLogFile(err)
return nil, err
}
conn.Close()
return c, err
}
// Helper functions
// establishes a connection with an ldap host
// (the caller is expected to Close the connection when finished)
func connect(host string) (*ldap.Conn, error) {
c, err := net.DialTimeout("tcp", host, time.Second*8)
if err != nil {
WriteLogFile(err)
return nil, err
}
conn := ldap.NewConn(c, false)
conn.Start()
return conn, nil
}
func validateConfig(config model.Config) (model.Config, error) {
if config.BaseDN == "" || config.Host == "" || config.ROUser.Name == "" || config.ROUser.Password == "" {
return model.Config{}, errors.New("[CONFIG] The config provided could not be validated")
}
if config.Filter == "" {
config.Filter = "sAMAccountName"
}
return config, nil
}
func token(w http.ResponseWriter, username string)
|
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
expirationTime := time.Now().Add(3600 * time.Second).Unix()
claims := &model.Claims{
Username: username,
StandardClaims: jwt.StandardClaims{
ExpiresAt: expirationTime,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Create the JWT string
tokenString, err := token.SignedString(jwtKey)
if err != nil {
WriteLogFile(err)
// If there is an error in creating the JWT return an internal server error
w.WriteHeader(http.StatusInternalServerError)
return
}
rand.Seed(time.Now().UnixNano())
chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ_-&" +
"abcdefghijklmnopqrstuvwxyz%*" +
"0123456789")
length := 20
var b strings.Builder
for i := 0; i < length; i++ {
b.WriteRune(chars[rand.Intn(len(chars))])
}
var jwtToken model.JwtToken
jwtToken.AccessToken = tokenString
jwtToken.TokenType = "bearer"
jwtToken.Expiry = "3600"
jwtToken.RefreshToken = b.String()
json.NewEncoder(w).Encode(jwtToken)
w.WriteHeader(http.StatusCreated)
createdAt := time.Now()
var query string = "Insert into token(username,access_token,expiration,role,created_at) values (?,?,?,?,?)"
insert, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert.Exec(username, tokenString, expirationTime, Role, createdAt.Format("2006-01-02"))
defer insert.Close()
query = "Insert into refresh_token(username,access_token,refresh_token,created_at) values (?,?,?,?)"
insert1, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert1.Exec(username, tokenString, b.String(), createdAt.Format("2006-01-02"))
defer insert1.Close()
}
// SignIn : for user sign-in through LDAP
func (c *Commander) SignIn(w http.ResponseWriter, r *http.Request) {
var client model.Client
var err error
var error model.Error
db := database.DbConn()
defer db.Close()
// create a new client
if client, err = New(model.Config{
BaseDN: "DC=sls,DC=ads,DC=valuelabs,DC=net",
//BaseDN: "cn=ltest,ou=SERVICE ACCOUNTS,ou=SLS,dc=SLS,dc=ads,dc=valuelabs,dc=net",
Filter: "userPrincipalName",
ROUser: model.User{Name: "L test", Password: "Welcome@123"},
Title: "title",
Host: "10.10.52.113:389",
}); err != nil {
WriteLogFile(err)
fmt.Println(err)
return
}
var creds model.Credentials
// var pass string
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
// Get the JSON body and decode into credentials
err = json.NewDecoder(r.Body).Decode(&creds)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
// var usr = creds.Username
// var bytePassword = []byte(creds.Password)
username := creds.Username
password := creds.Password
Role = creds.Role
splitUser := strings.Split(username, "@")
print := splitUser[0]
user1 := fmt.Sprintf("%s@valuelabs.com", print)
user2 := fmt.Sprintf("%s@sls.ads.valuelabs.net", print)
err = client.Auth(user2, password)
if err == nil {
fmt.Println("Success!")
token(w, user1)
} else if err.Error() == "not found" {
fmt.Println("H2")
if errr := client.Auth(user1, password); errr != nil {
fmt.Println("H3")
WriteLogFile(errr)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
} //else {
fmt.Println("Success!")
token(w, user1)
//}
} else {
fmt.Println("H4")
WriteLogFile(err)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
}
}
// Refresh : to generate refresh tokens
func (c *Commander) Refresh(w http.ResponseWriter, r *http.Request) {
fmt.Println("123")
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
claims := &model.Claims{}
type refresh struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
fmt.Println("123")
var ref refresh
err := json.NewDecoder(r.Body).Decode(&ref)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
var refToken = ref.RefreshToken
var accToken = ref.AccessToken
fmt.Println(refToken)
fmt.Println(accToken)
db := database.DbConn()
defer db.Close()
selDB, err := db.Query("SELECT username from refresh_token where access_token=? and refresh_token=? and is_active='1'", accToken, refToken)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
defer selDB.Close()
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
return
} //
|
{
var error model.Error
db := database.DbConn()
defer db.Close()
if Role == "project manager" {
selDB, err := db.Query("SELECT project_manager_email from project_manager WHERE project_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else if Role == "program manager" {
selDB, err := db.Query("SELECT program_manager_email from program_manager WHERE program_manager_email =? ", username)
|
identifier_body
|
login.go
|
be validated")
}
if config.Filter == "" {
config.Filter = "sAMAccountName"
}
return config, nil
}
func token(w http.ResponseWriter, username string) {
var error model.Error
db := database.DbConn()
defer db.Close()
if Role == "project manager" {
selDB, err := db.Query("SELECT project_manager_email from project_manager WHERE project_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else if Role == "program manager" {
selDB, err := db.Query("SELECT program_manager_email from program_manager WHERE program_manager_email =? ", username)
defer selDB.Close()
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
} else {
w.WriteHeader(http.StatusUnauthorized)
error.Message = "Unauthorized user role"
json.NewEncoder(w).Encode(error)
return
}
expirationTime := time.Now().Add(3600 * time.Second).Unix()
claims := &model.Claims{
Username: username,
StandardClaims: jwt.StandardClaims{
ExpiresAt: expirationTime,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Create the JWT string
tokenString, err := token.SignedString(jwtKey)
if err != nil {
WriteLogFile(err)
// If there is an error in creating the JWT return an internal server error
w.WriteHeader(http.StatusInternalServerError)
return
}
rand.Seed(time.Now().UnixNano())
chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ_-&" +
"abcdefghijklmnopqrstuvwxyz%*" +
"0123456789")
length := 20
var b strings.Builder
for i := 0; i < length; i++ {
b.WriteRune(chars[rand.Intn(len(chars))])
}
var jwtToken model.JwtToken
jwtToken.AccessToken = tokenString
jwtToken.TokenType = "bearer"
jwtToken.Expiry = "3600"
jwtToken.RefreshToken = b.String()
json.NewEncoder(w).Encode(jwtToken)
w.WriteHeader(http.StatusCreated)
createdAt := time.Now()
var query string = "Insert into token(username,access_token,expiration,role,created_at) values (?,?,?,?,?)"
insert, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert.Exec(username, tokenString, expirationTime, Role, createdAt.Format("2006-01-02"))
defer insert.Close()
query = "Insert into refresh_token(username,access_token,refresh_token,created_at) values (?,?,?,?)"
insert1, err := db.Prepare(query)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
insert1.Exec(username, tokenString, b.String(), createdAt.Format("2006-01-02"))
defer insert1.Close()
}
// SignIn : for user sign-in through LDAP
func (c *Commander) SignIn(w http.ResponseWriter, r *http.Request) {
var client model.Client
var err error
var error model.Error
db := database.DbConn()
defer db.Close()
// create a new client
if client, err = New(model.Config{
BaseDN: "DC=sls,DC=ads,DC=valuelabs,DC=net",
//BaseDN: "cn=ltest,ou=SERVICE ACCOUNTS,ou=SLS,dc=SLS,dc=ads,dc=valuelabs,dc=net",
Filter: "userPrincipalName",
ROUser: model.User{Name: "L test", Password: "Welcome@123"},
Title: "title",
Host: "10.10.52.113:389",
}); err != nil {
WriteLogFile(err)
fmt.Println(err)
return
}
var creds model.Credentials
// var pass string
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
// Get the JSON body and decode into credentials
err = json.NewDecoder(r.Body).Decode(&creds)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
// var usr = creds.Username
// var bytePassword = []byte(creds.Password)
username := creds.Username
password := creds.Password
Role = creds.Role
splitUser := strings.Split(username, "@")
print := splitUser[0]
user1 := fmt.Sprintf("%s@valuelabs.com", print)
user2 := fmt.Sprintf("%s@sls.ads.valuelabs.net", print)
err = client.Auth(user2, password)
if err == nil {
fmt.Println("Success!")
token(w, user1)
} else if err.Error() == "not found" {
fmt.Println("H2")
if errr := client.Auth(user1, password); errr != nil {
fmt.Println("H3")
WriteLogFile(errr)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
} //else {
fmt.Println("Success!")
token(w, user1)
//}
} else {
fmt.Println("H4")
WriteLogFile(err)
w.WriteHeader(http.StatusUnauthorized)
error.Code = "401"
error.Message = "Invalid Username or Password"
json.NewEncoder(w).Encode(error)
return
}
}
// Refresh : to generate refresh tokens
func (c *Commander) Refresh(w http.ResponseWriter, r *http.Request) {
fmt.Println("123")
SetupResponse(&w, r)
if (*r).Method == "OPTIONS" {
w.Header().Set("Access-Control-Max-Age", "86400")
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Content-Type", "application/json")
claims := &model.Claims{}
type refresh struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
fmt.Println("123")
var ref refresh
err := json.NewDecoder(r.Body).Decode(&ref)
if err != nil {
// If the structure of the body is wrong, return an HTTP error
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
return
}
var refToken = ref.RefreshToken
var accToken = ref.AccessToken
fmt.Println(refToken)
fmt.Println(accToken)
db := database.DbConn()
defer db.Close()
selDB, err := db.Query("SELECT username from refresh_token where access_token=? and refresh_token=? and is_active='1'", accToken, refToken)
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
defer selDB.Close()
if selDB.Next() == false {
w.WriteHeader(http.StatusUnauthorized)
return
} // else {
err = selDB.Scan(&usr)
if err != nil {
WriteLogFile(err)
w.WriteHeader(http.StatusBadRequest)
}
w.WriteHeader(http.StatusCreated)
updDB, err := db.Prepare("UPDATE refresh_token SET is_active=? WHERE username=? and refresh_token=?")
if err != nil {
WriteLogFile(err)
panic(err.Error())
}
updDB.Exec(0, usr, refToken)
defer updDB.Close()
//}
// Now, create a new token for the current use, with a renewed expiration time
// expirationTime := time.Now().Add(3600 * time.Second)
// claims.ExpiresAt = expirationTime.Unix()
// token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// tokenStringRefresh, err := token.SignedString(jwtKey)
// if err != nil {
// w.WriteHeader(http.StatusInternalServerError)
// return
// }
expirationTime := time.Now().Add(3600 * time.Second).Unix()
claims = &model.Claims{
Username: usr,
StandardClaims: jwt.StandardClaims{
ExpiresAt: expirationTime,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Create the JWT string
tokenStringRefresh, err := token.SignedString(jwtKey)
if err != nil {
WriteLogFile(err)
// If there is an error in creating the JWT return an internal server error
w.WriteHeader(http.StatusInternalServerError)
return
}
rand.Seed(time.Now().UnixNano())
chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ_-&" +
|
"abcdefghijklmnopqrstuvwxyz%*" +
"0123456789")
|
random_line_split
|
|
shotsAlarm_asynchronous_with_hue.py
|
.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
elif lrColor == self.lblue:
lrCommand = {'transitiontime': tTime, 'xy': self.green, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
elif lrColor == self.green:
lrCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
elif lrColor == self.yellow:
lrCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
else:
lrCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
self.updateLR(lrCommand)
self.updateDoor(doorCommand)
self.updateHW(hwCommand)
self.updateKitchen(kitchenCommand)
class DisplayController:
def __init__(self, master, queue, alarmCancelCommand, GUIhideCommand, GUIshowCommand):
self.queue = queue
self.alarmCancel = alarmCancelCommand
self.GUIhide = GUIhideCommand
self.GUIshow = GUIshowCommand
self.seconds_var = tkinter.StringVar()
self.seconds_var.set("(HIDDEN)")
# Set up the GUI
self.frame = ttk.Frame(master, padding="5 5")
if fullscreen:
self.frame.master.attributes('-fullscreen', True)
self.frame.master = master # XXX
ttk.Label(self.frame, textvariable=self.seconds_var, font=("Courier", 100, "bold")).grid(row=1, column=1)
ttk.Button(self.frame, text='Cancel', command=self.cancel).grid(row=2, column=1, sticky="s")
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
self.frame.grid_rowconfigure(1, weight=1)
self.frame.grid_columnconfigure(1, weight=1)
self.frame.grid_rowconfigure(1, minsize=root.winfo_screenheight() / 2)
self.frame.grid()
self.frame.master.protocol("WM_DELETE_WINDOW", self.cancel)
def processIncoming(self, cdLen, goHold, songLen):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize():
try:
count = self.queue.get(0)
# did we actually send something in the queue
if not count == None:
# show GUI
self.GUIshow()
# countdown stage
if (count < cdLen):
self.seconds_var.set("SHOTS IN: {}".format(cdLen - count))
# GO!! stage
else:
# turn strobe on
strobe.on()
# alternate between GO and blank
if (count % 2):
self.seconds_var.set("GO!! GO!! GO!!")
else:
self.seconds_var.set("")
else: # count == None
# hide GUI
self.GUIhide()
self.seconds_var.set("(HIDDEN)")
# turn off strobe
strobe.off()
except Queue.Empty:
# just on general principles, although we don't
# expect this branch to be taken in this case
pass
def cancel(self):
"""Cancel callback, hide."""
self.alarmCancel()
#######################
## Thread Management ##
#######################
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master, user, song, cdLen, goHold):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
# GUI will be visible window after init
self.master = master
# get window size
self.w = self.master.winfo_screenwidth()
self.h = self.master.winfo_screenheight()
# GUI will be visible after tkinter.Tk()
# hide the GUI window for now
self.guiVisible = 1
self.GUIhide()
# keep track of whether alarm is active or not
self.shotsFired = 0
# keep track of whether we have flashed hue
self.flashed = 0
self.flashed2 = 0
# keep track of seconds into a given track
self.count = 0
# What song are we going to play??
self.song = song
# keep track of length (sec) of selected song
# this will be assigned at alarmActivate()
self.songLength = 0
# song countdown length
# this is assigned by init call
self.cdLen = cdLen
# how long to display "GO!!"
# this is assigned by init call
self.goHold = goHold
# Create the queue
self.queue = Queue.Queue()
# Create a lock to access shared resources amongst threads
self.lock = threading.Lock()
# Set up the GUIPart
# we pass it the master (root), the queue, the endApplication function, and the hide / show functions
self.gui = DisplayController(master, self.queue, self.alarmCancel, self.GUIhide, self.GUIshow)
# Set up the Spotify instance
self.mySpotipy = ASpotipy(user, Private.CLIENT_ID, Private.CLIENT_SECRET, Private.REDIRECT_URI)
# setup hue
self.myHue = hueControl()
# Set up the thread to do asynchronous I/O
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
self.thread1.start()
self.thread2 = threading.Thread(target=self.workerThread2)
self.thread2.start()
self.thread3 = threading.Thread(target=self.workerThread3)
self.thread3.start()
self.thread4 = threading.Thread(target=self.workerThread4)
self.thread4.start()
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall()
###########################################
## Periodic Update Function (root.after) ##
###########################################
def periodicCall(self):
"""
Check every 200 ms if there is something new in the queue.
"""
self.gui.processIncoming(self.cdLen, self.goHold, self.songLength)
if not self.running:
# This is the brutal stop of the system.
# should do some cleanup before actually shutting it down.
import sys
sys.exit(1)
self.master.after(200, self.periodicCall)
###########################################
## Worker Threads (for asynchronous I/O) ##
###########################################
def workerThread1(self): # ORIGINAL-WORKING
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select( )'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
# make sure we have access to shared resource
with self.lock:
# set count to 0 if this is our first run through
self.count = 0
while self.running:
# make sure we have access to shared resource
|
with self.lock:
# make sure shots is activated
if self.shotsFired:
# make sure we haven't been counting longer than the song length
if (self.count <= self.songLength):
# update queue with count if countdown stage or go stage
if (self.count <= (self.cdLen + self.goHold)):
self.queue.put(self.count)
self.count += 1
else: # not in countdown stage or go stage
self.queue.put(None)
else: # song has ended
self.alarmCancel()
else: # shots not fired
pass
time.sleep(1)
|
conditional_block
|
|
shotsAlarm_asynchronous_with_hue.py
|
, trackContext]
def playWithContext(self, context):
# double check that we are logged in
self.spLogin()
# double check to make sure we have a context URI
# if we do, go ahead and play with context
if context[2]:
self.sp.start_playback(None, context[2], None, {"uri": context[1]})
# if we don't have a context URI, just go back to the song
else:
self.playNoContext(context[1])
# we can then seek to song progress regardless of context URI
self.sp.seek_track(context[0])
def volumeUp(self):
self.spLogin()
self.sp.volume(88)
def volumeDown(self):
self.spLogin()
self.sp.volume(78)
class hueControl:
def __init__(self):
self.cIntensity = 175
self.fIntensity = 254
self.nIntensity = 128
# self.tTime = 50
self.nDelay = 5
self.red = [0.6901, 0.3076]
self.magenta = [0.4343, 0.1936]
self.blue = [0.1541, 0.0836]
self.lblue = [0.1695, 0.3364]
self.green = [0.2073, 0.6531]
self.yellow = [0.4898, 0.4761]
self.orange = [0.5706, 0.4078]
self.b = Bridge('10.142.1.114')
self.b.get_api()
def updateLR(self, command):
|
def updateDoor(self, command):
self.b.set_group(5, command)
def updateHW(self, command):
self.b.set_group(6, command)
def updateKitchen(self, command):
self.b.set_group(2, command)
def flashLights(self, color, delay, seconds):
command = {'transitiontime': 1, 'xy': color, 'bri': self.fIntensity}
self.b.set_group(0, command)
for i in range(1, (seconds + 1)):
command = {'transitiontime': 1, 'on': False}
self.b.set_group(0, command)
time.sleep(delay)
command = {'transitiontime': 1, 'on': True, 'bri': self.fIntensity}
self.b.set_group(0, command)
time.sleep(delay)
def advanceAsOne(self, tTime):
lrColor = self.b.get_light(10, 'xy')
if lrColor == self.red:
lrCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
elif lrColor == self.magenta:
lrCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
elif lrColor == self.blue:
lrCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
elif lrColor == self.lblue:
lrCommand = {'transitiontime': tTime, 'xy': self.green, 'bri': self.nIntensity}
elif lrColor == self.green:
lrCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
elif lrColor == self.yellow:
lrCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
else:
lrCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
self.b.set_group(0, lrCommand)
def advanceLights(self, tTime):
lrColor = self.b.get_light(10, 'xy')
if lrColor == self.red:
lrCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.green, 'bri': self.nIntensity}
elif lrColor == self.magenta:
lrCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.green, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
elif lrColor == self.blue:
lrCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.green, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
elif lrColor == self.lblue:
lrCommand = {'transitiontime': tTime, 'xy': self.green, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
elif lrColor == self.green:
lrCommand = {'transitiontime': tTime, 'xy': self.yellow, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
elif lrColor == self.yellow:
lrCommand = {'transitiontime': tTime, 'xy': self.orange, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
else:
lrCommand = {'transitiontime': tTime, 'xy': self.red, 'bri': self.nIntensity}
doorCommand = {'transitiontime': tTime, 'xy': self.magenta, 'bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
self.updateLR(lrCommand)
self.updateDoor(doorCommand)
self.updateHW(hwCommand)
self.updateKitchen(kitchenCommand)
class DisplayController:
def __init__(self, master, queue, alarmCancelCommand, GUIhideCommand, GUIshowCommand):
self.queue = queue
self.alarmCancel = alarmCancelCommand
self.GUIhide = GUIhideCommand
self.GUIshow = GUIshowCommand
self.seconds_var = tkinter.StringVar()
self.seconds_var.set("(HIDDEN)")
# Set up the GUI
self.frame = ttk.Frame(master, padding="5 5")
if fullscreen:
self.frame.master.attributes('-fullscreen', True)
self.frame.master = master # XXX
ttk.Label(self.frame, textvariable=self.seconds_var, font=("Courier", 100, "bold")).grid(row=1, column=1)
ttk.Button(self.frame, text='Cancel', command=self.cancel).grid(row=2, column=1, sticky="s")
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
self.frame.grid_rowconfigure(1, weight=1)
self.frame.grid_columnconfigure(1, weight=1)
self.frame.grid_rowconfigure(1, minsize=root.winfo_screenheight() / 2)
self.frame.grid()
self.frame.master.protocol("WM_DELETE_WINDOW", self.cancel)
def processIncoming(self, cdLen, goHold, songLen):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize():
try:
count = self.queue.get(0)
# did we actually send something in the queue
if not count == None:
# show GUI
self.GUIshow()
# countdown stage
if (count < cdLen):
self.seconds_var.set("SHOTS IN
|
self.b.set_group(4, command)
|
identifier_body
|
shotsAlarm_asynchronous_with_hue.py
|
() / 2)
self.frame.grid()
self.frame.master.protocol("WM_DELETE_WINDOW", self.cancel)
def processIncoming(self, cdLen, goHold, songLen):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize():
try:
count = self.queue.get(0)
# did we actually send something in the queue
if not count == None:
# show GUI
self.GUIshow()
# countdown stage
if (count < cdLen):
self.seconds_var.set("SHOTS IN: {}".format(cdLen - count))
# GO!! stage
else:
# turn strobe on
strobe.on()
# alternate between GO and blank
if (count % 2):
self.seconds_var.set("GO!! GO!! GO!!")
else:
self.seconds_var.set("")
else: # count == None
# hide GUI
self.GUIhide()
self.seconds_var.set("(HIDDEN)")
# turn off strobe
strobe.off()
except Queue.Empty:
# just on general principles, although we don't
# expect this branch to be taken in this case
pass
def cancel(self):
"""Cancel callback, hide."""
self.alarmCancel()
#######################
## Thread Management ##
#######################
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master, user, song, cdLen, goHold):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
# GUI will be visible window after init
self.master = master
# get window size
self.w = self.master.winfo_screenwidth()
self.h = self.master.winfo_screenheight()
# GUI will be visible after tkinter.Tk()
# hide the GUI window for now
self.guiVisible = 1
self.GUIhide()
# keep track of whether alarm is active or not
self.shotsFired = 0
# keep track of whether we have flashed hue
self.flashed = 0
self.flashed2 = 0
# keep track of seconds into a given track
self.count = 0
# What song are we going to play??
self.song = song
# keep track of length (sec) of selected song
# this will be assigned at alarmActivate()
self.songLength = 0
# song countdown length
# this is assigned by init call
self.cdLen = cdLen
# how long to display "GO!!"
# this is assigned by init call
self.goHold = goHold
# Create the queue
self.queue = Queue.Queue()
# Create a lock to access shared resources amongst threads
self.lock = threading.Lock()
# Set up the GUIPart
        # we pass it the master (root), the queue, the alarmCancel function, and the hide / show functions
self.gui = DisplayController(master, self.queue, self.alarmCancel, self.GUIhide, self.GUIshow)
# Set up the Spotify instance
self.mySpotipy = ASpotipy(user, Private.CLIENT_ID, Private.CLIENT_SECRET, Private.REDIRECT_URI)
# setup hue
self.myHue = hueControl()
# Set up the thread to do asynchronous I/O
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
self.thread1.start()
self.thread2 = threading.Thread(target=self.workerThread2)
self.thread2.start()
self.thread3 = threading.Thread(target=self.workerThread3)
self.thread3.start()
self.thread4 = threading.Thread(target=self.workerThread4)
self.thread4.start()
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall()
###########################################
## Periodic Update Function (root.after) ##
###########################################
def periodicCall(self):
"""
Check every 200 ms if there is something new in the queue.
"""
self.gui.processIncoming(self.cdLen, self.goHold, self.songLength)
if not self.running:
# This is the brutal stop of the system.
# should do some cleanup before actually shutting it down.
import sys
sys.exit(1)
self.master.after(200, self.periodicCall)
###########################################
## Worker Threads (for asynchronous I/O) ##
###########################################
def workerThread1(self): # ORIGINAL-WORKING
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select( )'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
# make sure we have access to shared resource
with self.lock:
# set count to 0 if this is our first run through
self.count = 0
while self.running:
# make sure we have access to shared resource
with self.lock:
# make sure shots is activated
if self.shotsFired:
# make sure we haven't been counting longer than the song length
if (self.count <= self.songLength):
# update queue with count if countdown stage or go stage
if (self.count <= (self.cdLen + self.goHold)):
self.queue.put(self.count)
self.count += 1
else: # not in countdown stage or go stage
self.queue.put(None)
else: # song has ended
self.alarmCancel()
else: # shots not fired
pass
time.sleep(1)
# runs once an hour to make sure
# count doesn't get too big
def workerThread2(self):
while self.running:
time.sleep(3600)
if self.count >= 3600:
# make sure we have access to shared resource
with self.lock:
self.count = 0
def workerThread3(self):
while self.running:
if self.shotsFired and not self.flashed:
time.sleep(0.2)
self.flashed = 1
self.myHue.flashLights(self.myHue.red, 1, 5)
elif self.shotsFired and self.flashed and self.count >= 6 and self.count <= self.cdLen:
# self.myHue.advanceLights(1)
self.myHue.advanceAsOne(1)
time.sleep(1)
elif self.shotsFired and self.flashed and not self.flashed2 and self.count >= self.cdLen:
print("green")
self.flashed2 = 1
self.myHue.flashLights(self.myHue.green, 1, 5)
else:
time.sleep(0.2)
def workerThread4(self):
while self.running:
if not self.shotsFired:
self.myHue.advanceLights(50)
time.sleep(10)
####################
## GUI Visibility ##
####################
# hides the GUI window
def GUIhide(self):
if self.guiVisible:
# hide the root window
self.master.withdraw()
# remove root window border and title bar
self.master.overrideredirect(1)
# update to ensure changes are reflected
self.master.update()
# keep track of gui visibility
self.guiVisible = 0
# reveals the GUI window
def GUIshow(self):
if not self.guiVisible:
self.master.update()
self.master.deiconify()
if fullscreen:
self.master.geometry("{}x{}+0+0".format(self.w, self.h))
# keep track of gui visibility
self.guiVisible = 1
##########################
## PullStation Tracking ##
##########################
def alarmActivate(self):
print("alarm activated")
# PULL SPOTIFY DATA
# make sure we can get a token or refresh
if self.mySpotipy.spLogin():
# set hue flashed to 0
self.flashed = 0
self.flashed2 = 0
# turn on strobe
# strobe.on()
# save our current spot
self.mySpot = self.mySpotipy.saveSpot()
print(self.mySpot)
# get the length of the new song
self.songLength = self.mySpotipy.getSongLength(self.song)
print(self.seconds2string(self.songLength))
            # keep track of whether or not we are running Shots
self.shotsFired = 1
# play our desired song
self.mySpotipy.playNoContext(self.song)
# CRANK IT UP
self.mySpotipy.volumeUp()
else: # couldn't log in
print("ERROR: CAN'T GET SPOTIFY TOKEN")
# keep track of alarm activation
self.shotsFired = 1
# make sure we have access to shared resource
|
with self.lock:
self.count = 0
def alarmCancel(self):
|
random_line_split
|
|
shotsAlarm_asynchronous_with_hue.py
|
bri': self.nIntensity}
hwCommand = {'transitiontime': tTime, 'xy': self.blue, 'bri': self.nIntensity}
kitchenCommand = {'transitiontime': tTime, 'xy': self.lblue, 'bri': self.nIntensity}
self.updateLR(lrCommand)
self.updateDoor(doorCommand)
self.updateHW(hwCommand)
self.updateKitchen(kitchenCommand)
class DisplayController:
def __init__(self, master, queue, alarmCancelCommand, GUIhideCommand, GUIshowCommand):
self.queue = queue
self.alarmCancel = alarmCancelCommand
self.GUIhide = GUIhideCommand
self.GUIshow = GUIshowCommand
self.seconds_var = tkinter.StringVar()
self.seconds_var.set("(HIDDEN)")
# Set up the GUI
self.frame = ttk.Frame(master, padding="5 5")
if fullscreen:
self.frame.master.attributes('-fullscreen', True)
self.frame.master = master # XXX
ttk.Label(self.frame, textvariable=self.seconds_var, font=("Courier", 100, "bold")).grid(row=1, column=1)
ttk.Button(self.frame, text='Cancel', command=self.cancel).grid(row=2, column=1, sticky="s")
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
self.frame.grid_rowconfigure(1, weight=1)
self.frame.grid_columnconfigure(1, weight=1)
self.frame.grid_rowconfigure(1, minsize=root.winfo_screenheight() / 2)
self.frame.grid()
self.frame.master.protocol("WM_DELETE_WINDOW", self.cancel)
def processIncoming(self, cdLen, goHold, songLen):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize():
try:
count = self.queue.get(0)
# did we actually send something in the queue
                if count is not None:
# show GUI
self.GUIshow()
# countdown stage
if (count < cdLen):
self.seconds_var.set("SHOTS IN: {}".format(cdLen - count))
# GO!! stage
else:
# turn strobe on
strobe.on()
# alternate between GO and blank
if (count % 2):
self.seconds_var.set("GO!! GO!! GO!!")
else:
self.seconds_var.set("")
else: # count == None
# hide GUI
self.GUIhide()
self.seconds_var.set("(HIDDEN)")
# turn off strobe
strobe.off()
except Queue.Empty:
# just on general principles, although we don't
# expect this branch to be taken in this case
pass
def cancel(self):
"""Cancel callback, hide."""
self.alarmCancel()
#######################
## Thread Management ##
#######################
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master, user, song, cdLen, goHold):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
        # GUI window will be visible after init
self.master = master
# get window size
self.w = self.master.winfo_screenwidth()
self.h = self.master.winfo_screenheight()
# GUI will be visible after tkinter.Tk()
# hide the GUI window for now
self.guiVisible = 1
self.GUIhide()
# keep track of whether alarm is active or not
self.shotsFired = 0
# keep track of whether we have flashed hue
self.flashed = 0
self.flashed2 = 0
# keep track of seconds into a given track
self.count = 0
# What song are we going to play??
self.song = song
# keep track of length (sec) of selected song
# this will be assigned at alarmActivate()
self.songLength = 0
# song countdown length
# this is assigned by init call
self.cdLen = cdLen
# how long to display "GO!!"
# this is assigned by init call
self.goHold = goHold
# Create the queue
self.queue = Queue.Queue()
# Create a lock to access shared resources amongst threads
self.lock = threading.Lock()
# Set up the GUIPart
        # we pass it the master (root), the queue, the alarmCancel function, and the hide / show functions
self.gui = DisplayController(master, self.queue, self.alarmCancel, self.GUIhide, self.GUIshow)
# Set up the Spotify instance
self.mySpotipy = ASpotipy(user, Private.CLIENT_ID, Private.CLIENT_SECRET, Private.REDIRECT_URI)
# setup hue
self.myHue = hueControl()
# Set up the thread to do asynchronous I/O
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
self.thread1.start()
self.thread2 = threading.Thread(target=self.workerThread2)
self.thread2.start()
self.thread3 = threading.Thread(target=self.workerThread3)
self.thread3.start()
self.thread4 = threading.Thread(target=self.workerThread4)
self.thread4.start()
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall()
###########################################
## Periodic Update Function (root.after) ##
###########################################
def periodicCall(self):
"""
Check every 200 ms if there is something new in the queue.
"""
self.gui.processIncoming(self.cdLen, self.goHold, self.songLength)
if not self.running:
# This is the brutal stop of the system.
# should do some cleanup before actually shutting it down.
import sys
sys.exit(1)
self.master.after(200, self.periodicCall)
###########################################
## Worker Threads (for asynchronous I/O) ##
###########################################
def workerThread1(self): # ORIGINAL-WORKING
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select( )'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
# make sure we have access to shared resource
with self.lock:
# set count to 0 if this is our first run through
self.count = 0
while self.running:
# make sure we have access to shared resource
with self.lock:
# make sure shots is activated
if self.shotsFired:
# make sure we haven't been counting longer than the song length
if (self.count <= self.songLength):
# update queue with count if countdown stage or go stage
if (self.count <= (self.cdLen + self.goHold)):
self.queue.put(self.count)
self.count += 1
else: # not in countdown stage or go stage
self.queue.put(None)
else: # song has ended
self.alarmCancel()
else: # shots not fired
pass
time.sleep(1)
# runs once an hour to make sure
# count doesn't get too big
def workerThread2(self):
while self.running:
time.sleep(3600)
if self.count >= 3600:
# make sure we have access to shared resource
with self.lock:
self.count = 0
def workerThread3(self):
while self.running:
if self.shotsFired and not self.flashed:
time.sleep(0.2)
self.flashed = 1
self.myHue.flashLights(self.myHue.red, 1, 5)
elif self.shotsFired and self.flashed and self.count >= 6 and self.count <= self.cdLen:
# self.myHue.advanceLights(1)
self.myHue.advanceAsOne(1)
time.sleep(1)
elif self.shotsFired and self.flashed and not self.flashed2 and self.count >= self.cdLen:
print("green")
self.flashed2 = 1
self.myHue.flashLights(self.myHue.green, 1, 5)
else:
time.sleep(0.2)
def workerThread4(self):
while self.running:
if not self.shotsFired:
self.myHue.advanceLights(50)
time.sleep(10)
####################
## GUI Visibility ##
####################
# hides the GUI window
def GUIhide(self):
if self.guiVisible:
# hide the root window
self.master.withdraw()
# remove root window border and title bar
self.master.overrideredirect(1)
# update to ensure changes are reflected
self.master.update()
# keep track of gui visibility
self.guiVisible = 0
# reveals the GUI window
def
|
GUIshow
|
identifier_name
|
|
spark-recordcount.py
|
import csv
import json
import math
import sys
from datetime import datetime
import pybgpstream
try:
    import urllib.request as urllib_request
except ImportError:
    import urllib2 as urllib_request
from pyspark import SparkConf, SparkContext
# Output one data point per day
RESULT_GRANULARITY = 3600*24
# When processing RIBs, split days into 4hr chunks for RV, 8hrs for RIS
RV_RIB_PROCESSING_GRANULARITY = 3600*4
RIS_RIB_PROCESSING_GRANULARITY = 3600*8
# When processing updates, split days into 2hr chunks
UPD_PROCESSING_GRANULARITY = 3600*2
# The BGPStream broker service URL to query to get collector list from
COLLECTORS_URL = "http://bgpstream.caida.org/broker/meta/collectors"
# We only care about the two major projects
PROJECTS = ('routeviews', 'ris')
# Query the BGPStream broker and identify the collectors that are available
def get_collectors():
response = urllib_request.urlopen(COLLECTORS_URL)
data = json.load(response)
results = []
for coll in data['data']['collectors']:
if data['data']['collectors'][coll]['project'] in PROJECTS:
results.append(coll)
return results
# takes a record and an elem and builds a peer signature tuple that is globally
# unique.
def peer_signature(record, elem):
return record.project, record.collector, elem.peer_asn, elem.peer_address
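# e.g. (illustrative values only) a signature might look like
# ('routeviews', 'route-views2', 3356, '203.0.113.1'); because the peer address is
# part of the tuple, the same ASN peering from two addresses is counted separately.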
def run_bgpstream(args):
(collector, start_time, end_time, data_type) = args
# initialize and configure BGPStream
stream = pybgpstream.BGPStream(
collector=collector,
from_time=start_time,
until_time=end_time-1,
record_type=data_type
)
# per-peer data
peers_data = {}
# loop over all records in the stream
for rec in stream.records():
# to track the peers that have elems in this record
peer_signatures = set()
# loop over all elems in the record
for elem in rec:
# create a peer signature for this elem
sig = peer_signature(rec, elem)
peer_signatures.add(sig)
# if this is the first time we have ever seen this peer, create
# an empty result: (elem_cnt, peer_record_cnt, coll_record_cnt)
if sig not in peers_data:
peers_data[sig] = [0, 0, 0]
peers_data[sig][0] += 1 # increment elem cnt for this peer
# done with elems, increment the 'coll_record_cnt' field for just
# one peer that was present in this record (allows a true, per-collector
# count of records since each record can contain elems for many peers)
if len(peer_signatures):
first = True
for sig in peer_signatures: # increment peer_record_cnt for all
if first:
|
peers_data[sig][1] += 1
# the time in the output row is truncated down to a multiple of
# RESULT_GRANULARITY so that slices can be merged correctly
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# for each peer that we processed data for, create an output row
return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
# takes a start time, an end time, and a partition length and splits the time
# range up into slices, each of len seconds. the interval is assumed to be a
# multiple of the len
def partition_time(start_time, end_time, len):
slices = []
while start_time < end_time:
slices.append((start_time, start_time+len))
start_time += len
return slices
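# e.g. partition_time(0, 3*3600, 3600) returns [(0, 3600), (3600, 7200), (7200, 10800)]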
# takes two result tuples, each of the format:
# (elem_cnt, peer_record_cnt, coll_record_cnt)
# and returns a single result tuple which is the sum of the two inputs.
# len(result_x) is assumed to be the same length as len(result_y)
def merge_results(result_x, result_y):
return [result_x[i] + result_y[i] for i in range(0, len(result_x))]
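# e.g. merge_results([3, 1, 1], [2, 1, 0]) returns [5, 2, 1]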
# takes a result row:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_collector(row):
return (row[0][0], row[0][1]), row[1]
# takes a result row:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_time(row):
return (row[0][0]), row[1]
def analyze(start_time, end_time, data_type, outdir,
collector=None, num_cores=None, memory=None):
# round start time down to nearest day
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# round end time up to nearest day
rounded = int(math.floor(end_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
if rounded != end_time:
end_time = rounded + RESULT_GRANULARITY
# generate a list of time slices to process
time_slices = partition_time(start_time, end_time, RESULT_GRANULARITY)
start_str = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%d')
end_str = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%d')
# establish the spark context
conf = SparkConf()\
.setAppName("ElemCounter.%s.%s-%s" % (data_type, start_str, end_str))\
.set("spark.files.overwrite", "true")
if memory:
conf.set("spark.executor.memory", str(memory)+"g")
sc = SparkContext(conf=conf)
# either use the collector argument, or default to using all collectors
# that the BGPStream broker knows about
collectors = [collector]
if not collector:
collectors = get_collectors()
# build our input for spark -- a set of BGPStream configurations to process
# in parallel
bs_configs = []
for time_slice in time_slices:
for collector in collectors:
(start, end) = time_slice
while start < end:
duration = UPD_PROCESSING_GRANULARITY
                if data_type == 'ribs':
if 'rrc' in collector:
duration = RIS_RIB_PROCESSING_GRANULARITY
else:
duration = RV_RIB_PROCESSING_GRANULARITY
slice_end = min(start+duration, end)
bs_configs.append((collector, start, slice_end, data_type))
start += duration
# debugging
sys.stderr.write(str(bs_configs) + "\n")
# we need to instruct spark to slice up our input more aggressively than
# it normally would since we know that each row will take some time to
# process. to do this we either use 4x the number of cores available,
# or we split once per row. Once per row will be most efficient, but we
    # have seen problems with the JVM exploding when numSlices is huge (it
# tries to create thousands of threads...)
slice_cnt = len(bs_configs)
if num_cores:
slice_cnt = num_cores*4
# instruct spark to create an RDD from our BGPStream config list
bs_rdd = sc.parallelize(bs_configs, numSlices=slice_cnt)
# step 1: use BGPStream to process BGP data
# output will be a list:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# the peer and collector record counts are separate as a single record
# may have data for multiple peers, thus naively summing the per-peer
# record counts would yield incorrect results
raw_results = bs_rdd.flatMap(run_bgpstream)
# since we split the processing by time, there will be several rows for
# each peer.
reduced_time_collector_peer = raw_results.reduceByKey(merge_results)
# we will use this result multiple times, so persist it
reduced_time_collector_peer.persist()
# collect the reduced time-collector-peer results back to the driver
# we take results that are in the form:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# (time, collector, peer) => (elem_cnt, peer_record_cnt)
final_time_collector_peer = reduced_time_collector_peer\
.mapValues(lambda x: [x[0], x[1]]).collectAsMap()
# take the time-collector-peer result and map it into a new RDD which
# is time-collector. after the 'map' stage there will be duplicate
# time-collector keys, so perform a reduction as we did before
reduced_time_collector = reduced_time_collector_peer\
|
peers_data[sig][2] += 1 # increment the coll_record_cnt
first = False
|
conditional_block
|
spark-recordcount.py
|
this peer
# done with elems, increment the 'coll_record_cnt' field for just
# one peer that was present in this record (allows a true, per-collector
# count of records since each record can contain elems for many peers)
if len(peer_signatures):
first = True
for sig in peer_signatures: # increment peer_record_cnt for all
if first:
peers_data[sig][2] += 1 # increment the coll_record_cnt
first = False
peers_data[sig][1] += 1
# the time in the output row is truncated down to a multiple of
# RESULT_GRANULARITY so that slices can be merged correctly
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# for each peer that we processed data for, create an output row
return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
# takes a start time, an end time, and a partition length and splits the time
# range up into slices, each of len seconds. the interval is assumed to be a
# multiple of the len
def partition_time(start_time, end_time, len):
slices = []
while start_time < end_time:
slices.append((start_time, start_time+len))
start_time += len
return slices
# takes two result tuples, each of the format:
# (elem_cnt, peer_record_cnt, coll_record_cnt)
# and returns a single result tuple which is the sum of the two inputs.
# len(result_x) is assumed to be the same length as len(result_y)
def merge_results(result_x, result_y):
return [result_x[i] + result_y[i] for i in range(0, len(result_x))]
# takes a result row:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_collector(row):
return (row[0][0], row[0][1]), row[1]
# takes a result row:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_time(row):
return (row[0][0]), row[1]
def analyze(start_time, end_time, data_type, outdir,
collector=None, num_cores=None, memory=None):
# round start time down to nearest day
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# round end time up to nearest day
rounded = int(math.floor(end_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
if rounded != end_time:
end_time = rounded + RESULT_GRANULARITY
# generate a list of time slices to process
time_slices = partition_time(start_time, end_time, RESULT_GRANULARITY)
start_str = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%d')
end_str = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%d')
# establish the spark context
conf = SparkConf()\
.setAppName("ElemCounter.%s.%s-%s" % (data_type, start_str, end_str))\
.set("spark.files.overwrite", "true")
if memory:
conf.set("spark.executor.memory", str(memory)+"g")
sc = SparkContext(conf=conf)
# either use the collector argument, or default to using all collectors
# that the BGPStream broker knows about
collectors = [collector]
if not collector:
collectors = get_collectors()
# build our input for spark -- a set of BGPStream configurations to process
# in parallel
bs_configs = []
for time_slice in time_slices:
for collector in collectors:
(start, end) = time_slice
while start < end:
duration = UPD_PROCESSING_GRANULARITY
                if data_type == 'ribs':
if 'rrc' in collector:
duration = RIS_RIB_PROCESSING_GRANULARITY
else:
duration = RV_RIB_PROCESSING_GRANULARITY
slice_end = min(start+duration, end)
bs_configs.append((collector, start, slice_end, data_type))
start += duration
# debugging
sys.stderr.write(str(bs_configs) + "\n")
# we need to instruct spark to slice up our input more aggressively than
# it normally would since we know that each row will take some time to
# process. to do this we either use 4x the number of cores available,
# or we split once per row. Once per row will be most efficient, but we
    # have seen problems with the JVM exploding when numSlices is huge (it
# tries to create thousands of threads...)
slice_cnt = len(bs_configs)
if num_cores:
slice_cnt = num_cores*4
# instruct spark to create an RDD from our BGPStream config list
bs_rdd = sc.parallelize(bs_configs, numSlices=slice_cnt)
# step 1: use BGPStream to process BGP data
# output will be a list:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# the peer and collector record counts are separate as a single record
# may have data for multiple peers, thus naively summing the per-peer
# record counts would yield incorrect results
raw_results = bs_rdd.flatMap(run_bgpstream)
# since we split the processing by time, there will be several rows for
# each peer.
reduced_time_collector_peer = raw_results.reduceByKey(merge_results)
# we will use this result multiple times, so persist it
reduced_time_collector_peer.persist()
# collect the reduced time-collector-peer results back to the driver
# we take results that are in the form:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# (time, collector, peer) => (elem_cnt, peer_record_cnt)
final_time_collector_peer = reduced_time_collector_peer\
.mapValues(lambda x: [x[0], x[1]]).collectAsMap()
# take the time-collector-peer result and map it into a new RDD which
# is time-collector. after the 'map' stage there will be duplicate
# time-collector keys, so perform a reduction as we did before
reduced_time_collector = reduced_time_collector_peer\
.map(map_per_collector).reduceByKey(merge_results)
reduced_time_collector.persist()
# collect the reduced time-collector results back to the driver
# we take results that are in the form:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# (time, collector) => (elem_cnt, coll_record_cnt)
final_time_collector = reduced_time_collector\
.mapValues(lambda x: [x[0], x[2]]).collectAsMap()
# take the time-collector result and map it into a new RDD which is keyed
# by time only (i.e. a global view). again we need to reduce after the map
# stage.
reduced_time = reduced_time_collector.map(map_per_time)\
.reduceByKey(merge_results)
# collect the reduced time-only results back to the driver
# we take results that are in the form:
# (time, (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# time => (elem_cnt, coll_record_cnt)
final_time = reduced_time.mapValues(lambda x: [x[0], x[2]]).collectAsMap()
# build the output file name
outfile = "%s/bgpstream-recordcounter.%s.%s-%s.csv" %\
(outdir, data_type, start_str, end_str)
with open(outfile, 'wb') as csvfile:
w = csv.writer(csvfile)
w.writerow(["Time", "Collector", "Peer", "#Elems", "#Records"])
# write out the per-peer statistics
for key in final_time_collector_peer:
(ts, coll, peer) = key
(elems, records) = final_time_collector_peer[key]
w.writerow([ts, coll, "AS"+str(peer[2])+"-"+peer[3],
elems, records])
# write out the per-collector statistics
for key in final_time_collector:
(ts, coll) = key
(elems, records) = final_time_collector[key]
w.writerow([ts, coll, "ALL-PEERS", elems, records])
# write out the global statistics
for key in final_time:
(ts) = key
(elems, records) = final_time[key]
w.writerow([ts, "ALL-COLLECTORS", "ALL-PEERS", elems, records])
reduced_time_collector_peer.unpersist()
reduced_time_collector.unpersist()
sc.stop()
return
def
|
main
|
identifier_name
|
|
spark-recordcount.py
|
import csv
import json
import math
import sys
from datetime import datetime
import pybgpstream
try:
    import urllib.request as urllib_request
except ImportError:
    import urllib2 as urllib_request
from pyspark import SparkConf, SparkContext
# Output one data point per day
RESULT_GRANULARITY = 3600*24
# When processing RIBs, split days into 4hr chunks for RV, 8hrs for RIS
RV_RIB_PROCESSING_GRANULARITY = 3600*4
RIS_RIB_PROCESSING_GRANULARITY = 3600*8
# When processing updates, split days into 2hr chunks
UPD_PROCESSING_GRANULARITY = 3600*2
# The BGPStream broker service URL to query to get collector list from
COLLECTORS_URL = "http://bgpstream.caida.org/broker/meta/collectors"
# We only care about the two major projects
PROJECTS = ('routeviews', 'ris')
# Query the BGPStream broker and identify the collectors that are available
def get_collectors():
response = urllib_request.urlopen(COLLECTORS_URL)
data = json.load(response)
results = []
for coll in data['data']['collectors']:
if data['data']['collectors'][coll]['project'] in PROJECTS:
results.append(coll)
return results
# takes a record and an elem and builds a peer signature tuple that is globally
# unique.
def peer_signature(record, elem):
return record.project, record.collector, elem.peer_asn, elem.peer_address
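# e.g. (illustrative values only) ('ris', 'rrc00', 64496, '2001:db8::1')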
def run_bgpstream(args):
(collector, start_time, end_time, data_type) = args
# initialize and configure BGPStream
stream = pybgpstream.BGPStream(
collector=collector,
from_time=start_time,
until_time=end_time-1,
record_type=data_type
)
# per-peer data
peers_data = {}
# loop over all records in the stream
for rec in stream.records():
# to track the peers that have elems in this record
peer_signatures = set()
# loop over all elems in the record
for elem in rec:
# create a peer signature for this elem
sig = peer_signature(rec, elem)
peer_signatures.add(sig)
# if this is the first time we have ever seen this peer, create
# an empty result: (elem_cnt, peer_record_cnt, coll_record_cnt)
if sig not in peers_data:
peers_data[sig] = [0, 0, 0]
peers_data[sig][0] += 1 # increment elem cnt for this peer
# done with elems, increment the 'coll_record_cnt' field for just
# one peer that was present in this record (allows a true, per-collector
# count of records since each record can contain elems for many peers)
if len(peer_signatures):
first = True
for sig in peer_signatures: # increment peer_record_cnt for all
if first:
peers_data[sig][2] += 1 # increment the coll_record_cnt
first = False
peers_data[sig][1] += 1
# the time in the output row is truncated down to a multiple of
# RESULT_GRANULARITY so that slices can be merged correctly
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# for each peer that we processed data for, create an output row
return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
# takes a start time, an end time, and a partition length and splits the time
# range up into slices, each of len seconds. the interval is assumed to be a
# multiple of the len
def partition_time(start_time, end_time, len):
slices = []
while start_time < end_time:
slices.append((start_time, start_time+len))
start_time += len
return slices
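# e.g. partition_time(0, 7200, 3600) returns [(0, 3600), (3600, 7200)]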
# takes two result tuples, each of the format:
# (elem_cnt, peer_record_cnt, coll_record_cnt)
# and returns a single result tuple which is the sum of the two inputs.
# len(result_x) is assumed to be the same length as len(result_y)
def merge_results(result_x, result_y):
return [result_x[i] + result_y[i] for i in range(0, len(result_x))]
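# e.g. merge_results([10, 2, 1], [5, 1, 1]) returns [15, 3, 2]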
# takes a result row:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_collector(row):
return (row[0][0], row[0][1]), row[1]
# takes a result row:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_time(row):
|
def analyze(start_time, end_time, data_type, outdir,
collector=None, num_cores=None, memory=None):
# round start time down to nearest day
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# round end time up to nearest day
rounded = int(math.floor(end_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
if rounded != end_time:
end_time = rounded + RESULT_GRANULARITY
# generate a list of time slices to process
time_slices = partition_time(start_time, end_time, RESULT_GRANULARITY)
start_str = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%d')
end_str = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%d')
# establish the spark context
conf = SparkConf()\
.setAppName("ElemCounter.%s.%s-%s" % (data_type, start_str, end_str))\
.set("spark.files.overwrite", "true")
if memory:
conf.set("spark.executor.memory", str(memory)+"g")
sc = SparkContext(conf=conf)
# either use the collector argument, or default to using all collectors
# that the BGPStream broker knows about
collectors = [collector]
if not collector:
collectors = get_collectors()
# build our input for spark -- a set of BGPStream configurations to process
# in parallel
bs_configs = []
for time_slice in time_slices:
for collector in collectors:
(start, end) = time_slice
while start < end:
duration = UPD_PROCESSING_GRANULARITY
                if data_type == 'ribs':
if 'rrc' in collector:
duration = RIS_RIB_PROCESSING_GRANULARITY
else:
duration = RV_RIB_PROCESSING_GRANULARITY
slice_end = min(start+duration, end)
bs_configs.append((collector, start, slice_end, data_type))
start += duration
# debugging
sys.stderr.write(str(bs_configs) + "\n")
# we need to instruct spark to slice up our input more aggressively than
# it normally would since we know that each row will take some time to
# process. to do this we either use 4x the number of cores available,
# or we split once per row. Once per row will be most efficient, but we
    # have seen problems with the JVM exploding when numSlices is huge (it
# tries to create thousands of threads...)
slice_cnt = len(bs_configs)
if num_cores:
slice_cnt = num_cores*4
# instruct spark to create an RDD from our BGPStream config list
bs_rdd = sc.parallelize(bs_configs, numSlices=slice_cnt)
# step 1: use BGPStream to process BGP data
# output will be a list:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# the peer and collector record counts are separate as a single record
# may have data for multiple peers, thus naively summing the per-peer
# record counts would yield incorrect results
raw_results = bs_rdd.flatMap(run_bgpstream)
# since we split the processing by time, there will be several rows for
# each peer.
reduced_time_collector_peer = raw_results.reduceByKey(merge_results)
# we will use this result multiple times, so persist it
reduced_time_collector_peer.persist()
# collect the reduced time-collector-peer results back to the driver
# we take results that are in the form:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# (time, collector, peer) => (elem_cnt, peer_record_cnt)
final_time_collector_peer = reduced_time_collector_peer\
.mapValues(lambda x: [x[0], x[1]]).collectAsMap()
# take the time-collector-peer result and map it into a new RDD which
# is time-collector. after the 'map' stage there will be duplicate
# time-collector keys, so perform a reduction as we did before
reduced_time_collector = reduced_time_collector_peer\
|
return (row[0][0]), row[1]
|
identifier_body
|
spark-recordcount.py
|
_cnt, peer_record_cnt, coll_record_cnt)
if sig not in peers_data:
peers_data[sig] = [0, 0, 0]
peers_data[sig][0] += 1 # increment elem cnt for this peer
# done with elems, increment the 'coll_record_cnt' field for just
# one peer that was present in this record (allows a true, per-collector
# count of records since each record can contain elems for many peers)
if len(peer_signatures):
first = True
for sig in peer_signatures: # increment peer_record_cnt for all
if first:
peers_data[sig][2] += 1 # increment the coll_record_cnt
first = False
peers_data[sig][1] += 1
# the time in the output row is truncated down to a multiple of
# RESULT_GRANULARITY so that slices can be merged correctly
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# for each peer that we processed data for, create an output row
return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
# takes a start time, an end time, and a partition length and splits the time
# range up into slices, each of len seconds. the interval is assumed to be a
# multiple of the len
def partition_time(start_time, end_time, len):
slices = []
while start_time < end_time:
slices.append((start_time, start_time+len))
start_time += len
return slices
# takes two result tuples, each of the format:
# (elem_cnt, peer_record_cnt, coll_record_cnt)
# and returns a single result tuple which is the sum of the two inputs.
# len(result_x) is assumed to be the same length as len(result_y)
def merge_results(result_x, result_y):
return [result_x[i] + result_y[i] for i in range(0, len(result_x))]
# takes a result row:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_collector(row):
return (row[0][0], row[0][1]), row[1]
# takes a result row:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and returns
# ((time), (elem_cnt, peer_record_cnt, coll_record_cnt))
def map_per_time(row):
return (row[0][0]), row[1]
def analyze(start_time, end_time, data_type, outdir,
collector=None, num_cores=None, memory=None):
# round start time down to nearest day
start_time = \
int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
# round end time up to nearest day
rounded = int(math.floor(end_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)
if rounded != end_time:
end_time = rounded + RESULT_GRANULARITY
# generate a list of time slices to process
time_slices = partition_time(start_time, end_time, RESULT_GRANULARITY)
start_str = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%d')
end_str = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%d')
# establish the spark context
conf = SparkConf()\
.setAppName("ElemCounter.%s.%s-%s" % (data_type, start_str, end_str))\
.set("spark.files.overwrite", "true")
if memory:
conf.set("spark.executor.memory", str(memory)+"g")
sc = SparkContext(conf=conf)
# either use the collector argument, or default to using all collectors
# that the BGPStream broker knows about
collectors = [collector]
if not collector:
collectors = get_collectors()
# build our input for spark -- a set of BGPStream configurations to process
# in parallel
bs_configs = []
for time_slice in time_slices:
for collector in collectors:
(start, end) = time_slice
while start < end:
duration = UPD_PROCESSING_GRANULARITY
                if data_type == 'ribs':
if 'rrc' in collector:
duration = RIS_RIB_PROCESSING_GRANULARITY
else:
duration = RV_RIB_PROCESSING_GRANULARITY
slice_end = min(start+duration, end)
bs_configs.append((collector, start, slice_end, data_type))
start += duration
# debugging
sys.stderr.write(str(bs_configs) + "\n")
# we need to instruct spark to slice up our input more aggressively than
# it normally would since we know that each row will take some time to
# process. to do this we either use 4x the number of cores available,
# or we split once per row. Once per row will be most efficient, but we
    # have seen problems with the JVM exploding when numSlices is huge (it
# tries to create thousands of threads...)
slice_cnt = len(bs_configs)
if num_cores:
slice_cnt = num_cores*4
# instruct spark to create an RDD from our BGPStream config list
bs_rdd = sc.parallelize(bs_configs, numSlices=slice_cnt)
# step 1: use BGPStream to process BGP data
# output will be a list:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# the peer and collector record counts are separate as a single record
# may have data for multiple peers, thus naively summing the per-peer
# record counts would yield incorrect results
raw_results = bs_rdd.flatMap(run_bgpstream)
# since we split the processing by time, there will be several rows for
# each peer.
reduced_time_collector_peer = raw_results.reduceByKey(merge_results)
# we will use this result multiple times, so persist it
reduced_time_collector_peer.persist()
# collect the reduced time-collector-peer results back to the driver
# we take results that are in the form:
# ((time, collector, peer), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# (time, collector, peer) => (elem_cnt, peer_record_cnt)
final_time_collector_peer = reduced_time_collector_peer\
.mapValues(lambda x: [x[0], x[1]]).collectAsMap()
# take the time-collector-peer result and map it into a new RDD which
# is time-collector. after the 'map' stage there will be duplicate
# time-collector keys, so perform a reduction as we did before
reduced_time_collector = reduced_time_collector_peer\
.map(map_per_collector).reduceByKey(merge_results)
reduced_time_collector.persist()
# collect the reduced time-collector results back to the driver
# we take results that are in the form:
# ((time, collector), (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# (time, collector) => (elem_cnt, coll_record_cnt)
final_time_collector = reduced_time_collector\
.mapValues(lambda x: [x[0], x[2]]).collectAsMap()
# take the time-collector result and map it into a new RDD which is keyed
# by time only (i.e. a global view). again we need to reduce after the map
# stage.
reduced_time = reduced_time_collector.map(map_per_time)\
.reduceByKey(merge_results)
# collect the reduced time-only results back to the driver
# we take results that are in the form:
# (time, (elem_cnt, peer_record_cnt, coll_record_cnt))
# and map them into:
# time => (elem_cnt, coll_record_cnt)
final_time = reduced_time.mapValues(lambda x: [x[0], x[2]]).collectAsMap()
# build the output file name
outfile = "%s/bgpstream-recordcounter.%s.%s-%s.csv" %\
(outdir, data_type, start_str, end_str)
with open(outfile, 'wb') as csvfile:
w = csv.writer(csvfile)
w.writerow(["Time", "Collector", "Peer", "#Elems", "#Records"])
# write out the per-peer statistics
for key in final_time_collector_peer:
(ts, coll, peer) = key
(elems, records) = final_time_collector_peer[key]
w.writerow([ts, coll, "AS"+str(peer[2])+"-"+peer[3],
elems, records])
# write out the per-collector statistics
for key in final_time_collector:
(ts, coll) = key
(elems, records) = final_time_collector[key]
w.writerow([ts, coll, "ALL-PEERS", elems, records])
# write out the global statistics
|
for key in final_time:
(ts) = key
(elems, records) = final_time[key]
|
random_line_split
|
|
battle_traits.ts
|
`Masters of Destiny`,
desc: `Before rolling any dice for a TZEENTCH unit, you can use one or more of the remaining Destiny Dice from your pool in their stead; the result of the roll you would have made is automatically substituted with the result shown on the Destiny Dice you have chosen to use.
Each Destiny Dice spent only allows you to replace a single dice roll. If you want to replace a 2D6 roll (such as a casting roll or charge roll), you must spend 2 Destiny Dice. In addition, any rolls that have been replaced count as unmodified rolls and cannot be rerolled. They also cannot be modified, with the following two exceptions:
- If you spend a Destiny Dice to replace a save roll, the result of that Destiny Dice is modified by the Rend characteristic of the attack as normal.
- If you spend a Destiny Dice to replace a battleshock test, the result of that Destiny Dice is modified by the number of models slain from that unit as normal.
Designer's Note: This means that for the purposes of Pink Horror Icon Bearers, a Destiny Dice of 1 used to replace a battleshock roll counts as an unmodified roll of 1. `,
when: [DURING_GAME],
rule_sources: [
rule_sources.BATTLETOME_TZEENTCH,
rule_sources.ERRATA_TZEENTCH_JULY_2020,
rule_sources.ERRATA_TZEENTCH_JULY_2021,
],
},
{
name: `Summon Daemons of Tzeentch`,
desc: `You can summon units of Tzeentch Daemons to the battlefield by expending Fate Points. You receive 1 Fate Point each time a casting roll is successful, and the spell is not unbound. Note that you receive Fate Points whenever a spell is cast, be it by friend or foe - Tzeentch cares not from whence the magic flows.`,
when: [HERO_PHASE],
},
{
name: `Summon Daemons of Tzeentch`,
desc: `If you have 10 or more Fate Points at the end of your movement phase, you can summon one unit from the summoning list onto the battlefield, and add them to your army. Each unit you summon costs a number of Fate Points, as shown on the list, and you can only summon a unit if you have enough Fate Points to pay its cost.
Summoned units must be set up wholly within 12" of a friendly Tzeentch Hero and more than 9" from any enemy units. Subtract the cost of the summoned unit from the number of Fate Points you have immediately after the summoned unit has been set up.`,
when: [END_OF_MOVEMENT_PHASE],
},
{
name: `Summon Daemons of Tzeentch`,
desc: `Summoning Costs:
1 Exalted Greater Daemon of Tzeentch - 45 FP
1 Lord of Change - 30 FP
1 Fateskimmer on Burning Chariot - 24 FP
10 Pink Horrors - 20 FP
1 Burning Chariot - 18 FP
3 Flamers - 18 FP
1 Changecaster - 12 FP
1 Exalted Flamer - 12 FP
1 Fluxmaster - 12 FP
10 Blue Horrors - 10 FP
10 Brimstone Horrors - 10 FP
3 Screamers - 10 FP`,
when: [END_OF_MOVEMENT_PHASE],
},
{
name: `Locus of Change`,
desc: `Subtract 1 from hit rolls for attacks made with melee weapons that target friendly Tzeentch Daemon units that are wholly within 12" of a friendly Tzeentch Daemon Hero.`,
when: [COMBAT_PHASE],
},
{
|
desc: `At the start of your hero phase, you can say that your army intends to complete one of the following agendas before the start of your next hero phase. You must tell your opponent which agenda you intend to complete, and you cannot complete the same agenda more than once per battle.
If a friendly Tzeentch unit completes one of the following agendas during a battle, that unit gains that agenda's ability for the rest of the game.
      Friendly Tzeentch units that complete more than 1 agenda must choose which ability they wish to keep; any other abilities gained are lost.`,
when: [START_OF_HERO_PHASE],
},
{
name: `Agendas of Anarchy: Mass Conjuration`,
desc: `Requirement: 1 selected Tzeentch wizard successfully casts 2 spells/endless spells in this hero phase with unmodified casting rolls of 9+ without being unbound.
Reward: Add 1 to the casting rolls of the completing model.`,
when: [START_OF_HERO_PHASE],
},
{
name: `Agendas of Anarchy: Mass Conjuration`,
desc: `If active, add 1 to casting rolls for the buffed wizard.`,
when: [HERO_PHASE],
},
{
name: `Agendas of Anarchy: Ninefold Dismantlement`,
desc: `Requirement: 1 selected enemy unit with 9 or more models is destroyed this turn.
      Reward: Add 1 to the melee hit rolls of the friendly Tzeentch unit that completed this agenda.`,
when: [START_OF_HERO_PHASE],
},
{
name: `Agendas of Anarchy: Ninefold Dismantlement`,
desc: `If active, add 1 to the melee hit rolls for the buffed unit.`,
when: [COMBAT_PHASE],
},
{
name: `Agendas of Anarchy: Overthrow Leaders`,
      desc: `Requirement: 1 selected enemy hero or monster on the battlefield with a wounds characteristic of 9 or more is slain this turn.
Reward: Add 1 to the save rolls for the friendly Tzeentch unit that completed this agenda.`,
when: [START_OF_HERO_PHASE],
},
{
name: `Agendas of Anarchy: Overthrow Leaders`,
desc: `If active, add 1 to the save rolls for the buffed unit.`,
when: [SAVES_PHASE],
},
{
name: `Agendas of Anarchy: Reckless Abandon`,
desc: `Requirement: 1 selected friendly Tzeentch unit 9" or more from any enemy units successfully completes a charge within 1/2" of an enemy model.
      Reward: Add 1 to the melee attacks characteristic of the friendly Tzeentch unit that completed this agenda if it charges in the same turn.`,
when: [START_OF_HERO_PHASE, CHARGE_PHASE],
},
{
name: `Agendas of Anarchy: Reckless Abandon`,
desc: `If active, add 1 to the melee attacks characteristic of the buffed unit if it charged this turn.`,
when: [CHARGE_PHASE, COMBAT_PHASE],
},
{
      name: `Agendas of Anarchy: Tides of Anarchy`,
desc: `Requirement: 1 selected friendly Tzeentch unit with 9 or more models takes control of an objective controlled by an enemy at the start of this phase.
      Reward: Each Tzeentch model in the unit that completed this agenda counts as 2 models instead of 1 when determining objective control.`,
when: [START_OF_HERO_PHASE],
},
{
name: `Agendas of Anarchy: Tides of Anarchy`,
desc: `If active, each model in the buffed unit counts as 2 models when determining objective control.`,
when: [DURING_GAME],
},
],
},
// Eternal Conflagration Flavor
'Twisters of Materiality': {
effects: [
{
name: `Twisters of Materiality`,
desc: `Improve the Rend characteristic of friendly Eternal Conflagration units' Warpflame, Billowing Warpflame, and Magical Flames missile weapons by 1.`,
when: [SHOOTING_PHASE],
},
],
},
// Hosts Duplicitous Flavor
'Ranks of Mischievous Mirages': {
effects: [
{
name: `Ranks of Mischievous Mirages`,
desc: `Enemy units within 3" of a Hosts Duplicitous unit cannot retreat.`,
when: [MOVEMENT_PHASE],
},
],
},
// Hosts Arcanum Flavor
'Thieves of All Things Arcane': {
effects: [
{
name: `Thieves of All Things Arcane`,
desc: `Once per turn, in the first, third, and fifth battle rounds, when a friendly Host Arcanum Wizard attempts to unbind a spell, the spell is automatically unbound. (Do not roll 2D6).`,
when: [TURN_ONE_HERO_PHASE, TURN_THREE_HERO_PHASE, TURN_FIVE_HERO_PHASE],
},
],
},
// Cult of Transient Form Flavor
'The Change-gift': {
effects: [
{
name: `The Change-gift`,
desc: `Roll a dice each time a friendly CULT OF THE TRANSIENT FORM KAIRIC ACOLYTE model is slain in the combat phase. On a
|
name: `Agendas of Anarchy`,
|
random_line_split
|
Logistic regression.py
|
` package because we want easy access to all the statistical indicators that logistic regression can lead to.
# In[10]:
X = data_encoded.loc[:, list(data_encoded.loc[:, rfe.support_])]
y = data_encoded.loc[:, target]
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### P-Values and feature selection
#
# Remove those predictors with _p-values_ above 0.05
#
# Mark those features with a p-value higher than 0.05 (or close) to be removed from $X$, and run the logistic regression again to re-check the p-values. From that point we'll be ready to run the model properly in sklearn.
# In[11]:
to_remove = result.pvalues[result.pvalues > 0.05].index.tolist()
X.drop(to_remove, inplace=True, axis=1)
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### The Logit model
#
# Here we train the model and evaluate on the test set. The interpretation of the results obtained by calling the `classification_report` are as follows:
#
# The **precision** is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier to not label a sample as positive if it is negative.
#
# The **recall** is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples.
#
# The **F-beta** score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0.
#
# The F-beta score weights the recall more than the precision by a factor of beta. beta = 1.0 means recall and precision are equally important.
#
# The **support** is the number of occurrences of each class in y_test.
# In[12]:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=0)
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy on test: {:.2f}'.format(logreg.score(X_test, y_test)))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
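# As a cross-check, here is a minimal sketch (reusing `y_test` and `y_pred` from the
# cell above) that reproduces the positive-class precision, recall and F1 reported by
# `classification_report` directly from the confusion matrix; sklearn lays the matrix
# out with rows as true labels and columns as predictions, so ravel() yields tn, fp, fn, tp.
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print('precision={:.2f} recall={:.2f} f1={:.2f}'.format(precision, recall, f1))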
# ### ROC Curve
#
# The receiver operating characteristic (ROC) curve is another common tool used with binary classifiers. The dotted line represents the ROC curve of a purely random classifier; a good classifier stays as far away from that line as possible (toward the top-left corner).
# In[13]:
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
# Plot the FPR vs. TPR, and the diagonal line representing the null model.
# In[14]:
def plot_roc(fpr, tpr, logit_roc_auc):
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show();
# In[15]:
plot_roc(fpr, tpr, logit_roc_auc)
# The results are very poor, and what we've got shouldn't be used in production. The proposal from this point is:
# 1. to know more about how the predictions are made in logistic regression
# 2. apply a logit to predict if the price of a house will be higher or lower than a given value
# ### Explore logit predictions
#
# What we've seen is that we directly call the method `predict` on `logreg`, which tells us to which class each sample is assigned: 0 or 1. To accomplish this, the model internally produces two probabilities per sample, one for each class.
# In[16]:
pred_proba_df = pd.DataFrame(logreg.predict_proba(X_test))
threshold_list = np.arange(0.05, 1.0, 0.05)
accuracy_list = np.array([])
for threshold in threshold_list:
y_test_pred = pred_proba_df.applymap(lambda prob: 1 if prob > threshold else 0)
test_accuracy = accuracy_score(y_test.values,
y_test_pred[1].values.reshape(-1, 1))
accuracy_list = np.append(accuracy_list, test_accuracy)
# And the plot of the array of accuracy values got from each of the probabilities.
# In[17]:
plt.plot(range(accuracy_list.shape[0]), accuracy_list, 'o-', label='Accuracy')
plt.title('Accuracy for different threshold values')
plt.xlabel('Threshold')
plt.ylabel('Accuracy')
plt.xticks([i for i in range(1, accuracy_list.shape[0], 2)],
np.round(threshold_list[1::2], 1))
plt.grid()
plt.show();
# ## Default Dataset
#
# A simulated data set containing information on ten thousand customers. The aim here is to predict which customers will default on their credit card debt. A data frame with 10000 observations on the following 4 variables.
#
# `default`
# A factor with levels No and Yes indicating whether the customer defaulted on their debt
#
# `student`
# A factor with levels No and Yes indicating whether the customer is a student
#
# `balance`
# The average balance that the customer has remaining on their credit card after making their monthly payment
#
# `income`
# Income of customer
#
# In[18]:
data = pd.read_csv('default.csv')
data.head()
# Let's build a class column with the proper values on it (0 and 1) instead of the strings with Yes and No.
# In[19]:
data.default = data.default.map({'No': 0, 'Yes': 1})
data.student = data.student.map({'No': 0, 'Yes': 1})
data.head()
# We are interested in predicting whether an individual will default on his or her credit card payment, on the basis of annual income and monthly credit card balance.
#
# It is worth noting that figure below displays a very pronounced relationship between the predictor balance and the response default. In most real applications, the relationship between the predictor and the response will not be nearly so strong.
# In[20]:
def plot_descriptive(data):
fig = plt.figure(figsize=(9, 4))
gs = GridSpec(1, 3, width_ratios=[3, 1, 1])
ax0 = plt.subplot(gs[0])
ax0 = plt.scatter(data.balance[data.default==0],
data.income[data.default==0],
label='default=No',
marker='.', c='red', alpha=0.5)
ax0 = plt.scatter(data.balance[data.default==1],
data.income[data.default==1],
label='default=Yes',
marker='+', c='green', alpha=0.7)
ax0 = plt.xlabel('balance')
ax0 = plt.ylabel('income')
ax0 = plt.legend(loc='best')
ax0 = plt.subplot(gs[1])
ax1 = sns.boxplot(x="default", y="balance", data=data)
ax0 = plt.subplot(gs[2])
ax2 = sns.boxplot(x="default", y="income", data=data)
plt.tight_layout()
plt.show()
# In[21]:
plot_descriptive(data)
# Consider again the Default data set, where the response `default` falls into one of two categories, Yes or No. Rather than modeling this response $Y$ directly, logistic regression models the probability that $Y$ belongs to a particular category.
#
# For example, the probability of default given balance can be written as
#
# $$Pr(default = Yes|balance)$$
#
# The values of $Pr(default = Yes|balance)$ –$p(balance)$–, will range between 0 and 1. Then for any given value of `balance`, a prediction can be made for `default`. For example, one might predict `default = Yes` for any individual for whom $p(balance) > 0.5$.
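# A minimal sketch of that idea (assuming numpy is available as `np`, as elsewhere in
# this notebook): logistic regression passes a linear function of `balance` through the
# sigmoid, so the output is always a probability in (0, 1). The coefficients below are
# made-up illustrative numbers, not values fitted to this dataset.
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
b0, b1 = -10.0, 0.005 # hypothetical intercept and slope for balance
for bal in (1000, 2000, 3000):
    print(bal, round(sigmoid(b0 + b1 * bal), 4))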
# In[22]:
def plot_classes(show=True):
plt.scatter(data.balance[data.default==0],
data.default[data.default==0],
marker='o', color='red', alpha=0.5)
plt.scatter(data.balance[data.default==1],
data.default[data.default==1],
marker='+', color='green', alpha=0.7)
plt.xlabel('Balance')
plt.ylabel('Probability of default')
plt.yticks([0, 1], [0, 1])
if show is True:
plt.
|
show();
#
|
conditional_block
|
|
Logistic regression.py
|
Id')
print('Selecting {} features'.format(len(features)))
data_complete = data.filter(features + [target])
data_complete = data_complete[data_complete[target].notnull()]
meta_complete = dataframe_metainformation(data_complete)
print_metainformation(meta_complete)
dummy_columns = meta_complete['categorical_features']
dummy_columns.remove(target)
data_encoded = pd.get_dummies(data_complete, columns=dummy_columns)
data_encoded.head(3)
# How many occurrences do we have from each class of the target variable?
# In[6]:
sns.countplot(x='FireplaceQu', data=data_encoded);
plt.show();
# Since we have very few occurrences of the classes `Ex`, `Fa` and `Po`, we will remove them from the training set, and we will train our model to classify only between `TA` and `Gd`.
# In[7]:
data_encoded = data_encoded[(data_encoded[target] != 'Ex') &
(data_encoded[target] != 'Fa') &
(data_encoded[target] != 'Po')]
data_encoded[target] = data_encoded[target].map({'TA':0, 'Gd':1})
sns.countplot(x='FireplaceQu', data=data_encoded);
# Set the list of prepared features
# In[8]:
features = list(data_encoded)
features.remove(target)
# ### Recursive Feature Elimination
# Recursive Feature Elimination (RFE) is based on the idea of repeatedly constructing a model and choosing either the best- or worst-performing feature, setting that feature aside, and then repeating the process with the rest of the features until all features in the dataset are exhausted. The goal of RFE is to select features by recursively considering smaller and smaller sets of features.
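# A minimal sketch of this idea (illustrative only; the next cell uses sklearn's actual `RFE`): fit the model,
# drop the feature whose coefficient is smallest in absolute value, and repeat until the desired number remains.
def rfe_sketch(model, X, y, n_features_to_keep):
    """Illustrative sketch of the RFE idea; not sklearn's implementation."""
    remaining = list(X.columns)
    while len(remaining) > n_features_to_keep:
        model.fit(X[remaining], y)
        # Pair each remaining feature with the absolute value of its coefficient.
        weights = dict(zip(remaining, abs(model.coef_[0])))
        # Set aside the worst-performing (lowest-weight) feature and repeat.
        remaining.remove(min(weights, key=weights.get))
    return remaining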
# In[9]:
from sklearn.exceptions import ConvergenceWarning
import warnings
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
X = data_encoded.loc[:, features]
y = data_encoded.loc[:, target]
logreg = LogisticRegression(solver='lbfgs', max_iter=250)
rfe = RFE(logreg, n_features_to_select=15)
rfe = rfe.fit(X, y)
print('Selected features: {}'.format(list(data_encoded.loc[:, rfe.support_])))
# ## Building the model
#
# Set the variables $X$ and $Y$ to the contents of the dataframe we want to use, and fit a `Logit` model. Print a summary to check the results. We're using the `statsmodels` package because we want easy access to all the statistical indicators that logistic regression can provide.
# In[10]:
X = data_encoded.loc[:, list(data_encoded.loc[:, rfe.support_])]
y = data_encoded.loc[:, target]
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### P-Values and feature selection
#
# Remove those predictors with _p-values_ above 0.05
#
# Mark those features with a p-value higher than 0.05 (or close to it) to be removed from $X$, and run the logistic regression again to re-check the p-values. From that point we'll be ready to run the model properly in sklearn.
# In[11]:
to_remove = result.pvalues[result.pvalues > 0.05].index.tolist()
X.drop(to_remove, inplace=True, axis=1)
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### The Logit model
#
# Here we train the model and evaluate it on the test set. The interpretation of the results obtained by calling `classification_report` is as follows:
#
# The **precision** is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier to not label a sample as positive if it is negative.
#
# The **recall** is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples.
#
# The **F-beta** score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0.
#
# The F-beta score weights the recall more than the precision by a factor of beta. beta = 1.0 means recall and precision are equally important.
#
# The **support** is the number of occurrences of each class in y_test.
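# As a quick illustration with hypothetical numbers (not from this dataset): with tp = 40, fp = 10 and fn = 20,
# precision = 40 / (40 + 10) = 0.80, recall = 40 / (40 + 20) ≈ 0.67, and with beta = 1 the F-score is their
# harmonic mean, 2 * 0.80 * 0.67 / (0.80 + 0.67) ≈ 0.73.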
# In[12]:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=0)
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy on test: {:.2f}'.format(logreg.score(X_test, y_test)))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# ### ROC Curve
#
# The receiver operating characteristic (ROC) curve is another common tool used with binary classifiers. The dotted line represents the ROC curve of a purely random classifier; a good classifier stays as far away from that line as possible (toward the top-left corner).
# In[13]:
logit_roc_auc = roc_auc_score(y_test, logreg.predict_proba(X_test)[:, 1])
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
# Plot the FPR vs. TPR, and the diagonal line representing the null model.
# In[14]:
def plot_roc(fpr, tpr, logit_roc_auc):
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show();
# In[15]:
plot_roc(fpr, tpr, logit_roc_auc)
# The results are very poor, and what we've got shouldn't be used in production. The proposal from this point is:
# 1. to know more about how the predictions are made in logistic regression
# 2. apply a logit to predict if the price of a house will be higher or lower than a given value
# ### Explore logit predictions
#
# What you've seen is that we directly call the method `predict` in `logit`, which tells us to which class each sample is assigned: 0 or 1. To accomplish this, the model produces two probabilities for each sample, one per class, and assigns the class with the higher probability.
# In[16]:
pred_proba_df = pd.DataFrame(logreg.predict_proba(X_test))
threshold_list = np.arange(0.05, 1.0, 0.05)
accuracy_list = np.array([])
for threshold in threshold_list:
y_test_pred = pred_proba_df.applymap(lambda prob: 1 if prob > threshold else 0)
test_accuracy = accuracy_score(y_test.values,
y_test_pred[1].values.reshape(-1, 1))
accuracy_list = np.append(accuracy_list, test_accuracy)
# And the plot of the accuracy values obtained for each threshold.
# In[17]:
plt.plot(range(accuracy_list.shape[0]), accuracy_list, 'o-', label='Accuracy')
plt.title('Accuracy for different threshold values')
plt.xlabel('Threshold')
plt.ylabel('Accuracy')
plt.xticks([i for i in range(1, accuracy_list.shape[0], 2)],
np.round(threshold_list[1::2], 1))
plt.grid()
plt.show();
# ## Default Dataset
#
# A simulated data set containing information on ten thousand customers. The aim here is to predict which customers will default on their credit card debt. A data frame with 10000 observations on the following 4 variables.
#
# `default`
# A factor with levels No and Yes indicating whether the customer defaulted on their debt
#
# `student`
# A factor with levels No and Yes indicating whether the customer is a student
#
# `balance`
# The average balance that the customer has remaining on their credit card after making their monthly payment
#
# `income`
# Income of customer
#
# In[18]:
data = pd.read_csv('default.csv')
data.head()
# Let's build a class column with the proper values (0 and 1) instead of the strings Yes and No.
# In[19]:
data.default = data.default.map({'No': 0, 'Yes': 1})
data.student = data.student.map({'No': 0, 'Yes': 1})
data.head()
# We are interested in predicting whether an individual will default on his or her credit card payment, on the basis of annual income and monthly credit card balance.
#
# It is worth noting that the figure below displays a very pronounced relationship between the predictor balance and the response default. In most real applications, the relationship between the predictor and the response will not be nearly so strong.
# In[20]:
def
|
plot_descriptive
|
identifier_name
|
|
Logistic regression.py
|
def print_metainformation(meta):
print('Available types:', meta['description']['dtype'].unique())
print('{} Features'.format(meta['description'].shape[0]))
print('{} categorical features'.format(len(meta['categorical_features'])))
print('{} numerical features'.format(len(meta['numerical_features'])))
print('{} categorical features with NAs'.format(len(meta['categorical_features_na'])))
print('{} numerical features with NAs'.format(len(meta['numerical_features_na'])))
print('{} Complete features'.format(len(meta['complete_features'])))
# In[4]:
meta = dataframe_metainformation(data)
print_metainformation(meta)
# #### Can we build a model that will predict the contents of one of those categorical columns with NAs?
#
# Let's try! I will start with `FireplaceQu`, which has a decent amount of NAs.
#
# Define **target** and **features** to hold the variable we want to predict and the features we can use (those with no NAs). We remove `Id` from the list of features to be used by our model. Finally, we establish the source dataset by keeping only those rows from `data` where the target is not NA.
#
# Lastly, we will encode all categorical features (except the target) to have a proper setup for running the logistic regression. To encode, we'll use one-hot encoding by calling `get_dummies`, illustrated below. The resulting dataset will have all numerical features.
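# As a quick illustration of what `get_dummies` does (toy frame, unrelated to this dataset): a single
# categorical column is expanded into one indicator column per category.
pd.get_dummies(pd.DataFrame({'color': ['red', 'green', 'red']}), columns=['color'])
# -> indicator columns `color_green` and `color_red`, flagging the category each row belongs to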
# In[5]:
target = 'FireplaceQu'
features = meta['complete_features']
features.remove('Id')
print('Selecting {} features'.format(len(features)))
data_complete = data.filter(features + [target])
data_complete = data_complete[data_complete[target].notnull()]
meta_complete = dataframe_metainformation(data_complete)
print_metainformation(meta_complete)
dummy_columns = meta_complete['categorical_features']
dummy_columns.remove(target)
data_encoded = pd.get_dummies(data_complete, columns=dummy_columns)
data_encoded.head(3)
# How many occurrences do we have of each class of the target variable?
# In[6]:
sns.countplot(x='FireplaceQu', data=data_encoded);
plt.show();
# Since we have very few occurrences of the classes `Ex`, `Fa` and `Po`, we will remove them from the training set and train our model to classify only between `TA` and `Gd`.
# In[7]:
data_encoded = data_encoded[(data_encoded[target] != 'Ex') &
(data_encoded[target] != 'Fa') &
(data_encoded[target] != 'Po')]
data_encoded[target] = data_encoded[target].map({'TA':0, 'Gd':1})
sns.countplot(x='FireplaceQu', data=data_encoded);
# Set the list of prepared features
# In[8]:
features = list(data_encoded)
features.remove(target)
# ### Recursive Feature Elimination
# Recursive Feature Elimination (RFE) is based on the idea of repeatedly constructing a model and choosing either the best- or worst-performing feature, setting that feature aside, and then repeating the process with the rest of the features until all features in the dataset are exhausted. The goal of RFE is to select features by recursively considering smaller and smaller sets of features.
# In[9]:
from sklearn.exceptions import ConvergenceWarning
import warnings
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
X = data_encoded.loc[:, features]
y = data_encoded.loc[:, target]
logreg = LogisticRegression(solver='lbfgs', max_iter=250)
rfe = RFE(logreg, n_features_to_select=15)
rfe = rfe.fit(X, y)
print('Selected features: {}'.format(list(data_encoded.loc[:, rfe.support_])))
# ## Building the model
#
# Set the variables $X$ and $Y$ to the contents of the dataframe we want to use, and fit a `Logit` model. Print a summary to check the results. We're using the `statsmodels` package because we want easy access to all the statistical indicators that logistic regression can provide.
# In[10]:
X = data_encoded.loc[:, list(data_encoded.loc[:, rfe.support_])]
y = data_encoded.loc[:, target]
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### P-Values and feature selection
#
# Remove those predictors with _p-values_ above 0.05
#
# Mark those features with a p-value higher than 0.05 (or close to it) to be removed from $X$, and run the logistic regression again to re-check the p-values. From that point we'll be ready to run the model properly in sklearn.
# In[11]:
to_remove = result.pvalues[result.pvalues > 0.05].index.tolist()
X.drop(to_remove, inplace=True, axis=1)
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### The Logit model
#
# Here we train the model and evaluate it on the test set. The interpretation of the results obtained by calling `classification_report` is as follows:
#
# The **precision** is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier to not label a sample as positive if it is negative.
#
# The **recall** is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples.
#
# The **F-beta** score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0.
#
# The F-beta score weights the recall more than the precision by a factor of beta. beta = 1.0 means recall and precision are equally important.
#
# The **support** is the number of occurrences of each class in y_test.
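# For reference, the general formula behind these definitions is
#
# $$F_\beta = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\beta^2 \cdot precision + recall}$$
#
# which reduces to $F_1$, the plain harmonic mean of precision and recall, when $\beta = 1$.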
# In[12]:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=0)
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy on test: {:.2f}'.format(logreg.score(X_test, y_test)))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# ### ROC Curve
#
# The receiver operating characteristic (ROC) curve is another common tool used with binary classifiers. The dotted line represents the ROC curve of a purely random classifier; a good classifier stays as far away from that line as possible (toward the top-left corner).
# In[13]:
logit_roc_auc = roc_auc_score(y_test, logreg.predict_proba(X_test)[:, 1])
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
# Plot the FPR vs. TPR, and the diagonal line representing the null model.
# In[14]:
def plot_roc(fpr, tpr, logit_roc_auc):
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show();
# In[15]:
plot_roc(fpr, tpr, logit_roc_auc)
# The results are very poor, and what we've got shouldn't be used in production. The proposal from this point is:
# 1. to know more about how the predictions are made in logistic regression
# 2. apply a logit to predict if the price of a house will be higher or lower than a given value
# ### Explore logit predictions
#
# What you've seen is that we directly call the method `predict` in `logit`, which will
|
meta = dict()
descr = pd.DataFrame({'dtype': df.dtypes, 'NAs': df.isna().sum()})
categorical_features = descr.loc[descr['dtype'] == 'object'].index.values.tolist()
numerical_features = descr.loc[descr['dtype'] != 'object'].index.values.tolist()
numerical_features_na = descr.loc[(descr['dtype'] != 'object') & (descr['NAs'] > 0)].index.values.tolist()
categorical_features_na = descr.loc[(descr['dtype'] == 'object') & (descr['NAs'] > 0)].index.values.tolist()
complete_features = descr.loc[descr['NAs'] == 0].index.values.tolist()
meta['description'] = descr
meta['categorical_features'] = categorical_features
meta['categorical_features'] = categorical_features
meta['categorical_features_na'] = categorical_features_na
meta['numerical_features'] = numerical_features
meta['numerical_features_na'] = numerical_features_na
meta['complete_features'] = complete_features
return meta
|
identifier_body
|
|
Logistic regression.py
|
# In[11]:
to_remove = result.pvalues[result.pvalues > 0.05].index.tolist()
X.drop(to_remove, inplace=True, axis=1)
logit_model=sm.Logit(y, X)
result=logit_model.fit(method='bfgs')
print(result.summary2())
# ### The Logit model
#
# Here we train the model and evaluate it on the test set. The interpretation of the results obtained by calling `classification_report` is as follows:
#
# The **precision** is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier to not label a sample as positive if it is negative.
#
# The **recall** is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples.
#
# The **F-beta** score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0.
#
# The F-beta score weights the recall more than the precision by a factor of beta. beta = 1.0 means recall and precision are equally important.
#
# The **support** is the number of occurrences of each class in y_test.
# In[12]:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=0)
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy on test: {:.2f}'.format(logreg.score(X_test, y_test)))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# ### ROC Curve
#
# The receiver operating characteristic (ROC) curve is another common tool used with binary classifiers. The dotted line represents the ROC curve of a purely random classifier; a good classifier stays as far away from that line as possible (toward the top-left corner).
# In[13]:
logit_roc_auc = roc_auc_score(y_test, logreg.predict_proba(X_test)[:, 1])
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
# Plot the FPR vs. TPR, and the diagonal line representing the null model.
# In[14]:
def plot_roc(fpr, tpr, logit_roc_auc):
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show();
# In[15]:
plot_roc(fpr, tpr, logit_roc_auc)
# The results are very poor, and what we've got shouldn't be used in production. The proposal from this point is:
# 1. to know more about how the predictions are made in logistic regression
# 2. apply a logit to predict if the price of a house will be higher or lower than a given value
# ### Explore logit predictions
#
# What you've seen is that we directly call the method `predict` in `logit`, which tells us to which class each sample is assigned: 0 or 1. To accomplish this, the model produces two probabilities for each sample, one per class, and assigns the class with the higher probability.
# In[16]:
pred_proba_df = pd.DataFrame(logreg.predict_proba(X_test))
threshold_list = np.arange(0.05, 1.0, 0.05)
accuracy_list = np.array([])
for threshold in threshold_list:
y_test_pred = pred_proba_df.applymap(lambda prob: 1 if prob > threshold else 0)
test_accuracy = accuracy_score(y_test.values,
y_test_pred[1].values.reshape(-1, 1))
accuracy_list = np.append(accuracy_list, test_accuracy)
# And the plot of the accuracy values obtained for each threshold.
# In[17]:
plt.plot(range(accuracy_list.shape[0]), accuracy_list, 'o-', label='Accuracy')
plt.title('Accuracy for different threshold values')
plt.xlabel('Threshold')
plt.ylabel('Accuracy')
plt.xticks([i for i in range(1, accuracy_list.shape[0], 2)],
np.round(threshold_list[1::2], 1))
plt.grid()
plt.show();
# ## Default Dataset
#
# A simulated data set containing information on ten thousand customers. The aim here is to predict which customers will default on their credit card debt. A data frame with 10000 observations on the following 4 variables.
#
# `default`
# A factor with levels No and Yes indicating whether the customer defaulted on their debt
#
# `student`
# A factor with levels No and Yes indicating whether the customer is a student
#
# `balance`
# The average balance that the customer has remaining on their credit card after making their monthly payment
#
# `income`
# Income of customer
#
# In[18]:
data = pd.read_csv('default.csv')
data.head()
# Let's build a class column with the proper values (0 and 1) instead of the strings Yes and No.
# In[19]:
data.default = data.default.map({'No': 0, 'Yes': 1})
data.student = data.student.map({'No': 0, 'Yes': 1})
data.head()
# We are interested in predicting whether an individual will default on his or her credit card payment, on the basis of annual income and monthly credit card balance.
#
# It is worth noting that the figure below displays a very pronounced relationship between the predictor balance and the response default. In most real applications, the relationship between the predictor and the response will not be nearly so strong.
# In[20]:
def plot_descriptive(data):
fig = plt.figure(figsize=(9, 4))
gs = GridSpec(1, 3, width_ratios=[3, 1, 1])
ax0 = plt.subplot(gs[0])
ax0 = plt.scatter(data.balance[data.default==0],
data.income[data.default==0],
label='default=No',
marker='.', c='red', alpha=0.5)
ax0 = plt.scatter(data.balance[data.default==1],
data.income[data.default==1],
label='default=Yes',
marker='+', c='green', alpha=0.7)
ax0 = plt.xlabel('balance')
ax0 = plt.ylabel('income')
ax0 = plt.legend(loc='best')
ax0 = plt.subplot(gs[1])
ax1 = sns.boxplot(x="default", y="balance", data=data)
ax0 = plt.subplot(gs[2])
ax2 = sns.boxplot(x="default", y="income", data=data)
plt.tight_layout()
plt.show()
# In[21]:
plot_descriptive(data)
# Consider again the Default data set, where the response `default` falls into one of two categories, Yes or No. Rather than modeling this response $Y$ directly, logistic regression models the probability that $Y$ belongs to a particular category.
#
# For example, the probability of default given balance can be written as
#
# $$Pr(default = Yes|balance)$$
#
# The values of $Pr(default = Yes|balance)$, which we abbreviate as $p(balance)$, will range between 0 and 1. Then, for any given value of `balance`, a prediction can be made for `default`. For example, one might predict `default = Yes` for any individual for whom $p(balance) > 0.5$.
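# A minimal sketch of the threshold rule described above (illustrative; `model` is assumed to be any fitted
# classifier exposing `predict_proba`, and `balances` a 2-D array of balance values):
def predict_default_at_threshold(model, balances, threshold=0.5):
    # Probability of the positive class (default = Yes) for each balance value.
    p_yes = model.predict_proba(balances)[:, 1]
    # Predict default = Yes (1) wherever that probability exceeds the threshold.
    return (p_yes > threshold).astype(int)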
# In[22]:
def plot_classes(show=True):
plt.scatter(data.balance[data.default==0],
data.default[data.default==0],
marker='o', color='red', alpha=0.5)
plt.scatter(data.balance[data.default==1],
data.default[data.default==1],
marker='+', color='green', alpha=0.7)
plt.xlabel('Balance')
plt.ylabel('Probability of default')
plt.yticks([0, 1], [0, 1])
if show is True:
plt.show();
# In[23]:
plot_classes()
# Build the model and store it in `logreg`.
# In[24]:
X_train, X_test, y_train, y_test = train_test_split(data.balance,
data.default,
test_size=0.3,
random_state=0)
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train.values.reshape(-1, 1), y_train)
y_pred = logreg.predict(X_test.values.reshape(-1, 1))
acc_test = logreg.score(X_test.values.reshape(-1, 1), y_test)
print('Accuracy on test: {:.2f}'.format(acc_test))
|
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
|
random_line_split
|
|
common.pb.go
|
LocationSource_GPS LocationSource = 1
// Manually configured.
LocationSource_CONFIG LocationSource = 2
// Geo resolver (TDOA).
LocationSource_GEO_RESOLVER_TDOA LocationSource = 3
// Geo resolver (RSSI).
LocationSource_GEO_RESOLVER_RSSI LocationSource = 4
// Geo resolver (GNSS).
LocationSource_GEO_RESOLVER_GNSS LocationSource = 5
// Geo resolver (WIFI).
LocationSource_GEO_RESOLVER_WIFI LocationSource = 6
)
var LocationSource_name = map[int32]string{
0: "UNKNOWN",
1: "GPS",
2: "CONFIG",
3: "GEO_RESOLVER_TDOA",
4: "GEO_RESOLVER_RSSI",
5: "GEO_RESOLVER_GNSS",
6: "GEO_RESOLVER_WIFI",
}
var LocationSource_value = map[string]int32{
"UNKNOWN": 0,
"GPS": 1,
"CONFIG": 2,
"GEO_RESOLVER_TDOA": 3,
"GEO_RESOLVER_RSSI": 4,
"GEO_RESOLVER_GNSS": 5,
"GEO_RESOLVER_WIFI": 6,
}
func (x LocationSource) String() string {
return proto.EnumName(LocationSource_name, int32(x))
}
func (LocationSource) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{2}
}
type KeyEnvelope struct {
// KEK label.
KekLabel string `protobuf:"bytes,1,opt,name=kek_label,json=kekLabel,proto3" json:"kek_label,omitempty"`
// AES key (when the kek_label is set, this key is encrypted using a key
// known to the join-server and application-server).
// For more information please refer to the LoRaWAN Backend Interface
// 'Key Transport Security' section.
AesKey []byte `protobuf:"bytes,2,opt,name=aes_key,json=aesKey,proto3" json:"aes_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeyEnvelope) Reset() { *m = KeyEnvelope{} }
func (m *KeyEnvelope) String() string { return proto.CompactTextString(m) }
func (*KeyEnvelope) ProtoMessage() {}
func (*KeyEnvelope) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{0}
}
func (m *KeyEnvelope) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeyEnvelope.Unmarshal(m, b)
}
func (m *KeyEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeyEnvelope.Marshal(b, m, deterministic)
}
func (m *KeyEnvelope) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyEnvelope.Merge(m, src)
}
func (m *KeyEnvelope) XXX_Size() int {
return xxx_messageInfo_KeyEnvelope.Size(m)
}
func (m *KeyEnvelope) XXX_DiscardUnknown() {
xxx_messageInfo_KeyEnvelope.DiscardUnknown(m)
}
var xxx_messageInfo_KeyEnvelope proto.InternalMessageInfo
func (m *KeyEnvelope) GetKekLabel() string {
if m != nil
|
return ""
}
func (m *KeyEnvelope) GetAesKey() []byte {
if m != nil {
return m.AesKey
}
return nil
}
type Location struct {
// Latitude.
Latitude float64 `protobuf:"fixed64,1,opt,name=latitude,proto3" json:"latitude,omitempty"`
// Longitude.
Longitude float64 `protobuf:"fixed64,2,opt,name=longitude,proto3" json:"longitude,omitempty"`
// Altitude.
Altitude float64 `protobuf:"fixed64,3,opt,name=altitude,proto3" json:"altitude,omitempty"`
// Location source.
Source LocationSource `protobuf:"varint,4,opt,name=source,proto3,enum=common.LocationSource" json:"source,omitempty"`
// Accuracy (in meters).
Accuracy uint32 `protobuf:"varint,5,opt,name=accuracy,proto3" json:"accuracy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{1}
}
func (m *Location) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Location.Unmarshal(m, b)
}
func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Location.Marshal(b, m, deterministic)
}
func (m *Location) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location.Merge(m, src)
}
func (m *Location) XXX_Size() int {
return xxx_messageInfo_Location.Size(m)
}
func (m *Location) XXX_DiscardUnknown() {
xxx_messageInfo_Location.DiscardUnknown(m)
}
var xxx_messageInfo_Location proto.InternalMessageInfo
func (m *Location) GetLatitude() float64 {
if m != nil {
return m.Latitude
}
return 0
}
func (m *Location) GetLongitude() float64 {
if m != nil {
return m.Longitude
}
return 0
}
func (m *Location) GetAltitude() float64 {
if m != nil {
return m.Altitude
}
return 0
}
func (m *Location) GetSource() LocationSource {
if m != nil {
return m.Source
}
return LocationSource_UNKNOWN
}
func (m *Location) GetAccuracy() uint32 {
if m != nil {
return m.Accuracy
}
return 0
}
func init() {
proto.RegisterEnum("common.Modulation", Modulation_name, Modulation_value)
proto.RegisterEnum("common.Region", Region_name, Region_value)
proto.RegisterEnum("common.LocationSource", LocationSource_name, LocationSource_value)
proto.RegisterType((*KeyEnvelope)(nil), "common.KeyEnvelope")
proto.RegisterType((*Location)(nil), "common.Location")
}
func init() {
proto.RegisterFile("common/common.proto", fileDescriptor_8f954d82c0b891f6)
}
var fileDescriptor_8f954d82c0b891f6 = []byte{
// 440 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xdd, 0x8e, 0xd2, 0x40,
0x14, 0xde, 0xe1, 0xa7, 0xb4, 0x67, 0x75, 0x33, 0x8e, 0x51, 0x89, 0x9a, 0x48, 0xf6, 0x8a, 0x90,
0x48, 0x81, 0xb2, 0xfc, 0x5c, 0x22, 0x16, 0xd2, 0x14, 0x5b, 0x33, 0x63, 0xdd, 0xc4, 0x1b, 0x52,
0xba, 0x93, 0xd2, 0xb4, 0xcb, 0x10, 0x68, 0x49, 0xfa, 0x12, 0x3e, 0x89, 0x0f, 0x69, 0xa6, 0x65,
0xd7, 0x18, 0xae, 0xfa, 0xfd, 0x9d, 0xd3, 0x73, 0x26, 0x07, 0x5e, 0x07, 0xe2, 0xf1, 0x51, 0xec,
0xf4, 0xf2, 0xd3, 0xdd, 0x1f, 0x44, 0x2a, 0x88, 0x52, 0xb2, 0xdb, 0
|
{
return m.KekLabel
}
|
conditional_block
|
common.pb.go
|
type Region int32
const (
// EU868
Region_EU868 Region = 0
// US915
Region_US915 Region = 2
// CN779
Region_CN779 Region = 3
// EU433
Region_EU433 Region = 4
// AU915
Region_AU915 Region = 5
// CN470
Region_CN470 Region = 6
// AS923
Region_AS923 Region = 7
// KR920
Region_KR920 Region = 8
// IN865
Region_IN865 Region = 9
// RU864
Region_RU864 Region = 10
)
var Region_name = map[int32]string{
0: "EU868",
2: "US915",
3: "CN779",
4: "EU433",
5: "AU915",
6: "CN470",
7: "AS923",
8: "KR920",
9: "IN865",
10: "RU864",
}
var Region_value = map[string]int32{
"EU868": 0,
"US915": 2,
"CN779": 3,
"EU433": 4,
"AU915": 5,
"CN470": 6,
"AS923": 7,
"KR920": 8,
"IN865": 9,
"RU864": 10,
}
func (x Region) String() string {
return proto.EnumName(Region_name, int32(x))
}
func (Region) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{1}
}
type LocationSource int32
const (
// Unknown.
LocationSource_UNKNOWN LocationSource = 0
// GPS.
LocationSource_GPS LocationSource = 1
// Manually configured.
LocationSource_CONFIG LocationSource = 2
// Geo resolver (TDOA).
LocationSource_GEO_RESOLVER_TDOA LocationSource = 3
// Geo resolver (RSSI).
LocationSource_GEO_RESOLVER_RSSI LocationSource = 4
// Geo resolver (GNSS).
LocationSource_GEO_RESOLVER_GNSS LocationSource = 5
// Geo resolver (WIFI).
LocationSource_GEO_RESOLVER_WIFI LocationSource = 6
)
var LocationSource_name = map[int32]string{
0: "UNKNOWN",
1: "GPS",
2: "CONFIG",
3: "GEO_RESOLVER_TDOA",
4: "GEO_RESOLVER_RSSI",
5: "GEO_RESOLVER_GNSS",
6: "GEO_RESOLVER_WIFI",
}
var LocationSource_value = map[string]int32{
"UNKNOWN": 0,
"GPS": 1,
"CONFIG": 2,
"GEO_RESOLVER_TDOA": 3,
"GEO_RESOLVER_RSSI": 4,
"GEO_RESOLVER_GNSS": 5,
"GEO_RESOLVER_WIFI": 6,
}
func (x LocationSource) String() string {
return proto.EnumName(LocationSource_name, int32(x))
}
func (LocationSource) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{2}
}
type KeyEnvelope struct {
// KEK label.
KekLabel string `protobuf:"bytes,1,opt,name=kek_label,json=kekLabel,proto3" json:"kek_label,omitempty"`
// AES key (when the kek_label is set, this key is encrypted using a key
// known to the join-server and application-server).
// For more information please refer to the LoRaWAN Backend Interface
// 'Key Transport Security' section.
AesKey []byte `protobuf:"bytes,2,opt,name=aes_key,json=aesKey,proto3" json:"aes_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeyEnvelope) Reset() { *m = KeyEnvelope{} }
func (m *KeyEnvelope) String() string { return proto.CompactTextString(m) }
func (*KeyEnvelope) ProtoMessage() {}
func (*KeyEnvelope) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{0}
}
func (m *KeyEnvelope) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeyEnvelope.Unmarshal(m, b)
}
func (m *KeyEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeyEnvelope.Marshal(b, m, deterministic)
}
func (m *KeyEnvelope) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyEnvelope.Merge(m, src)
}
func (m *KeyEnvelope) XXX_Size() int {
return xxx_messageInfo_KeyEnvelope.Size(m)
}
func (m *KeyEnvelope) XXX_DiscardUnknown() {
xxx_messageInfo_KeyEnvelope.DiscardUnknown(m)
}
var xxx_messageInfo_KeyEnvelope proto.InternalMessageInfo
func (m *KeyEnvelope) GetKekLabel() string {
if m != nil {
return m.KekLabel
}
return ""
}
func (m *KeyEnvelope) GetAesKey() []byte {
if m != nil {
return m.AesKey
}
return nil
}
type Location struct {
// Latitude.
Latitude float64 `protobuf:"fixed64,1,opt,name=latitude,proto3" json:"latitude,omitempty"`
// Longitude.
Longitude float64 `protobuf:"fixed64,2,opt,name=longitude,proto3" json:"longitude,omitempty"`
// Altitude.
Altitude float64 `protobuf:"fixed64,3,opt,name=altitude,proto3" json:"altitude,omitempty"`
// Location source.
Source LocationSource `protobuf:"varint,4,opt,name=source,proto3,enum=common.LocationSource" json:"source,omitempty"`
// Accuracy (in meters).
Accuracy uint32 `protobuf:"varint,5,opt,name=accuracy,proto3" json:"accuracy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{1}
}
func (m *Location) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Location.Unmarshal(m, b)
}
func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Location.Marshal(b, m, deterministic)
}
func (m *Location) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location.Merge(m, src)
}
func (m *Location) XXX_Size() int {
return xxx_messageInfo_Location.Size(m)
}
func (m *Location) XXX_DiscardUnknown() {
xxx_messageInfo_Location.DiscardUnknown(m)
}
var xxx_messageInfo_Location proto.InternalMessageInfo
func (m *Location) GetLatitude() float64 {
if m != nil {
return m.Latitude
}
return 0
}
func (m *Location) GetLongitude() float64 {
if m != nil {
return m.Longitude
}
return 0
}
func (m *Location) GetAltitude() float64 {
if m != nil {
return m.Altitude
}
return 0
}
func (m *Location) GetSource() LocationSource {
if m != nil {
return m.Source
}
return LocationSource_UNKNOWN
}
func (m *Location) GetAccuracy() uint32 {
if m != nil {
return m.Accuracy
}
return 0
}
func init() {
proto.RegisterEnum("common.Modulation", Modulation_name, Modulation_value)
proto.RegisterEnum("common.Region", Region_name, Region_value)
proto.RegisterEnum("common.LocationSource", LocationSource_name, LocationSource_value)
proto.RegisterType((*KeyEnvelope)(nil), "common.KeyEnvelope")
proto.RegisterType((*Location)(nil), "common.Location")
}
func init() {
proto.RegisterFile("common/common.proto", fileDescriptor_8f954d82c0b891f6)
}
var fileDescriptor_
|
{
return fileDescriptor_8f954d82c0b891f6, []int{0}
}
|
identifier_body
|
|
common.pb.go
|
LocationSource_GPS LocationSource = 1
// Manually configured.
LocationSource_CONFIG LocationSource = 2
// Geo resolver (TDOA).
LocationSource_GEO_RESOLVER_TDOA LocationSource = 3
// Geo resolver (RSSI).
LocationSource_GEO_RESOLVER_RSSI LocationSource = 4
// Geo resolver (GNSS).
LocationSource_GEO_RESOLVER_GNSS LocationSource = 5
// Geo resolver (WIFI).
LocationSource_GEO_RESOLVER_WIFI LocationSource = 6
)
var LocationSource_name = map[int32]string{
0: "UNKNOWN",
1: "GPS",
2: "CONFIG",
3: "GEO_RESOLVER_TDOA",
4: "GEO_RESOLVER_RSSI",
5: "GEO_RESOLVER_GNSS",
6: "GEO_RESOLVER_WIFI",
}
var LocationSource_value = map[string]int32{
"UNKNOWN": 0,
"GPS": 1,
"CONFIG": 2,
"GEO_RESOLVER_TDOA": 3,
"GEO_RESOLVER_RSSI": 4,
"GEO_RESOLVER_GNSS": 5,
"GEO_RESOLVER_WIFI": 6,
}
func (x LocationSource) String() string {
return proto.EnumName(LocationSource_name, int32(x))
}
func (LocationSource) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{2}
}
type KeyEnvelope struct {
// KEK label.
KekLabel string `protobuf:"bytes,1,opt,name=kek_label,json=kekLabel,proto3" json:"kek_label,omitempty"`
// AES key (when the kek_label is set, this key is encrypted using a key
// known to the join-server and application-server).
// For more information please refer to the LoRaWAN Backend Interface
// 'Key Transport Security' section.
AesKey []byte `protobuf:"bytes,2,opt,name=aes_key,json=aesKey,proto3" json:"aes_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeyEnvelope) Reset() { *m = KeyEnvelope{} }
func (m *KeyEnvelope) String() string { return proto.CompactTextString(m) }
func (*KeyEnvelope) ProtoMessage() {}
func (*KeyEnvelope) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{0}
}
func (m *KeyEnvelope) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeyEnvelope.Unmarshal(m, b)
}
func (m *KeyEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeyEnvelope.Marshal(b, m, deterministic)
}
func (m *KeyEnvelope) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyEnvelope.Merge(m, src)
}
func (m *KeyEnvelope) XXX_Size() int {
return xxx_messageInfo_KeyEnvelope.Size(m)
}
func (m *KeyEnvelope) XXX_DiscardUnknown() {
xxx_messageInfo_KeyEnvelope.DiscardUnknown(m)
}
var xxx_messageInfo_KeyEnvelope proto.InternalMessageInfo
func (m *KeyEnvelope) GetKekLabel() string {
if m != nil {
return m.KekLabel
}
return ""
}
func (m *KeyEnvelope) GetAesKey() []byte {
if m != nil {
return m.AesKey
}
return nil
}
type Location struct {
// Latitude.
Latitude float64 `protobuf:"fixed64,1,opt,name=latitude,proto3" json:"latitude,omitempty"`
// Longitude.
Longitude float64 `protobuf:"fixed64,2,opt,name=longitude,proto3" json:"longitude,omitempty"`
// Altitude.
Altitude float64 `protobuf:"fixed64,3,opt,name=altitude,proto3" json:"altitude,omitempty"`
// Location source.
Source LocationSource `protobuf:"varint,4,opt,name=source,proto3,enum=common.LocationSource" json:"source,omitempty"`
// Accuracy (in meters).
Accuracy uint32 `protobuf:"varint,5,opt,name=accuracy,proto3" json:"accuracy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{1}
}
func (m *Location) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Location.Unmarshal(m, b)
}
func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Location.Marshal(b, m, deterministic)
}
func (m *Location) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location.Merge(m, src)
}
func (m *Location) XXX_Size() int {
return xxx_messageInfo_Location.Size(m)
}
func (m *Location)
|
() {
xxx_messageInfo_Location.DiscardUnknown(m)
}
var xxx_messageInfo_Location proto.InternalMessageInfo
func (m *Location) GetLatitude() float64 {
if m != nil {
return m.Latitude
}
return 0
}
func (m *Location) GetLongitude() float64 {
if m != nil {
return m.Longitude
}
return 0
}
func (m *Location) GetAltitude() float64 {
if m != nil {
return m.Altitude
}
return 0
}
func (m *Location) GetSource() LocationSource {
if m != nil {
return m.Source
}
return LocationSource_UNKNOWN
}
func (m *Location) GetAccuracy() uint32 {
if m != nil {
return m.Accuracy
}
return 0
}
func init() {
proto.RegisterEnum("common.Modulation", Modulation_name, Modulation_value)
proto.RegisterEnum("common.Region", Region_name, Region_value)
proto.RegisterEnum("common.LocationSource", LocationSource_name, LocationSource_value)
proto.RegisterType((*KeyEnvelope)(nil), "common.KeyEnvelope")
proto.RegisterType((*Location)(nil), "common.Location")
}
func init() {
proto.RegisterFile("common/common.proto", fileDescriptor_8f954d82c0b891f6)
}
var fileDescriptor_8f954d82c0b891f6 = []byte{
// 440 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xdd, 0x8e, 0xd2, 0x40,
0x14, 0xde, 0xe1, 0xa7, 0xb4, 0x67, 0x75, 0x33, 0x8e, 0x51, 0x89, 0x9a, 0x48, 0xf6, 0x8a, 0x90,
0x48, 0x81, 0xb2, 0xfc, 0x5c, 0x22, 0x16, 0xd2, 0x14, 0x5b, 0x33, 0x63, 0xdd, 0xc4, 0x1b, 0x52,
0xba, 0x93, 0xd2, 0xb4, 0xcb, 0x10, 0x68, 0x49, 0xfa, 0x12, 0x3e, 0x89, 0x0f, 0x69, 0xa6, 0x65,
0xd7, 0x18, 0xae, 0xfa, 0xfd, 0x9d, 0xd3, 0x73, 0x26, 0x07, 0x5e, 0x07, 0xe2, 0xf1, 0x51, 0xec,
0xf4, 0xf2, 0xd3, 0xdd, 0x1f, 0x44, 0x2a, 0x88, 0x52, 0xb2, 0xdb, 0x
|
XXX_DiscardUnknown
|
identifier_name
|
common.pb.go
|
LocationSource_GPS LocationSource = 1
// Manually configured.
LocationSource_CONFIG LocationSource = 2
// Geo resolver (TDOA).
LocationSource_GEO_RESOLVER_TDOA LocationSource = 3
// Geo resolver (RSSI).
LocationSource_GEO_RESOLVER_RSSI LocationSource = 4
// Geo resolver (GNSS).
LocationSource_GEO_RESOLVER_GNSS LocationSource = 5
// Geo resolver (WIFI).
LocationSource_GEO_RESOLVER_WIFI LocationSource = 6
)
var LocationSource_name = map[int32]string{
0: "UNKNOWN",
1: "GPS",
2: "CONFIG",
3: "GEO_RESOLVER_TDOA",
4: "GEO_RESOLVER_RSSI",
5: "GEO_RESOLVER_GNSS",
6: "GEO_RESOLVER_WIFI",
}
var LocationSource_value = map[string]int32{
"UNKNOWN": 0,
"GPS": 1,
"CONFIG": 2,
"GEO_RESOLVER_TDOA": 3,
"GEO_RESOLVER_RSSI": 4,
"GEO_RESOLVER_GNSS": 5,
"GEO_RESOLVER_WIFI": 6,
}
func (x LocationSource) String() string {
return proto.EnumName(LocationSource_name, int32(x))
}
func (LocationSource) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{2}
}
type KeyEnvelope struct {
// KEK label.
KekLabel string `protobuf:"bytes,1,opt,name=kek_label,json=kekLabel,proto3" json:"kek_label,omitempty"`
// AES key (when the kek_label is set, this key is encrypted using a key
// known to the join-server and application-server).
// For more information please refer to the LoRaWAN Backend Interface
|
XXX_sizecache int32 `json:"-"`
}
func (m *KeyEnvelope) Reset() { *m = KeyEnvelope{} }
func (m *KeyEnvelope) String() string { return proto.CompactTextString(m) }
func (*KeyEnvelope) ProtoMessage() {}
func (*KeyEnvelope) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{0}
}
func (m *KeyEnvelope) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeyEnvelope.Unmarshal(m, b)
}
func (m *KeyEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeyEnvelope.Marshal(b, m, deterministic)
}
func (m *KeyEnvelope) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyEnvelope.Merge(m, src)
}
func (m *KeyEnvelope) XXX_Size() int {
return xxx_messageInfo_KeyEnvelope.Size(m)
}
func (m *KeyEnvelope) XXX_DiscardUnknown() {
xxx_messageInfo_KeyEnvelope.DiscardUnknown(m)
}
var xxx_messageInfo_KeyEnvelope proto.InternalMessageInfo
func (m *KeyEnvelope) GetKekLabel() string {
if m != nil {
return m.KekLabel
}
return ""
}
func (m *KeyEnvelope) GetAesKey() []byte {
if m != nil {
return m.AesKey
}
return nil
}
type Location struct {
// Latitude.
Latitude float64 `protobuf:"fixed64,1,opt,name=latitude,proto3" json:"latitude,omitempty"`
// Longitude.
Longitude float64 `protobuf:"fixed64,2,opt,name=longitude,proto3" json:"longitude,omitempty"`
// Altitude.
Altitude float64 `protobuf:"fixed64,3,opt,name=altitude,proto3" json:"altitude,omitempty"`
// Location source.
Source LocationSource `protobuf:"varint,4,opt,name=source,proto3,enum=common.LocationSource" json:"source,omitempty"`
// Accuracy (in meters).
Accuracy uint32 `protobuf:"varint,5,opt,name=accuracy,proto3" json:"accuracy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) {
return fileDescriptor_8f954d82c0b891f6, []int{1}
}
func (m *Location) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Location.Unmarshal(m, b)
}
func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Location.Marshal(b, m, deterministic)
}
func (m *Location) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location.Merge(m, src)
}
func (m *Location) XXX_Size() int {
return xxx_messageInfo_Location.Size(m)
}
func (m *Location) XXX_DiscardUnknown() {
xxx_messageInfo_Location.DiscardUnknown(m)
}
var xxx_messageInfo_Location proto.InternalMessageInfo
func (m *Location) GetLatitude() float64 {
if m != nil {
return m.Latitude
}
return 0
}
func (m *Location) GetLongitude() float64 {
if m != nil {
return m.Longitude
}
return 0
}
func (m *Location) GetAltitude() float64 {
if m != nil {
return m.Altitude
}
return 0
}
func (m *Location) GetSource() LocationSource {
if m != nil {
return m.Source
}
return LocationSource_UNKNOWN
}
func (m *Location) GetAccuracy() uint32 {
if m != nil {
return m.Accuracy
}
return 0
}
func init() {
proto.RegisterEnum("common.Modulation", Modulation_name, Modulation_value)
proto.RegisterEnum("common.Region", Region_name, Region_value)
proto.RegisterEnum("common.LocationSource", LocationSource_name, LocationSource_value)
proto.RegisterType((*KeyEnvelope)(nil), "common.KeyEnvelope")
proto.RegisterType((*Location)(nil), "common.Location")
}
func init() {
proto.RegisterFile("common/common.proto", fileDescriptor_8f954d82c0b891f6)
}
var fileDescriptor_8f954d82c0b891f6 = []byte{
// 440 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xdd, 0x8e, 0xd2, 0x40,
0x14, 0xde, 0xe1, 0xa7, 0xb4, 0x67, 0x75, 0x33, 0x8e, 0x51, 0x89, 0x9a, 0x48, 0xf6, 0x8a, 0x90,
0x48, 0x81, 0xb2, 0xfc, 0x5c, 0x22, 0x16, 0xd2, 0x14, 0x5b, 0x33, 0x63, 0xdd, 0xc4, 0x1b, 0x52,
0xba, 0x93, 0xd2, 0xb4, 0xcb, 0x10, 0x68, 0x49, 0xfa, 0x12, 0x3e, 0x89, 0x0f, 0x69, 0xa6, 0x65,
0xd7, 0x18, 0xae, 0xfa, 0xfd, 0x9d, 0xd3, 0x73, 0x26, 0x07, 0x5e, 0x07, 0xe2, 0xf1, 0x51, 0xec,
0xf4, 0xf2, 0xd3, 0xdd, 0x1f, 0x44, 0x2a, 0x88, 0x52, 0xb2, 0xdb, 0x
|
// 'Key Transport Security' section.
AesKey []byte `protobuf:"bytes,2,opt,name=aes_key,json=aesKey,proto3" json:"aes_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
|
random_line_split
|
lib.rs
|
256::from(self.denominator)).as_u128()
}
}
#[derive(BorshDeserialize, BorshSerialize)]
pub struct WinnerInfo {
pub user: AccountId, // winner
pub amount: Balance, // win prize
pub height: BlockHeight,
pub ts: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableWinnerInfo {
pub user: AccountId,
pub amount: U128,
pub height: U64,
pub ts: U64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableContractInfo {
pub owner: AccountId,
pub jack_pod: U128,
pub owner_pod: U128,
pub dice_number: u8,
pub rolling_fee: U128,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableDiceResult {
pub user: AccountId,
pub user_guess: u8,
pub dice_point: u8,
pub reward_amount: U128,
pub jackpod_left: U128,
pub height: U64,
pub ts: U64,
}
// Structs in Rust are similar to other languages, and may include impl keyword as shown below
// Note: the names of the structs are not important when calling the smart contract, but the function names are
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct NearDice {
pub owner_id: AccountId,
pub dice_number: u8,
pub rolling_fee: Balance, // how many NEAR needed to roll once.
pub jack_pod: Balance, // half of it would be shown to the user as the jack_pod amount
pub owner_pod: Balance, // income of the contract, can be withdrawn by the owner
pub reward_fee_fraction: RewardFeeFraction,
pub win_history: Vector<WinnerInfo>,
pub accounts: LookupMap<AccountId, Balance>, // record user deposit to buy dice
}
impl Default for NearDice {
fn default() -> Self {
env::panic(b"dice contract should be initialized before usage")
}
}
#[near_bindgen]
impl NearDice {
#[init]
pub fn new(
owner_id: AccountId,
dice_number: u8,
rolling_fee: U128,
reward_fee_fraction: RewardFeeFraction,
) -> Self {
assert!(!env::state_exists(), "Already initialized");
reward_fee_fraction.assert_valid();
assert!(
env::is_valid_account_id(owner_id.as_bytes()),
"The owner account ID is invalid"
);
Self {
owner_id,
dice_number,
rolling_fee: rolling_fee.into(),
jack_pod: 0_u128,
owner_pod: 0_u128,
reward_fee_fraction,
win_history: Vector::new(b"w".to_vec()),
accounts: LookupMap::new(b"a".to_vec()),
}
}
//***********************/
// owner functions
//***********************/
fn assert_owner(&self) {
assert_eq!(
env::predecessor_account_id(),
self.owner_id,
"Can only be called by the owner"
);
}
/// Withdraws `amount` from the owner pod and transfers it to the owner.
pub fn withdraw_ownerpod(&mut self, amount: U128) {
self.assert_owner();
let amount: Balance = amount.into();
assert!(
self.owner_pod >= amount,
"The owner pod has insurficent funds"
);
let account_id = env::predecessor_account_id();
self.owner_pod -= amount;
Promise::new(account_id).transfer(amount);
}
#[payable]
pub fn deposit_jackpod(&mut self) {
self.assert_owner();
let amount = env::attached_deposit();
self.jack_pod += amount;
}
/// Owner's method.
/// Updates current reward fee fraction to the new given fraction.
pub fn update_reward_fee_fraction(&mut self, reward_fee_fraction: RewardFeeFraction) {
self.assert_owner();
reward_fee_fraction.assert_valid();
self.reward_fee_fraction = reward_fee_fraction;
}
pub fn update_dice_number(&mut self, dice_number: u8) {
self.assert_owner();
self.dice_number = dice_number;
}
pub fn update_rolling_fee(&mut self, rolling_fee: U128) {
self.assert_owner();
self.rolling_fee = rolling_fee.into();
}
//***********************/
// rolling functions
//***********************/
#[payable]
pub fn buy_dice(&mut self) {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user attached enough rolling fee to buy at least one dice
let amount = env::attached_deposit();
assert!(
amount >= self.rolling_fee,
format!("You must deposit more than {}", self.rolling_fee)
);
let buy_dice_count = amount / self.rolling_fee;
let leftover = amount - buy_dice_count * self.rolling_fee;
let old_value = self.accounts.get(&account_id).unwrap_or(0);
self.accounts.insert(&account_id, &(old_value + buy_dice_count * self.rolling_fee));
// change refund
if leftover > 0 {
Promise::new(account_id).transfer(leftover);
}
}
/// Rolling dice:
/// consumes one pre-bought dice (one rolling_fee) from the caller's balance,
/// adds that rolling_fee NEAR to the jackpod and draws a random number in [1, self.dice_number * 6];
/// if it is identical to the target, the jackpod is halved and the caller wins that half (minus a fee tip to the owner_pod)
pub fn
|
(&mut self, target: u8) -> HumanReadableDiceResult {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user has at least one dice remain
let balance = self.accounts.get(&account_id).unwrap_or(0);
assert!(
balance / self.rolling_fee >= 1,
"You must at least have one dice to play"
);
// update account dice
let leftover = balance - self.rolling_fee;
if leftover == 0 {
self.accounts.remove(&account_id);
} else {
self.accounts.insert(&account_id, &leftover);
}
// always update jack_pod before rolling dice
self.jack_pod += self.rolling_fee;
// rolling dice here
let random_u8: u8 = env::random_seed().iter().fold(0_u8, |acc, x| acc.wrapping_add(*x));
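// random_u8 is a single byte in [0, 255] obtained by folding the random seed; scaling it by
// dice_number * 6 / 256 and adding 1 below maps it onto the range [1, dice_number * 6].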
let dice_point = self.dice_number as u16 * 6_u16 * random_u8 as u16 / 0x100_u16 + 1;
let mut result = HumanReadableDiceResult {
user: account_id.clone(),
user_guess: target,
dice_point: dice_point as u8,
reward_amount: 0.into(), // if win, need update
jackpod_left: self.jack_pod.into(), // if win, need update
height: env::block_index().into(),
ts: env::block_timestamp().into(),
};
// let's see how lucky caller is this time
if target == dice_point as u8 { // Wow, he wins
// figure out gross reward and update jack pod
let gross_reward = self.jack_pod / 2;
self.jack_pod -= gross_reward;
// split gross to net and owner fee
let owners_fee = self.reward_fee_fraction.multiply(gross_reward);
result.reward_amount = (gross_reward - owners_fee).into();
result.jackpod_left = self.jack_pod.into();
// update owner pod
self.owner_pod += owners_fee;
// records this winning
self.win_history.push(&WinnerInfo {
user: account_id.clone(),
amount: gross_reward - owners_fee,
height: env::block_index(),
ts: env::block_timestamp(),
});
}
result
}
pub fn set_greeting(&mut self, message: String) {
let account_id = env::signer_account_id();
// Use env::log to record logs permanently to the blockchain!
env::log(format!("Saving greeting '{}' for account '{}'", message, account_id,).as_bytes());
}
//***********************/
// view functions
//***********************/
fn get_hr_info(&self, index: u64) -> HumanReadableWinnerInfo {
let info = self.win_history.get(index).expect("Error: no such item in winner history!");
HumanReadableWinnerInfo {
user: info.user.clone(),
amount: info.amount.into(),
height: info.height.into(),
ts: info.ts.into(),
}
}
/// Returns the list of winner info in LIFO order
pub fn get_win_history(&self, from_index: u64, limit: u
|
roll_dice
|
identifier_name
|
lib.rs
|
256::from(self.denominator)).as_u128()
}
}
#[derive(BorshDeserialize, BorshSerialize)]
pub struct WinnerInfo {
pub user: AccountId, // winner
pub amount: Balance, // win prize
pub height: BlockHeight,
pub ts: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableWinnerInfo {
pub user: AccountId,
pub amount: U128,
pub height: U64,
pub ts: U64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableContractInfo {
pub owner: AccountId,
pub jack_pod: U128,
pub owner_pod: U128,
pub dice_number: u8,
pub rolling_fee: U128,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableDiceResult {
pub user: AccountId,
pub user_guess: u8,
pub dice_point: u8,
pub reward_amount: U128,
pub jackpod_left: U128,
pub height: U64,
pub ts: U64,
}
// Structs in Rust are similar to other languages, and may include impl keyword as shown below
// Note: the names of the structs are not important when calling the smart contract, but the function names are
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct NearDice {
pub owner_id: AccountId,
pub dice_number: u8,
pub rolling_fee: Balance, // how many NEAR needed to roll once.
pub jack_pod: Balance, // half of it would be shown to the user as the jack_pod amount
pub owner_pod: Balance, // income of the contract, can be withdrawn by the owner
pub reward_fee_fraction: RewardFeeFraction,
pub win_history: Vector<WinnerInfo>,
pub accounts: LookupMap<AccountId, Balance>, // record user deposit to buy dice
}
impl Default for NearDice {
fn default() -> Self {
env::panic(b"dice contract should be initialized before usage")
}
}
#[near_bindgen]
impl NearDice {
#[init]
pub fn new(
owner_id: AccountId,
dice_number: u8,
rolling_fee: U128,
reward_fee_fraction: RewardFeeFraction,
) -> Self {
assert!(!env::state_exists(), "Already initialized");
reward_fee_fraction.assert_valid();
assert!(
env::is_valid_account_id(owner_id.as_bytes()),
"The owner account ID is invalid"
);
Self {
owner_id,
dice_number,
rolling_fee: rolling_fee.into(),
jack_pod: 0_u128,
owner_pod: 0_u128,
reward_fee_fraction,
win_history: Vector::new(b"w".to_vec()),
accounts: LookupMap::new(b"a".to_vec()),
}
}
//***********************/
// owner functions
//***********************/
fn assert_owner(&self) {
assert_eq!(
env::predecessor_account_id(),
self.owner_id,
"Can only be called by the owner"
);
}
/// Withdraws `amount` from the owner pod and transfers it to the owner.
pub fn withdraw_ownerpod(&mut self, amount: U128) {
self.assert_owner();
let amount: Balance = amount.into();
assert!(
self.owner_pod >= amount,
"The owner pod has insurficent funds"
);
let account_id = env::predecessor_account_id();
self.owner_pod -= amount;
Promise::new(account_id).transfer(amount);
}
#[payable]
pub fn deposit_jackpod(&mut self) {
self.assert_owner();
let amount = env::attached_deposit();
self.jack_pod += amount;
}
/// Owner's method.
/// Updates current reward fee fraction to the new given fraction.
pub fn update_reward_fee_fraction(&mut self, reward_fee_fraction: RewardFeeFraction) {
self.assert_owner();
reward_fee_fraction.assert_valid();
self.reward_fee_fraction = reward_fee_fraction;
}
pub fn update_dice_number(&mut self, dice_number: u8) {
self.assert_owner();
self.dice_number = dice_number;
}
pub fn update_rolling_fee(&mut self, rolling_fee: U128) {
self.assert_owner();
self.rolling_fee = rolling_fee.into();
}
//***********************/
// rolling functions
//***********************/
#[payable]
pub fn buy_dice(&mut self) {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user attached enough rolling fee to buy at least one dice
let amount = env::attached_deposit();
assert!(
amount >= self.rolling_fee,
"You must deposit at least {}",
self.rolling_fee
);
let buy_dice_count = amount / self.rolling_fee;
let leftover = amount - buy_dice_count * self.rolling_fee;
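// e.g. with rolling_fee = 1 NEAR and an attached deposit of 2.5 NEAR,
// buy_dice_count = 2 and the 0.5 NEAR leftover is refunded below.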
let old_value = self.accounts.get(&account_id).unwrap_or(0);
self.accounts.insert(&account_id, &(old_value + buy_dice_count * self.rolling_fee));
// change refund
if leftover > 0 {
Promise::new(account_id).transfer(leftover);
}
}
/// Rolls the dice.
/// Requires the caller to hold at least one pre-purchased dice (one rolling_fee worth of balance),
/// consumes that dice, adds rolling_fee NEAR to the jackpod and draws a random point in [1, self.dice_number * 6].
/// If the point equals the target, half of the jackpod is deducted and recorded as the caller's reward,
/// with a tip taken from it for the owner_pod.
pub fn roll_dice(&mut self, target: u8) -> HumanReadableDiceResult {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check the user has at least one dice remaining
let balance = self.accounts.get(&account_id).unwrap_or(0);
assert!(
balance / self.rolling_fee >= 1,
"You must at least have one dice to play"
);
// update account dice
let leftover = balance - self.rolling_fee;
if leftover == 0
|
else {
self.accounts.insert(&account_id, &leftover);
}
// always update jack_pod before rolling dice
self.jack_pod += self.rolling_fee;
// rolling dice here
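// env::random_seed() returns the protocol-provided random seed bytes; folding them with
// wrapping_add compresses them into a single byte in [0, 255], which the next line
// scales into a dice point in [1, self.dice_number * 6] (e.g. [1, 6] for a single dice).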
let random_u8: u8 = env::random_seed().iter().fold(0_u8, |acc, x| acc.wrapping_add(*x));
let dice_point = self.dice_number as u16 * 6_u16 * random_u8 as u16 / 0x100_u16 + 1;
let mut result = HumanReadableDiceResult {
user: account_id.clone(),
user_guess: target,
dice_point: dice_point as u8,
reward_amount: 0.into(), // if win, need update
jackpod_left: self.jack_pod.into(), // if win, need update
height: env::block_index().into(),
ts: env::block_timestamp().into(),
};
// let's see how lucky caller is this time
if target == dice_point as u8 { // Wow, he wins
// figure out gross reward and update jack pod
let gross_reward = self.jack_pod / 2;
self.jack_pod -= gross_reward;
// split gross to net and owner fee
let owners_fee = self.reward_fee_fraction.multiply(gross_reward);
result.reward_amount = (gross_reward - owners_fee).into();
result.jackpod_left = self.jack_pod.into();
// update owner pod
self.owner_pod += owners_fee;
// records this winning
self.win_history.push(&WinnerInfo {
user: account_id.clone(),
amount: gross_reward - owners_fee,
height: env::block_index(),
ts: env::block_timestamp(),
});
}
result
}
pub fn set_greeting(&mut self, message: String) {
let account_id = env::signer_account_id();
// Use env::log to record logs permanently to the blockchain!
env::log(format!("Saving greeting '{}' for account '{}'", message, account_id,).as_bytes());
}
//***********************/
// view functions
//***********************/
fn get_hr_info(&self, index: u64) -> HumanReadableWinnerInfo {
let info = self.win_history.get(index).expect("Error: no such item in winner history!");
HumanReadableWinnerInfo {
user: info.user.clone(),
amount: info.amount.into(),
height: info.height.into(),
ts: info.ts.into(),
}
}
/// Returns the list of winner info in LIFO order
pub fn get_win_history(&self, from_index: u64, limit: u
|
{
self.accounts.remove(&account_id);
}
|
conditional_block
|
lib.rs
|
256::from(self.denominator)).as_u128()
}
}
#[derive(BorshDeserialize, BorshSerialize)]
pub struct WinnerInfo {
pub user: AccountId, // winner
pub amount: Balance, // win prize
pub height: BlockHeight,
pub ts: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableWinnerInfo {
pub user: AccountId,
pub amount: U128,
pub height: U64,
pub ts: U64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableContractInfo {
pub owner: AccountId,
pub jack_pod: U128,
pub owner_pod: U128,
pub dice_number: u8,
pub rolling_fee: U128,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableDiceResult {
pub user: AccountId,
pub user_guess: u8,
pub dice_point: u8,
pub reward_amount: U128,
pub jackpod_left: U128,
pub height: U64,
pub ts: U64,
}
// Structs in Rust are similar to other languages, and may include impl keyword as shown below
// Note: the names of the structs are not important when calling the smart contract, but the function names are
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct NearDice {
pub owner_id: AccountId,
pub dice_number: u8,
pub rolling_fee: Balance, // how much NEAR is needed to roll once.
pub jack_pod: Balance, // half of it would be shown to the user as the jack_pod amount
pub owner_pod: Balance, // income of the contract, can be withdrawn by the owner
pub reward_fee_fraction: RewardFeeFraction,
pub win_history: Vector<WinnerInfo>,
pub accounts: LookupMap<AccountId, Balance>, // record user deposit to buy dice
}
impl Default for NearDice {
fn default() -> Self {
env::panic(b"dice contract should be initialized before usage")
}
}
#[near_bindgen]
impl NearDice {
#[init]
pub fn new(
owner_id: AccountId,
dice_number: u8,
rolling_fee: U128,
reward_fee_fraction: RewardFeeFraction,
) -> Self {
assert!(!env::state_exists(), "Already initialized");
reward_fee_fraction.assert_valid();
assert!(
env::is_valid_account_id(owner_id.as_bytes()),
"The owner account ID is invalid"
);
Self {
owner_id,
dice_number,
rolling_fee: rolling_fee.into(),
jack_pod: 0_u128,
owner_pod: 0_u128,
reward_fee_fraction,
win_history: Vector::new(b"w".to_vec()),
accounts: LookupMap::new(b"a".to_vec()),
}
}
//***********************/
// owner functions
//***********************/
fn assert_owner(&self) {
assert_eq!(
env::predecessor_account_id(),
self.owner_id,
"Can only be called by the owner"
);
}
/// Owner's method. Withdraws `amount` from the owner pod and transfers it to the owner.
pub fn withdraw_ownerpod(&mut self, amount: U128) {
self.assert_owner();
let amount: Balance = amount.into();
assert!(
self.owner_pod >= amount,
"The owner pod has insurficent funds"
);
let account_id = env::predecessor_account_id();
self.owner_pod -= amount;
Promise::new(account_id).transfer(amount);
}
#[payable]
pub fn deposit_jackpod(&mut self) {
self.assert_owner();
let amount = env::attached_deposit();
self.jack_pod += amount;
}
/// Owner's method.
/// Updates current reward fee fraction to the new given fraction.
pub fn update_reward_fee_fraction(&mut self, reward_fee_fraction: RewardFeeFraction) {
self.assert_owner();
reward_fee_fraction.assert_valid();
self.reward_fee_fraction = reward_fee_fraction;
}
pub fn update_dice_number(&mut self, dice_number: u8) {
self.assert_owner();
self.dice_number = dice_number;
}
pub fn update_rolling_fee(&mut self, rolling_fee: U128) {
self.assert_owner();
self.rolling_fee = rolling_fee.into();
}
//***********************/
// rolling functions
//***********************/
#[payable]
pub fn buy_dice(&mut self) {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user attached enough rolling fee to buy at least one dice
let amount = env::attached_deposit();
assert!(
amount >= self.rolling_fee,
"You must deposit at least {}",
self.rolling_fee
);
let buy_dice_count = amount / self.rolling_fee;
let leftover = amount - buy_dice_count * self.rolling_fee;
let old_value = self.accounts.get(&account_id).unwrap_or(0);
self.accounts.insert(&account_id, &(old_value + buy_dice_count * self.rolling_fee));
// change refund
if leftover > 0 {
Promise::new(account_id).transfer(leftover);
}
}
/// Rolls the dice.
/// Requires the caller to hold at least one pre-purchased dice (one rolling_fee worth of balance),
/// consumes that dice, adds rolling_fee NEAR to the jackpod and draws a random point in [1, self.dice_number * 6].
/// If the point equals the target, half of the jackpod is deducted and recorded as the caller's reward,
/// with a tip taken from it for the owner_pod.
pub fn roll_dice(&mut self, target: u8) -> HumanReadableDiceResult {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check the user has at least one dice remaining
let balance = self.accounts.get(&account_id).unwrap_or(0);
assert!(
balance / self.rolling_fee >= 1,
"You must at least have one dice to play"
);
// update account dice
let leftover = balance - self.rolling_fee;
if leftover == 0 {
self.accounts.remove(&account_id);
} else {
self.accounts.insert(&account_id, &leftover);
}
// always update jack_pod before rolling dice
self.jack_pod += self.rolling_fee;
// rolling dice here
let random_u8: u8 = env::random_seed().iter().fold(0_u8, |acc, x| acc.wrapping_add(*x));
let dice_point = self.dice_number as u16 * 6_u16 * random_u8 as u16 / 0x100_u16 + 1;
let mut result = HumanReadableDiceResult {
user: account_id.clone(),
user_guess: target,
dice_point: dice_point as u8,
reward_amount: 0.into(), // if win, need update
jackpod_left: self.jack_pod.into(), // if win, need update
height: env::block_index().into(),
ts: env::block_timestamp().into(),
};
// let's see how lucky caller is this time
if target == dice_point as u8 { // Wow, he wins
// figure out gross reward and update jack pod
let gross_reward = self.jack_pod / 2;
self.jack_pod -= gross_reward;
// split gross to net and owner fee
let owners_fee = self.reward_fee_fraction.multiply(gross_reward);
result.reward_amount = (gross_reward - owners_fee).into();
result.jackpod_left = self.jack_pod.into();
// update owner pod
self.owner_pod += owners_fee;
// records this winning
self.win_history.push(&WinnerInfo {
user: account_id.clone(),
amount: gross_reward - owners_fee,
height: env::block_index(),
ts: env::block_timestamp(),
});
}
result
}
pub fn set_greeting(&mut self, message: String)
|
//***********************/
// view functions
//***********************/
fn get_hr_info(&self, index: u64) -> HumanReadableWinnerInfo {
let info = self.win_history.get(index).expect("Error: no such item in winner history!");
HumanReadableWinnerInfo {
user: info.user.clone(),
amount: info.amount.into(),
height: info.height.into(),
ts: info.ts.into(),
}
}
/// Returns the list of winner info in LIFO order
pub fn get_win_history(&self, from_index: u64, limit:
|
{
let account_id = env::signer_account_id();
// Use env::log to record logs permanently to the blockchain!
env::log(format!("Saving greeting '{}' for account '{}'", message, account_id,).as_bytes());
}
|
identifier_body
|
lib.rs
|
256::from(self.denominator)).as_u128()
}
}
#[derive(BorshDeserialize, BorshSerialize)]
pub struct WinnerInfo {
pub user: AccountId, // winner
pub amount: Balance, // win prize
pub height: BlockHeight,
pub ts: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableWinnerInfo {
pub user: AccountId,
pub amount: U128,
pub height: U64,
pub ts: U64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableContractInfo {
pub owner: AccountId,
pub jack_pod: U128,
pub owner_pod: U128,
pub dice_number: u8,
pub rolling_fee: U128,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableDiceResult {
pub user: AccountId,
pub user_guess: u8,
pub dice_point: u8,
pub reward_amount: U128,
pub jackpod_left: U128,
pub height: U64,
pub ts: U64,
}
// Structs in Rust are similar to other languages, and may include impl keyword as shown below
// Note: the names of the structs are not important when calling the smart contract, but the function names are
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct NearDice {
pub owner_id: AccountId,
pub dice_number: u8,
pub rolling_fee: Balance, // how much NEAR is needed to roll once.
pub jack_pod: Balance, // half of it would be shown to the user as the jack_pod amount
pub owner_pod: Balance, // income of the contract, can be withdrawn by the owner
pub reward_fee_fraction: RewardFeeFraction,
pub win_history: Vector<WinnerInfo>,
pub accounts: LookupMap<AccountId, Balance>, // record user deposit to buy dice
}
impl Default for NearDice {
fn default() -> Self {
env::panic(b"dice contract should be initialized before usage")
}
}
#[near_bindgen]
impl NearDice {
#[init]
pub fn new(
owner_id: AccountId,
dice_number: u8,
rolling_fee: U128,
reward_fee_fraction: RewardFeeFraction,
) -> Self {
assert!(!env::state_exists(), "Already initialized");
reward_fee_fraction.assert_valid();
assert!(
env::is_valid_account_id(owner_id.as_bytes()),
"The owner account ID is invalid"
);
Self {
owner_id,
dice_number,
rolling_fee: rolling_fee.into(),
jack_pod: 0_u128,
owner_pod: 0_u128,
reward_fee_fraction,
win_history: Vector::new(b"w".to_vec()),
accounts: LookupMap::new(b"a".to_vec()),
}
}
//***********************/
// owner functions
//***********************/
fn assert_owner(&self) {
assert_eq!(
env::predecessor_account_id(),
self.owner_id,
"Can only be called by the owner"
);
}
/// Owner's method. Withdraws `amount` from the owner pod and transfers it to the owner.
pub fn withdraw_ownerpod(&mut self, amount: U128) {
self.assert_owner();
let amount: Balance = amount.into();
assert!(
self.owner_pod >= amount,
"The owner pod has insurficent funds"
);
let account_id = env::predecessor_account_id();
self.owner_pod -= amount;
Promise::new(account_id).transfer(amount);
}
#[payable]
pub fn deposit_jackpod(&mut self) {
self.assert_owner();
let amount = env::attached_deposit();
self.jack_pod += amount;
}
/// Owner's method.
/// Updates current reward fee fraction to the new given fraction.
pub fn update_reward_fee_fraction(&mut self, reward_fee_fraction: RewardFeeFraction) {
self.assert_owner();
reward_fee_fraction.assert_valid();
self.reward_fee_fraction = reward_fee_fraction;
}
pub fn update_dice_number(&mut self, dice_number: u8) {
self.assert_owner();
self.dice_number = dice_number;
}
pub fn update_rolling_fee(&mut self, rolling_fee: U128) {
self.assert_owner();
self.rolling_fee = rolling_fee.into();
}
//***********************/
// rolling functions
//***********************/
#[payable]
pub fn buy_dice(&mut self) {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user attached enough rolling fee to buy at least one dice
let amount = env::attached_deposit();
assert!(
amount >= self.rolling_fee,
"You must deposit at least {}",
self.rolling_fee
);
let buy_dice_count = amount / self.rolling_fee;
let leftover = amount - buy_dice_count * self.rolling_fee;
let old_value = self.accounts.get(&account_id).unwrap_or(0);
self.accounts.insert(&account_id, &(old_value + buy_dice_count * self.rolling_fee));
// change refund
if leftover > 0 {
Promise::new(account_id).transfer(leftover);
}
}
/// Rolls the dice.
/// Requires the caller to hold at least one pre-purchased dice (one rolling_fee worth of balance),
/// consumes that dice, adds rolling_fee NEAR to the jackpod and draws a random point in [1, self.dice_number * 6].
/// If the point equals the target, half of the jackpod is deducted and recorded as the caller's reward,
/// with a tip taken from it for the owner_pod.
pub fn roll_dice(&mut self, target: u8) -> HumanReadableDiceResult {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check the user has at least one dice remaining
let balance = self.accounts.get(&account_id).unwrap_or(0);
assert!(
balance / self.rolling_fee >= 1,
"You must at least have one dice to play"
);
// update account dice
let leftover = balance - self.rolling_fee;
|
// always update jack_pod before rolling dice
self.jack_pod += self.rolling_fee;
// rolling dice here
let random_u8: u8 = env::random_seed().iter().fold(0_u8, |acc, x| acc.wrapping_add(*x));
let dice_point = self.dice_number as u16 * 6_u16 * random_u8 as u16 / 0x100_u16 + 1;
let mut result = HumanReadableDiceResult {
user: account_id.clone(),
user_guess: target,
dice_point: dice_point as u8,
reward_amount: 0.into(), // if win, need update
jackpod_left: self.jack_pod.into(), // if win, need update
height: env::block_index().into(),
ts: env::block_timestamp().into(),
};
// let's see how lucky caller is this time
if target == dice_point as u8 { // Wow, he wins
// figure out gross reward and update jack pod
let gross_reward = self.jack_pod / 2;
self.jack_pod -= gross_reward;
// split gross to net and owner fee
let owners_fee = self.reward_fee_fraction.multiply(gross_reward);
result.reward_amount = (gross_reward - owners_fee).into();
result.jackpod_left = self.jack_pod.into();
// update owner pod
self.owner_pod += owners_fee;
// records this winning
self.win_history.push(&WinnerInfo {
user: account_id.clone(),
amount: gross_reward - owners_fee,
height: env::block_index(),
ts: env::block_timestamp(),
});
}
result
}
pub fn set_greeting(&mut self, message: String) {
let account_id = env::signer_account_id();
// Use env::log to record logs permanently to the blockchain!
env::log(format!("Saving greeting '{}' for account '{}'", message, account_id,).as_bytes());
}
//***********************/
// view functions
//***********************/
fn get_hr_info(&self, index: u64) -> HumanReadableWinnerInfo {
let info = self.win_history.get(index).expect("Error: no such item in winner history!");
HumanReadableWinnerInfo {
user: info.user.clone(),
amount: info.amount.into(),
height: info.height.into(),
ts: info.ts.into(),
}
}
/// Returns the list of winner info in LIFO order
pub fn get_win_history(&self, from_index: u64, limit: u6
|
if leftover == 0 {
self.accounts.remove(&account_id);
} else {
self.accounts.insert(&account_id, &leftover);
}
|
random_line_split
|
lambda_function.py
|
(soup, jobs, rows):
for div in rows:
for a in div.find_all(name='a', attrs={'data-tn-element': 'jobTitle'}):
jobs.append(a['title'])
return (jobs)
def extract_company(soup, companies, rows):
for div in rows:
company = div.find_all(name='span', attrs={'class': 'company'})
if len(company) > 0:
for b in company:
companies.append(b.text.strip())
else:
sec_try = div.find_all(name='span', attrs={'class': 'result-link-source'})
for span in sec_try:
companies.append(span.text.strip())
return (companies)
def extract_location(soup, locations, rows):
for div in rows:
try:
location_div = div.find(name='div', attrs={'class': 'recJobLoc'})
locations.append(location_div['data-rc-loc'])
except:
locations.append(np.nan)
return (locations)
def extract_salary(soup, salaries, rows):
for div in rows:
try:
salaries.append(div.find('nobr').text)
except:
try:
div_two = div.find(name='div', attrs={'class': 'salarySnippet'})
div_three = div_two.find('span')
salaries.append(div_three.text.strip())
except:
salaries.append(np.nan)
return (salaries)
def extract_description(soup, description, rows):
spans = soup.findAll('div', attrs={'class': 'summary'})
for span in spans:
description.append(span.text.strip())
# print(span.text.strip())
return (description)
# Extracting Job Title, Company Details, Location, Salary and Job Description for Analysis
def indeed_scrape():
jobs = []
companies = []
locations = []
salaries = []
description = []
# Scrape roughly 100 result pages: Indeed paginates results in steps of 10,
# so the start values below run 0, 10, ..., 1000 (hence max_results = 1001).
max_results = 1001
for start_val in range(0, max_results, 10):
# url of indeed web page with job title filter set to data scientist.
page = requests.get('https://www.indeed.com/jobs?q=Data+Scientist&start={}'.format(start_val))
# ensuring at least 1 second between page extracts.
time.sleep(1)
soup = BeautifulSoup(page.text, 'html.parser')
# Extract div class which contains the information about a single job.
rows = soup.find_all(name='div', attrs={'class': 'row'})
job_title = extract_job_title(soup, jobs, rows)
company_name = extract_company(soup, companies, rows)
location = extract_location(soup, locations, rows)
salaries = extract_salary(soup, salaries, rows)
description = extract_description(soup, description, rows)
# Create a dataframe from scraped data.
indeed_df = pd.DataFrame(
{'company_name': company_name, 'job_title': job_title, 'location': location, 'salaries': salaries,
'description': description})
return indeed_df
def preprocess(indeed_data):
# Indeed pages may contain multiple postings of the same job. To ensure a single entry per job,
# duplicates are dropped when a job with the same location, job title, company name and description is already present.
indeed_data = indeed_data.drop_duplicates(subset=['location', 'job_title', 'company_name', 'description'],
keep='last').reset_index()
# Extract the state from location column.
indeed_data['state'] = np.nan
for i in range(len(indeed_data['state'])):
try:
indeed_data.loc[i, 'state'] = indeed_data.loc[i, 'location'].split(',')[1]
except:
pass
# Group data by state and count the number of jobs available per state.
no_of_jobs = indeed_data.groupby(['state'])['company_name'].count().reset_index().sort_values(['company_name'])
# Extract the available max and min salary boundaries for every job posting
indeed_data['min_salary'] = indeed_data['salaries'].str.split('-').str[0].str.split().str[0].str[1:]
indeed_data['max_salary'] = indeed_data['salaries'].str.split('-').str[1].str.split().str[0].str[1:]
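# For illustration, assuming a salary string like "$80,000 - $120,000 a year", the two lines above
# yield min_salary "80,000" and max_salary "120,000"; the commas are stripped just below.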
for i in range(len(indeed_data['min_salary'])):
if indeed_data.loc[i, 'min_salary'] is not np.NaN:
indeed_data.loc[i, 'min_salary'] = str(indeed_data.loc[i, 'min_salary']).replace(',', '')
indeed_data.loc[i, 'max_salary'] = str(indeed_data.loc[i, 'max_salary']).replace(',', '')
# Check the salary unit (example: hourly salary/yearly) and convert the available salary to Yearly amount.
indeed_data['min_salary'] = indeed_data['min_salary'].str.replace('(Indeed est.)', '', regex=False)
indeed_data["Suffix"] = indeed_data["salaries"].str.split().str[-1]
indeed_data['min_salary'] = indeed_data['min_salary'].astype('float')
indeed_data['max_salary'] = indeed_data['max_salary'].astype('float')
indeed_data['mean_salary'] = np.nan
for i in range(len(indeed_data['min_salary'])):
if (indeed_data.loc[i, 'Suffix'] == 'hour'):
# Assume a full-time employee working 40 hours/week; 1 year = 52.1429 weeks.
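# e.g. an hourly figure of $50 becomes roughly 50 * 40 * 52.1429 ≈ $104,286 per year.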
indeed_data.loc[i, 'min_salary'] = indeed_data.loc[i, 'min_salary'] * 40 * 52.1429
indeed_data.loc[i, 'max_salary'] = indeed_data.loc[i, 'max_salary'] * 40 * 52.1429
# Calculate mean salary from minimum and maximum salary
if pd.isnull(indeed_data['min_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['max_salary'][i]
elif pd.isnull(indeed_data['max_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['min_salary'][i]
else:
indeed_data.loc[i, 'mean_salary'] = (indeed_data['min_salary'][i] + indeed_data['max_salary'][i]) / 2
# Determine the specialization such as NLP , ML, AI from job title.
indeed_data = extract_specialization(indeed_data)
return indeed_data
# Formatting of all graphs
sns.set_style("darkgrid")
sns.set(rc={'figure.figsize': (12, 8)})
def extract_specialization(job_data):
# Categorizing job titles into specialization type.
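# Note: the assignments below are applied in order, so when a title matches several patterns
# the last matching category wins (e.g. "senior machine learning engineer" ends up as
# 'Machine Learning/ AI/ Deep Learning', not 'Senior Data Scientist').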
job_data['Job_Title_Category'] = np.nan
job_data['job_title'] = job_data['job_title'].str.lower()
job_data.loc[job_data['job_title'].str.contains(
'data scientist|data science|data science & insights|data science and insights'), 'Job_Title_Category'] = 'Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'analyst|analytics|analysis'), 'Job_Title_Category'] = 'Data Analyst'
job_data.loc[job_data['job_title'].str.contains(
'intern|internship|university|graduate|coop|student|co-op'), 'Job_Title_Category'] = 'Data Science Intern/ University Graduate'
job_data.loc[job_data['job_title'].str.contains(
'jr|junior|entry level|early career'), 'Job_Title_Category'] = 'Junior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'sr|senior|phd|research'), 'Job_Title_Category'] = 'Senior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'machine learning|machine_learning|deep|ai|artificial intelligence'), 'Job_Title_Category'] = 'Machine Learning/ AI/ Deep Learning'
job_data.loc[job_data['job_title'].str.contains(
'health|biomedical|bio|bioengineer|bioinformatics|neuro'), 'Job_Title_Category'] = 'Health/ Biomedical Data Science'
job_data.loc[job_data['job_title'].str.contains(
'nlp|language'), 'Job_Title_Category'] = 'Natural Language Processing'
job_data.loc[job_data['job_title'].str.contains(
'market|quantitative|digital marketing|search|supply chain|payment|advertising'), 'Job_Title_Category'] = 'Data Science-Marketing'
job_data['Job_Title_Category'] = job_data.Job_Title_Category.replace(np.nan, 'Others', regex=True)
return job_data
def plot_mean_salary_per_state(indeed_data):
indeed_data = indeed_data[['mean_salary', 'state']].dropna()
fig, ax = plt.subplots()
sns.boxplot(x="state", y="mean_salary", data=indeed_data, ax=ax)
plt.xlabel("States")
plt.ylabel("Mean Salary")
plt.title("Mean Salary per State")
img_data = BytesIO()
plt.savefig(img_data, format='png')
img_data.seek(0)
save_plot_to_s3('indeed-analysis', 'mean_salary_per_state.png', img_data)
def plot_designation_cnt(indeed_data):
df = \
indeed_data.groupby('state').
|
extract_job_title
|
identifier_name
|
|
lambda_function.py
|
def extract_company(soup, companies, rows):
for div in rows:
company = div.find_all(name='span', attrs={'class': 'company'})
if len(company) > 0:
for b in company:
companies.append(b.text.strip())
else:
sec_try = div.find_all(name='span', attrs={'class': 'result-link-source'})
for span in sec_try:
companies.append(span.text.strip())
return (companies)
def extract_location(soup, locations, rows):
for div in rows:
try:
location_div = div.find(name='div', attrs={'class': 'recJobLoc'})
locations.append(location_div['data-rc-loc'])
except:
locations.append(np.nan)
return (locations)
def extract_salary(soup, salaries, rows):
for div in rows:
try:
salaries.append(div.find('nobr').text)
except:
try:
div_two = div.find(name='div', attrs={'class': 'salarySnippet'})
div_three = div_two.find('span')
salaries.append(div_three.text.strip())
except:
salaries.append(np.nan)
return (salaries)
def extract_description(soup, description, rows):
spans = soup.findAll('div', attrs={'class': 'summary'})
for span in spans:
description.append(span.text.strip())
# print(span.text.strip())
return (description)
# Extracting Job Title, Company Details, Location, Salary and Job Description for Analysis
def indeed_scrape():
jobs = []
companies = []
locations = []
salaries = []
description = []
# Scrape roughly 100 result pages: Indeed paginates results in steps of 10,
# so the start values below run 0, 10, ..., 1000 (hence max_results = 1001).
max_results = 1001
for start_val in range(0, max_results, 10):
# url of indeed web page with job title filter set to data scientist.
page = requests.get('https://www.indeed.com/jobs?q=Data+Scientist&start={}'.format(start_val))
# ensuring at least 1 second between page extracts.
time.sleep(1)
soup = BeautifulSoup(page.text, 'html.parser')
# Extract div class which contains the information about a single job.
rows = soup.find_all(name='div', attrs={'class': 'row'})
job_title = extract_job_title(soup, jobs, rows)
company_name = extract_company(soup, companies, rows)
location = extract_location(soup, locations, rows)
salaries = extract_salary(soup, salaries, rows)
description = extract_description(soup, description, rows)
# Create a dataframe from scraped data.
indeed_df = pd.DataFrame(
{'company_name': company_name, 'job_title': job_title, 'location': location, 'salaries': salaries,
'description': description})
return indeed_df
def preprocess(indeed_data):
# Indeed pages may contain multiple postings of the same job. To ensure a single entry per job,
# duplicates are dropped when a job with the same location, job title, company name and description is already present.
indeed_data = indeed_data.drop_duplicates(subset=['location', 'job_title', 'company_name', 'description'],
keep='last').reset_index()
# Extract the state from location column.
indeed_data['state'] = np.nan
for i in range(len(indeed_data['state'])):
try:
indeed_data.loc[i, 'state'] = indeed_data.loc[i, 'location'].split(',')[1]
except:
pass
# Group data by state and count the number of jobs available per state.
no_of_jobs = indeed_data.groupby(['state'])['company_name'].count().reset_index().sort_values(['company_name'])
# Extract the available max and min salary boundaries for every job posting
indeed_data['min_salary'] = indeed_data['salaries'].str.split('-').str[0].str.split().str[0].str[1:]
indeed_data['max_salary'] = indeed_data['salaries'].str.split('-').str[1].str.split().str[0].str[1:]
for i in range(len(indeed_data['min_salary'])):
if indeed_data.loc[i, 'min_salary'] is not np.NaN:
indeed_data.loc[i, 'min_salary'] = str(indeed_data.loc[i, 'min_salary']).replace(',', '')
indeed_data.loc[i, 'max_salary'] = str(indeed_data.loc[i, 'max_salary']).replace(',', '')
# Check the salary unit (example: hourly salary/yearly) and convert the available salary to Yearly amount.
indeed_data['min_salary'] = indeed_data['min_salary'].str.replace('(Indeed est.)', '', regex=False)
indeed_data["Suffix"] = indeed_data["salaries"].str.split().str[-1]
indeed_data['min_salary'] = indeed_data['min_salary'].astype('float')
indeed_data['max_salary'] = indeed_data['max_salary'].astype('float')
indeed_data['mean_salary'] = np.nan
for i in range(len(indeed_data['min_salary'])):
if (indeed_data.loc[i, 'Suffix'] == 'hour'):
# Assume a full-time employee working 40 hours/week; 1 year = 52.1429 weeks.
indeed_data.loc[i, 'min_salary'] = indeed_data.loc[i, 'min_salary'] * 40 * 52.1429
indeed_data.loc[i, 'max_salary'] = indeed_data.loc[i, 'max_salary'] * 40 * 52.1429
# Calculate mean salary from minimum and maximum salary
if pd.isnull(indeed_data['min_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['max_salary'][i]
elif pd.isnull(indeed_data['max_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['min_salary'][i]
else:
indeed_data.loc[i, 'mean_salary'] = (indeed_data['min_salary'][i] + indeed_data['max_salary'][i]) / 2
# Determine the specialization such as NLP , ML, AI from job title.
indeed_data = extract_specialization(indeed_data)
return indeed_data
# Formatting of all graphs
sns.set_style("darkgrid")
sns.set(rc={'figure.figsize': (12, 8)})
def extract_specialization(job_data):
# Categorizing job titles into specialization type.
job_data['Job_Title_Category'] = np.nan
job_data['job_title'] = job_data['job_title'].str.lower()
job_data.loc[job_data['job_title'].str.contains(
'data scientist|data science|data science & insights|data science and insights'), 'Job_Title_Category'] = 'Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'analyst|analytics|analysis'), 'Job_Title_Category'] = 'Data Analyst'
job_data.loc[job_data['job_title'].str.contains(
'intern|internship|university|graduate|coop|student|co-op'), 'Job_Title_Category'] = 'Data Science Intern/ University Graduate'
job_data.loc[job_data['job_title'].str.contains(
'jr|junior|entry level|early career'), 'Job_Title_Category'] = 'Junior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'sr|senior|phd|research'), 'Job_Title_Category'] = 'Senior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'machine learning|machine_learning|deep|ai|artificial intelligence'), 'Job_Title_Category'] = 'Machine Learning/ AI/ Deep Learning'
job_data.loc[job_data['job_title'].str.contains(
'health|biomedical|bio|bioengineer|bioinformatics|neuro'), 'Job_Title_Category'] = 'Health/ Biomedical Data Science'
job_data.loc[job_data['job_title'].str.contains(
'nlp|language'), 'Job_Title_Category'] = 'Natural Language Processing'
job_data.loc[job_data['job_title'].str.contains(
'market|quantitative|digital marketing|search|supply chain|payment|advertising'), 'Job_Title_Category'] = 'Data Science-Marketing'
job_data['Job_Title_Category'] = job_data.Job_Title_Category.replace(np.nan, 'Others', regex=True)
return job_data
def plot_mean_salary_per_state(indeed_data):
indeed_data = indeed_data[['mean_salary', 'state']].dropna()
fig, ax = plt.subplots()
sns.boxplot(x="state", y="mean_salary", data=indeed_data, ax=ax)
plt.xlabel("States")
plt.ylabel("Mean Salary")
plt.title("Mean Salary per State")
img_data = BytesIO()
plt.savefig(img_data, format='png')
img_data.seek(0)
save_plot_to_s3('indeed-analysis', 'mean_salary_per_state.png', img_data)
def plot_designation_cnt(indeed_data):
df = \
indeed_data.groupby('state').count().sort_values(['Job_Title_Category'], ascending
|
for div in rows:
for a in div.find_all(name='a', attrs={'data-tn-element': 'jobTitle'}):
jobs.append(a['title'])
return (jobs)
|
identifier_body
|
|
lambda_function.py
|
())
return (companies)
def extract_location(soup, locations, rows):
for div in rows:
try:
location_div = div.find(name='div', attrs={'class': 'recJobLoc'})
locations.append(location_div['data-rc-loc'])
except:
locations.append(np.nan)
return (locations)
def extract_salary(soup, salaries, rows):
for div in rows:
try:
salaries.append(div.find('nobr').text)
except:
try:
div_two = div.find(name='div', attrs={'class': 'salarySnippet'})
div_three = div_two.find('span')
salaries.append(div_three.text.strip())
except:
salaries.append(np.nan)
return (salaries)
def extract_description(soup, description, rows):
spans = soup.findAll('div', attrs={'class': 'summary'})
for span in spans:
description.append(span.text.strip())
# print(span.text.strip())
return (description)
# Extracting Job Title, Company Details, Location, Salary and Job Description for Analysis
def indeed_scrape():
jobs = []
companies = []
locations = []
salaries = []
description = []
# Scrape roughly 100 result pages: Indeed paginates results in steps of 10,
# so the start values below run 0, 10, ..., 1000 (hence max_results = 1001).
max_results = 1001
for start_val in range(0, max_results, 10):
# url of indeed web page with job title filter set to data scientist.
page = requests.get('https://www.indeed.com/jobs?q=Data+Scientist&start={}'.format(start_val))
# ensuring at least 1 second between page extracts.
time.sleep(1)
soup = BeautifulSoup(page.text, 'html.parser')
# Extract div class which contains the information about a single job.
rows = soup.find_all(name='div', attrs={'class': 'row'})
job_title = extract_job_title(soup, jobs, rows)
company_name = extract_company(soup, companies, rows)
location = extract_location(soup, locations, rows)
salaries = extract_salary(soup, salaries, rows)
description = extract_description(soup, description, rows)
# Create a dataframe from scraped data.
indeed_df = pd.DataFrame(
{'company_name': company_name, 'job_title': job_title, 'location': location, 'salaries': salaries,
'description': description})
return indeed_df
def preprocess(indeed_data):
# Indeed pages may contain multiple postings of the same job. To ensure a single entry per job,
# duplicates are dropped when a job with the same location, job title, company name and description is already present.
indeed_data = indeed_data.drop_duplicates(subset=['location', 'job_title', 'company_name', 'description'],
keep='last').reset_index()
# Extract the state from location column.
indeed_data['state'] = np.nan
for i in range(len(indeed_data['state'])):
try:
indeed_data.loc[i, 'state'] = indeed_data.loc[i, 'location'].split(',')[1]
except:
pass
# Group data by state and count the number of jobs available per state.
no_of_jobs = indeed_data.groupby(['state'])['company_name'].count().reset_index().sort_values(['company_name'])
# Extract the available max and min salary boundaries for every job posting
indeed_data['min_salary'] = indeed_data['salaries'].str.split('-').str[0].str.split().str[0].str[1:]
indeed_data['max_salary'] = indeed_data['salaries'].str.split('-').str[1].str.split().str[0].str[1:]
for i in range(len(indeed_data['min_salary'])):
if indeed_data.loc[i, 'min_salary'] is not np.NaN:
indeed_data.loc[i, 'min_salary'] = str(indeed_data.loc[i, 'min_salary']).replace(',', '')
indeed_data.loc[i, 'max_salary'] = str(indeed_data.loc[i, 'max_salary']).replace(',', '')
# Check the salary unit (example: hourly salary/yearly) and convert the available salary to Yearly amount.
indeed_data['min_salary'] = indeed_data['min_salary'].str.replace('(Indeed est.)', '', regex=False)
indeed_data["Suffix"] = indeed_data["salaries"].str.split().str[-1]
indeed_data['min_salary'] = indeed_data['min_salary'].astype('float')
indeed_data['max_salary'] = indeed_data['max_salary'].astype('float')
indeed_data['mean_salary'] = np.nan
for i in range(len(indeed_data['min_salary'])):
if (indeed_data.loc[i, 'Suffix'] == 'hour'):
# Assume a full-time employee working 40 hours/week; 1 year = 52.1429 weeks.
indeed_data.loc[i, 'min_salary'] = indeed_data.loc[i, 'min_salary'] * 40 * 52.1429
indeed_data.loc[i, 'max_salary'] = indeed_data.loc[i, 'max_salary'] * 40 * 52.1429
# Calculate mean salary from minimum and maximum salary
if pd.isnull(indeed_data['min_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['max_salary'][i]
elif pd.isnull(indeed_data['max_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['min_salary'][i]
else:
indeed_data.loc[i, 'mean_salary'] = (indeed_data['min_salary'][i] + indeed_data['max_salary'][i]) / 2
# Determine the specialization such as NLP , ML, AI from job title.
indeed_data = extract_specialization(indeed_data)
return indeed_data
# Formatting of all graphs
sns.set_style("darkgrid")
sns.set(rc={'figure.figsize': (12, 8)})
|
job_data['job_title'] = job_data['job_title'].str.lower()
job_data.loc[job_data['job_title'].str.contains(
'data scientist|data science|data science & insights|data science and insights'), 'Job_Title_Category'] = 'Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'analyst|analytics|analysis'), 'Job_Title_Category'] = 'Data Analyst'
job_data.loc[job_data['job_title'].str.contains(
'intern|internship|university|graduate|coop|student|co-op'), 'Job_Title_Category'] = 'Data Science Intern/ University Graduate'
job_data.loc[job_data['job_title'].str.contains(
'jr|junior|entry level|early career'), 'Job_Title_Category'] = 'Junior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'sr|senior|phd|research'), 'Job_Title_Category'] = 'Senior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'machine learning|machine_learning|deep|ai|artificial intelligence'), 'Job_Title_Category'] = 'Machine Learning/ AI/ Deep Learning'
job_data.loc[job_data['job_title'].str.contains(
'health|biomedical|bio|bioengineer|bioinformatics|neuro'), 'Job_Title_Category'] = 'Health/ Biomedical Data Science'
job_data.loc[job_data['job_title'].str.contains(
'nlp|language'), 'Job_Title_Category'] = 'Natural Language Processing'
job_data.loc[job_data['job_title'].str.contains(
'market|quantitative|digital marketing|search|supply chain|payment|advertising'), 'Job_Title_Category'] = 'Data Science-Marketing'
job_data['Job_Title_Category'] = job_data.Job_Title_Category.replace(np.nan, 'Others', regex=True)
return job_data
def plot_mean_salary_per_state(indeed_data):
indeed_data = indeed_data[['mean_salary', 'state']].dropna()
fig, ax = plt.subplots()
sns.boxplot(x="state", y="mean_salary", data=indeed_data, ax=ax)
plt.xlabel("States")
plt.ylabel("Mean Salary")
plt.title("Mean Salary per State")
img_data = BytesIO()
plt.savefig(img_data, format='png')
img_data.seek(0)
save_plot_to_s3('indeed-analysis', 'mean_salary_per_state.png', img_data)
def plot_designation_cnt(indeed_data):
df = \
indeed_data.groupby('state').count().sort_values(['Job_Title_Category'], ascending=False).head(15).reset_index()[
'state']
jobs_top_state = indeed_data[indeed_data['state'].isin(df)]
job_category = extract_specialization(jobs_top_state)
job_category = pd.crosstab(job_category.state, job_category.Job_Title_Category)
fig, ax = plt.subplots()
sns.heatmap(job_category, annot=True, fmt="d", ax=ax)
plt.title("Job Openings per State");
plt.xlabel("Job Specialization")
plt.ylabel("States")
img_data = BytesIO()
plt.savefig(img_data, format='png')
img_data.seek(
|
def extract_specialization(job_data):
# Categorizing job titles into specialization type.
job_data['Job_Title_Category'] = np.nan
|
random_line_split
|
lambda_function.py
|
return (jobs)
def extract_company(soup, companies, rows):
for div in rows:
company = div.find_all(name='span', attrs={'class': 'company'})
if len(company) > 0:
for b in company:
companies.append(b.text.strip())
else:
sec_try = div.find_all(name='span', attrs={'class': 'result-link-source'})
for span in sec_try:
companies.append(span.text.strip())
return (companies)
def extract_location(soup, locations, rows):
for div in rows:
try:
location_div = div.find(name='div', attrs={'class': 'recJobLoc'})
locations.append(location_div['data-rc-loc'])
except:
locations.append(np.nan)
return (locations)
def extract_salary(soup, salaries, rows):
for div in rows:
try:
salaries.append(div.find('nobr').text)
except:
try:
div_two = div.find(name='div', attrs={'class': 'salarySnippet'})
div_three = div_two.find('span')
salaries.append(div_three.text.strip())
except:
salaries.append(np.nan)
return (salaries)
def extract_description(soup, description, rows):
spans = soup.findAll('div', attrs={'class': 'summary'})
for span in spans:
description.append(span.text.strip())
# print(span.text.strip())
return (description)
# Extracting Job Title, Company Details, Location, Salary and Job Description for Analysis
def indeed_scrape():
jobs = []
companies = []
locations = []
salaries = []
description = []
# Scrape roughly 100 result pages: Indeed paginates results in steps of 10,
# so the start values below run 0, 10, ..., 1000 (hence max_results = 1001).
max_results = 1001
for start_val in range(0, max_results, 10):
# url of indeed web page with job title filter set to data scientist.
page = requests.get('https://www.indeed.com/jobs?q=Data+Scientist&start={}'.format(start_val))
# ensuring at least 1 second between page extracts.
time.sleep(1)
soup = BeautifulSoup(page.text, 'html.parser')
# Extract div class which contains the information about a single job.
rows = soup.find_all(name='div', attrs={'class': 'row'})
job_title = extract_job_title(soup, jobs, rows)
company_name = extract_company(soup, companies, rows)
location = extract_location(soup, locations, rows)
salaries = extract_salary(soup, salaries, rows)
description = extract_description(soup, description, rows)
# Create a dataframe from scraped data.
indeed_df = pd.DataFrame(
{'company_name': company_name, 'job_title': job_title, 'location': location, 'salaries': salaries,
'description': description})
return indeed_df
def preprocess(indeed_data):
# Indeed pages may contain multiple postings of the same job. To ensure a single entry per job,
# duplicates are dropped when a job with the same location, job title, company name and description is already present.
indeed_data = indeed_data.drop_duplicates(subset=['location', 'job_title', 'company_name', 'description'],
keep='last').reset_index()
# Extract the state from location column.
indeed_data['state'] = np.nan
for i in range(len(indeed_data['state'])):
try:
indeed_data.loc[i, 'state'] = indeed_data.loc[i, 'location'].split(',')[1]
except:
pass
# Group data by state and count the number of jobs available per state.
no_of_jobs = indeed_data.groupby(['state'])['company_name'].count().reset_index().sort_values(['company_name'])
# Extract the available max and min salary boundaries for every job posting
indeed_data['min_salary'] = indeed_data['salaries'].str.split('-').str[0].str.split().str[0].str[1:]
indeed_data['max_salary'] = indeed_data['salaries'].str.split('-').str[1].str.split().str[0].str[1:]
for i in range(len(indeed_data['min_salary'])):
if indeed_data.loc[i, 'min_salary'] is not np.NaN:
indeed_data.loc[i, 'min_salary'] = str(indeed_data.loc[i, 'min_salary']).replace(',', '')
indeed_data.loc[i, 'max_salary'] = str(indeed_data.loc[i, 'max_salary']).replace(',', '')
# Check the salary unit (example: hourly salary/yearly) and convert the available salary to Yearly amount.
indeed_data['min_salary'] = indeed_data['min_salary'].str.replace('(Indeed est.)', '', regex=False)
indeed_data["Suffix"] = indeed_data["salaries"].str.split().str[-1]
indeed_data['min_salary'] = indeed_data['min_salary'].astype('float')
indeed_data['max_salary'] = indeed_data['max_salary'].astype('float')
indeed_data['mean_salary'] = np.nan
for i in range(len(indeed_data['min_salary'])):
if (indeed_data.loc[i, 'Suffix'] == 'hour'):
# Assume a full-time employee working 40 hours/week; 1 year = 52.1429 weeks.
indeed_data.loc[i, 'min_salary'] = indeed_data.loc[i, 'min_salary'] * 40 * 52.1429
indeed_data.loc[i, 'max_salary'] = indeed_data.loc[i, 'max_salary'] * 40 * 52.1429
# Calculate mean salary from minimum and maximum salary
if pd.isnull(indeed_data['min_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['max_salary'][i]
elif pd.isnull(indeed_data['max_salary'][i]):
indeed_data.loc[i, 'mean_salary'] = indeed_data['min_salary'][i]
else:
indeed_data.loc[i, 'mean_salary'] = (indeed_data['min_salary'][i] + indeed_data['max_salary'][i]) / 2
# Determine the specialization such as NLP , ML, AI from job title.
indeed_data = extract_specialization(indeed_data)
return indeed_data
# Formatting of all graphs
sns.set_style("darkgrid")
sns.set(rc={'figure.figsize': (12, 8)})
def extract_specialization(job_data):
# Categorizing job titles into specialization type.
job_data['Job_Title_Category'] = np.nan
job_data['job_title'] = job_data['job_title'].str.lower()
job_data.loc[job_data['job_title'].str.contains(
'data scientist|data science|data science & insights|data science and insights'), 'Job_Title_Category'] = 'Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'analyst|analytics|analysis'), 'Job_Title_Category'] = 'Data Analyst'
job_data.loc[job_data['job_title'].str.contains(
'intern|internship|university|graduate|coop|student|co-op'), 'Job_Title_Category'] = 'Data Science Intern/ University Graduate'
job_data.loc[job_data['job_title'].str.contains(
'jr|junior|entry level|early career'), 'Job_Title_Category'] = 'Junior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'sr|senior|phd|research'), 'Job_Title_Category'] = 'Senior Data Scientist'
job_data.loc[job_data['job_title'].str.contains(
'machine learning|machine_learning|deep|ai|artificial intelligence'), 'Job_Title_Category'] = 'Machine Learning/ AI/ Deep Learning'
job_data.loc[job_data['job_title'].str.contains(
'health|biomedical|bio|bioengineer|bioinformatics|neuro'), 'Job_Title_Category'] = 'Health/ Biomedical Data Science'
job_data.loc[job_data['job_title'].str.contains(
'nlp|language'), 'Job_Title_Category'] = 'Natural Language Processing'
job_data.loc[job_data['job_title'].str.contains(
'market|quantitative|digital marketing|search|supply chain|payment|advertising'), 'Job_Title_Category'] = 'Data Science-Marketing'
job_data['Job_Title_Category'] = job_data.Job_Title_Category.replace(np.nan, 'Others', regex=True)
return job_data
def plot_mean_salary_per_state(indeed_data):
indeed_data = indeed_data[['mean_salary', 'state']].dropna()
fig, ax = plt.subplots()
sns.boxplot(x="state", y="mean_salary", data=indeed_data, ax=ax)
plt.xlabel("States")
plt.ylabel("Mean Salary")
plt.title("Mean Salary per State")
img_data = BytesIO()
plt.savefig(img_data, format='png')
img_data.seek(0)
save_plot_to_s3('indeed-analysis', 'mean_salary_per_state.png', img_data)
def plot_designation_cnt(indeed_data):
df = \
indeed_data.groupby('state').count().sort_values(['Job_Title_Category'], ascending=False).head(15
|
for a in div.find_all(name='a', attrs={'data-tn-element': 'jobTitle'}):
jobs.append(a['title'])
|
conditional_block
|
|
store.go
|
we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range as well so callers can compute the
// height of each header without knowing the height of the stop hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) FetchBlockHeaderAncestors(
numHeaders uint32,
stopHash *chainhash.Hash,
) ([]wire.BlockHeader, uint32, er.R) {
var headers []wire.BlockHeader
var startHeight uint32
return headers, startHeight, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
// First, we'll find the final header in the range, this will be the
// ending height of our scan.
endEntry, err := h.headerEntryByHash(tx, stopHash)
if err != nil {
return err
}
startHeight = endEntry.Height - numHeaders
if headers, err = h.readBlockHeaderRange(tx, startHeight, endEntry.Height); err != nil {
return err
} else if len(headers) == 0 {
return er.Errorf("Fetching %v headers up to %v - no results",
numHeaders, stopHash)
} else if realHash := headers[len(headers)-1].BlockHash(); realHash != endEntry.Header.blockHeader.BlockHash() {
return er.Errorf("Fetching %v headers up to %v - hash mismatch, got %v",
numHeaders, stopHash, realHash)
}
return err
})
}
// HeightFromHash returns the height of a particular block header given its
// hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) HeightFromHash(hash *chainhash.Hash) (uint32, er.R) {
var height uint32
return height, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if he, err := h.headerEntryByHash(tx, hash); err != nil {
return err
} else {
height = he.Height
return nil
}
})
}
func (h *NeutrinoDBStore) RollbackLastBlock(tx walletdb.ReadWriteTx) (*RollbackHeader, er.R) {
result := RollbackHeader{}
prev, err := h.truncateBlockIndex(tx)
if err != nil {
result.BlockHeader = nil
result.FilterHeader = nil
return &result, err
} else {
result.BlockHeader = &waddrmgr.BlockStamp{}
result.FilterHeader = &chainhash.Hash{}
result.BlockHeader.Hash = prev.Header.blockHeader.BlockHash()
result.BlockHeader.Height = int32(prev.Height)
result.FilterHeader = prev.Header.filterHeader
}
return &result, nil
}
// BlockHeader is a Bitcoin block header that also has its height included.
type BlockHeader struct {
*wire.BlockHeader
// Height is the height of this block header within the current main
// chain.
Height uint32
}
// toIndexEntry converts the BlockHeader into a matching headerEntry. This
// method is used when a header is to be written to disk.
func (b *BlockHeader) toIndexEntry() *headerEntry {
var buf [80]byte
hb := bytes.NewBuffer(buf[:])
hb.Reset()
// Serialize the header into the scratch buffer.
if err := b.Serialize(hb); err != nil {
panic(er.Errorf("Failed to serialize header %v", err))
}
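// The serialized bytes themselves are not stored in the entry below; the Serialize
// call above appears to serve only as a sanity check that the header is well-formed.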
return &headerEntry{
blockHeader: *b.BlockHeader,
}
}
func blockHeaderFromHe(he *headerEntryWithHeight) (*BlockHeader, er.R) {
var ret wire.BlockHeader
if err := ret.Deserialize(bytes.NewReader(he.Header.Bytes())); err != nil {
return nil, err
}
return &BlockHeader{&ret, he.Height}, nil
}
// WriteHeaders writes a set of headers to disk.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) WriteBlockHeaders(tx walletdb.ReadWriteTx, hdrs ...BlockHeader) er.R {
headerLocs := make([]headerEntryWithHeight, len(hdrs))
for i, header := range hdrs {
headerLocs[i].Header = header.toIndexEntry()
headerLocs[i].Height = header.Height
}
return h.addBlockHeaders(tx, headerLocs, false)
}
// blockLocatorFromHash takes a given block hash and then creates a block
// locator using it as the root of the locator. We'll start by taking a single
// step backwards, then keep doubling the distance until genesis after we get
// 10 locators.
//
// TODO(roasbeef): make into single transaction.
func (h *NeutrinoDBStore) blockLocatorFromHash(tx walletdb.ReadTx, he *headerEntry) (
blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
hash := he.blockHeader.BlockHash()
// Append the initial hash
locator = append(locator, &hash)
// If hash isn't found in DB or this is the genesis block, return the
// locator as is
hewh, err := h.headerEntryByHash(tx, &hash)
if err != nil {
// If the starting hash cannot be found, return the locator built so far
// instead of dereferencing a nil entry below.
return locator, err
}
height := hewh.Height
if height == 0 {
return locator, nil
}
decrement := uint32(1)
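// Worked example (not from the source): starting from height 1000 the loop below visits
// heights 999 ... 990 one by one, then 988, 984, 976, 960, 928, 864, 736, 480 and finally 0,
// since the step doubles once more than 10 locators have been collected.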
for height > 0 && len(locator) < wire.MaxBlockLocatorsPerMsg {
// Decrement by 1 for the first 10 blocks, then double the jump
// until we get to the genesis hash
if len(locator) > 10 {
decrement *= 2
}
if decrement > height {
height = 0
} else {
height -= decrement
}
he, err := h.readHeader(tx, height)
if err != nil {
return locator, err
}
headerHash := he.Header.blockHeader.BlockHash()
locator = append(locator, &headerHash)
}
return locator, nil
}
// LatestBlockLocator returns the latest block locator object based on the tip
// of the current main chain from the PoV of the database and flat files.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) LatestBlockLocator() (blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
return locator, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if ct, err := h.chainTip(tx, bucketNameBlockTip); err != nil {
return err
} else {
he := headerEntry{
blockHeader: ct.Header.blockHeader,
filterHeader: ct.Header.filterHeader,
}
locator, err = h.blockLocatorFromHash(tx, &he)
return err
}
})
}
// maybeResetHeaderState will reset the header state if the header assertion
// fails, but only if the target height is found. The boolean returned indicates
// that header state was reset.
func (f *NeutrinoDBStore) maybeResetHeaderState(
tx walletdb.ReadWriteTx,
headerStateAssertion *FilterHeader,
nhs *NeutrinoDBStore,
) (bool, er.R) {
failed := false
if headerStateAssertion != nil {
// First, we'll attempt to locate the header at this height. If no such
// header is found, then we'll exit early.
assertedHeader, err := f.FetchFilterHeaderByHeight(headerStateAssertion.Height)
if assertedHeader == nil {
if !ErrHeaderNotFound.Is(err) {
return false, err
}
} else if *assertedHeader != headerStateAssertion.FilterHash {
log.Warnf("Filter header at height %v is not %v, assertion failed, resyncing filters",
headerStateAssertion.Height, headerStateAssertion.HeaderHash)
failed = true
}
}
if !failed && nhs != nil {
hdr, err := f.chainTip(tx, bucketNameFilterTip)
if err != nil {
return false, err
}
for
|
break
} else if hdr.Height == 0 {
break
}
height
|
{
hdrhash := hdr.Header.blockHeader.BlockHash()
he := hdr.Header
if bh, err := nhs.FetchBlockHeaderByHeight1(tx, hdr.Height); err != nil {
if ErrHashNotFound.Is(err) {
log.Warnf("We have filter header number %v but no block header, "+
"resetting filter headers", hdr.Height)
failed = true
break
}
return false, err
} else if bh := bh.BlockHash(); !hdrhash.IsEqual(&bh) {
log.Warnf("Filter header / block header mismatch at height %v: %v != %v",
hdr.Height, hdrhash, bh)
failed = true
break
} else if len(he.Bytes()) != 32 {
log.Warnf("Filter header at height %v is not 32 bytes: %v",
hdr.Height, hex.EncodeToString(he.Bytes()))
failed = true
|
conditional_block
|
store.go
|
blockchain.BlockLocator
hash := he.blockHeader.BlockHash()
// Append the initial hash
locator = append(locator, &hash)
// If hash isn't found in DB or this is the genesis block, return the
// locator as is
hewh, err := h.headerEntryByHash(tx, &hash)
if err != nil {
// If the starting hash cannot be found, return the locator built so far
// instead of dereferencing a nil entry below.
return locator, err
}
height := hewh.Height
if height == 0 {
return locator, nil
}
decrement := uint32(1)
for height > 0 && len(locator) < wire.MaxBlockLocatorsPerMsg {
// Decrement by 1 for the first 10 blocks, then double the jump
// until we get to the genesis hash
if len(locator) > 10 {
decrement *= 2
}
if decrement > height {
height = 0
} else {
height -= decrement
}
he, err := h.readHeader(tx, height)
if err != nil {
return locator, err
}
headerHash := he.Header.blockHeader.BlockHash()
locator = append(locator, &headerHash)
}
return locator, nil
}
// LatestBlockLocator returns the latest block locator object based on the tip
// of the current main chain from the PoV of the database and flat files.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) LatestBlockLocator() (blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
return locator, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if ct, err := h.chainTip(tx, bucketNameBlockTip); err != nil {
return err
} else {
he := headerEntry{
blockHeader: ct.Header.blockHeader,
filterHeader: ct.Header.filterHeader,
}
locator, err = h.blockLocatorFromHash(tx, &he)
return err
}
})
}
// maybeResetHeaderState will reset the header state if the header assertion
// fails, but only if the target height is found. The boolean returned indicates
// that header state was reset.
func (f *NeutrinoDBStore) maybeResetHeaderState(
tx walletdb.ReadWriteTx,
headerStateAssertion *FilterHeader,
nhs *NeutrinoDBStore,
) (bool, er.R) {
failed := false
if headerStateAssertion != nil {
// First, we'll attempt to locate the header at this height. If no such
// header is found, then we'll exit early.
assertedHeader, err := f.FetchFilterHeaderByHeight(headerStateAssertion.Height)
if assertedHeader == nil {
if !ErrHeaderNotFound.Is(err) {
return false, err
}
} else if *assertedHeader != headerStateAssertion.FilterHash {
log.Warnf("Filter header at height %v is not %v, assertion failed, resyncing filters",
headerStateAssertion.Height, headerStateAssertion.HeaderHash)
failed = true
}
}
if !failed && nhs != nil {
hdr, err := f.chainTip(tx, bucketNameFilterTip)
if err != nil {
return false, err
}
for {
hdrhash := hdr.Header.blockHeader.BlockHash()
he := hdr.Header
if bh, err := nhs.FetchBlockHeaderByHeight1(tx, hdr.Height); err != nil {
if ErrHashNotFound.Is(err) {
log.Warnf("We have filter header number %v but no block header, "+
"resetting filter headers", hdr.Height)
failed = true
break
}
return false, err
} else if bh := bh.BlockHash(); !hdrhash.IsEqual(&bh) {
log.Warnf("Filter header / block header mismatch at height %v: %v != %v",
hdr.Height, hdrhash, bh)
failed = true
break
} else if len(he.Bytes()) != 32 {
log.Warnf("Filter header at height %v is not 32 bytes: %v",
hdr.Height, hex.EncodeToString(he.Bytes()))
failed = true
break
} else if hdr.Height == 0 {
break
}
height := hdr.Height - 1
hdr, err = f.readHeader(tx, height)
if err != nil {
log.Warnf("Filter header missing at height %v (%v), resyncing filter headers",
height, err)
failed = true
break
}
}
}
// If our on disk state and the provided header assertion don't match,
// then we'll purge this state so we can sync it anew once we fully
// start up.
if failed {
if err := f.deleteBuckets(tx); err != nil {
return true, err
} else {
return true, f.createBuckets(tx)
}
}
return false, nil
}
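// Hedged usage sketch (not part of the original file): one way a caller that already
// holds a read-write transaction might run the assertion check above. The helper name
// and the choice to pass the combined store as its own block-header source are
// assumptions for illustration only.
func exampleMaybeReset(f *NeutrinoDBStore, tx walletdb.ReadWriteTx, assertion *FilterHeader) er.R {
	reset, err := f.maybeResetHeaderState(tx, assertion, f)
	if err != nil {
		return err
	}
	if reset {
		// The buckets were purged; a full filter header resync will follow on startup.
		log.Warnf("filter header state was reset, resyncing filter headers")
	}
	return nil
}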
// FetchFilterHeader returns the filter header that corresponds to the passed block
// hash.
func (f *NeutrinoDBStore) FetchFilterHeader(hash *chainhash.Hash) (*chainhash.Hash, er.R) {
var out *chainhash.Hash
return out, walletdb.View(f.Db, func(tx walletdb.ReadTx) er.R {
var err er.R
out, err = f.FetchFilterHeader1(tx, hash)
return err
})
}
func (f *NeutrinoDBStore) FetchFilterHeader1(tx walletdb.ReadTx, hash *chainhash.Hash) (*chainhash.Hash, er.R) {
if hdr, err := f.headerEntryByHash(tx, hash); err != nil {
return nil, err
} else if h, err := chainhash.NewHash(hdr.Header.filterHeader[:]); err != nil {
return nil, err
} else {
return h, nil
}
}
// FetchFilterHeaderByHeight returns the filter header for a particular block height.
func (f *NeutrinoDBStore) FetchFilterHeaderByHeight(height uint32) (*chainhash.Hash, er.R) {
var hash *chainhash.Hash
return hash, walletdb.View(f.Db, func(tx walletdb.ReadTx) er.R {
var h *chainhash.Hash
if hdr, err := f.readHeader(tx, height); err != nil {
return err
} else if hdr.Header.filterHeader != nil {
h, err = chainhash.NewHash(hdr.Header.filterHeader[:])
if err != nil {
return err
}
}
hash = h
return nil
})
}
// FetchFilterHeaderAncestors fetches the numHeaders filter headers that are the
// ancestors of the target stop block hash. A total of numHeaders+1 headers will be
// returned, as we'll walk back numHeaders distance to collect each header, then
// return the final header specified by the stop hash. We'll also return the starting
// height of the header range so callers can compute the height of each header without
// knowing the height of the stop hash.
func (f *NeutrinoDBStore) FetchFilterHeaderAncestors(
numHeaders uint32,
stopHash *chainhash.Hash,
) ([]chainhash.Hash, uint32, er.R) {
var hashes []chainhash.Hash
var height uint32
return hashes, height, walletdb.View(f.Db, func(tx walletdb.ReadTx) er.R {
// First, we'll find the final header in the range, this will be the
// ending height of our scan.
endEntry, err := f.headerEntryByHash(tx, stopHash)
if err != nil {
return err
}
startHeight := endEntry.Height - numHeaders
hashes, err = f.readFilterHeaderRange(tx, startHeight, endEntry.Height)
if err != nil {
return err
}
// for i, h := range hashes {
// log.Debugf("Load filter header %d => [%s]", startHeight+uint32(i), h)
// }
if !bytes.Equal(hashes[len(hashes)-1][:], endEntry.Header.filterHeader[:]) {
return er.Errorf("Hash mismatch on %v: %v %v", endEntry.Height,
hashes[len(hashes)-1], endEntry.Header.Bytes())
}
return nil
})
}
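// Hedged usage sketch (not part of the original file): illustrates the contract of
// FetchFilterHeaderAncestors described above; asking for numHeaders ancestors of
// stopHash returns those ancestors plus the stop header itself, and startHeight lets
// the caller recover the height of each returned hash. The helper name is an assumption.
func exampleLogAncestors(f *NeutrinoDBStore, stopHash *chainhash.Hash, numHeaders uint32) er.R {
	hashes, startHeight, err := f.FetchFilterHeaderAncestors(numHeaders, stopHash)
	if err != nil {
		return err
	}
	for i := range hashes {
		log.Debugf("filter header %d => %s", startHeight+uint32(i), hashes[i])
	}
	return nil
}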
// FilterHeader represents a filter header (basic or extended). The filter
// header itself is coupled with the block height and hash of the filter's
// block.
type FilterHeader struct {
// HeaderHash is the hash of the block header that this filter header
// corresponds to.
HeaderHash chainhash.Hash
// FilterHash is the filter header itself.
FilterHash chainhash.Hash
// Height is the block height of the filter header in the main chain.
Height uint32
}
// WriteFilterHeaders writes a batch of filter headers to persistent storage. The
// headers themselves are appended to the flat file, and then the index is updated
// to reflect the new entries.
func (f *NeutrinoDBStore) WriteFilterHeaders(tx walletdb.ReadWriteTx, hdrs ...FilterHeader) er.R {
return f.addFilterHeaders(tx, hdrs, false)
}
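// Hedged usage sketch (not part of the original file): build a FilterHeader value and
// persist it through WriteFilterHeaders inside an already-open read-write transaction.
// The helper name and parameters are illustrative assumptions.
func exampleWriteOneFilterHeader(f *NeutrinoDBStore, tx walletdb.ReadWriteTx,
	blockHash, filterHash chainhash.Hash, height uint32) er.R {
	hdr := FilterHeader{
		HeaderHash: blockHash,
		FilterHash: filterHash,
		Height:     height,
	}
	return f.WriteFilterHeaders(tx, hdr)
}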
|
/////
/////
|
random_line_split
|
|
store.go
|
we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range so callers can compute the
// height of each header without knowing the height of the stop hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) FetchBlockHeaderAncestors(
numHeaders uint32,
stopHash *chainhash.Hash,
) ([]wire.BlockHeader, uint32, er.R) {
var headers []wire.BlockHeader
var startHeight uint32
return headers, startHeight, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
// First, we'll find the final header in the range, this will be the
// ending height of our scan.
endEntry, err := h.headerEntryByHash(tx, stopHash)
if err != nil {
return err
}
startHeight = endEntry.Height - numHeaders
if headers, err = h.readBlockHeaderRange(tx, startHeight, endEntry.Height); err != nil {
return err
} else if len(headers) == 0 {
return er.Errorf("Fetching %v headers up to %v - no results",
numHeaders, stopHash)
} else if realHash := headers[len(headers)-1].BlockHash(); realHash != endEntry.Header.blockHeader.BlockHash() {
return er.Errorf("Fetching %v headers up to %v - hash mismatch, got %v",
numHeaders, stopHash, realHash)
}
return err
})
}
// HeightFromHash returns the height of a particular block header given its
// hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) HeightFromHash(hash *chainhash.Hash) (uint32, er.R) {
var height uint32
return height, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if he, err := h.headerEntryByHash(tx, hash); err != nil {
return err
} else {
height = he.Height
return nil
}
})
}
func (h *NeutrinoDBStore) RollbackLastBlock(tx walletdb.ReadWriteTx) (*RollbackHeader, er.R) {
result := RollbackHeader{}
prev, err := h.truncateBlockIndex(tx)
if err != nil {
result.BlockHeader = nil
result.FilterHeader = nil
return &result, err
} else {
result.BlockHeader = &waddrmgr.BlockStamp{}
result.FilterHeader = &chainhash.Hash{}
result.BlockHeader.Hash = prev.Header.blockHeader.BlockHash()
result.BlockHeader.Height = int32(prev.Height)
result.FilterHeader = prev.Header.filterHeader
}
return &result, nil
}
// BlockHeader is a Bitcoin block header that also has its height included.
type BlockHeader struct {
*wire.BlockHeader
// Height is the height of this block header within the current main
// chain.
Height uint32
}
// toIndexEntry converts the BlockHeader into a matching headerEntry. This
// method is used when a header is to be written to disk.
func (b *BlockHeader) toIndexEntry() *headerEntry {
var buf [80]byte
hb := bytes.NewBuffer(buf[:])
hb.Reset()
// Serialize the header into the scratch buffer so it can be written to disk.
if err := b.Serialize(hb); err != nil {
panic(er.Errorf("Failed to serialize header %v", err))
}
return &headerEntry{
blockHeader: *b.BlockHeader,
}
}
func blockHeaderFromHe(he *headerEntryWithHeight) (*BlockHeader, er.R) {
var ret wire.BlockHeader
if err := ret.Deserialize(bytes.NewReader(he.Header.Bytes())); err != nil {
return nil, err
}
return &BlockHeader{&ret, he.Height}, nil
}
// WriteBlockHeaders writes a set of block headers to disk.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) WriteBlockHeaders(tx walletdb.ReadWriteTx, hdrs ...BlockHeader) er.R {
headerLocs := make([]headerEntryWithHeight, len(hdrs))
for i, header := range hdrs {
headerLocs[i].Header = header.toIndexEntry()
headerLocs[i].Height = header.Height
}
return h.addBlockHeaders(tx, headerLocs, false)
}
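// Hedged usage sketch (not part of the original file): wrap a raw wire.BlockHeader with
// its height and persist it via WriteBlockHeaders inside an open read-write transaction.
// The helper name is an assumption for illustration.
func exampleWriteBlockHeader(h *NeutrinoDBStore, tx walletdb.ReadWriteTx,
	raw *wire.BlockHeader, height uint32) er.R {
	hdr := BlockHeader{BlockHeader: raw, Height: height}
	return h.WriteBlockHeaders(tx, hdr)
}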
// blockLocatorFromHash takes a given block hash and then creates a block
// locator using it as the root of the locator. We'll start by taking a single
// step backwards, then keep doubling the distance until genesis after we get
// 10 locators.
//
// TODO(roasbeef): make into single transaction.
func (h *NeutrinoDBStore) blockLocatorFromHash(tx walletdb.ReadTx, he *headerEntry) (
blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
hash := he.blockHeader.BlockHash()
// Append the initial hash
locator = append(locator, &hash)
// If hash isn't found in DB or this is the genesis block, return the
// locator as is
hewh, err := h.headerEntryByHash(tx, &hash)
if err != nil {
// The hash isn't indexed (or couldn't be read), so return the locator as is.
return locator, nil
}
height := hewh.Height
if height == 0 {
return locator, nil
}
decrement := uint32(1)
for height > 0 && len(locator) < wire.MaxBlockLocatorsPerMsg {
// Decrement by 1 for the first 10 blocks, then double the jump
// until we get to the genesis hash
if len(locator) > 10 {
decrement *= 2
}
if decrement > height {
height = 0
} else {
height -= decrement
}
he, err := h.readHeader(tx, height)
if err != nil {
return locator, err
}
headerHash := he.Header.blockHeader.BlockHash()
locator = append(locator, &headerHash)
}
return locator, nil
}
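// Hedged illustration (not part of the original file): the height schedule the loop in
// blockLocatorFromHash walks, independent of the database. For a tip at height 1000 it
// steps back by 1 for the first ten entries and then doubles the stride toward genesis.
// The helper name is an assumption.
func exampleLocatorHeights(tip uint32) []uint32 {
	heights := []uint32{tip}
	height := tip
	decrement := uint32(1)
	for height > 0 && len(heights) < wire.MaxBlockLocatorsPerMsg {
		if len(heights) > 10 {
			decrement *= 2
		}
		if decrement > height {
			height = 0
		} else {
			height -= decrement
		}
		heights = append(heights, height)
	}
	return heights
}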
// LatestBlockLocator returns the latest block locator object based on the tip
// of the current main chain from the PoV of the database and flat files.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) LatestBlockLocator() (blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
return locator, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if ct, err := h.chainTip(tx, bucketNameBlockTip); err != nil {
return err
} else {
he := headerEntry{
blockHeader: ct.Header.blockHeader,
filterHeader: ct.Header.filterHeader,
}
locator, err = h.blockLocatorFromHash(tx, &he)
return err
}
})
}
// maybeResetHeaderState will reset the header state if the header assertion
// fails, but only if the target height is found. The boolean returned indicates
// that header state was reset.
func (f *NeutrinoDBStore)
|
(
tx walletdb.ReadWriteTx,
headerStateAssertion *FilterHeader,
nhs *NeutrinoDBStore,
) (bool, er.R) {
failed := false
if headerStateAssertion != nil {
// First, we'll attempt to locate the header at this height. If no such
// header is found, then we'll exit early.
assertedHeader, err := f.FetchFilterHeaderByHeight(headerStateAssertion.Height)
if assertedHeader == nil {
if !ErrHeaderNotFound.Is(err) {
return false, err
}
} else if *assertedHeader != headerStateAssertion.FilterHash {
log.Warnf("Filter header at height %v is not %v, assertion failed, resyncing filters",
headerStateAssertion.Height, headerStateAssertion.HeaderHash)
failed = true
}
}
if !failed && nhs != nil {
hdr, err := f.chainTip(tx, bucketNameFilterTip)
if err != nil {
return false, err
}
for {
hdrhash := hdr.Header.blockHeader.BlockHash()
he := hdr.Header
if bh, err := nhs.FetchBlockHeaderByHeight1(tx, hdr.Height); err != nil {
if ErrHashNotFound.Is(err) {
log.Warnf("We have filter header number %v but no block header, "+
"resetting filter headers", hdr.Height)
failed = true
break
}
return false, err
} else if bh := bh.BlockHash(); !hdrhash.IsEqual(&bh) {
log.Warnf("Filter header / block header mismatch at height %v: %v != %v",
hdr.Height, hdrhash, bh)
failed = true
break
} else if len(he.Bytes()) != 32 {
log.Warnf("Filter header at height %v is not 32 bytes: %v",
hdr.Height, hex.EncodeToString(he.Bytes()))
failed = true
break
} else if hdr.Height == 0 {
break
}
|
maybeResetHeaderState
|
identifier_name
|
store.go
|
we'll walk back numHeaders distance to collect each header,
// then return the final header specified by the stop hash. We'll also return
// the starting height of the header range so callers can compute the
// height of each header without knowing the height of the stop hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) FetchBlockHeaderAncestors(
numHeaders uint32,
stopHash *chainhash.Hash,
) ([]wire.BlockHeader, uint32, er.R) {
var headers []wire.BlockHeader
var startHeight uint32
return headers, startHeight, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
// First, we'll find the final header in the range, this will be the
// ending height of our scan.
endEntry, err := h.headerEntryByHash(tx, stopHash)
if err != nil {
return err
}
startHeight = endEntry.Height - numHeaders
if headers, err = h.readBlockHeaderRange(tx, startHeight, endEntry.Height); err != nil {
return err
} else if len(headers) == 0 {
return er.Errorf("Fetching %v headers up to %v - no results",
numHeaders, stopHash)
} else if realHash := headers[len(headers)-1].BlockHash(); realHash != endEntry.Header.blockHeader.BlockHash() {
return er.Errorf("Fetching %v headers up to %v - hash mismatch, got %v",
numHeaders, stopHash, realHash)
}
return err
})
}
// HeightFromHash returns the height of a particular block header given its
// hash.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) HeightFromHash(hash *chainhash.Hash) (uint32, er.R) {
var height uint32
return height, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if he, err := h.headerEntryByHash(tx, hash); err != nil {
return err
} else {
height = he.Height
return nil
}
})
}
func (h *NeutrinoDBStore) RollbackLastBlock(tx walletdb.ReadWriteTx) (*RollbackHeader, er.R) {
result := RollbackHeader{}
prev, err := h.truncateBlockIndex(tx)
if err != nil {
result.BlockHeader = nil
result.FilterHeader = nil
return &result, err
} else {
result.BlockHeader = &waddrmgr.BlockStamp{}
result.FilterHeader = &chainhash.Hash{}
result.BlockHeader.Hash = prev.Header.blockHeader.BlockHash()
result.BlockHeader.Height = int32(prev.Height)
result.FilterHeader = prev.Header.filterHeader
}
return &result, nil
}
// BlockHeader is a Bitcoin block header that also has its height included.
type BlockHeader struct {
*wire.BlockHeader
// Height is the height of this block header within the current main
// chain.
Height uint32
}
// toIndexEntry converts the BlockHeader into a matching headerEntry. This
// method is used when a header is to be written to disk.
func (b *BlockHeader) toIndexEntry() *headerEntry {
var buf [80]byte
hb := bytes.NewBuffer(buf[:])
hb.Reset()
// Serialize the header into the scratch buffer so it can be written to disk.
if err := b.Serialize(hb); err != nil {
panic(er.Errorf("Failed to serialize header %v", err))
}
return &headerEntry{
blockHeader: *b.BlockHeader,
}
}
func blockHeaderFromHe(he *headerEntryWithHeight) (*BlockHeader, er.R) {
var ret wire.BlockHeader
if err := ret.Deserialize(bytes.NewReader(he.Header.Bytes())); err != nil {
return nil, err
}
return &BlockHeader{&ret, he.Height}, nil
}
// WriteBlockHeaders writes a set of block headers to disk.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) WriteBlockHeaders(tx walletdb.ReadWriteTx, hdrs ...BlockHeader) er.R {
headerLocs := make([]headerEntryWithHeight, len(hdrs))
for i, header := range hdrs {
headerLocs[i].Header = header.toIndexEntry()
headerLocs[i].Height = header.Height
}
return h.addBlockHeaders(tx, headerLocs, false)
}
// blockLocatorFromHash takes a given block hash and then creates a block
// locator using it as the root of the locator. We'll start by taking a single
// step backwards, then keep doubling the distance until genesis after we get
// 10 locators.
//
// TODO(roasbeef): make into single transaction.
func (h *NeutrinoDBStore) blockLocatorFromHash(tx walletdb.ReadTx, he *headerEntry) (
blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
hash := he.blockHeader.BlockHash()
// Append the initial hash
locator = append(locator, &hash)
// If hash isn't found in DB or this is the genesis block, return the
// locator as is
hewh, err := h.headerEntryByHash(tx, &hash)
if err != nil {
// The hash isn't indexed (or couldn't be read), so return the locator as is.
return locator, nil
}
height := hewh.Height
if height == 0 {
return locator, nil
}
decrement := uint32(1)
for height > 0 && len(locator) < wire.MaxBlockLocatorsPerMsg {
// Decrement by 1 for the first 10 blocks, then double the jump
// until we get to the genesis hash
if len(locator) > 10 {
decrement *= 2
}
if decrement > height {
height = 0
} else {
height -= decrement
}
he, err := h.readHeader(tx, height)
if err != nil {
return locator, err
}
headerHash := he.Header.blockHeader.BlockHash()
locator = append(locator, &headerHash)
}
return locator, nil
}
// LatestBlockLocator returns the latest block locator object based on the tip
// of the current main chain from the PoV of the database and flat files.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *NeutrinoDBStore) LatestBlockLocator() (blockchain.BlockLocator, er.R) {
var locator blockchain.BlockLocator
return locator, walletdb.View(h.Db, func(tx walletdb.ReadTx) er.R {
if ct, err := h.chainTip(tx, bucketNameBlockTip); err != nil {
return err
} else {
he := headerEntry{
blockHeader: ct.Header.blockHeader,
filterHeader: ct.Header.filterHeader,
}
locator, err = h.blockLocatorFromHash(tx, &he)
return err
}
})
}
// maybeResetHeaderState will reset the header state if the header assertion
// fails, but only if the target height is found. The boolean returned indicates
// that header state was reset.
func (f *NeutrinoDBStore) maybeResetHeaderState(
tx walletdb.ReadWriteTx,
headerStateAssertion *FilterHeader,
nhs *NeutrinoDBStore,
) (bool, er.R)
|
hdr, err := f.chainTip(tx, bucketNameFilterTip)
if err != nil {
return false, err
}
for {
hdrhash := hdr.Header.blockHeader.BlockHash()
he := hdr.Header
if bh, err := nhs.FetchBlockHeaderByHeight1(tx, hdr.Height); err != nil {
if ErrHashNotFound.Is(err) {
log.Warnf("We have filter header number %v but no block header, "+
"resetting filter headers", hdr.Height)
failed = true
break
}
return false, err
} else if bh := bh.BlockHash(); !hdrhash.IsEqual(&bh) {
log.Warnf("Filter header / block header mismatch at height %v: %v != %v",
hdr.Height, hdrhash, bh)
failed = true
break
} else if len(he.Bytes()) != 32 {
log.Warnf("Filter header at height %v is not 32 bytes: %v",
hdr.Height, hex.EncodeToString(he.Bytes()))
failed = true
break
} else if hdr.Height == 0 {
break
}
|
{
failed := false
if headerStateAssertion != nil {
// First, we'll attempt to locate the header at this height. If no such
// header is found, then we'll exit early.
assertedHeader, err := f.FetchFilterHeaderByHeight(headerStateAssertion.Height)
if assertedHeader == nil {
if !ErrHeaderNotFound.Is(err) {
return false, err
}
} else if *assertedHeader != headerStateAssertion.FilterHash {
log.Warnf("Filter header at height %v is not %v, assertion failed, resyncing filters",
headerStateAssertion.Height, headerStateAssertion.HeaderHash)
failed = true
}
}
if !failed && nhs != nil {
|
identifier_body
|
algo_2_2.py
|
(df):
global NRML
result = {}
for g1,n1 in df.groupby([df.index.month]) :
result[g1] = {}
mxs = {}
for g2,n2 in n1.groupby([n1.index.weekday]) :
result[g1][g2] = {}
for g3,n3 in n2.groupby([n2.index.hour]) :
result[g1][g2][g3] = {}
for g4,n4 in n3.groupby([n3['house_id']]):
mean = n4['value'].mean()
result[g1][g2][g3][g4] = n4['value'].mean()
mxs[g4] = mean if mean > mxs.get(g4,0) else mxs.get(g4,0)
for wd in result[g1] :
for hr in result[g1][wd]:
for hs in mxs :
result[g1][wd][hr][hs] = result[g1][wd][hr][hs]/mxs[hs]*1.0
NRML = result
return result
def calc_normalized():
df = create_df()
print ('Creating Normalized list')
result = group_A(df)
print (result)
print ('Done')
def form_groups(date_hour_group,avg_h_cons,cut,shedding):
global take_always
take_always = None
if NO_LISTS:
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows()]
else :
#Iterate the rows
if len(exclude_a) == len(date_hour_group.index) :
exclude_a.clear()
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows() if row['house_id'] not in exclude_a and row['house_id'] not in exclude_b]
if sum( [h[1] for h in houses]) < cut :
exclude_a.clear()
take_always = houses
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows() if \
row['house_id'] not in exclude_a and \
row['house_id'] not in exclude_b and \
row['house_id'] not in [x[0] for x in take_always]
]
groups = []
#Get the right normalized group
nrml = NRML[houses[0][2].month][houses[0][2].weekday()][houses[0][2].hour]
nrml = {x:nrml[x] for x in nrml if x in [y[0] for y in houses]}
nrml = list([list(x) for x in sorted(nrml.items(), key=operator.itemgetter(1))])
for x in nrml :
value = [y[1] for y in houses if y[0]==x[0]][0]
x.insert(1,value)
houses = nrml
if take_always and len(take_always):
#Do same for take_always
nrml = NRML[take_always[0][2].month][take_always[0][2].weekday()][take_always[0][2].hour]
nrml = {x:nrml[x] for x in nrml if x in [y[0] for y in take_always]}
nrml = list([list(x) for x in sorted(nrml.items(), key=operator.itemgetter(1))])
for x in nrml :
value = [y[1] for y in take_always if y[0]==x[0]][0]
x.insert(1,value)
take_always = nrml
houses = sorted(houses, reverse=True, key=operator.itemgetter(1))
# print(houses)
while len(houses) :
group = []
if take_always:
for al_tk in take_always :
group.append(al_tk+[shedding.get(al_tk[0],0)])
shedding[al_tk[0]] = shedding.get(al_tk[0],0)
while sum( [h[1] for h in group]) <= cut and len(houses) :
i = 0
group.append(houses[i]+[shedding.get(houses[i][0],0)])
shedding[houses[i][0]] = shedding.get(houses[i][0],0)
del houses[i]
groups.append(group)
if sum([ h[1] for h in groups[-1] ]) < cut :
groups = groups[:-1]
return groups[:1],shedding
def load_set():
'''
Load the data
'''
df = pd.read_hdf(FILE_PATH)
calc_normalized()
'''
Calculate the hourly average
'''
date_hour_groups = df.groupby(['date','hour'])
total_cons = [date_hour_groups.get_group((a,b))['value'].sum() for a,b in date_hour_groups.groups ]
avg_h_cons = sum(total_cons)/len(total_cons) *1.0
house_count = len(df['house_id'].unique())
'''
Create the groups
'''
shedding = {}
#For each hour
loads = 1
last_df = None
full_df = None
number_shed = 0
x = []
y = []
ym = []
yx = []
deficits = [] ##
loads_cut = [] ##
numbers_shed = [] ##
discoms_i = [] ##
discomforts2 = [] ## To calculate discomforts for every x load sheds!
DISCOMS = [] ## To calculate discomforts for every x load sheds!
discomforts = {} ##
for a,b in date_hour_groups.groups :
print ('*'*60)
print ('{} - {} - {}'.format(loads,a,b))
try :
avg_h_cons = df.loc[df['date'] == a]['value'].mean()*house_count
date_hour_group = date_hour_groups.get_group((a,b))
h_cons = date_hour_group['value'].sum()
cut = h_cons - avg_h_cons
load_cut = 0 ##
discomfort = 0 ##
if h_cons >= avg_h_cons :
deficits.append(cut) ##
#Form groups
groups,shedding = form_groups(date_hour_group,avg_h_cons,cut,shedding)
#Shed, by the cumulative number of sheds in the group
shed_sums = [[sum([h[3] for h in groups[i]]),i] for i in range(0,len(groups))]
min_shed = min([g[0] for g in shed_sums])
g_index = [g[1] for g in shed_sums if g[0] == min_shed][0]
#shed
for h in groups[g_index] :
h[3] += 1
# if np.isinf(h[2]) or np.isnan(h[2]) or h[2] < 0: ##
# h[2] = 0
discomforts[h[0]] = discomforts.get(h[0], 0) + h[2] # Just changed
shedding[h[0]] = h[3]
if not NO_LISTS :
if not take_always or h[0] not in [x[0] for x in take_always]:
exclude_a.append(h[0])
for hs in groups[g_index] :
load_cut += hs[1] ##
# if np.isinf(hs[2]) or np.isnan(hs[2]): ##
# hs[2] = 0 ##
discomfort += hs[2] ##
print('ID : {:>10.0f}, CONS : {:>10.2f}, NRMLZD AVERAGE : {:>10.2f}, SHED : {:>10.2f}, DISC : {:>10.2f}'.format(hs[0],hs[1],hs[2],hs[3],discomforts.get(hs[0],0)))
number_shed +=len(groups[g_index])
num_shed = len(groups[g_index]) ##
print ('CUT : {:>10.2f}, CONSUMPTION {:>10.2f}'.format(cut,h_cons))
print ('Excluded SHED')
print (exclude_a)
print ('Excluded STD')
print (exclude_b)
loads +=1
loads_cut.append(load_cut) ##
numbers_shed.append(num_shed) ##
discoms_i.append(discomfort) ##
discomforts2.append(discomfort) ## To calculate discomforts for every x load sheds!
if loads % LOAD_SEG == 0 and loads not in x :
full_df = pd.DataFrame(list(shedding.items()),columns = ['house','shedding']).set_index('house')
if last_df is None :
last_df = full_df.copy(True)
last_df['shed
|
group_A
|
identifier_name
|
|
algo_2_2.py
|
discomforts for every x load sheds!
DISCOMS = [] ## To calculate discomforts for every x load sheds!
discomforts = {} ##
for a,b in date_hour_groups.groups :
print ('*'*60)
print ('{} - {} - {}'.format(loads,a,b))
try :
avg_h_cons = df.loc[df['date'] == a]['value'].mean()*house_count
date_hour_group = date_hour_groups.get_group((a,b))
h_cons = date_hour_group['value'].sum()
cut = h_cons - avg_h_cons
load_cut = 0 ##
discomfort = 0 ##
if h_cons >= avg_h_cons :
deficits.append(cut) ##
#Form groups
groups,shedding = form_groups(date_hour_group,avg_h_cons,cut,shedding)
#Shed, by the cumulative number of sheds in the group
shed_sums = [[sum([h[3] for h in groups[i]]),i] for i in range(0,len(groups))]
min_shed = min([g[0] for g in shed_sums])
g_index = [g[1] for g in shed_sums if g[0] == min_shed][0]
#shed
for h in groups[g_index] :
h[3] += 1
# if np.isinf(h[2]) or np.isnan(h[2]) or h[2] < 0: ##
# h[2] = 0
discomforts[h[0]] = discomforts.get(h[0], 0) + h[2] # Just changed
shedding[h[0]] = h[3]
if not NO_LISTS :
if not take_always or h[0] not in [x[0] for x in take_always]:
exclude_a.append(h[0])
for hs in groups[g_index] :
load_cut += hs[1] ##
# if np.isinf(hs[2]) or np.isnan(hs[2]): ##
# hs[2] = 0 ##
discomfort += hs[2] ##
print('ID : {:>10.0f}, CONS : {:>10.2f}, NRMLZD AVERAGE : {:>10.2f}, SHED : {:>10.2f}, DISC : {:>10.2f}'.format(hs[0],hs[1],hs[2],hs[3],discomforts.get(hs[0],0)))
number_shed +=len(groups[g_index])
num_shed = len(groups[g_index]) ##
print ('CUT : {:>10.2f}, CONSUMPTION {:>10.2f}'.format(cut,h_cons))
print ('Excluded SHED')
print (exclude_a)
print ('Excluded STD')
print (exclude_b)
loads +=1
loads_cut.append(load_cut) ##
numbers_shed.append(num_shed) ##
discoms_i.append(discomfort) ##
discomforts2.append(discomfort) ## To calculate discomforts for every x load sheds!
if loads % LOAD_SEG == 0 and loads not in x :
full_df = pd.DataFrame(list(shedding.items()),columns = ['house','shedding']).set_index('house')
if last_df is None :
last_df = full_df.copy(True)
last_df['shedding'] = 0
now_df = full_df.subtract(last_df,axis=1)
now_df['total'] = full_df['shedding']
last_df = full_df.copy(True)
print ('*'*60)
print ('LAST {}/{} LOADS '.format(LOAD_SEG,loads))
print ('MAX : HOUSE {}, SHEDS {}'.format(now_df['shedding'].argmax(),now_df['shedding'].max()))
print ('MIN : HOUSE {}, SHEDS {}'.format(now_df['shedding'].argmin(),now_df['shedding'].min()))
print ('NUMBER OF HOUSES SHED : {}'.format(number_shed))
x.append(loads)
y.append(number_shed)
DISCOMS.append(sum(discomforts2))
yx.append(now_df['shedding'].max())
ym.append(now_df['shedding'].min())
number_shed = 0
discomforts2 = [] ## To calculate discomforts for every x load sheds!
if PAUSE_PRINT :
input()
except Exception as e :
print (e)
pass
print ('*'*60)
print ('TOTAL LOADS')
print ('MAX : HOUSE {}, SHEDS {}'.format(full_df['shedding'].argmax(),full_df['shedding'].max()))
print ('MIN : HOUSE {}, SHEDS {}'.format(full_df['shedding'].argmin(),full_df['shedding'].min()))
total_shed = len(full_df.loc[full_df['shedding'] != 0])
print ('TOTAL HOUSES SHED : {}'.format(total_shed))
# print (x)
# print (yx)
# print (ym)
# print (y)
print(discomforts) ### This is a dictionary with ID as key and aggregated discomfort as value over the entire shedding period
print(sorted(discomforts.items(), key=operator.itemgetter(1)))
max_value = max(discomforts.values()) # Getting the maximum discomfort value from dictionary
max_keys = [k for k, v in discomforts.items() if v == max_value] # getting all keys containing the maximum
# print(max_value, max_keys)
min_value = min(discomforts.values()) # Getting the minimum discomfort value from dictionary
min_keys = [k for k, v in discomforts.items() if v == min_value] # getting all keys containing the minimum
# print(min_value, min_keys)
overall_discomfort = sum(discomforts.values()) # Summing up all discomfort values in dictionary
# print(overall_discomfort)
utilitarian = overall_discomfort
egalitarian = max_value
envy_freeness = max_value - min_value
# print (utilitarian, egalitarian, envyness)
print('Utilitarian : {:>10.2f}, Egalitarian : {:>10.2f}, Envy-freeness : {:>10.2f}'.format(utilitarian, egalitarian, envy_freeness))
print ('Number of houses shed : {:>10.0f}'.format(sum(y)))
print('Discomfort caused per house shed : {:>10.2f}'.format(utilitarian/sum(y)))
# print(discoms_i)
# overall_discom = sum(discoms_i)
# print(overall_discom)
# print(DISCOMS) ### DISCOMS IS PER X NUMBER OF SHEDS!!!
# overall_DISCOMS = sum(DISCOMS)
# print(overall_DISCOMS)
# plt.bar(x,y,width=LOAD_SEG/2.0,color='r',align='center')
# plt.title('Sheds')
# plt.show()
#
# p1 = plt.bar(x, yx, LOAD_SEG/2.0, color='g' )
# plt.title('Max')
# plt.show()
#
# p2 = plt.bar(x, ym, LOAD_SEG/2.0, color='b')
# plt.title('min')
# plt.show()
# ticks = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
#
# plt.bar(x,y,width=LOAD_SEG/2.0,color='b',align='center',label = 'Sheds')
# plt.xlabel('Every 100 shedding events')
# plt.ylabel('Number of households shed')
# plt.xticks(ticks, rotation='horizontal')
# plt.ylim([0, max(y)* 1.3])
# plt.show()
#
# p1 = plt.bar(x, yx, LOAD_SEG/2.0, color='b',label = 'Max')
# p2 = plt.bar(x, ym, LOAD_SEG/2.0, color='r',bottom=yx)
# plt.xlabel('Every 100 shedding events')
# plt.ylabel('Number of households shed')
# plt.xticks(ticks, rotation='horizontal')
# plt.legend((p1[0], p2[0]),('Number of sheds of household most shed','Number of sheds of household least shed'),
# fontsize=10, ncol = 1, framealpha = 0, fancybox = True)
# plt.ylim([0, max([sum(x) for x in zip(yx,ym)])*1.3])
# plt.show()
#
#
#
# plt.bar(x,y,width=LOAD_SEG/2.0,color='g',align='center',label = 'Sheds')
# plt.xlabel('Every 100 shedding events')
# plt.ylabel('Number of households shed')
# plt.xticks(x, rotation='horizontal')
|
# plt.ylim([0, max(y)* 1.3])
# plt.show()
|
random_line_split
|
|
algo_2_2.py
|
]
groups = []
#Get the right normalized group
nrml = NRML[houses[0][2].month][houses[0][2].weekday()][houses[0][2].hour]
nrml = {x:nrml[x] for x in nrml if x in [y[0] for y in houses]}
nrml = list([list(x) for x in sorted(nrml.items(), key=operator.itemgetter(1))])
for x in nrml :
value = [y[1] for y in houses if y[0]==x[0]][0]
x.insert(1,value)
houses = nrml
if take_always and len(take_always):
#Do same for take_always
nrml = NRML[take_always[0][2].month][take_always[0][2].weekday()][take_always[0][2].hour]
nrml = {x:nrml[x] for x in nrml if x in [y[0] for y in take_always]}
nrml = list([list(x) for x in sorted(nrml.items(), key=operator.itemgetter(1))])
for x in nrml :
value = [y[1] for y in take_always if y[0]==x[0]][0]
x.insert(1,value)
take_always = nrml
houses = sorted(houses, reverse=True, key=operator.itemgetter(1))
# print(houses)
while len(houses) :
group = []
if take_always:
for al_tk in take_always :
group.append(al_tk+[shedding.get(al_tk[0],0)])
shedding[al_tk[0]] = shedding.get(al_tk[0],0)
while sum( [h[1] for h in group]) <= cut and len(houses) :
i = 0
group.append(houses[i]+[shedding.get(houses[i][0],0)])
shedding[houses[i][0]] = shedding.get(houses[i][0],0)
del houses[i]
groups.append(group)
if sum([ h[1] for h in groups[-1] ]) < cut :
groups = groups[:-1]
return groups[:1],shedding
def load_set():
'''
Load the data
'''
df = pd.read_hdf(FILE_PATH)
calc_normalized()
'''
Calculate the hourly average
'''
date_hour_groups = df.groupby(['date','hour'])
total_cons = [date_hour_groups.get_group((a,b))['value'].sum() for a,b in date_hour_groups.groups ]
avg_h_cons = sum(total_cons)/len(total_cons) *1.0
house_count = len(df['house_id'].unique())
'''
Create the groups
'''
shedding = {}
#For each hour
loads = 1
last_df = None
full_df = None
number_shed = 0
x = []
y = []
ym = []
yx = []
deficits = [] ##
loads_cut = [] ##
numbers_shed = [] ##
discoms_i = [] ##
discomforts2 = [] ## To calculate discomforts for every x load sheds!
DISCOMS = [] ## To calculate discomforts for every x load sheds!
discomforts = {} ##
for a,b in date_hour_groups.groups :
print ('*'*60)
print ('{} - {} - {}'.format(loads,a,b))
try :
avg_h_cons = df.loc[df['date'] == a]['value'].mean()*house_count
date_hour_group = date_hour_groups.get_group((a,b))
h_cons = date_hour_group['value'].sum()
cut = h_cons - avg_h_cons
load_cut = 0 ##
discomfort = 0 ##
if h_cons >= avg_h_cons :
deficits.append(cut) ##
#Form groups
groups,shedding = form_groups(date_hour_group,avg_h_cons,cut,shedding)
#Shed, by the cumulative number of sheds in the group
shed_sums = [[sum([h[3] for h in groups[i]]),i] for i in range(0,len(groups))]
min_shed = min([g[0] for g in shed_sums])
g_index = [g[1] for g in shed_sums if g[0] == min_shed][0]
#shed
for h in groups[g_index] :
h[3] += 1
# if np.isinf(h[2]) or np.isnan(h[2]) or h[2] < 0: ##
# h[2] = 0
discomforts[h[0]] = discomforts.get(h[0], 0) + h[2] # Just changed
shedding[h[0]] = h[3]
if not NO_LISTS :
if not take_always or h[0] not in [x[0] for x in take_always]:
exclude_a.append(h[0])
for hs in groups[g_index] :
load_cut += hs[1] ##
# if np.isinf(hs[2]) or np.isnan(hs[2]): ##
# hs[2] = 0 ##
discomfort += hs[2] ##
print('ID : {:>10.0f}, CONS : {:>10.2f}, NRMLZD AVERAGE : {:>10.2f}, SHED : {:>10.2f}, DISC : {:>10.2f}'.format(hs[0],hs[1],hs[2],hs[3],discomforts.get(hs[0],0)))
number_shed +=len(groups[g_index])
num_shed = len(groups[g_index]) ##
print ('CUT : {:>10.2f}, CONSUMPTION {:>10.2f}'.format(cut,h_cons))
print ('Excluded SHED')
print (exclude_a)
print ('Excluded STD')
print (exclude_b)
loads +=1
loads_cut.append(load_cut) ##
numbers_shed.append(num_shed) ##
discoms_i.append(discomfort) ##
discomforts2.append(discomfort) ## To calculate discomforts for every x load sheds!
if loads % LOAD_SEG == 0 and loads not in x :
full_df = pd.DataFrame(list(shedding.items()),columns = ['house','shedding']).set_index('house')
if last_df is None :
last_df = full_df.copy(True)
last_df['shedding'] = 0
now_df = full_df.subtract(last_df,axis=1)
now_df['total'] = full_df['shedding']
last_df = full_df.copy(True)
print ('*'*60)
print ('LAST {}/{} LOADS '.format(LOAD_SEG,loads))
print ('MAX : HOUSE {}, SHEDS {}'.format(now_df['shedding'].argmax(),now_df['shedding'].max()))
print ('MIN : HOUSE {}, SHEDS {}'.format(now_df['shedding'].argmin(),now_df['shedding'].min()))
print ('NUMBER OF HOUSES SHED : {}'.format(number_shed))
x.append(loads)
y.append(number_shed)
DISCOMS.append(sum(discomforts2))
yx.append(now_df['shedding'].max())
ym.append(now_df['shedding'].min())
number_shed = 0
discomforts2 = [] ## To calculate discomforts for every x load sheds!
if PAUSE_PRINT :
input()
except Exception as e :
print (e)
pass
print ('*'*60)
print ('TOTAL LOADS')
print ('MAX : HOUSE {}, SHEDS {}'.format(full_df['shedding'].argmax(),full_df['shedding'].max()))
print ('MIN : HOUSE {}, SHEDS {}'.format(full_df['shed
|
global take_always
take_always = None
if NO_LISTS:
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows()]
else :
#Iterate the rows
if len(exclude_a) == len(date_hour_group.index) :
exclude_a.clear()
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows() if row['house_id'] not in exclude_a and row['house_id'] not in exclude_b]
if sum( [h[1] for h in houses]) < cut :
exclude_a.clear()
take_always = houses
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows() if \
row['house_id'] not in exclude_a and \
row['house_id'] not in exclude_b and \
row['house_id'] not in [x[0] for x in take_always]
|
identifier_body
|
|
algo_2_2.py
|
( [h[1] for h in houses]) < cut :
exclude_a.clear()
take_always = houses
houses = [[row['house_id'],row['value'],datetime.strptime(row['date']+'-'+str(row['hour']),DATE_FORMAT)] for index,row in date_hour_group.iterrows() if \
row['house_id'] not in exclude_a and \
row['house_id'] not in exclude_b and \
row['house_id'] not in [x[0] for x in take_always]
]
groups = []
#Get the right normalized group
nrml = NRML[houses[0][2].month][houses[0][2].weekday()][houses[0][2].hour]
nrml = {x:nrml[x] for x in nrml if x in [y[0] for y in houses]}
nrml = list([list(x) for x in sorted(nrml.items(), key=operator.itemgetter(1))])
for x in nrml :
value = [y[1] for y in houses if y[0]==x[0]][0]
x.insert(1,value)
houses = nrml
if take_always and len(take_always):
#Do same for take_always
nrml = NRML[take_always[0][2].month][take_always[0][2].weekday()][take_always[0][2].hour]
nrml = {x:nrml[x] for x in nrml if x in [y[0] for y in take_always]}
nrml = list([list(x) for x in sorted(nrml.items(), key=operator.itemgetter(1))])
for x in nrml :
value = [y[1] for y in take_always if y[0]==x[0]][0]
x.insert(1,value)
take_always = nrml
houses = sorted(houses, reverse=True, key=operator.itemgetter(1))
# print(houses)
while len(houses) :
group = []
if take_always:
for al_tk in take_always :
group.append(al_tk+[shedding.get(al_tk[0],0)])
shedding[al_tk[0]] = shedding.get(al_tk[0],0)
while sum( [h[1] for h in group]) <= cut and len(houses) :
|
groups.append(group)
if sum([ h[1] for h in groups[-1] ]) < cut :
groups = groups[:-1]
return groups[:1],shedding
def load_set():
'''
Load the data
'''
df = pd.read_hdf(FILE_PATH)
calc_normalized()
'''
Calculate the hourly average
'''
date_hour_groups = df.groupby(['date','hour'])
total_cons = [date_hour_groups.get_group((a,b))['value'].sum() for a,b in date_hour_groups.groups ]
avg_h_cons = sum(total_cons)/len(total_cons) *1.0
house_count = len(df['house_id'].unique())
'''
Create the groups
'''
shedding = {}
#For each hour
loads = 1
last_df = None
full_df = None
number_shed = 0
x = []
y = []
ym = []
yx = []
deficits = [] ##
loads_cut = [] ##
numbers_shed = [] ##
discoms_i = [] ##
discomforts2 = [] ## To calculate discomforts for every x load sheds!
DISCOMS = [] ## To calculate discomforts for every x load sheds!
discomforts = {} ##
for a,b in date_hour_groups.groups :
print ('*'*60)
print ('{} - {} - {}'.format(loads,a,b))
try :
avg_h_cons = df.loc[df['date'] == a]['value'].mean()*house_count
date_hour_group = date_hour_groups.get_group((a,b))
h_cons = date_hour_group['value'].sum()
cut = h_cons - avg_h_cons
load_cut = 0 ##
discomfort = 0 ##
if h_cons >= avg_h_cons :
deficits.append(cut) ##
#Form groups
groups,shedding = form_groups(date_hour_group,avg_h_cons,cut,shedding)
#Shed, by the cumulative number of sheds in the group
shed_sums = [[sum([h[3] for h in groups[i]]),i] for i in range(0,len(groups))]
min_shed = min([g[0] for g in shed_sums])
g_index = [g[1] for g in shed_sums if g[0] == min_shed][0]
#shed
for h in groups[g_index] :
h[3] += 1
# if np.isinf(h[2]) or np.isnan(h[2]) or h[2] < 0: ##
# h[2] = 0
discomforts[h[0]] = discomforts.get(h[0], 0) + h[2] # Just changed
shedding[h[0]] = h[3]
if not NO_LISTS :
if not take_always or h[0] not in [x[0] for x in take_always]:
exclude_a.append(h[0])
for hs in groups[g_index] :
load_cut += hs[1] ##
# if np.isinf(hs[2]) or np.isnan(hs[2]): ##
# hs[2] = 0 ##
discomfort += hs[2] ##
print('ID : {:>10.0f}, CONS : {:>10.2f}, NRMLZD AVERAGE : {:>10.2f}, SHED : {:>10.2f}, DISC : {:>10.2f}'.format(hs[0],hs[1],hs[2],hs[3],discomforts.get(hs[0],0)))
number_shed +=len(groups[g_index])
num_shed = len(groups[g_index]) ##
print ('CUT : {:>10.2f}, CONSUMPTION {:>10.2f}'.format(cut,h_cons))
print ('Excluded SHED')
print (exclude_a)
print ('Excluded STD')
print (exclude_b)
loads +=1
loads_cut.append(load_cut) ##
numbers_shed.append(num_shed) ##
discoms_i.append(discomfort) ##
discomforts2.append(discomfort) ## To calculate discomforts for every x load sheds!
if loads % LOAD_SEG == 0 and loads not in x :
full_df = pd.DataFrame(list(shedding.items()),columns = ['house','shedding']).set_index('house')
if last_df is None :
last_df = full_df.copy(True)
last_df['shedding'] = 0
now_df = full_df.subtract(last_df,axis=1)
now_df['total'] = full_df['shedding']
last_df = full_df.copy(True)
print ('*'*60)
print ('LAST {}/{} LOADS '.format(LOAD_SEG,loads))
print ('MAX : HOUSE {}, SHEDS {}'.format(now_df['shedding'].argmax(),now_df['shedding'].max()))
print ('MIN : HOUSE {}, SHEDS {}'.format(now_df['shedding'].argmin(),now_df['shedding'].min()))
print ('NUMBER OF HOUSES SHED : {}'.format(number_shed))
x.append(loads)
y.append(number_shed)
DISCOMS.append(sum(discomforts2))
yx.append(now_df['shedding'].max())
ym.append(now_df['shedding'].min())
number_shed = 0
discomforts2 = [] ## To calculate discomforts for every x load sheds!
if PAUSE_PRINT :
input()
except Exception as e :
print (e)
pass
print ('*'*60)
print ('TOTAL LOADS')
print ('MAX : HOUSE {}, SHEDS {}'.format(full_df['shedding'].argmax(),full_df['shedding'].max()))
print ('MIN : HOUSE {}, SHEDS {}'.format(full_df['shedding'].argmin(),full_df['shedding'].min()))
total_shed = len(full_df.loc[full_df['shedding'] != 0])
print ('TOTAL HOUSES SHED : {}'.format(total_shed))
# print (x)
# print (yx)
# print (ym)
# print (y)
print(discomforts) ### This is a dictionary with ID as key and aggregated discomfort as value over the entire shedding period
print(sorted(discomforts.items(), key=operator.itemgetter(1)))
max_value = max(discomforts.values()) # Getting the maximum discomfort value from dictionary
max_keys = [k for k, v in discomforts.items() if
|
i = 0
group.append(houses[i]+[shedding.get(houses[i][0],0)])
shedding[houses[i][0]] = shedding.get(houses[i][0],0)
del houses[i]
|
conditional_block
|
api.go
|
aturity: int32(api.node.node.Params.CoinbaseMaturity),
Modules: []string{cmds.DefaultServiceNameSpace, cmds.MinerNameSpace, cmds.TestNameSpace, cmds.LogNameSpace},
}
ret.GraphState = GetGraphStateResult(best.GraphState)
hostdns := api.node.node.peerServer.HostDNS()
if hostdns != nil {
ret.DNS = hostdns.String()
}
if api.node.node.peerServer.Node() != nil {
ret.QNR = api.node.node.peerServer.Node().String()
}
if len(api.node.node.peerServer.HostAddress()) > 0 {
ret.Addresss = api.node.node.peerServer.HostAddress()
}
// soft forks
ret.ConsensusDeployment = make(map[string]*json.ConsensusDeploymentDesc)
for deployment, deploymentDetails := range params.ActiveNetParams.Deployments {
// Map the integer deployment ID into a human readable
// fork-name.
var forkName string
switch deployment {
case params.DeploymentTestDummy:
forkName = "dummy"
case params.DeploymentToken:
forkName = "token"
default:
return nil, fmt.Errorf("Unknown deployment %v detected\n", deployment)
}
// Query the chain for the current status of the deployment as
// identified by its deployment ID.
deploymentStatus, err := api.node.blockManager.GetChain().ThresholdState(uint32(deployment))
if err != nil {
return nil, fmt.Errorf("Failed to obtain deployment status\n")
}
// Finally, populate the soft-fork description with all the
// information gathered above.
ret.ConsensusDeployment[forkName] = &json.ConsensusDeploymentDesc{
Status: deploymentStatus.HumanString(),
Bit: deploymentDetails.BitNumber,
StartTime: int64(deploymentDetails.StartTime),
Timeout: int64(deploymentDetails.ExpireTime),
}
if deploymentDetails.PerformTime != 0 {
ret.ConsensusDeployment[forkName].Perform = int64(deploymentDetails.PerformTime)
}
if deploymentDetails.StartTime >= blockchain.CheckerTimeThreshold {
if time.Unix(int64(deploymentDetails.ExpireTime), 0).After(best.MedianTime) {
startTime := time.Unix(int64(deploymentDetails.StartTime), 0)
ret.ConsensusDeployment[forkName].Since = best.MedianTime.Sub(startTime).String()
}
}
}
return ret, nil
}
// getDifficultyRatio returns the proof-of-work difficulty as a multiple of the
// minimum difficulty using the passed target taken from the header of a block.
func getDifficultyRatio(target *big.Int, params *params.Params, powType pow.PowType) float64 {
instance := pow.GetInstance(powType, 0, []byte{})
instance.SetParams(params.PowConfig)
// The minimum difficulty is the max possible proof-of-work limit bits
// converted back to a number. Note this is not the same as the proof of
// work limit directly because the block difficulty is encoded in a block
// with the compact form which loses precision.
base := instance.GetSafeDiff(0)
var difficulty *big.Rat
if powType == pow.BLAKE2BD || powType == pow.MEERXKECCAKV1 ||
powType == pow.QITMEERKECCAK256 ||
powType == pow.X8R16 ||
powType == pow.X16RV3 ||
powType == pow.CRYPTONIGHT {
if target.Cmp(big.NewInt(0)) > 0 {
difficulty = new(big.Rat).SetFrac(base, target)
}
} else {
difficulty = new(big.Rat).SetFrac(target, base)
}
if difficulty == nil {
// A non-positive target leaves the ratio unset; report zero rather than dereferencing nil.
return 0
}
outString := difficulty.FloatString(8)
diff, err := strconv.ParseFloat(outString, 64)
if err != nil {
log.Error(fmt.Sprintf("Cannot get difficulty: %v", err))
return 0
}
return diff
}
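// Hedged illustration (not part of the original file): the core ratio-to-float step
// getDifficultyRatio performs once it has a base and a target, shown with math/big and
// strconv directly. The helper name is an assumption.
func exampleDiffRatio(base, target *big.Int) float64 {
	if target.Sign() <= 0 {
		return 0
	}
	ratio := new(big.Rat).SetFrac(base, target)
	out, err := strconv.ParseFloat(ratio.FloatString(8), 64)
	if err != nil {
		return 0
	}
	return out
}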
// Return the peer info
func (api *PublicBlockChainAPI) GetPeerInfo(verbose *bool, network *string) (interface{}, error) {
vb := false
if verbose != nil {
vb = *verbose
}
networkName := ""
if network != nil {
networkName = *network
}
if len(networkName) <= 0 {
networkName = params.ActiveNetParams.Name
}
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
infos := make([]*json.GetPeerInfoResult, 0, len(peers))
for _, p := range peers {
if len(networkName) != 0 && networkName != "all" {
if p.Network != networkName {
continue
}
}
if !vb {
if !p.State.IsConnected() {
continue
}
}
info := &json.GetPeerInfoResult{
ID: p.PeerID,
Name: p.Name,
Address: p.Address,
BytesSent: p.BytesSent,
BytesRecv: p.BytesRecv,
Circuit: p.IsCircuit,
Bads: p.Bads,
}
info.Protocol = p.Protocol
info.Services = p.Services.String()
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
if p.IsTheSameNetwork() {
info.State = p.State.String()
}
if len(p.Version) > 0 {
info.Version = p.Version
}
if len(p.Network) > 0 {
info.Network = p.Network
}
if p.State.IsConnected() {
info.TimeOffset = p.TimeOffset
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
info.Direction = p.Direction.String()
if p.GraphState != nil {
info.GraphState = GetGraphStateResult(p.GraphState)
}
if ps.PeerSync().SyncPeer() != nil {
info.SyncNode = p.PeerID == ps.PeerSync().SyncPeer().GetID().String()
} else {
info.SyncNode = false
}
info.ConnTime = p.ConnTime.Truncate(time.Second).String()
info.GSUpdate = p.GraphStateDur.Truncate(time.Second).String()
}
if !p.LastSend.IsZero() {
info.LastSend = p.LastSend.String()
}
if !p.LastRecv.IsZero() {
info.LastRecv = p.LastRecv.String()
}
if len(p.QNR) > 0 {
info.QNR = p.QNR
}
infos = append(infos, info)
}
return infos, nil
}
// Return the RPC info
func (api *PublicBlockChainAPI) GetRpcInfo() (interface{}, error) {
rs := api.node.node.rpcServer.ReqStatus
jrs := []*cmds.JsonRequestStatus{}
for _, v := range rs {
jrs = append(jrs, v.ToJson())
}
return jrs, nil
}
func GetGraphStateResult(gs *blockdag.GraphState) *json.GetGraphStateResult {
if gs != nil {
mainTip := gs.GetMainChainTip()
tips := []string{mainTip.String() + " main"}
for k := range gs.GetTips().GetMap() {
if k.IsEqual(mainTip) {
continue
}
tips = append(tips, k.String())
}
return &json.GetGraphStateResult{
Tips: tips,
MainOrder: uint32(gs.GetMainOrder()),
Layer: uint32(gs.GetLayer()),
MainHeight: uint32(gs.GetMainHeight()),
}
}
return nil
}
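// Hedged usage sketch (not part of the original file): turn a DAG graph state into the
// JSON result above and summarize it as a single line. The helper name is an assumption.
func exampleGraphSummary(gs *blockdag.GraphState) string {
	res := GetGraphStateResult(gs)
	if res == nil {
		return "no graph state"
	}
	return fmt.Sprintf("order=%d layer=%d mainheight=%d tips=%d",
		res.MainOrder, res.Layer, res.MainHeight, len(res.Tips))
}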
func (api *PublicBlockChainAPI) GetTimeInfo() (interface{}, error) {
return fmt.Sprintf("Now:%s offset:%s", roughtime.Now(), roughtime.Offset()), nil
}
func (api *PublicBlockChainAPI) GetNetworkInfo() (interface{}, error) {
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
nstat := &json.NetworkStat{MaxConnected: ps.Config().MaxPeers,
MaxInbound: ps.Config().MaxInbound, Infos: []*json.NetworkInfo{}}
infos := map[string]*json.NetworkInfo{}
gsups := map[string][]time.Duration{}
for _, p := range peers {
nstat.TotalPeers++
if p.Services&protocol.Relay > 0 {
nstat.TotalRelays++
}
//
if len(p.Network) <= 0 {
continue
}
info, ok := infos[p.Network]
if !ok {
info = &json.NetworkInfo{Name: p.Network}
infos[p.Network] = info
nstat.Infos = append(nstat.Infos, info)
gsups[p.Network] = []time.Duration{0, 0, math.MaxInt64}
}
info.Peers++
if p.State.IsConnected() {
info.Connecteds++
nstat.TotalConnected++
|
gsups[p.Network][0] = gsups[p.Network][0] + p.GraphStateDur
if p.GraphStateDur > gsups[p.Network][1] {
|
random_line_split
|
|
api.go
|
00000*version.Major + 10000*version.Minor + 100*version.Patch),
BuildVersion: version.String(),
ProtocolVersion: int32(protocol.ProtocolVersion),
TotalSubsidy: best.TotalSubsidy,
TimeOffset: int64(api.node.blockManager.GetChain().TimeSource().Offset().Seconds()),
Connections: int32(len(api.node.node.peerServer.Peers().Connected())),
PowDiff: &json.PowDiff{
CurrentDiff: getDifficultyRatio(powNodes, api.node.node.Params, pow.MEERXKECCAKV1),
},
Network: params.ActiveNetParams.Name,
Confirmations: blockdag.StableConfirmations,
CoinbaseMaturity: int32(api.node.node.Params.CoinbaseMaturity),
Modules: []string{cmds.DefaultServiceNameSpace, cmds.MinerNameSpace, cmds.TestNameSpace, cmds.LogNameSpace},
}
ret.GraphState = GetGraphStateResult(best.GraphState)
hostdns := api.node.node.peerServer.HostDNS()
if hostdns != nil {
ret.DNS = hostdns.String()
}
if api.node.node.peerServer.Node() != nil {
ret.QNR = api.node.node.peerServer.Node().String()
}
if len(api.node.node.peerServer.HostAddress()) > 0 {
ret.Addresss = api.node.node.peerServer.HostAddress()
}
// soft forks
ret.ConsensusDeployment = make(map[string]*json.ConsensusDeploymentDesc)
for deployment, deploymentDetails := range params.ActiveNetParams.Deployments {
// Map the integer deployment ID into a human readable
// fork-name.
var forkName string
switch deployment {
case params.DeploymentTestDummy:
forkName = "dummy"
case params.DeploymentToken:
forkName = "token"
default:
return nil, fmt.Errorf("Unknown deployment %v detected\n", deployment)
}
// Query the chain for the current status of the deployment as
// identified by its deployment ID.
deploymentStatus, err := api.node.blockManager.GetChain().ThresholdState(uint32(deployment))
if err != nil {
return nil, fmt.Errorf("Failed to obtain deployment status\n")
}
// Finally, populate the soft-fork description with all the
// information gathered above.
ret.ConsensusDeployment[forkName] = &json.ConsensusDeploymentDesc{
Status: deploymentStatus.HumanString(),
Bit: deploymentDetails.BitNumber,
StartTime: int64(deploymentDetails.StartTime),
Timeout: int64(deploymentDetails.ExpireTime),
}
if deploymentDetails.PerformTime != 0 {
ret.ConsensusDeployment[forkName].Perform = int64(deploymentDetails.PerformTime)
}
if deploymentDetails.StartTime >= blockchain.CheckerTimeThreshold {
if time.Unix(int64(deploymentDetails.ExpireTime), 0).After(best.MedianTime) {
startTime := time.Unix(int64(deploymentDetails.StartTime), 0)
ret.ConsensusDeployment[forkName].Since = best.MedianTime.Sub(startTime).String()
}
}
}
return ret, nil
}
// getDifficultyRatio returns the proof-of-work difficulty as a multiple of the
// minimum difficulty using the passed bits field from the header of a block.
func getDifficultyRatio(target *big.Int, params *params.Params, powType pow.PowType) float64 {
instance := pow.GetInstance(powType, 0, []byte{})
instance.SetParams(params.PowConfig)
// The minimum difficulty is the max possible proof-of-work limit bits
// converted back to a number. Note this is not the same as the proof of
// work limit directly because the block difficulty is encoded in a block
// with the compact form which loses precision.
base := instance.GetSafeDiff(0)
var difficulty *big.Rat
if powType == pow.BLAKE2BD || powType == pow.MEERXKECCAKV1 ||
powType == pow.QITMEERKECCAK256 ||
powType == pow.X8R16 ||
powType == pow.X16RV3 ||
powType == pow.CRYPTONIGHT {
if target.Cmp(big.NewInt(0)) > 0 {
difficulty = new(big.Rat).SetFrac(base, target)
}
} else {
difficulty = new(big.Rat).SetFrac(target, base)
}
if difficulty == nil {
// A non-positive target leaves the ratio unset; report zero rather than dereferencing nil.
return 0
}
outString := difficulty.FloatString(8)
diff, err := strconv.ParseFloat(outString, 64)
if err != nil {
log.Error(fmt.Sprintf("Cannot get difficulty: %v", err))
return 0
}
return diff
}
// Return the peer info
func (api *PublicBlockChainAPI) GetPeerInfo(verbose *bool, network *string) (interface{}, error) {
vb := false
if verbose != nil {
vb = *verbose
}
networkName := ""
if network != nil {
networkName = *network
}
if len(networkName) <= 0 {
networkName = params.ActiveNetParams.Name
}
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
infos := make([]*json.GetPeerInfoResult, 0, len(peers))
for _, p := range peers {
if len(networkName) != 0 && networkName != "all" {
if p.Network != networkName {
continue
}
}
if !vb {
if !p.State.IsConnected() {
continue
}
}
info := &json.GetPeerInfoResult{
ID: p.PeerID,
Name: p.Name,
Address: p.Address,
BytesSent: p.BytesSent,
BytesRecv: p.BytesRecv,
Circuit: p.IsCircuit,
Bads: p.Bads,
}
info.Protocol = p.Protocol
info.Services = p.Services.String()
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
if p.IsTheSameNetwork() {
info.State = p.State.String()
}
if len(p.Version) > 0 {
info.Version = p.Version
}
if len(p.Network) > 0 {
info.Network = p.Network
}
if p.State.IsConnected() {
info.TimeOffset = p.TimeOffset
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
info.Direction = p.Direction.String()
if p.GraphState != nil {
info.GraphState = GetGraphStateResult(p.GraphState)
}
if ps.PeerSync().SyncPeer() != nil {
info.SyncNode = p.PeerID == ps.PeerSync().SyncPeer().GetID().String()
} else {
info.SyncNode = false
}
info.ConnTime = p.ConnTime.Truncate(time.Second).String()
info.GSUpdate = p.GraphStateDur.Truncate(time.Second).String()
}
if !p.LastSend.IsZero() {
info.LastSend = p.LastSend.String()
}
if !p.LastRecv.IsZero() {
info.LastRecv = p.LastRecv.String()
}
if len(p.QNR) > 0 {
info.QNR = p.QNR
}
infos = append(infos, info)
}
return infos, nil
}
// Return the RPC info
func (api *PublicBlockChainAPI) GetRpcInfo() (interface{}, error) {
rs := api.node.node.rpcServer.ReqStatus
jrs := []*cmds.JsonRequestStatus{}
for _, v := range rs
|
return jrs, nil
}
func GetGraphStateResult(gs *blockdag.GraphState) *json.GetGraphStateResult {
if gs != nil {
mainTip := gs.GetMainChainTip()
tips := []string{mainTip.String() + " main"}
for k := range gs.GetTips().GetMap() {
if k.IsEqual(mainTip) {
continue
}
tips = append(tips, k.String())
}
return &json.GetGraphStateResult{
Tips: tips,
MainOrder: uint32(gs.GetMainOrder()),
Layer: uint32(gs.GetLayer()),
MainHeight: uint32(gs.GetMainHeight()),
}
}
return nil
}
func (api *PublicBlockChainAPI) GetTimeInfo() (interface{}, error) {
return fmt.Sprintf("Now:%s offset:%s", roughtime.Now(), roughtime.Offset()), nil
}
func (api *PublicBlockChainAPI) GetNetworkInfo() (interface{}, error) {
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
nstat := &json.NetworkStat{MaxConnected: ps.Config().MaxPeers,
MaxInbound: ps.Config().MaxInbound, Infos: []*json.NetworkInfo{}}
infos := map[string]*json.NetworkInfo{}
gsups := map[string][]time.Duration{}
for _, p := range peers {
nstat.TotalPeers++
if
|
{
jrs = append(jrs, v.ToJson())
}
|
conditional_block
|
api.go
|
Result{
ID: p.PeerID,
Name: p.Name,
Address: p.Address,
BytesSent: p.BytesSent,
BytesRecv: p.BytesRecv,
Circuit: p.IsCircuit,
Bads: p.Bads,
}
info.Protocol = p.Protocol
info.Services = p.Services.String()
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
if p.IsTheSameNetwork() {
info.State = p.State.String()
}
if len(p.Version) > 0 {
info.Version = p.Version
}
if len(p.Network) > 0 {
info.Network = p.Network
}
if p.State.IsConnected() {
info.TimeOffset = p.TimeOffset
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
info.Direction = p.Direction.String()
if p.GraphState != nil {
info.GraphState = GetGraphStateResult(p.GraphState)
}
if ps.PeerSync().SyncPeer() != nil {
info.SyncNode = p.PeerID == ps.PeerSync().SyncPeer().GetID().String()
} else {
info.SyncNode = false
}
info.ConnTime = p.ConnTime.Truncate(time.Second).String()
info.GSUpdate = p.GraphStateDur.Truncate(time.Second).String()
}
if !p.LastSend.IsZero() {
info.LastSend = p.LastSend.String()
}
if !p.LastRecv.IsZero() {
info.LastRecv = p.LastRecv.String()
}
if len(p.QNR) > 0 {
info.QNR = p.QNR
}
infos = append(infos, info)
}
return infos, nil
}
// Return the RPC info
func (api *PublicBlockChainAPI) GetRpcInfo() (interface{}, error) {
rs := api.node.node.rpcServer.ReqStatus
jrs := []*cmds.JsonRequestStatus{}
for _, v := range rs {
jrs = append(jrs, v.ToJson())
}
return jrs, nil
}
func GetGraphStateResult(gs *blockdag.GraphState) *json.GetGraphStateResult {
if gs != nil {
mainTip := gs.GetMainChainTip()
tips := []string{mainTip.String() + " main"}
for k := range gs.GetTips().GetMap() {
if k.IsEqual(mainTip) {
continue
}
tips = append(tips, k.String())
}
return &json.GetGraphStateResult{
Tips: tips,
MainOrder: uint32(gs.GetMainOrder()),
Layer: uint32(gs.GetLayer()),
MainHeight: uint32(gs.GetMainHeight()),
}
}
return nil
}
func (api *PublicBlockChainAPI) GetTimeInfo() (interface{}, error) {
return fmt.Sprintf("Now:%s offset:%s", roughtime.Now(), roughtime.Offset()), nil
}
func (api *PublicBlockChainAPI) GetNetworkInfo() (interface{}, error) {
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
nstat := &json.NetworkStat{MaxConnected: ps.Config().MaxPeers,
MaxInbound: ps.Config().MaxInbound, Infos: []*json.NetworkInfo{}}
infos := map[string]*json.NetworkInfo{}
gsups := map[string][]time.Duration{}
for _, p := range peers {
nstat.TotalPeers++
if p.Services&protocol.Relay > 0 {
nstat.TotalRelays++
}
//
if len(p.Network) <= 0 {
continue
}
info, ok := infos[p.Network]
if !ok {
info = &json.NetworkInfo{Name: p.Network}
infos[p.Network] = info
nstat.Infos = append(nstat.Infos, info)
gsups[p.Network] = []time.Duration{0, 0, math.MaxInt64}
}
info.Peers++
if p.State.IsConnected() {
info.Connecteds++
nstat.TotalConnected++
gsups[p.Network][0] = gsups[p.Network][0] + p.GraphStateDur
if p.GraphStateDur > gsups[p.Network][1] {
gsups[p.Network][1] = p.GraphStateDur
}
if p.GraphStateDur < gsups[p.Network][2] {
gsups[p.Network][2] = p.GraphStateDur
}
}
if p.Services&protocol.Relay > 0 {
info.Relays++
}
}
for k, gu := range gsups {
info, ok := infos[k]
if !ok {
continue
}
if info.Connecteds > 0 {
avegs := time.Duration(0)
if info.Connecteds > 2 {
avegs = gu[0] - gu[1] - gu[2]
if avegs < 0 {
avegs = 0
}
cons := info.Connecteds - 2
avegs = time.Duration(int64(avegs) / int64(cons))
} else {
avegs = time.Duration(int64(gu[0]) / int64(info.Connecteds))
}
info.AverageGS = avegs.Truncate(time.Second).String()
info.MaxGS = gu[1].Truncate(time.Second).String()
info.MinGS = gu[2].Truncate(time.Second).String()
}
}
return nstat, nil
}
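The AverageGS figure computed above is a trimmed mean: once more than two peers of a network are connected, the largest and smallest GraphStateDur samples (gu[1] and gu[2]) are dropped from the running sum before dividing by the remaining count. A minimal standalone sketch of that calculation, using a hypothetical trimmedAverage helper that is not part of the actual codebase:

```go
package main

import (
	"fmt"
	"time"
)

// trimmedAverage mirrors the GS-update averaging in GetNetworkInfo: sum is the
// total of all samples, max and min are the extreme samples, and n is the
// number of connected peers that contributed. (Hypothetical helper, for
// illustration only.)
func trimmedAverage(sum, max, min time.Duration, n int) time.Duration {
	if n <= 0 {
		return 0
	}
	if n > 2 {
		trimmed := sum - max - min
		if trimmed < 0 {
			trimmed = 0
		}
		return time.Duration(int64(trimmed) / int64(n-2))
	}
	return time.Duration(int64(sum) / int64(n))
}

func main() {
	samples := []time.Duration{2 * time.Second, 40 * time.Second, 3 * time.Second, 5 * time.Second}
	var sum, max time.Duration
	min := time.Duration(1<<63 - 1) // same role as math.MaxInt64 in GetNetworkInfo
	for _, s := range samples {
		sum += s
		if s > max {
			max = s
		}
		if s < min {
			min = s
		}
	}
	// The 40s outlier and the 2s minimum are dropped: (3s+5s)/2 = 4s.
	fmt.Println(trimmedAverage(sum, max, min, len(samples)).Truncate(time.Second))
}
```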
func (api *PublicBlockChainAPI) GetSubsidy() (interface{}, error) {
best := api.node.blockManager.GetChain().BestSnapshot()
sc := api.node.blockManager.GetChain().GetSubsidyCache()
info := &json.SubsidyInfo{Mode: sc.GetMode(), TotalSubsidy: best.TotalSubsidy, BaseSubsidy: params.ActiveNetParams.BaseSubsidy}
if params.ActiveNetParams.TargetTotalSubsidy > 0 {
info.TargetTotalSubsidy = params.ActiveNetParams.TargetTotalSubsidy
info.LeftTotalSubsidy = info.TargetTotalSubsidy - int64(info.TotalSubsidy)
if info.LeftTotalSubsidy < 0 {
info.TargetTotalSubsidy = 0
}
totalTime := time.Duration(info.TargetTotalSubsidy / info.BaseSubsidy * int64(params.ActiveNetParams.TargetTimePerBlock))
info.TotalTime = totalTime.Truncate(time.Second).String()
firstMBlock := api.node.blockManager.GetChain().BlockDAG().GetBlockByOrder(1)
startTime := time.Unix(firstMBlock.GetData().GetTimestamp(), 0)
leftTotalTime := totalTime - time.Since(startTime)
if leftTotalTime < 0 {
leftTotalTime = 0
}
info.LeftTotalTime = leftTotalTime.Truncate(time.Second).String()
}
info.NextSubsidy = sc.CalcBlockSubsidy(api.node.blockManager.GetChain().BlockDAG().GetBlueInfo(api.node.blockManager.GetChain().BlockDAG().GetMainChainTip()))
return info, nil
}
type PrivateBlockChainAPI struct {
node *QitmeerFull
}
func NewPrivateBlockChainAPI(node *QitmeerFull) *PrivateBlockChainAPI {
return &PrivateBlockChainAPI{node}
}
// Stop the node
func (api *PrivateBlockChainAPI) Stop() (interface{}, error) {
select {
case api.node.node.rpcServer.RequestedProcessShutdown() <- struct{}{}:
default:
}
return "Qitmeer stopping.", nil
}
// Banlist
func (api *PrivateBlockChainAPI) Banlist() (interface{}, error) {
bl := api.node.node.peerServer.GetBanlist()
bls := []*json.GetBanlistResult{}
for k, v := range bl {
bls = append(bls, &json.GetBanlistResult{ID: k, Bads: v})
}
return bls, nil
}
// RemoveBan
func (api *PrivateBlockChainAPI) RemoveBan(id *string) (interface{}, error) {
ho := ""
if id != nil {
ho = *id
}
api.node.node.peerServer.RemoveBan(ho)
return true, nil
}
// SetRpcMaxClients
func (api *PrivateBlockChainAPI) SetRpcMaxClients(max int) (interface{}, error) {
if max <= 0 {
err := fmt.Errorf("error:Must greater than 0 (cur max =%d)", api.node.node.Config.RPCMaxClients)
return api.node.node.Config.RPCMaxClients, err
}
api.node.node.Config.RPCMaxClients = max
return api.node.node.Config.RPCMaxClients, nil
}
type PrivateLogAPI struct {
node *QitmeerFull
}
func NewPrivateLogAPI(node *QitmeerFull) *PrivateLogAPI {
return &PrivateLogAPI{node}
}
// Set the log level
func (api *PrivateLogAPI) SetLogLevel(level string) (interface{}, error)
|
{
err := common.ParseAndSetDebugLevels(level)
if err != nil {
return nil, err
}
return level, nil
}
|
identifier_body
|
|
api.go
|
owType == pow.CRYPTONIGHT {
if target.Cmp(big.NewInt(0)) > 0 {
difficulty = new(big.Rat).SetFrac(base, target)
}
} else {
difficulty = new(big.Rat).SetFrac(target, base)
}
outString := difficulty.FloatString(8)
diff, err := strconv.ParseFloat(outString, 64)
if err != nil {
log.Error(fmt.Sprintf("Cannot get difficulty: %v", err))
return 0
}
return diff
}
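The difficulty helper above boils down to a big.Rat ratio rendered to eight decimal places and re-parsed as a float64. A self-contained sketch of that conversion chain (the base and target values here are made-up placeholders, not real network parameters):

```go
package main

import (
	"fmt"
	"math/big"
	"strconv"
)

func main() {
	// Placeholder operands; in the real helper they come from the pow limit and
	// the block target, and the CRYPTONIGHT branch flips the fraction to
	// base/target while every other pow type uses target/base.
	base := new(big.Int).Lsh(big.NewInt(1), 32)
	target := big.NewInt(123456789)
	difficulty := new(big.Rat).SetFrac(target, base)
	outString := difficulty.FloatString(8) // fixed 8 decimal places, as in the helper
	diff, err := strconv.ParseFloat(outString, 64)
	if err != nil {
		fmt.Println("cannot get difficulty:", err)
		return
	}
	fmt.Println(outString, diff)
}
```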
// Return the peer info
func (api *PublicBlockChainAPI) GetPeerInfo(verbose *bool, network *string) (interface{}, error) {
vb := false
if verbose != nil {
vb = *verbose
}
networkName := ""
if network != nil {
networkName = *network
}
if len(networkName) <= 0 {
networkName = params.ActiveNetParams.Name
}
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
infos := make([]*json.GetPeerInfoResult, 0, len(peers))
for _, p := range peers {
if len(networkName) != 0 && networkName != "all" {
if p.Network != networkName {
continue
}
}
if !vb {
if !p.State.IsConnected() {
continue
}
}
info := &json.GetPeerInfoResult{
ID: p.PeerID,
Name: p.Name,
Address: p.Address,
BytesSent: p.BytesSent,
BytesRecv: p.BytesRecv,
Circuit: p.IsCircuit,
Bads: p.Bads,
}
info.Protocol = p.Protocol
info.Services = p.Services.String()
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
if p.IsTheSameNetwork() {
info.State = p.State.String()
}
if len(p.Version) > 0 {
info.Version = p.Version
}
if len(p.Network) > 0 {
info.Network = p.Network
}
if p.State.IsConnected() {
info.TimeOffset = p.TimeOffset
if p.Genesis != nil {
info.Genesis = p.Genesis.String()
}
info.Direction = p.Direction.String()
if p.GraphState != nil {
info.GraphState = GetGraphStateResult(p.GraphState)
}
if ps.PeerSync().SyncPeer() != nil {
info.SyncNode = p.PeerID == ps.PeerSync().SyncPeer().GetID().String()
} else {
info.SyncNode = false
}
info.ConnTime = p.ConnTime.Truncate(time.Second).String()
info.GSUpdate = p.GraphStateDur.Truncate(time.Second).String()
}
if !p.LastSend.IsZero() {
info.LastSend = p.LastSend.String()
}
if !p.LastRecv.IsZero() {
info.LastRecv = p.LastRecv.String()
}
if len(p.QNR) > 0 {
info.QNR = p.QNR
}
infos = append(infos, info)
}
return infos, nil
}
// Return the RPC info
func (api *PublicBlockChainAPI) GetRpcInfo() (interface{}, error) {
rs := api.node.node.rpcServer.ReqStatus
jrs := []*cmds.JsonRequestStatus{}
for _, v := range rs {
jrs = append(jrs, v.ToJson())
}
return jrs, nil
}
func GetGraphStateResult(gs *blockdag.GraphState) *json.GetGraphStateResult {
if gs != nil {
mainTip := gs.GetMainChainTip()
tips := []string{mainTip.String() + " main"}
for k := range gs.GetTips().GetMap() {
if k.IsEqual(mainTip) {
continue
}
tips = append(tips, k.String())
}
return &json.GetGraphStateResult{
Tips: tips,
MainOrder: uint32(gs.GetMainOrder()),
Layer: uint32(gs.GetLayer()),
MainHeight: uint32(gs.GetMainHeight()),
}
}
return nil
}
func (api *PublicBlockChainAPI) GetTimeInfo() (interface{}, error) {
return fmt.Sprintf("Now:%s offset:%s", roughtime.Now(), roughtime.Offset()), nil
}
func (api *PublicBlockChainAPI) GetNetworkInfo() (interface{}, error) {
ps := api.node.node.peerServer
peers := ps.Peers().StatsSnapshots()
nstat := &json.NetworkStat{MaxConnected: ps.Config().MaxPeers,
MaxInbound: ps.Config().MaxInbound, Infos: []*json.NetworkInfo{}}
infos := map[string]*json.NetworkInfo{}
gsups := map[string][]time.Duration{}
for _, p := range peers {
nstat.TotalPeers++
if p.Services&protocol.Relay > 0 {
nstat.TotalRelays++
}
//
if len(p.Network) <= 0 {
continue
}
info, ok := infos[p.Network]
if !ok {
info = &json.NetworkInfo{Name: p.Network}
infos[p.Network] = info
nstat.Infos = append(nstat.Infos, info)
gsups[p.Network] = []time.Duration{0, 0, math.MaxInt64}
}
info.Peers++
if p.State.IsConnected() {
info.Connecteds++
nstat.TotalConnected++
gsups[p.Network][0] = gsups[p.Network][0] + p.GraphStateDur
if p.GraphStateDur > gsups[p.Network][1] {
gsups[p.Network][1] = p.GraphStateDur
}
if p.GraphStateDur < gsups[p.Network][2] {
gsups[p.Network][2] = p.GraphStateDur
}
}
if p.Services&protocol.Relay > 0 {
info.Relays++
}
}
for k, gu := range gsups {
info, ok := infos[k]
if !ok {
continue
}
if info.Connecteds > 0 {
avegs := time.Duration(0)
if info.Connecteds > 2 {
avegs = gu[0] - gu[1] - gu[2]
if avegs < 0 {
avegs = 0
}
cons := info.Connecteds - 2
avegs = time.Duration(int64(avegs) / int64(cons))
} else {
avegs = time.Duration(int64(gu[0]) / int64(info.Connecteds))
}
info.AverageGS = avegs.Truncate(time.Second).String()
info.MaxGS = gu[1].Truncate(time.Second).String()
info.MinGS = gu[2].Truncate(time.Second).String()
}
}
return nstat, nil
}
func (api *PublicBlockChainAPI) GetSubsidy() (interface{}, error) {
best := api.node.blockManager.GetChain().BestSnapshot()
sc := api.node.blockManager.GetChain().GetSubsidyCache()
info := &json.SubsidyInfo{Mode: sc.GetMode(), TotalSubsidy: best.TotalSubsidy, BaseSubsidy: params.ActiveNetParams.BaseSubsidy}
if params.ActiveNetParams.TargetTotalSubsidy > 0 {
info.TargetTotalSubsidy = params.ActiveNetParams.TargetTotalSubsidy
info.LeftTotalSubsidy = info.TargetTotalSubsidy - int64(info.TotalSubsidy)
if info.LeftTotalSubsidy < 0 {
info.TargetTotalSubsidy = 0
}
totalTime := time.Duration(info.TargetTotalSubsidy / info.BaseSubsidy * int64(params.ActiveNetParams.TargetTimePerBlock))
info.TotalTime = totalTime.Truncate(time.Second).String()
firstMBlock := api.node.blockManager.GetChain().BlockDAG().GetBlockByOrder(1)
startTime := time.Unix(firstMBlock.GetData().GetTimestamp(), 0)
leftTotalTime := totalTime - time.Since(startTime)
if leftTotalTime < 0 {
leftTotalTime = 0
}
info.LeftTotalTime = leftTotalTime.Truncate(time.Second).String()
}
info.NextSubsidy = sc.CalcBlockSubsidy(api.node.blockManager.GetChain().BlockDAG().GetBlueInfo(api.node.blockManager.GetChain().BlockDAG().GetMainChainTip()))
return info, nil
}
type PrivateBlockChainAPI struct {
node *QitmeerFull
}
func NewPrivateBlockChainAPI(node *QitmeerFull) *PrivateBlockChainAPI {
return &PrivateBlockChainAPI{node}
}
// Stop the node
func (api *PrivateBlockChainAPI) Stop() (interface{}, error) {
select {
case api.node.node.rpcServer.RequestedProcessShutdown() <- struct{}{}:
default:
}
return "Qitmeer stopping.", nil
}
// Banlist
func (api *PrivateBlockChainAPI)
|
Banlist
|
identifier_name
|
|
createHeatmap.py
|
ypassing summary file creation'
os.system('cat ' + headJobName + ' > ' + verboseSummaryFile)
# Defined by 'print_crossCorrBinaryline_to_str' in CrossCorrToplist.c
# Not by the CrossCorrBinaryOutputEntry struct in CrossCorrToplist.h, confusingly
dataDTypes = np.dtype({'names':['freq','tp','argp','asini','ecc','period','estSens','evSquared','rho'],'formats':['f8','f8','f8','f8','f8','f8','f8','f8','f8']})
verboseData = np.loadtxt(verboseSummaryFile,dtype=dataDTypes)
fArray = verboseData['freq']
asiniArray = verboseData['asini']
pArray = verboseData['period']
if args.dFnotA:
dfArray = 2 * np.pi * fArray * asiniArray / pArray
modArray = dfArray
else:
modArray = asiniArray
tpArray = verboseData['tp']
RArray = verboseData['rho']
ESArray = verboseData['estSens']
# Note, we now have to deal with three-dimensionality
# For now, just make it two-dimensional with a command such as
# ./wrapCrossCorr.py --InjTpBand 0.001
# in ../example/
if (args.plot):
# The most confusing question is how to flip axes
# Reshaping, we realize quickly enough, depends on
# the layout of the data, which as noted elsewhere
# is listed f, t, a in the toplist
# We later remove one axis from this ordering
# Plotting (f, a), without t, the ordered entries
# are still in the right order (flipAxes = 0), but
# plotting (a, t), or (t, f), we have to reverse
# the order from the toplist (flipAxes = 1)
# Also, takeAxisIndex seems to run backwards from the
# (f, t, a) order: that is because a transpose is taken
# immediately after reshaping, yielding (a, t, f) order.
# We do this transpose to comply with pyplot.imshow
if args.TF:
xArray = tpArray
yArray = fArray
zArray = modArray
zIndex = args.AIndex
takeAxisIndex = 0
flipAxes = 1
elif args.AT:
xArray = modArray
yArray = tpArray
zArray = fArray
zIndex = args.FIndex
takeAxisIndex = 2
flipAxes = 1
elif args.FA:
xArray = fArray
yArray = modArray
zArray = tpArray
zIndex = args.TIndex
takeAxisIndex = 1
flipAxes = 0
else:
print 'Incompatible options'
# In the toplist, columns change from the right (big-endian)
# They are listed f, t, a,
# Python reads in entries in the order f0t0a0, f0t0a1, f0t1a0,...
FLen = len(np.unique(fArray))
TLen = len(np.unique(tpArray))
ALen = len(np.unique(modArray))
if args.dFnotA:
modLabel = 'Modulation depth: df (Hz)'
# This override is necessary because the df values are all unique
# in this mode. However, this makes it tricky because the graph
# really is skewed
ALen = len(np.unique(asiniArray))
else:
modLabel = 'Projected semi-major axis (light-s)'
if args.TF:
print 'T-F plot'
figXLabel = 'Periapsis time: tp (s)'
figYLabel = 'Frequency: f (Hz)'
graphHead = 'TF'
elif args.AT:
print 'A-T plot'
figXLabel = modLabel
figYLabel = 'Periapsis time: tp (s)'
graphHead = 'AT'
elif args.FA:
print 'F-A plot'
figXLabel = 'Frequency: f (Hz)'
figYLabel = modLabel
graphHead = 'FA'
else:
print 'Incompatible options'
xShaped3D = np.reshape(xArray, (FLen, TLen, ALen)).T
yShaped3D = np.reshape(yArray, (FLen, TLen, ALen)).T
zShaped3D = np.reshape(zArray, (FLen, TLen, ALen)).T
print 'Number of bins in data arrays (F,T,A): ' + str(xShaped3D.T.shape)
ESShaped3D = np.reshape(ESArray, (FLen, TLen, ALen)).T
RShaped3D = np.reshape(RArray, (FLen, TLen, ALen)).T
# Reduce to 2D
xShaped = takeAxisAndRotate(xShaped3D, zIndex, takeAxisIndex, flipAxes)
yShaped = takeAxisAndRotate(yShaped3D, zIndex, takeAxisIndex, flipAxes)
ESShaped = takeAxisAndRotate(ESShaped3D, zIndex, takeAxisIndex, flipAxes)
RShaped = takeAxisAndRotate(RShaped3D, zIndex, takeAxisIndex, flipAxes)
#x, y = np.meshgrid(xShaped[0, :], yShaped[:, 0])
extensions = [xShaped[0, 0], xShaped[-1, -1], yShaped[0, 0], yShaped[-1, -1]]
ESCenter = ESShaped.max()
RCenter = RShaped.max()
centerString = 'maximum value: '
centerESSpotX = str(xShaped.compress((ESShaped == ESCenter).flat)[0])
centerESSpotY = str(yShaped.compress((ESShaped == ESCenter).flat)[0])
centerRSpotX = str(xShaped.compress((RShaped == RCenter).flat)[0])
centerRSpotY = str(yShaped.compress((RShaped == RCenter).flat)[0])
pulsarName = "band-" + str(args.fCenter)
#plotImshow(args, 'ES', 'estSens', xArray, yArray, ESShaped, xShaped, yShaped, graphHead, pulsarName, figXLabel, figYLabel, ESCenter, centerESSpotX, centerESSpotY, centerString, extensions)
plotImshow(args, 'R', 'Rho statistic', xArray, yArray, RShaped, xShaped, yShaped, graphHead, pulsarName, figXLabel, figYLabel, RCenter, centerRSpotX, centerRSpotY, centerString, extensions)
def plotImshow(args, graphKind, graphKindLong, xArray, yArray, shaped, xShaped, yShaped, graphHead, pulsarName, figXLabel, figYLabel, center, centerSpotX, centerSpotY, centerString, extensions):
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111)
if (args.threeD == True):
ax.plot(xArray,yArray)
else:
paramSpacePixelMap = ax.imshow(shaped, origin = 'lower', \
interpolation = 'nearest', extent = extensions, cmap = args.colorMap)
paramSpacePixelMap = fig.colorbar(paramSpacePixelMap, shrink = 0.5, extend = 'both')
#print 'Skipping grid lines'
ax.set_aspect('auto')
ax.set_xlabel(figXLabel)
ax.set_ylabel(figYLabel)
ax.set_title(' ' + graphKindLong + \
' vs parameters for band centered ' + str(args.fCenter) + ' Hz at ' + ' \n \
' + centerString + str(center) + ' at (x, y) = (' + centerSpotX +', ' + centerSpotY + ') \n \
Number of bins in data arrays (x, y): ' + str(xShaped.T.shape) + ' \n \
')
if args.texMode:
print 'SHOULD BE Skipping title to conform with journal style'
if args.texMode:
plt.savefig(graphHead + 'results' + graphKind + '-' + pulsarName + '.eps', format='eps', dpi=720, bbox_inches='tight')
plt.savefig(graphHead + 'results' + graphKind + '-' + pulsarName + '.png')
plt.savefig(graphHead + 'results' + graphKind + '-' + pulsarName + '.pdf')
plt.close()
plt.clf()
|
def takeAxisAndRotate(threeDarray, zIndex, takeAxisIndex, flipAxes):
firstTwoDarray = np.take(threeDarray, zIndex, takeAxisIndex)
if (flipAxes == 1):
twoDarray = firstTwoDarray.T
elif (flipAxes == 0):
|
random_line_split
|
|
createHeatmap.py
|
Search band, or standalone: make plots appropriate for a 5 Hz band')
parser.add_argument('--elsewhere', help='Path to directory containing the output files')
parser.add_argument('--massiveSummary',action='store_true', help='Deal with large output directories')
parser.add_argument('--plotSkyContour',type=float, help='Plot a circle around this point in the sky: two arguments, alpha and delta in radians', nargs=2)
parser.add_argument('--noiseTest',action='store_true', help='Make histogram noise plots')
parser.add_argument('--templateSearch',action='store_true', help='Change commands in way needed for templateSearch')
parser.add_argument('--multiTemplateSearch',type=str, help='use instead of --templateSearch, specify number of bands in output directory')
parser.add_argument('--closed',action='store_true', help='Can be used, especially with --multiTemplateSearch, for working with closed pulsars')
parser.add_argument('--skipPlotH0',action='store_true',help='Use to skip plotting h0 in a given band, if desired for memory or speed.',default=False)
parser.add_argument('--colorMap',type=str,help='Colormap option for plots; jet is default for web, Greys better for papers',default='jet')
parser.add_argument('--texMode',action='store_true',help='Avoids plotting the head title, so as to fit many journal styles. Invokes the rc families for nice plots',default=False)
parser.add_argument('--plot',action='store_true',help='Generate the plots',default=True)
parser.add_argument('--FA',action='store_true', help='Plot the F-A plane', default=True)
parser.add_argument('--TF',action='store_true', help='Plot the T-F plane', default=False)
parser.add_argument('--AT',action='store_true', help='Plot the A-T plane', default=False)
parser.add_argument('--dFnotA',action='store_true', help='Plot dF instead of a sin i', default=False)
parser.add_argument('--FIndex',type=int, help='F index value to use for A-T plots', default=0)
parser.add_argument('--AIndex',type=int, help='A index value to use for T-F plots', default=0)
parser.add_argument('--TIndex',type=int, help='T index value to use for F-A plots', default=0)
parser.add_argument('--threeD', action='store_true', help='Attempt 3D plot -- currently useless', default=False)
args = parser.parse_args()
def
|
(args):
headJobName = args.nameToplist
verboseSummaryFile = 'verbose_summary-' + str(args.fCenter) +'.txt'
if args.bypassSummary:
print 'Bypassing summary file creation'
os.system('cat ' + headJobName + ' > ' + verboseSummaryFile)
# Defined by 'print_crossCorrBinaryline_to_str' in CrossCorrToplist.c
# Not by the CrossCorrBinaryOutputEntry struct in CrossCorrToplist.h, confusingly
dataDTypes = np.dtype({'names':['freq','tp','argp','asini','ecc','period','estSens','evSquared','rho'],'formats':['f8','f8','f8','f8','f8','f8','f8','f8','f8']})
verboseData = np.loadtxt(verboseSummaryFile,dtype=dataDTypes)
fArray = verboseData['freq']
asiniArray = verboseData['asini']
pArray = verboseData['period']
if args.dFnotA:
dfArray = 2 * np.pi * fArray * asiniArray / pArray
modArray = dfArray
else:
modArray = asiniArray
tpArray = verboseData['tp']
RArray = verboseData['rho']
ESArray = verboseData['estSens']
# Note, we now have to deal with three-dimensionality
# For now, just make it two-dimensional with a command such as
# ./wrapCrossCorr.py --InjTpBand 0.001
# in ../example/
if (args.plot):
# The most confusing question is how to flip axes
# Reshaping, we realize quickly enough, depends on
# the layout of the data, which as noted elsewhere
# is listed f, t, a in the toplist
# We later remove one axis from this ordering
# Plotting (f, a), without t, the ordered entries
# are still in the right order (flipAxes = 0), but
# plotting (a, t), or (t, f), we have to reverse
# the order from the toplist (flipAxes = 1)
# Also, takeAxisIndex seems to run backwards from the
# (f, t, a) order: that is because a transpose is taken
# immediately after reshaping, yielding (a, t, f) order.
# We do this transpose to comply with pyplot.imshow
if args.TF:
xArray = tpArray
yArray = fArray
zArray = modArray
zIndex = args.AIndex
takeAxisIndex = 0
flipAxes = 1
elif args.AT:
xArray = modArray
yArray = tpArray
zArray = fArray
zIndex = args.FIndex
takeAxisIndex = 2
flipAxes = 1
elif args.FA:
xArray = fArray
yArray = modArray
zArray = tpArray
zIndex = args.TIndex
takeAxisIndex = 1
flipAxes = 0
else:
print 'Incompatible options'
# In the toplist, columns change from the right (big-endian)
# They are listed f, t, a,
# Python reads in entries in the order f0t0a0, f0t0a1, f0t1a0,...
FLen = len(np.unique(fArray))
TLen = len(np.unique(tpArray))
ALen = len(np.unique(modArray))
if args.dFnotA:
modLabel = 'Modulation depth: df (Hz)'
# This override is necessary because the df values are all unique
# in this mode. However, this makes it tricky because the graph
# really is skewed
ALen = len(np.unique(asiniArray))
else:
modLabel = 'Projected semi-major axis (light-s)'
if args.TF:
print 'T-F plot'
figXLabel = 'Periapsis time: tp (s)'
figYLabel = 'Frequency: f (Hz)'
graphHead = 'TF'
elif args.AT:
print 'A-T plot'
figXLabel = modLabel
figYLabel = 'Periapsis time: tp (s)'
graphHead = 'AT'
elif args.FA:
print 'F-A plot'
figXLabel = 'Frequency: f (Hz)'
figYLabel = modLabel
graphHead = 'FA'
else:
print 'Incompatible options'
xShaped3D = np.reshape(xArray, (FLen, TLen, ALen)).T
yShaped3D = np.reshape(yArray, (FLen, TLen, ALen)).T
zShaped3D = np.reshape(zArray, (FLen, TLen, ALen)).T
print 'Number of bins in data arrays (F,T,A): ' + str(xShaped3D.T.shape)
ESShaped3D = np.reshape(ESArray, (FLen, TLen, ALen)).T
RShaped3D = np.reshape(RArray, (FLen, TLen, ALen)).T
# Reduce to 2D
xShaped = takeAxisAndRotate(xShaped3D, zIndex, takeAxisIndex, flipAxes)
yShaped = takeAxisAndRotate(yShaped3D, zIndex, takeAxisIndex, flipAxes)
ESShaped = takeAxisAndRotate(ESShaped3D, zIndex, takeAxisIndex, flipAxes)
RShaped = takeAxisAndRotate(RShaped3D, zIndex, takeAxisIndex, flipAxes)
#x, y = np.meshgrid(xShaped[0, :], yShaped[:, 0])
extensions = [xShaped[0, 0], xShaped[-1, -1], yShaped[0, 0], yShaped[-1, -1]]
ESCenter = ESShaped.max()
RCenter = RShaped.max()
centerString = 'maximum value: '
centerESSpotX = str(xShaped.compress((ESShaped == ESCenter).flat)[0])
centerESSpotY = str(yShaped.compress((ESShaped == ESCenter).flat)[0])
centerRSpotX = str(xShaped.compress((RShaped == RCenter).flat)[0])
centerRSpotY = str(yShaped.compress((RShaped == RCenter).flat)[0])
pulsarName = "band-" + str(args.fCenter)
#plotImshow(args, 'ES', 'estSens', xArray, yArray, ESShaped, xShaped, y
|
summarizer
|
identifier_name
|
createHeatmap.py
|
# Defined by 'print_crossCorrBinaryline_to_str' in CrossCorrToplist.c
# Not by the CrossCorrBinaryOutputEntry struct in CrossCorrToplist.h, confusingly
dataDTypes = np.dtype({'names':['freq','tp','argp','asini','ecc','period','estSens','evSquared','rho'],'formats':['f8','f8','f8','f8','f8','f8','f8','f8','f8']})
verboseData = np.loadtxt(verboseSummaryFile,dtype=dataDTypes)
fArray = verboseData['freq']
asiniArray = verboseData['asini']
pArray = verboseData['period']
if args.dFnotA:
dfArray = 2 * np.pi * fArray * asiniArray / pArray
modArray = dfArray
else:
modArray = asiniArray
tpArray = verboseData['tp']
RArray = verboseData['rho']
ESArray = verboseData['estSens']
# Note, we now have to deal with three-dimensionality
# For now, just make it two-dimensional with a command such as
# ./wrapCrossCorr.py --InjTpBand 0.001
# in ../example/
if (args.plot):
# The most confusing question is how to flip axes
# Reshaping, we realize quickly enough, depends on
# the layout of the data, which as noted elsewhere
# is listed f, t, a in the toplist
# We later remove one axis from this ordering
# Plotting (f, a), without t, the ordered entries
# are still in the right order (flipAxes = 0), but
# plotting (a, t), or (t, f), we have to reverse
# the order from the toplist (flipAxes = 1)
# Also, takeAxisIndex seems to run backwards from the
# (f, t, a) order: that is because a transpose is taken
# immediately after reshaping, yielding (a, t, f) order.
# We do this transpose to comply with pyplot.imshow
if args.TF:
xArray = tpArray
yArray = fArray
zArray = modArray
zIndex = args.AIndex
takeAxisIndex = 0
flipAxes = 1
elif args.AT:
xArray = modArray
yArray = tpArray
zArray = fArray
zIndex = args.FIndex
takeAxisIndex = 2
flipAxes = 1
elif args.FA:
xArray = fArray
yArray = modArray
zArray = tpArray
zIndex = args.TIndex
takeAxisIndex = 1
flipAxes = 0
else:
print 'Incompatible options'
# In the toplist, columns change from the right (big-endian)
# They are listed f, t, a,
# Python reads in entries in the order f0t0a0, f0t0a1, f0t1a0,...
FLen = len(np.unique(fArray))
TLen = len(np.unique(tpArray))
ALen = len(np.unique(modArray))
if args.dFnotA:
modLabel = 'Modulation depth: df (Hz)'
# This override is necessary because the df values are all unique
# in this mode. However, this makes it tricky because the graph
# really is skewed
ALen = len(np.unique(asiniArray))
else:
modLabel = 'Projected semi-major axis (light-s)'
if args.TF:
print 'T-F plot'
figXLabel = 'Periapsis time: tp (s)'
figYLabel = 'Frequency: f (Hz)'
graphHead = 'TF'
elif args.AT:
print 'A-T plot'
figXLabel = modLabel
figYLabel = 'Periapsis time: tp (s)'
graphHead = 'AT'
elif args.FA:
print 'F-A plot'
figXLabel = 'Frequency: f (Hz)'
figYLabel = modLabel
graphHead = 'FA'
else:
print 'Incompatible options'
xShaped3D = np.reshape(xArray, (FLen, TLen, ALen)).T
yShaped3D = np.reshape(yArray, (FLen, TLen, ALen)).T
zShaped3D = np.reshape(zArray, (FLen, TLen, ALen)).T
print 'Number of bins in data arrays (F,T,A): ' + str(xShaped3D.T.shape)
ESShaped3D = np.reshape(ESArray, (FLen, TLen, ALen)).T
RShaped3D = np.reshape(RArray, (FLen, TLen, ALen)).T
# Reduce to 2D
xShaped = takeAxisAndRotate(xShaped3D, zIndex, takeAxisIndex, flipAxes)
yShaped = takeAxisAndRotate(yShaped3D, zIndex, takeAxisIndex, flipAxes)
ESShaped = takeAxisAndRotate(ESShaped3D, zIndex, takeAxisIndex, flipAxes)
RShaped = takeAxisAndRotate(RShaped3D, zIndex, takeAxisIndex, flipAxes)
#x, y = np.meshgrid(xShaped[0, :], yShaped[:, 0])
extensions = [xShaped[0, 0], xShaped[-1, -1], yShaped[0, 0], yShaped[-1, -1]]
ESCenter = ESShaped.max()
RCenter = RShaped.max()
centerString = 'maximum value: '
centerESSpotX = str(xShaped.compress((ESShaped == ESCenter).flat)[0])
centerESSpotY = str(yShaped.compress((ESShaped == ESCenter).flat)[0])
centerRSpotX = str(xShaped.compress((RShaped == RCenter).flat)[0])
centerRSpotY = str(yShaped.compress((RShaped == RCenter).flat)[0])
pulsarName = "band-" + str(args.fCenter)
#plotImshow(args, 'ES', 'estSens', xArray, yArray, ESShaped, xShaped, yShaped, graphHead, pulsarName, figXLabel, figYLabel, ESCenter, centerESSpotX, centerESSpotY, centerString, extensions)
plotImshow(args, 'R', 'Rho statistic', xArray, yArray, RShaped, xShaped, yShaped, graphHead, pulsarName, figXLabel, figYLabel, RCenter, centerRSpotX, centerRSpotY, centerString, extensions)
def plotImshow(args, graphKind, graphKindLong, xArray, yArray, shaped, xShaped, yShaped, graphHead, pulsarName, figXLabel, figYLabel, center, centerSpotX, centerSpotY, centerString, extensions):
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111)
if (args.threeD == True):
ax.plot(xArray,yArray)
else:
paramSpacePixelMap = ax.imshow(shaped, origin = 'lower', \
interpolation = 'nearest', extent = extensions, cmap = args.colorMap)
paramSpacePixelMap = fig.colorbar(paramSpacePixelMap, shrink = 0.5, extend = 'both')
#print 'Skipping grid lines'
ax.set_aspect('auto')
ax.set_xlabel(figXLabel)
ax.set_ylabel(figYLabel)
ax.set_title(' ' + graphKindLong + \
' vs parameters for band centered ' + str(args.fCenter) + ' Hz at ' + ' \n \
' + centerString + str(center) + ' at (x, y) = (' + centerSpotX +', ' + centerSpotY + ') \n \
Number of bins in data arrays (x, y): ' + str(xShaped.T.shape) + ' \n \
')
if args.texMode:
print 'SHOULD BE Skipping title to conform with journal style'
if args.texMode:
plt.savefig(graphHead + 'results' + graphKind + '-' + pulsarName + '.eps', format='eps', dpi=720, bbox_inches='tight')
plt.savefig(graphHead + 'results' + graphKind + '-' + pulsarName + '.png')
plt.savefig(graphHead + 'results' + graphKind + '-' + pulsarName + '.pdf')
plt.close()
plt.clf()
def takeAxisAndRotate(threeDarray, zIndex, takeAxisIndex, flipAxes):
|
firstTwoDarray = np.take(threeDarray, zIndex, takeAxisIndex)
if (flipAxes == 1):
twoDarray = firstTwoDarray.T
elif (flipAxes == 0):
twoDarray = firstTwoDarray
else:
print 'flipAxes not configured correctly'
return twoDarray
|
identifier_body
|
|
createHeatmap.py
|
templateSearch band, or standalone: make plots appropriate for a 5 Hz band')
parser.add_argument('--elsewhere', help='Path to directory containing the output files')
parser.add_argument('--massiveSummary',action='store_true', help='Deal with large output directories')
parser.add_argument('--plotSkyContour',type=float, help='Plot a circle around this point in the sky: two arguments, alpha and delta in radians', nargs=2)
parser.add_argument('--noiseTest',action='store_true', help='Make histogram noise plots')
parser.add_argument('--templateSearch',action='store_true', help='Change commands in way needed for templateSearch')
parser.add_argument('--multiTemplateSearch',type=str, help='use instead of --templateSearch, specify number of bands in output directory')
parser.add_argument('--closed',action='store_true', help='Can be used, especially with --multiTemplateSearch, for working with closed pulsars')
parser.add_argument('--skipPlotH0',action='store_true',help='Use to skip plotting h0 in a given band, if desired for memory or speed.',default=False)
parser.add_argument('--colorMap',type=str,help='Colormap option for plots; jet is default for web, Greys better for papers',default='jet')
parser.add_argument('--texMode',action='store_true',help='Avoids plotting the head title, so as to fit many journal styles. Invokes the rc families for nice plots',default=False)
parser.add_argument('--plot',action='store_true',help='Generate the plots',default=True)
parser.add_argument('--FA',action='store_true', help='Plot the F-A plane', default=True)
parser.add_argument('--TF',action='store_true', help='Plot the T-F plane', default=False)
parser.add_argument('--AT',action='store_true', help='Plot the A-T plane', default=False)
parser.add_argument('--dFnotA',action='store_true', help='Plot dF instead of a sin i', default=False)
parser.add_argument('--FIndex',type=int, help='F index value to use for A-T plots', default=0)
parser.add_argument('--AIndex',type=int, help='A index value to use for T-F plots', default=0)
parser.add_argument('--TIndex',type=int, help='T index value to use for F-A plots', default=0)
parser.add_argument('--threeD', action='store_true', help='Attempt 3D plot -- currently useless', default=False)
args = parser.parse_args()
def summarizer(args):
headJobName = args.nameToplist
verboseSummaryFile = 'verbose_summary-' + str(args.fCenter) +'.txt'
if args.bypassSummary:
print 'Bypassing summary file creation'
os.system('cat ' + headJobName + ' > ' + verboseSummaryFile)
# Defined by 'print_crossCorrBinaryline_to_str' in CrossCorrToplist.c
# Not by the CrossCorrBinaryOutputEntry struct in CrossCorrToplist.h, confusingly
dataDTypes = np.dtype({'names':['freq','tp','argp','asini','ecc','period','estSens','evSquared','rho'],'formats':['f8','f8','f8','f8','f8','f8','f8','f8','f8']})
verboseData = np.loadtxt(verboseSummaryFile,dtype=dataDTypes)
fArray = verboseData['freq']
asiniArray = verboseData['asini']
pArray = verboseData['period']
if args.dFnotA:
dfArray = 2 * np.pi * fArray * asiniArray / pArray
modArray = dfArray
else:
modArray = asiniArray
tpArray = verboseData['tp']
RArray = verboseData['rho']
ESArray = verboseData['estSens']
# Note, we now have to deal with three-dimensionality
# For now, just make it two-dimensional with a command such as
# ./wrapCrossCorr.py --InjTpBand 0.001
# in ../example/
if (args.plot):
# The most confusing question is how to flip axes
# Reshaping, we realize quickly enough, depends on
# the layout of the data, which as noted elsewhere
# is listed f, t, a in the toplist
# We later remove one axis from this ordering
# Plotting (f, a), without t, the ordered entries
# are still in the right order (flipAxes = 0), but
# plotting (a, t), or (t, f), we have to reverse
# the order from the toplist (flipAxes = 1)
# Also, takeAxisIndex seems to run backwards from the
# (f, t, a) order: that is because a transpose is taken
# immediately after reshaping, yielding (a, t, f) order.
# We do this transpose to comply with pyplot.imshow
if args.TF:
xArray = tpArray
yArray = fArray
zArray = modArray
zIndex = args.AIndex
takeAxisIndex = 0
flipAxes = 1
elif args.AT:
xArray = modArray
yArray = tpArray
zArray = fArray
zIndex = args.FIndex
takeAxisIndex = 2
flipAxes = 1
elif args.FA:
|
else:
print 'Incompatible options'
# In the toplist, columns change from the right (big-endian)
# They are listed f, t, a,
# Python reads in entries in the order f0t0a0, f0t0a1, f0t1a0,...
FLen = len(np.unique(fArray))
TLen = len(np.unique(tpArray))
ALen = len(np.unique(modArray))
if args.dFnotA:
modLabel = 'Modulation depth: df (Hz)'
# This override is necessary because the df values are all unique
# in this mode. However, this makes it tricky because the graph
# really is skewed
ALen = len(np.unique(asiniArray))
else:
modLabel = 'Projected semi-major axis (light-s)'
if args.TF:
print 'T-F plot'
figXLabel = 'Periapsis time: tp (s)'
figYLabel = 'Frequency: f (Hz)'
graphHead = 'TF'
elif args.AT:
print 'A-T plot'
figXLabel = modLabel
figYLabel = 'Periapsis time: tp (s)'
graphHead = 'AT'
elif args.FA:
print 'F-A plot'
figXLabel = 'Frequency: f (Hz)'
figYLabel = modLabel
graphHead = 'FA'
else:
print 'Incompatible options'
xShaped3D = np.reshape(xArray, (FLen, TLen, ALen)).T
yShaped3D = np.reshape(yArray, (FLen, TLen, ALen)).T
zShaped3D = np.reshape(zArray, (FLen, TLen, ALen)).T
print 'Number of bins in data arrays (F,T,A): ' + str(xShaped3D.T.shape)
ESShaped3D = np.reshape(ESArray, (FLen, TLen, ALen)).T
RShaped3D = np.reshape(RArray, (FLen, TLen, ALen)).T
# Reduce to 2D
xShaped = takeAxisAndRotate(xShaped3D, zIndex, takeAxisIndex, flipAxes)
yShaped = takeAxisAndRotate(yShaped3D, zIndex, takeAxisIndex, flipAxes)
ESShaped = takeAxisAndRotate(ESShaped3D, zIndex, takeAxisIndex, flipAxes)
RShaped = takeAxisAndRotate(RShaped3D, zIndex, takeAxisIndex, flipAxes)
#x, y = np.meshgrid(xShaped[0, :], yShaped[:, 0])
extensions = [xShaped[0, 0], xShaped[-1, -1], yShaped[0, 0], yShaped[-1, -1]]
ESCenter = ESShaped.max()
RCenter = RShaped.max()
centerString = 'maximum value: '
centerESSpotX = str(xShaped.compress((ESShaped == ESCenter).flat)[0])
centerESSpotY = str(yShaped.compress((ESShaped == ESCenter).flat)[0])
centerRSpotX = str(xShaped.compress((RShaped == RCenter).flat)[0])
centerRSpotY = str(yShaped.compress((RShaped == RCenter).flat)[0])
pulsarName = "band-" + str(args.fCenter)
#plotImshow(args, 'ES', 'estSens', xArray, yArray, ESShaped, xShaped, ySh
|
xArray = fArray
yArray = modArray
zArray = tpArray
zIndex = args.TIndex
takeAxisIndex = 1
flipAxes = 0
|
conditional_block
|
entitymap.go
|
r err os.Error
var num int
entity = entity[2 : len(entity)-1]
if num, err = strconv.Atoi(entity); err != nil {
return "&#" + entity + ";"
}
var arr [4]byte
if size := utf8.EncodeRune(arr[:], num); size == 0 {
return "&#" + entity + ";"
}
return string(arr[:])
}
// Converts a single Go UTF-8 rune to an HTML entity.
func Utf8ToEntity(entity string) string {
if rune, size := utf8.DecodeRuneInString(entity); size != 0 {
return fmt.Sprintf("&#%d;", rune)
}
return entity
}
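Utf8ToEntity is a thin wrapper over utf8.DecodeRuneInString. Since this file targets a pre-Go 1 API (os.Error, the old utf8.EncodeRune signature), the sketch below restates the same conversion as a lower-case copy that compiles on current Go; it is illustrative only and not part of this package:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// utf8ToEntity restates Utf8ToEntity above: decode the first rune of the
// string and render it as a decimal numeric character reference.
func utf8ToEntity(entity string) string {
	if r, size := utf8.DecodeRuneInString(entity); size != 0 {
		return fmt.Sprintf("&#%d;", r)
	}
	return entity
}

func main() {
	fmt.Println(utf8ToEntity("π")) // "&#960;", since U+03C0 is 960 in decimal
	fmt.Println(utf8ToEntity("A")) // "&#65;"
}
```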
/*
http://www.w3.org/TR/html4/sgml/entities.html
Portions © International Organization for Standardization 1986
Permission to copy in any form is granted for use with
conforming SGML systems and applications as defined in
ISO 8879, provided this notice is included in all copies.
Fills the supplied map with html entities mapped to their Go utf8
equivalents. This map can be assigned to xml.Parser.Entity
It will be used to map non-standard xml entities to a proper value.
If the parser encounters any unknown entities, it will throw a syntax
error and abort the parsing. Hence the ability to supply this map.
*/
func loadNonStandardEntities(em map[string]string) {
em["pi"] = "\u03c0"
em["nabla"] = "\u2207"
em["isin"] = "\u2208"
em["loz"] = "\u25ca"
em["prop"] = "\u221d"
em["para"] = "\u00b6"
em["Aring"] = "\u00c5"
em["euro"] = "\u20ac"
em["sup3"] = "\u00b3"
em["sup2"] = "\u00b2"
em["sup1"] = "\u00b9"
em["prod"] = "\u220f"
em["gamma"] = "\u03b3"
em["perp"] = "\u22a5"
em["lfloor"] = "\u230a"
em["fnof"] = "\u0192"
em["frasl"] = "\u2044"
em["rlm"] = "\u200f"
em["omega"] = "\u03c9"
em["part"] = "\u2202"
em["euml"] = "\u00eb"
em["Kappa"] = "\u039a"
em["nbsp"] = "\u00a0"
em["Eacute"] = "\u00c9"
em["brvbar"] = "\u00a6"
em["otimes"] = "\u2297"
em["ndash"] = "\u2013"
em["thinsp"] = "\u2009"
em["nu"] = "\u03bd"
em["Upsilon"] = "\u03a5"
em["upsih"] = "\u03d2"
em["raquo"] = "\u00bb"
em["yacute"] = "\u00fd"
em["delta"] = "\u03b4"
em["eth"] = "\u00f0"
em["supe"] = "\u2287"
em["ne"] = "\u2260"
em["ni"] = "\u220b"
em["eta"] = "\u03b7"
em["uArr"] = "\u21d1"
em["image"] = "\u2111"
em["asymp"] = "\u2248"
em["oacute"] = "\u00f3"
em["rarr"] = "\u2192"
em["emsp"] = "\u2003"
em["acirc"] = "\u00e2"
em["shy"] = "\u00ad"
em["yuml"] = "\u00ff"
em["acute"] = "\u00b4"
em["int"] = "\u222b"
em["ccedil"] = "\u00e7"
em["Acirc"] = "\u00c2"
em["Ograve"] = "\u00d2"
em["times"] = "\u00d7"
em["weierp"] = "\u2118"
em["Tau"] = "\u03a4"
em["omicron"] = "\u03bf"
em["lt"] = "\u003c"
em["Mu"] = "\u039c"
em["Ucirc"] = "\u00db"
em["sub"] = "\u2282"
em["le"] = "\u2264"
em["sum"] = "\u2211"
em["sup"] = "\u2283"
em["lrm"] = "\u200e"
em["frac34"] = "\u00be"
em["Iota"] = "\u0399"
em["Ugrave"] = "\u00d9"
em["THORN"] = "\u00de"
em["rsaquo"] = "\u203a"
em["not"] = "\u00ac"
em["sigma"] = "\u03c3"
em["iuml"] = "\u00ef"
em["epsilon"] = "\u03b5"
em["spades"] = "\u2660"
em["theta"] = "\u03b8"
em["divide"] = "\u00f7"
em["Atilde"] = "\u00c3"
em["uacute"] = "\u00fa"
em["Rho"] = "\u03a1"
em["trade"] = "\u2122"
em["chi"] = "\u03c7"
em["agrave"] = "\u00e0"
em["or"] = "\u2228"
em["circ"] = "\u02c6"
em["middot"] = "\u00b7"
em["plusmn"] = "\u00b1"
em["aring"] = "\u00e5"
em["lsquo"] = "\u2018"
em["Yacute"] = "\u00dd"
em["oline"] = "\u203e"
em["copy"] = "\u00a9"
em["icirc"] = "\u00ee"
em["lowast"] = "\u2217"
em["Oacute"] = "\u00d3"
em["aacute"] = "\u00e1"
em["oplus"] = "\u2295"
em["crarr"] = "\u21b5"
em["thetasym"] = "\u03d1"
em["Beta"] = "\u0392"
em["laquo"] = "\u00ab"
em["rang"] = "\u232a"
em["tilde"] = "\u02dc"
em["Uuml"] = "\u00dc"
em["zwj"] = "\u200d"
em["mu"] = "\u03bc"
em["Ccedil"] = "\u00c7"
em["infin"] = "\u221e"
em["ouml"] = "\u00f6"
em["rfloor"] = "\u230b"
em["pound"] = "\u00a3"
em["szlig"] = "\u00df"
em["thorn"] = "\u00fe"
em["forall"] = "\u2200"
em["piv"] = "\u03d6"
em["rdquo"] = "\u201d"
em["frac12"] = "\u00bd"
em["frac14"] = "\u00bc"
em["Ocirc"] = "\u00d4"
em["Ecirc"] = "\u00ca"
em["kappa"] = "\u03ba"
em["Euml"] = "\u00cb"
em["minus"] = "\u2212"
em["cong"] = "\u2245"
em["hellip"] = "\u2026"
em["equiv"] = "\u2261"
em["cent"] = "\u00a2"
em["Uacute"] = "\u00da"
em["darr"] = "\u2193"
em["Eta"] = "\u0397"
em["sbquo"] = "\u201a"
em["rArr"] = "\u21d2"
em["igrave"] = "\u00ec"
em["uml"] = "\u00a8"
em["lambda"] = "\u03bb"
em["oelig"] = "\u0153"
em["harr"] = "\u2194"
em["ang"] = "\u2220"
em
|
eturn "&" + entity[2:len(entity)-1] + ";"
}
va
|
conditional_block
|
|
entitymap.go
|
em["euml"] = "\u00eb"
em["Kappa"] = "\u039a"
em["nbsp"] = "\u00a0"
em["Eacute"] = "\u00c9"
em["brvbar"] = "\u00a6"
em["otimes"] = "\u2297"
em["ndash"] = "\u2013"
em["thinsp"] = "\u2009"
em["nu"] = "\u03bd"
em["Upsilon"] = "\u03a5"
em["upsih"] = "\u03d2"
em["raquo"] = "\u00bb"
em["yacute"] = "\u00fd"
em["delta"] = "\u03b4"
em["eth"] = "\u00f0"
em["supe"] = "\u2287"
em["ne"] = "\u2260"
em["ni"] = "\u220b"
em["eta"] = "\u03b7"
em["uArr"] = "\u21d1"
em["image"] = "\u2111"
em["asymp"] = "\u2248"
em["oacute"] = "\u00f3"
em["rarr"] = "\u2192"
em["emsp"] = "\u2003"
em["acirc"] = "\u00e2"
em["shy"] = "\u00ad"
em["yuml"] = "\u00ff"
em["acute"] = "\u00b4"
em["int"] = "\u222b"
em["ccedil"] = "\u00e7"
em["Acirc"] = "\u00c2"
em["Ograve"] = "\u00d2"
em["times"] = "\u00d7"
em["weierp"] = "\u2118"
em["Tau"] = "\u03a4"
em["omicron"] = "\u03bf"
em["lt"] = "\u003c"
em["Mu"] = "\u039c"
em["Ucirc"] = "\u00db"
em["sub"] = "\u2282"
em["le"] = "\u2264"
em["sum"] = "\u2211"
em["sup"] = "\u2283"
em["lrm"] = "\u200e"
em["frac34"] = "\u00be"
em["Iota"] = "\u0399"
em["Ugrave"] = "\u00d9"
em["THORN"] = "\u00de"
em["rsaquo"] = "\u203a"
em["not"] = "\u00ac"
em["sigma"] = "\u03c3"
em["iuml"] = "\u00ef"
em["epsilon"] = "\u03b5"
em["spades"] = "\u2660"
em["theta"] = "\u03b8"
em["divide"] = "\u00f7"
em["Atilde"] = "\u00c3"
em["uacute"] = "\u00fa"
em["Rho"] = "\u03a1"
em["trade"] = "\u2122"
em["chi"] = "\u03c7"
em["agrave"] = "\u00e0"
em["or"] = "\u2228"
em["circ"] = "\u02c6"
em["middot"] = "\u00b7"
em["plusmn"] = "\u00b1"
em["aring"] = "\u00e5"
em["lsquo"] = "\u2018"
em["Yacute"] = "\u00dd"
em["oline"] = "\u203e"
em["copy"] = "\u00a9"
em["icirc"] = "\u00ee"
em["lowast"] = "\u2217"
em["Oacute"] = "\u00d3"
em["aacute"] = "\u00e1"
em["oplus"] = "\u2295"
em["crarr"] = "\u21b5"
em["thetasym"] = "\u03d1"
em["Beta"] = "\u0392"
em["laquo"] = "\u00ab"
em["rang"] = "\u232a"
em["tilde"] = "\u02dc"
em["Uuml"] = "\u00dc"
em["zwj"] = "\u200d"
em["mu"] = "\u03bc"
em["Ccedil"] = "\u00c7"
em["infin"] = "\u221e"
em["ouml"] = "\u00f6"
em["rfloor"] = "\u230b"
em["pound"] = "\u00a3"
em["szlig"] = "\u00df"
em["thorn"] = "\u00fe"
em["forall"] = "\u2200"
em["piv"] = "\u03d6"
em["rdquo"] = "\u201d"
em["frac12"] = "\u00bd"
em["frac14"] = "\u00bc"
em["Ocirc"] = "\u00d4"
em["Ecirc"] = "\u00ca"
em["kappa"] = "\u03ba"
em["Euml"] = "\u00cb"
em["minus"] = "\u2212"
em["cong"] = "\u2245"
em["hellip"] = "\u2026"
em["equiv"] = "\u2261"
em["cent"] = "\u00a2"
em["Uacute"] = "\u00da"
em["darr"] = "\u2193"
em["Eta"] = "\u0397"
em["sbquo"] = "\u201a"
em["rArr"] = "\u21d2"
em["igrave"] = "\u00ec"
em["uml"] = "\u00a8"
em["lambda"] = "\u03bb"
em["oelig"] = "\u0153"
em["harr"] = "\u2194"
em["ang"] = "\u2220"
em["clubs"] = "\u2663"
em["and"] = "\u2227"
em["permil"] = "\u2030"
em["larr"] = "\u2190"
em["Yuml"] = "\u0178"
em["cup"] = "\u222a"
em["Xi"] = "\u039e"
em["Alpha"] = "\u0391"
em["phi"] = "\u03c6"
em["ucirc"] = "\u00fb"
em["oslash"] = "\u00f8"
em["rsquo"] = "\u2019"
em["AElig"] = "\u00c6"
em["mdash"] = "\u2014"
em["psi"] = "\u03c8"
em["eacute"] = "\u00e9"
em["otilde"] = "\u00f5"
em["yen"] = "\u00a5"
em["gt"] = "\u003e"
em["Iuml"] = "\u00cf"
em["Prime"] = "\u2033"
em["Chi"] = "\u03a7"
em["ge"] = "\u2265"
em["reg"] = "\u00ae"
em["hearts"] = "\u2665"
em["auml"] = "\u00e4"
em["
|
"pi"] = "\u03c0"
em["nabla"] = "\u2207"
em["isin"] = "\u2208"
em["loz"] = "\u25ca"
em["prop"] = "\u221d"
em["para"] = "\u00b6"
em["Aring"] = "\u00c5"
em["euro"] = "\u20ac"
em["sup3"] = "\u00b3"
em["sup2"] = "\u00b2"
em["sup1"] = "\u00b9"
em["prod"] = "\u220f"
em["gamma"] = "\u03b3"
em["perp"] = "\u22a5"
em["lfloor"] = "\u230a"
em["fnof"] = "\u0192"
em["frasl"] = "\u2044"
em["rlm"] = "\u200f"
em["omega"] = "\u03c9"
em["part"] = "\u2202"
|
identifier_body
|
|
entitymap.go
|
ty string) string {
var ok bool
if ok = reg_entnamed.MatchString(entity); ok {
return namedEntityToUtf8(entity[1 : len(entity)-1])
}
if ok = reg_entnumeric.MatchString(entity); !ok {
return "&" + entity[2:len(entity)-1] + ";"
}
var err os.Error
var num int
entity = entity[2 : len(entity)-1]
if num, err = strconv.Atoi(entity); err != nil {
return "&#" + entity + ";"
}
var arr [4]byte
if size := utf8.EncodeRune(arr[:], num); size == 0 {
return "&#" + entity + ";"
}
return string(arr[:])
}
// Converts a single Go UTF-8 rune to an HTML entity.
func Utf8ToEntity(entity string) string {
if rune, size := utf8.DecodeRuneInString(entity); size != 0 {
return fmt.Sprintf("&#%d;", rune)
}
return entity
}
/*
http://www.w3.org/TR/html4/sgml/entities.html
Portions © International Organization for Standardization 1986
Permission to copy in any form is granted for use with
conforming SGML systems and applications as defined in
ISO 8879, provided this notice is included in all copies.
Fills the supplied map with html entities mapped to their Go utf8
equivalents. This map can be assigned to xml.Parser.Entity
It will be used to map non-standard xml entities to a proper value.
If the parser encounters any unknown entities, it will throw a syntax
error and abort the parsing. Hence the ability to supply this map.
*/
func loadNonStandardEntities(em map[string]string) {
em["pi"] = "\u03c0"
em["nabla"] = "\u2207"
em["isin"] = "\u2208"
em["loz"] = "\u25ca"
em["prop"] = "\u221d"
em["para"] = "\u00b6"
em["Aring"] = "\u00c5"
em["euro"] = "\u20ac"
em["sup3"] = "\u00b3"
em["sup2"] = "\u00b2"
em["sup1"] = "\u00b9"
em["prod"] = "\u220f"
em["gamma"] = "\u03b3"
em["perp"] = "\u22a5"
em["lfloor"] = "\u230a"
em["fnof"] = "\u0192"
em["frasl"] = "\u2044"
em["rlm"] = "\u200f"
em["omega"] = "\u03c9"
em["part"] = "\u2202"
em["euml"] = "\u00eb"
em["Kappa"] = "\u039a"
em["nbsp"] = "\u00a0"
em["Eacute"] = "\u00c9"
em["brvbar"] = "\u00a6"
em["otimes"] = "\u2297"
em["ndash"] = "\u2013"
em["thinsp"] = "\u2009"
em["nu"] = "\u03bd"
em["Upsilon"] = "\u03a5"
em["upsih"] = "\u03d2"
em["raquo"] = "\u00bb"
em["yacute"] = "\u00fd"
em["delta"] = "\u03b4"
em["eth"] = "\u00f0"
em["supe"] = "\u2287"
em["ne"] = "\u2260"
em["ni"] = "\u220b"
em["eta"] = "\u03b7"
em["uArr"] = "\u21d1"
em["image"] = "\u2111"
em["asymp"] = "\u2248"
em["oacute"] = "\u00f3"
em["rarr"] = "\u2192"
em["emsp"] = "\u2003"
em["acirc"] = "\u00e2"
em["shy"] = "\u00ad"
em["yuml"] = "\u00ff"
em["acute"] = "\u00b4"
em["int"] = "\u222b"
em["ccedil"] = "\u00e7"
em["Acirc"] = "\u00c2"
em["Ograve"] = "\u00d2"
em["times"] = "\u00d7"
em["weierp"] = "\u2118"
em["Tau"] = "\u03a4"
em["omicron"] = "\u03bf"
em["lt"] = "\u003c"
em["Mu"] = "\u039c"
em["Ucirc"] = "\u00db"
em["sub"] = "\u2282"
em["le"] = "\u2264"
em["sum"] = "\u2211"
em["sup"] = "\u2283"
em["lrm"] = "\u200e"
em["frac34"] = "\u00be"
em["Iota"] = "\u0399"
em["Ugrave"] = "\u00d9"
em["THORN"] = "\u00de"
em["rsaquo"] = "\u203a"
em["not"] = "\u00ac"
em["sigma"] = "\u03c3"
em["iuml"] = "\u00ef"
em["epsilon"] = "\u03b5"
em["spades"] = "\u2660"
em["theta"] = "\u03b8"
em["divide"] = "\u00f7"
em["Atilde"] = "\u00c3"
em["uacute"] = "\u00fa"
em["Rho"] = "\u03a1"
em["trade"] = "\u2122"
em["chi"] = "\u03c7"
em["agrave"] = "\u00e0"
em["or"] = "\u2228"
em["circ"] = "\u02c6"
em["middot"] = "\u00b7"
em["plusmn"] = "\u00b1"
em["aring"] = "\u00e5"
em["lsquo"] = "\u2018"
em["Yacute"] = "\u00dd"
em["oline"] = "\u203e"
em["copy"] = "\u00a9"
em["icirc"] = "\u00ee"
em["lowast"] = "\u2217"
em["Oacute"] = "\u00d3"
em["aacute"] = "\u00e1"
em["oplus"] = "\u2295"
em["crarr"] = "\u21b5"
em["thetasym"] = "\u03d1"
em["Beta"] = "\u0392"
em["laquo"] = "\u00ab"
em["rang"] = "\u232a"
em["tilde"] = "\u02dc"
em["Uuml"] = "\u00dc"
em["zwj"] = "\u200d"
em["mu"] = "\u03bc"
em["Ccedil"] = "\u00c7"
em["infin"] = "\u221e"
em["ouml"] = "\u00f6"
em["rfloor"] = "\u230b"
em["pound"] = "\u00a3"
em["szlig"] = "\u00df"
em["thorn"] = "\u00fe"
em["forall"] = "\u2200"
em["piv"] = "\u03d6"
em["rdquo"] = "\u201d"
em["frac12"] = "\u00bd"
em["frac14"] = "\u00bc"
em["Ocirc"] = "\u00d4"
em["Ecirc"] = "\u00ca"
em["kappa"] = "\u03ba"
em["Euml"] = "\u00cb"
em["minus"] = "\u2212"
em["cong"] = "\u2245"
em["hellip"] = "\u2026"
em["equiv"] = "\u2261"
em["cent"] = "\u00a2"
em["Uacute"] = "\u00da"
em["darr"] = "\u2193"
em["Eta"] = "\u0397"
em["sbquo"] = "\u201a"
em["rArr"] = "\u21d2"
em["igrave"] = "\u00ec"
em["uml"]
|
yToUtf8(enti
|
identifier_name
|
|
entitymap.go
|
135"
em["notin"] = "\u2209"
em["Pi"] = "\u03a0"
em["sdot"] = "\u22c5"
em["upsilon"] = "\u03c5"
em["iota"] = "\u03b9"
em["hArr"] = "\u21d4"
em["Sigma"] = "\u03a3"
em["lang"] = "\u2329"
em["curren"] = "\u00a4"
em["Theta"] = "\u0398"
em["lArr"] = "\u21d0"
em["Phi"] = "\u03a6"
em["Nu"] = "\u039d"
em["rho"] = "\u03c1"
em["alpha"] = "\u03b1"
em["iexcl"] = "\u00a1"
em["micro"] = "\u00b5"
em["cedil"] = "\u00b8"
em["Ntilde"] = "\u00d1"
em["Psi"] = "\u03a8"
em["Dagger"] = "\u2021"
em["Egrave"] = "\u00c8"
em["Icirc"] = "\u00ce"
em["nsub"] = "\u2284"
em["bdquo"] = "\u201e"
em["empty"] = "\u2205"
em["aelig"] = "\u00e6"
em["ograve"] = "\u00f2"
em["macr"] = "\u00af"
em["Zeta"] = "\u0396"
em["beta"] = "\u03b2"
em["sim"] = "\u223c"
em["uuml"] = "\u00fc"
em["Aacute"] = "\u00c1"
em["Iacute"] = "\u00cd"
em["exist"] = "\u2203"
em["prime"] = "\u2032"
em["rceil"] = "\u2309"
em["real"] = "\u211c"
em["zwnj"] = "\u200c"
em["bull"] = "\u2022"
em["quot"] = "\u0022"
em["Scaron"] = "\u0160"
em["ugrave"] = "\u00f9"
}
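As the comment above loadNonStandardEntities notes, the filled map is meant to be handed to the XML parser's entity table so unknown entities do not abort parsing. In current Go the corresponding field is xml.Decoder.Entity rather than the pre-Go 1 xml.Parser.Entity, so the wiring below is an assumption about how the map would be plugged in today:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	// In the real code loadNonStandardEntities(em) fills the whole table; one
	// entry is enough to show the effect here.
	em := map[string]string{"pi": "\u03c0"}
	dec := xml.NewDecoder(strings.NewReader("<x>&pi;</x>"))
	dec.Entity = em // without this, the unknown &pi; entity is a syntax error
	var out struct {
		Value string `xml:",chardata"`
	}
	if err := dec.Decode(&out); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(out.Value) // π
}
```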
/*
http://www.w3.org/TR/html4/sgml/entities.html
Portions © International Organization for Standardization 1986
Permission to copy in any form is granted for use with
conforming SGML systems and applications as defined in
ISO 8879, provided this notice is included in all copies.
*/
func namedEntityToUtf8(name string) string {
switch name {
case "pi":
return "\u03c0"
case "nabla":
return "\u2207"
case "isin":
return "\u2208"
case "loz":
return "\u25ca"
case "prop":
return "\u221d"
case "para":
return "\u00b6"
case "Aring":
return "\u00c5"
case "euro":
return "\u20ac"
case "sup3":
return "\u00b3"
case "sup2":
return "\u00b2"
case "sup1":
return "\u00b9"
case "prod":
return "\u220f"
case "gamma":
return "\u03b3"
case "perp":
return "\u22a5"
case "lfloor":
return "\u230a"
case "fnof":
return "\u0192"
case "frasl":
return "\u2044"
case "rlm":
return "\u200f"
case "omega":
return "\u03c9"
case "part":
return "\u2202"
case "euml":
return "\u00eb"
case "Kappa":
return "\u039a"
case "nbsp":
return "\u00a0"
case "Eacute":
return "\u00c9"
case "brvbar":
return "\u00a6"
case "otimes":
return "\u2297"
case "ndash":
return "\u2013"
case "thinsp":
return "\u2009"
case "nu":
return "\u03bd"
case "Upsilon":
return "\u03a5"
case "upsih":
return "\u03d2"
case "raquo":
return "\u00bb"
case "yacute":
return "\u00fd"
case "delta":
return "\u03b4"
case "eth":
return "\u00f0"
case "supe":
return "\u2287"
case "ne":
return "\u2260"
case "ni":
return "\u220b"
case "eta":
return "\u03b7"
case "uArr":
return "\u21d1"
case "image":
return "\u2111"
case "asymp":
return "\u2248"
case "oacute":
return "\u00f3"
case "rarr":
return "\u2192"
case "emsp":
return "\u2003"
case "acirc":
return "\u00e2"
case "shy":
return "\u00ad"
case "yuml":
return "\u00ff"
case "acute":
return "\u00b4"
case "int":
return "\u222b"
case "ccedil":
return "\u00e7"
case "Acirc":
return "\u00c2"
case "Ograve":
return "\u00d2"
case "times":
return "\u00d7"
case "weierp":
return "\u2118"
case "Tau":
return "\u03a4"
case "omicron":
return "\u03bf"
case "lt":
return "\u003c"
case "Mu":
return "\u039c"
case "Ucirc":
return "\u00db"
case "sub":
return "\u2282"
case "le":
return "\u2264"
case "sum":
return "\u2211"
case "sup":
return "\u2283"
case "lrm":
return "\u200e"
case "frac34":
return "\u00be"
case "Iota":
return "\u0399"
case "Ugrave":
return "\u00d9"
case "THORN":
return "\u00de"
case "rsaquo":
return "\u203a"
case "not":
return "\u00ac"
case "sigma":
return "\u03c3"
case "iuml":
return "\u00ef"
case "epsilon":
return "\u03b5"
case "spades":
return "\u2660"
case "theta":
return "\u03b8"
case "divide":
return "\u00f7"
case "Atilde":
return "\u00c3"
case "uacute":
return "\u00fa"
case "Rho":
return "\u03a1"
case "trade":
return "\u2122"
case "chi":
return "\u03c7"
case "agrave":
return "\u00e0"
case "or":
return "\u2228"
case "circ":
return "\u02c6"
case "middot":
return "\u00b7"
case "plusmn":
return "\u00b1"
case "aring":
return "\u00e5"
case "lsquo":
return "\u2018"
case "Yacute":
return "\u00dd"
case "oline":
return "\u203e"
case "copy":
return "\u00a9"
case "icirc":
return "\u00ee"
case "lowast":
return "\u2217"
case "Oacute":
return "\u00d3"
case "aacute":
return "\u00e1"
case "oplus":
return "\u2295"
case "crarr":
return "\u21b5"
case "thetasym":
return "\u03d1"
case "Beta":
return "\u0392"
case "laquo":
return "\u00ab"
case "rang":
return "\u232a"
|
case "tilde":
return "\u02dc"
case "Uuml":
return "\u00dc"
|
random_line_split
|
|
parser.rs
|
inator::opt(character::line_ending)(i)?;
Ok((i, ln))
}
#[derive(Debug, Deserialize, Clone)]
pub enum IssueProvider {
GitHub,
}
#[derive(Debug, Clone)]
pub enum ParsingSource {
MarkdownFile,
SourceCode,
IssueAt(IssueProvider),
}
#[derive(Debug, Clone)]
pub struct IssueHead<K> {
pub title: String,
pub assignees: Vec<String>,
pub external_id: K,
}
#[derive(Debug, Clone, PartialEq)]
pub struct IssueBody<T> {
pub descs_and_srcs: Vec<(Vec<String>, T)>,
pub branches: Vec<String>,
}
impl IssueBody<FileTodoLocation> {
pub fn to_github_string(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let mut lines: Vec<String> = vec![];
for (desc_lines, loc) in self.descs_and_srcs.iter() {
let desc = desc_lines.clone().join("\n");
let link = loc.to_github_link(cwd, owner, repo, checkout)?;
lines.push(vec![desc, link].join("\n"));
}
Ok(lines.join("\n"))
}
}
#[derive(Debug, Clone)]
pub struct Issue<ExternalId, TodoLocation: PartialEq + Eq> {
pub head: IssueHead<ExternalId>,
pub body: IssueBody<TodoLocation>,
}
impl<ExId, Loc: PartialEq + Eq> Issue<ExId, Loc> {
pub fn new(id: ExId, title: String) -> Self {
Issue {
head: IssueHead {
title,
assignees: vec![],
external_id: id,
},
body: IssueBody {
descs_and_srcs: vec![],
branches: vec![],
},
}
}
}
#[derive(Debug, Clone)]
pub struct IssueMap<ExternalId, TodoLocation: PartialEq + Eq> {
pub parsed_from: ParsingSource,
pub todos: HashMap<String, Issue<ExternalId, TodoLocation>>,
}
/// A todo location in the local filesystem.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FileTodoLocation {
pub file: String,
pub src_span: (usize, Option<usize>),
}
impl FileTodoLocation {
/// ```rust
/// use todo_finder_lib::parser::FileTodoLocation;
///
/// let loc = FileTodoLocation {
/// file: "/total/path/src/file.rs".into(),
/// src_span: (666, Some(1337)),
/// };
///
/// let string = loc
/// .to_github_link("/total/path", "schell", "my_repo", "1234567890")
/// .unwrap();
///
/// assert_eq!(
/// &string,
/// "https://github.com/schell/my_repo/blob/1234567890/src/file.rs#L666-L1337"
/// );
/// ```
pub fn to_github_link(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let path: &Path = Path::new(&self.file);
let relative: &Path = path
.strip_prefix(cwd)
.map_err(|e| format!("could not relativize path {:#?}: {}", path, e))?;
let file_and_range = vec![
format!("{}", relative.display()),
format!("#L{}", self.src_span.0),
if let Some(end) = self.src_span.1 {
format!("-L{}", end)
} else {
String::new()
},
]
.concat();
let parts = vec![
"https://github.com",
owner,
repo,
"blob",
checkout,
&file_and_range,
];
Ok(parts.join("/"))
}
}
impl<K, V: Eq> IssueMap<K, V> {
pub fn new(parsed_from: ParsingSource) -> IssueMap<K, V> {
IssueMap {
parsed_from,
todos: HashMap::new(),
}
}
}
impl IssueMap<u64, GitHubTodoLocation> {
pub fn new_github_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::IssueAt(IssueProvider::GitHub),
todos: HashMap::new(),
}
}
pub fn add_issue(&mut self, github_issue: &GitHubIssue) {
if let Ok((_, body)) = issue::issue_body(&github_issue.body) {
let mut issue = Issue::new(github_issue.number, github_issue.title.clone());
issue.body = body;
self.todos.insert(github_issue.title.clone(), issue);
}
}
pub fn prepare_patch(&self, local: IssueMap<(), FileTodoLocation>) -> GitHubPatch {
let mut create = IssueMap::new_source_todos();
let mut edit: IssueMap<u64, FileTodoLocation> = IssueMap::new(ParsingSource::SourceCode);
let mut dont_delete = vec![];
for (title, local_issue) in local.todos.into_iter() {
if let Some(remote_issue) = self.todos.get(&title) {
// They both have it
let id = remote_issue.head.external_id.clone();
dont_delete.push(id);
let issue = Issue {
head: remote_issue.head.clone(),
body: local_issue.body,
};
edit.todos.insert(title, issue);
} else {
// Must be created
create.todos.insert(title, local_issue);
}
}
let delete = self
.todos
.values()
.filter_map(|issue| {
let id = issue.head.external_id;
if dont_delete.contains(&id) {
None
} else {
Some(id)
}
})
.collect::<Vec<_>>();
return GitHubPatch {
create,
edit,
delete,
};
}
}
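// A minimal sketch of how `prepare_patch` splits local and remote state into the
// create / edit / delete buckets, using only the constructors defined above. The
// concrete `GitHubTodoLocation` and `GitHubPatch` types are assumed to be the ones
// defined elsewhere in this crate.
#[cfg(test)]
mod prepare_patch_sketch {
    use super::*;

    #[test]
    fn splits_into_create_edit_delete() {
        // Remote knows about "A" (#1) and "B" (#2).
        let mut remote = IssueMap::new_github_todos();
        remote.todos.insert("A".to_string(), Issue::new(1, "A".to_string()));
        remote.todos.insert("B".to_string(), Issue::new(2, "B".to_string()));
        // The local scan found "B" and "C".
        let mut local = IssueMap::new_source_todos();
        local.todos.insert("B".to_string(), Issue::new((), "B".to_string()));
        local.todos.insert("C".to_string(), Issue::new((), "C".to_string()));
        let patch = remote.prepare_patch(local);
        assert!(patch.create.todos.contains_key("C")); // only local -> created
        assert!(patch.edit.todos.contains_key("B")); // present in both -> edited, keeping id 2
        assert_eq!(patch.delete, vec![1]); // only remote -> deleted
    }
}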
impl IssueMap<(), FileTodoLocation> {
pub fn new_source_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::SourceCode,
todos: HashMap::new(),
}
}
pub fn distinct_len(&self) -> usize {
self.todos.len()
}
pub fn add_parsed_todo(&mut self, todo: &ParsedTodo, loc: FileTodoLocation) {
let title = todo.title.to_string();
let issue = self
.todos
.entry(title.clone())
.or_insert(Issue::new((), title));
if let Some(assignee) = todo.assignee.map(|s| s.to_string()) {
if !issue.head.assignees.contains(&assignee) {
issue.head.assignees.push(assignee);
}
}
|
let desc_lines = todo
.desc_lines
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>();
issue.body.descs_and_srcs.push((desc_lines, loc));
}
pub fn from_files_in_directory(
dir: &str,
excludes: &Vec<String>,
) -> Result<IssueMap<(), FileTodoLocation>, String> {
let possible_todos = FileSearcher::find(dir, excludes)?;
let mut todos = IssueMap::new_source_todos();
let language_map = langs::language_map();
for possible_todo in possible_todos.into_iter() {
let path = Path::new(&possible_todo.file);
// Get our parser for this extension
let ext: Option<_> = path.extension();
if ext.is_none() {
continue;
}
let ext: &str = ext
.expect("impossible!")
.to_str()
.expect("could not get extension as str");
let languages = language_map.get(ext);
if languages.is_none() {
// TODO: Deadletter the file name as unsupported
println!("possible TODO found in unsupported file: {:#?}", path);
continue;
}
let languages = languages.expect("impossible!");
// Open the file and load the contents
let mut file = File::open(path)
.map_err(|e| format!("could not open file: {}\n{}", path.display(), e))?;
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| format!("could not read file {:#?}: {}", path, e))?;
let mut current_line = 1;
let mut i = contents.as_str();
for line in possible_todo.lines_to_search.into_iter() {
// Seek to the correct line...
while line > current_line {
let (j, _) =
take_to_eol(i).map_err(|e| format!("couldn't take line:\n{}", e))?;
i = j;
current_line += 1;
}
// Try parsing in each language until we get a match
for language in languages.iter() {
let parser_config = language.as_todo_parser_config();
let parser = source::parse_todo(parser_config);
if let Ok((j, parsed_todo)) = parser(i) {
let num_lines = i.trim_end_matches(j).lines().fold(0, |n, _| n + 1);
let loc = FileTodoLocation {
file: possible_todo.file.to_string(),
src_span: (
line,
if num_lines
|
random_line_split
|
|
parser.rs
|
inator::opt(character::line_ending)(i)?;
Ok((i, ln))
}
#[derive(Debug, Deserialize, Clone)]
pub enum IssueProvider {
GitHub,
}
#[derive(Debug, Clone)]
pub enum ParsingSource {
MarkdownFile,
SourceCode,
IssueAt(IssueProvider),
}
#[derive(Debug, Clone)]
pub struct IssueHead<K> {
pub title: String,
pub assignees: Vec<String>,
pub external_id: K,
}
#[derive(Debug, Clone, PartialEq)]
pub struct IssueBody<T> {
pub descs_and_srcs: Vec<(Vec<String>, T)>,
pub branches: Vec<String>,
}
impl IssueBody<FileTodoLocation> {
pub fn to_github_string(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let mut lines: Vec<String> = vec![];
for (desc_lines, loc) in self.descs_and_srcs.iter() {
let desc = desc_lines.clone().join("\n");
let link = loc.to_github_link(cwd, owner, repo, checkout)?;
lines.push(vec![desc, link].join("\n"));
}
Ok(lines.join("\n"))
}
}
#[derive(Debug, Clone)]
pub struct Issue<ExternalId, TodoLocation: PartialEq + Eq> {
pub head: IssueHead<ExternalId>,
pub body: IssueBody<TodoLocation>,
}
impl<ExId, Loc: PartialEq + Eq> Issue<ExId, Loc> {
pub fn new(id: ExId, title: String) -> Self {
Issue {
head: IssueHead {
title,
assignees: vec![],
external_id: id,
},
body: IssueBody {
descs_and_srcs: vec![],
branches: vec![],
},
}
}
}
#[derive(Debug, Clone)]
pub struct IssueMap<ExternalId, TodoLocation: PartialEq + Eq> {
pub parsed_from: ParsingSource,
pub todos: HashMap<String, Issue<ExternalId, TodoLocation>>,
}
/// A todo location in the local filesystem.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FileTodoLocation {
pub file: String,
pub src_span: (usize, Option<usize>),
}
impl FileTodoLocation {
/// ```rust
/// use todo_finder_lib::parser::FileTodoLocation;
///
/// let loc = FileTodoLocation {
/// file: "/total/path/src/file.rs".into(),
/// src_span: (666, Some(1337)),
/// };
///
/// let string = loc
/// .to_github_link("/total/path", "schell", "my_repo", "1234567890")
/// .unwrap();
///
/// assert_eq!(
/// &string,
/// "https://github.com/schell/my_repo/blob/1234567890/src/file.rs#L666-L1337"
/// );
/// ```
pub fn to_github_link(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let path: &Path = Path::new(&self.file);
let relative: &Path = path
.strip_prefix(cwd)
.map_err(|e| format!("could not relativize path {:#?}: {}", path, e))?;
let file_and_range = vec![
format!("{}", relative.display()),
format!("#L{}", self.src_span.0),
if let Some(end) = self.src_span.1 {
format!("-L{}", end)
} else {
String::new()
},
]
.concat();
let parts = vec![
"https://github.com",
owner,
repo,
"blob",
checkout,
&file_and_range,
];
Ok(parts.join("/"))
}
}
impl<K, V: Eq> IssueMap<K, V> {
pub fn new(parsed_from: ParsingSource) -> IssueMap<K, V> {
IssueMap {
parsed_from,
todos: HashMap::new(),
}
}
}
impl IssueMap<u64, GitHubTodoLocation> {
pub fn new_github_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::IssueAt(IssueProvider::GitHub),
todos: HashMap::new(),
}
}
pub fn add_issue(&mut self, github_issue: &GitHubIssue) {
if let Ok((_, body)) = issue::issue_body(&github_issue.body) {
let mut issue = Issue::new(github_issue.number, github_issue.title.clone());
issue.body = body;
self.todos.insert(github_issue.title.clone(), issue);
}
}
pub fn prepare_patch(&self, local: IssueMap<(), FileTodoLocation>) -> GitHubPatch {
let mut create = IssueMap::new_source_todos();
let mut edit: IssueMap<u64, FileTodoLocation> = IssueMap::new(ParsingSource::SourceCode);
let mut dont_delete = vec![];
for (title, local_issue) in local.todos.into_iter() {
if let Some(remote_issue) = self.todos.get(&title) {
// They both have it
let id = remote_issue.head.external_id.clone();
dont_delete.push(id);
let issue = Issue {
head: remote_issue.head.clone(),
body: local_issue.body,
};
edit.todos.insert(title, issue);
} else {
// Must be created
create.todos.insert(title, local_issue);
}
}
let delete = self
.todos
.values()
.filter_map(|issue| {
let id = issue.head.external_id;
if dont_delete.contains(&id) {
None
} else {
Some(id)
}
})
.collect::<Vec<_>>();
return GitHubPatch {
create,
edit,
delete,
};
}
}
impl IssueMap<(), FileTodoLocation> {
pub fn new_source_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::SourceCode,
todos: HashMap::new(),
}
}
pub fn distinct_len(&self) -> usize {
self.todos.len()
}
pub fn
|
(&mut self, todo: &ParsedTodo, loc: FileTodoLocation) {
let title = todo.title.to_string();
let issue = self
.todos
.entry(title.clone())
.or_insert(Issue::new((), title));
if let Some(assignee) = todo.assignee.map(|s| s.to_string()) {
if !issue.head.assignees.contains(&assignee) {
issue.head.assignees.push(assignee);
}
}
let desc_lines = todo
.desc_lines
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>();
issue.body.descs_and_srcs.push((desc_lines, loc));
}
pub fn from_files_in_directory(
dir: &str,
excludes: &Vec<String>,
) -> Result<IssueMap<(), FileTodoLocation>, String> {
let possible_todos = FileSearcher::find(dir, excludes)?;
let mut todos = IssueMap::new_source_todos();
let language_map = langs::language_map();
for possible_todo in possible_todos.into_iter() {
let path = Path::new(&possible_todo.file);
// Get our parser for this extension
let ext: Option<_> = path.extension();
if ext.is_none() {
continue;
}
let ext: &str = ext
.expect("impossible!")
.to_str()
.expect("could not get extension as str");
let languages = language_map.get(ext);
if languages.is_none() {
// TODO: Deadletter the file name as unsupported
println!("possible TODO found in unsupported file: {:#?}", path);
continue;
}
let languages = languages.expect("impossible!");
// Open the file and load the contents
let mut file = File::open(path)
.map_err(|e| format!("could not open file: {}\n{}", path.display(), e))?;
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| format!("could not read file {:#?}: {}", path, e))?;
let mut current_line = 1;
let mut i = contents.as_str();
for line in possible_todo.lines_to_search.into_iter() {
// Seek to the correct line...
while line > current_line {
let (j, _) =
take_to_eol(i).map_err(|e| format!("couldn't take line:\n{}", e))?;
i = j;
current_line += 1;
}
// Try parsing in each language until we get a match
for language in languages.iter() {
let parser_config = language.as_todo_parser_config();
let parser = source::parse_todo(parser_config);
if let Ok((j, parsed_todo)) = parser(i) {
let num_lines = i.trim_end_matches(j).lines().fold(0, |n, _| n + 1);
let loc = FileTodoLocation {
file: possible_todo.file.to_string(),
src_span: (
line,
if num
|
add_parsed_todo
|
identifier_name
|
parser.rs
|
::opt(character::line_ending)(i)?;
Ok((i, ln))
}
#[derive(Debug, Deserialize, Clone)]
pub enum IssueProvider {
GitHub,
}
#[derive(Debug, Clone)]
pub enum ParsingSource {
MarkdownFile,
SourceCode,
IssueAt(IssueProvider),
}
#[derive(Debug, Clone)]
pub struct IssueHead<K> {
pub title: String,
pub assignees: Vec<String>,
pub external_id: K,
}
#[derive(Debug, Clone, PartialEq)]
pub struct IssueBody<T> {
pub descs_and_srcs: Vec<(Vec<String>, T)>,
pub branches: Vec<String>,
}
impl IssueBody<FileTodoLocation> {
pub fn to_github_string(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let mut lines: Vec<String> = vec![];
for (desc_lines, loc) in self.descs_and_srcs.iter() {
let desc = desc_lines.clone().join("\n");
let link = loc.to_github_link(cwd, owner, repo, checkout)?;
lines.push(vec![desc, link].join("\n"));
}
Ok(lines.join("\n"))
}
}
#[derive(Debug, Clone)]
pub struct Issue<ExternalId, TodoLocation: PartialEq + Eq> {
pub head: IssueHead<ExternalId>,
pub body: IssueBody<TodoLocation>,
}
impl<ExId, Loc: PartialEq + Eq> Issue<ExId, Loc> {
pub fn new(id: ExId, title: String) -> Self {
Issue {
head: IssueHead {
title,
assignees: vec![],
external_id: id,
},
body: IssueBody {
descs_and_srcs: vec![],
branches: vec![],
},
}
}
}
#[derive(Debug, Clone)]
pub struct IssueMap<ExternalId, TodoLocation: PartialEq + Eq> {
pub parsed_from: ParsingSource,
pub todos: HashMap<String, Issue<ExternalId, TodoLocation>>,
}
/// A todo location in the local filesystem.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FileTodoLocation {
pub file: String,
pub src_span: (usize, Option<usize>),
}
impl FileTodoLocation {
/// ```rust
/// use todo_finder_lib::parser::FileTodoLocation;
///
/// let loc = FileTodoLocation {
/// file: "/total/path/src/file.rs".into(),
/// src_span: (666, Some(1337)),
/// };
///
/// let string = loc
/// .to_github_link("/total/path", "schell", "my_repo", "1234567890")
/// .unwrap();
///
/// assert_eq!(
/// &string,
/// "https://github.com/schell/my_repo/blob/1234567890/src/file.rs#L666-L1337"
/// );
/// ```
pub fn to_github_link(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let path: &Path = Path::new(&self.file);
let relative: &Path = path
.strip_prefix(cwd)
.map_err(|e| format!("could not relativize path {:#?}: {}", path, e))?;
let file_and_range = vec![
format!("{}", relative.display()),
format!("#L{}", self.src_span.0),
if let Some(end) = self.src_span.1 {
format!("-L{}", end)
} else {
String::new()
},
]
.concat();
let parts = vec![
"https://github.com",
owner,
repo,
"blob",
checkout,
&file_and_range,
];
Ok(parts.join("/"))
}
}
impl<K, V: Eq> IssueMap<K, V> {
pub fn new(parsed_from: ParsingSource) -> IssueMap<K, V> {
IssueMap {
parsed_from,
todos: HashMap::new(),
}
}
}
impl IssueMap<u64, GitHubTodoLocation> {
pub fn new_github_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::IssueAt(IssueProvider::GitHub),
todos: HashMap::new(),
}
}
pub fn add_issue(&mut self, github_issue: &GitHubIssue) {
if let Ok((_, body)) = issue::issue_body(&github_issue.body) {
let mut issue = Issue::new(github_issue.number, github_issue.title.clone());
issue.body = body;
self.todos.insert(github_issue.title.clone(), issue);
}
}
pub fn prepare_patch(&self, local: IssueMap<(), FileTodoLocation>) -> GitHubPatch {
let mut create = IssueMap::new_source_todos();
let mut edit: IssueMap<u64, FileTodoLocation> = IssueMap::new(ParsingSource::SourceCode);
let mut dont_delete = vec![];
for (title, local_issue) in local.todos.into_iter() {
if let Some(remote_issue) = self.todos.get(&title) {
// They both have it
let id = remote_issue.head.external_id.clone();
dont_delete.push(id);
let issue = Issue {
head: remote_issue.head.clone(),
body: local_issue.body,
};
edit.todos.insert(title, issue);
} else {
// Must be created
create.todos.insert(title, local_issue);
}
}
let delete = self
.todos
.values()
.filter_map(|issue| {
let id = issue.head.external_id;
if dont_delete.contains(&id) {
None
} else {
Some(id)
}
})
.collect::<Vec<_>>();
return GitHubPatch {
create,
edit,
delete,
};
}
}
impl IssueMap<(), FileTodoLocation> {
pub fn new_source_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::SourceCode,
todos: HashMap::new(),
}
}
pub fn distinct_len(&self) -> usize {
self.todos.len()
}
pub fn add_parsed_todo(&mut self, todo: &ParsedTodo, loc: FileTodoLocation) {
let title = todo.title.to_string();
let issue = self
.todos
.entry(title.clone())
.or_insert(Issue::new((), title));
if let Some(assignee) = todo.assignee.map(|s| s.to_string()) {
if !issue.head.assignees.contains(&assignee) {
issue.head.assignees.push(assignee);
}
}
let desc_lines = todo
.desc_lines
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>();
issue.body.descs_and_srcs.push((desc_lines, loc));
}
pub fn from_files_in_directory(
dir: &str,
excludes: &Vec<String>,
) -> Result<IssueMap<(), FileTodoLocation>, String> {
let possible_todos = FileSearcher::find(dir, excludes)?;
let mut todos = IssueMap::new_source_todos();
let language_map = langs::language_map();
for possible_todo in possible_todos.into_iter() {
let path = Path::new(&possible_todo.file);
// Get our parser for this extension
let ext: Option<_> = path.extension();
if ext.is_none() {
continue;
}
let ext: &str = ext
.expect("impossible!")
.to_str()
.expect("could not get extension as str");
let languages = language_map.get(ext);
if languages.is_none()
|
let languages = languages.expect("impossible!");
// Open the file and load the contents
let mut file = File::open(path)
.map_err(|e| format!("could not open file: {}\n{}", path.display(), e))?;
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| format!("could not read file {:#?}: {}", path, e))?;
let mut current_line = 1;
let mut i = contents.as_str();
for line in possible_todo.lines_to_search.into_iter() {
// Seek to the correct line...
while line > current_line {
let (j, _) =
take_to_eol(i).map_err(|e| format!("couldn't take line:\n{}", e))?;
i = j;
current_line += 1;
}
// Try parsing in each language until we get a match
for language in languages.iter() {
let parser_config = language.as_todo_parser_config();
let parser = source::parse_todo(parser_config);
if let Ok((j, parsed_todo)) = parser(i) {
let num_lines = i.trim_end_matches(j).lines().fold(0, |n, _| n + 1);
let loc = FileTodoLocation {
file: possible_todo.file.to_string(),
src_span: (
line,
if num
|
{
// TODO: Deadletter the file name as unsupported
println!("possible TODO found in unsupported file: {:#?}", path);
continue;
}
|
conditional_block
|
nvm_buffer.rs
|
points to a position on the inner NVM", which makes it similar to the `position` field,
// but while `position` is updated on every read, write, or seek operation,
// `write_buf_offset` keeps the same fixed value until the contents of the
// write buffer are flushed.
write_buf_offset: u64,
// Flag used to tell whether the write buffer currently holds unflushed data.
//
// It is set to `true` as soon as any data is written to the write buffer,
// and reset to `false` once the buffered data has been flushed to the inner NVM.
maybe_dirty: bool,
// Read buffer.
//
// Used to align read requests issued by the journal region
// to the block boundaries required by the inner NVM.
read_buf: AlignedBytes,
}
impl<N: NonVolatileMemory> JournalNvmBuffer<N> {
/// Creates a new `JournalNvmBuffer` instance.
///
/// The returned buffer uses `nvm` for the actual reads and writes.
///
/// `JournalNvmBuffer` guarantees that accesses to `nvm` are aligned to the block
/// boundaries `nvm` requires, so callers do not need to take care of alignment themselves.
///
/// Note, however, that on a seek the data up to the next block boundary after the
/// seek position may be overwritten, so some care is required.
pub fn new(nvm: N) -> Self {
let block_size = nvm.block_size();
JournalNvmBuffer {
inner: nvm,
position: 0,
maybe_dirty: false,
write_buf_offset: 0,
write_buf: AlignedBytes::new(0, block_size),
read_buf: AlignedBytes::new(0, block_size),
}
}
#[cfg(test)]
pub fn nvm(&self) -> &N {
&self.inner
}
fn is_dirty_area(&self, offset: u64, length: usize) -> bool {
if !self.maybe_dirty || length == 0 || self.write_buf.is_empty() {
return false;
}
if self.write_buf_offset < offset {
let buf_end = self.write_buf_offset + self.write_buf.len() as u64;
offset < buf_end
} else {
let end = offset + length as u64;
self.write_buf_offset < end
}
}
fn flush_write_buf(&mut self) -> Result<()> {
if self.write_buf.is_empty() || !self.maybe_dirty {
return Ok(());
}
track_io!(self.inner.seek(SeekFrom::Start(self.write_buf_offset)))?;
track_io!(self.inner.write(&self.write_buf))?;
if self.write_buf.len() > self.block_size().as_u16() as usize {
// This branch keeps the trailing alignment bytes (= new_len) in the buffer
// and advances write_buf_offset by write_buf.len() - new_len (= drop_len).
//
// We could instead advance write_buf_offset by the full write_buf.len() that was
// just written out and clear write_buf, but because data can only be written in
// whole blocks, the next write would then have to access the NVM once to fetch
// the whole block again.
// The current implementation keeps the tail block to avoid that extra read.
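// Worked example, assuming a 512-byte block size: with write_buf.len() == 1536,
// new_len == 512 and drop_len == 1024, so after the flush the buffer keeps the
// final block (bytes [1024, 1536)) and write_buf_offset advances by 1024.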
let new_len = self.block_size().as_u16() as usize;
let drop_len = self.write_buf.len() - new_len;
unsafe {
// `ptr::copy` has memmove semantics, so the source and destination ranges
// are allowed to overlap here (they do whenever drop_len < new_len).
ptr::copy(
self.write_buf.as_ptr().add(drop_len), // src
self.write_buf.as_mut_ptr(), // dst
new_len,
);
}
self.write_buf.truncate(new_len);
self.write_buf_offset += drop_len as u64;
}
self.maybe_dirty = false;
Ok(())
}
fn check_overflow(&self, write_len: usize) -> Result<()> {
let next_position = self.position() + write_len as u64;
track_assert!(
next_position <= self.capacity(
|
tState,
"self.position={}, write_len={}, self.len={}",
self.position(),
write_len,
self.capacity()
);
Ok(())
}
}
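// A standalone sketch of the interval check `is_dirty_area` performs above
// (ignoring the `maybe_dirty` flag), written against plain integers so the two
// branches are easy to follow: when the buffer starts before the queried offset
// the query must start before the buffer end, otherwise the buffer must start
// before the query end.
#[cfg(test)]
mod dirty_area_sketch {
    fn overlaps(buf_start: u64, buf_len: usize, offset: u64, length: usize) -> bool {
        if length == 0 || buf_len == 0 {
            return false;
        }
        if buf_start < offset {
            offset < buf_start + buf_len as u64
        } else {
            buf_start < offset + length as u64
        }
    }

    #[test]
    fn mirrors_the_two_branches() {
        // Buffer covers [512, 1024): a read at 1000 overlaps, a read at 1024 does not.
        assert!(overlaps(512, 512, 1000, 8));
        assert!(!overlaps(512, 512, 1024, 8));
        // A query starting before the buffer overlaps only if it reaches the buffer start.
        assert!(overlaps(512, 512, 0, 513));
        assert!(!overlaps(512, 512, 0, 512));
    }
}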
impl<N: NonVolatileMemory> NonVolatileMemory for JournalNvmBuffer<N> {
fn sync(&mut self) -> Result<()> {
track!(self.flush_write_buf())?;
self.inner.sync()
}
fn position(&self) -> u64 {
self.position
}
fn capacity(&self) -> u64 {
self.inner.capacity()
}
fn block_size(&self) -> BlockSize {
self.inner.block_size()
}
fn split(self, _: u64) -> Result<(Self, Self)> {
unreachable!()
}
}
impl<N: NonVolatileMemory> Drop for JournalNvmBuffer<N> {
fn drop(&mut self) {
let _ = self.sync();
}
}
impl<N: NonVolatileMemory> Seek for JournalNvmBuffer<N> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let offset = track!(self.convert_to_offset(pos))?;
self.position = offset;
Ok(offset)
}
}
impl<N: NonVolatileMemory> Read for JournalNvmBuffer<N> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
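// Worked example, assuming a 512-byte block size: a 10-byte read at position 700
// is widened to the aligned range [512, 1024); after the inner read, bytes
// [188, 198) of `read_buf` (i.e. starting at 700 - 512) are copied into `buf`.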
if self.is_dirty_area(self.position, buf.len()) {
track!(self.flush_write_buf())?;
}
let aligned_start = self.block_size().floor_align(self.position);
let aligned_end = self
.block_size()
.ceil_align(self.position + buf.len() as u64);
self.read_buf
.aligned_resize((aligned_end - aligned_start) as usize);
self.inner.seek(SeekFrom::Start(aligned_start))?;
let inner_read_size = self.inner.read(&mut self.read_buf)?;
let start = (self.position - aligned_start) as usize;
let end = cmp::min(inner_read_size, start + buf.len());
let read_size = end - start;
(&mut buf[..read_size]).copy_from_slice(&self.read_buf[start..end]);
self.position += read_size as u64;
Ok(read_size)
}
}
impl<N: NonVolatileMemory> Write for JournalNvmBuffer<N> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
track!(self.check_overflow(buf.len()))?;
let write_buf_start = self.write_buf_offset;
let write_buf_end = write_buf_start + self.write_buf.len() as u64;
if write_buf_start <= self.position && self.position <= write_buf_end {
// The regions overlap, so the write can be appended from the middle of the buffer
// (i.e., no flush of the write buffer is needed).
let start = (self.position - self.write_buf_offset) as usize;
let end = start + buf.len();
self.write_buf.aligned_resize(end);
(&mut self.write_buf[start..end]).copy_from_slice(buf);
self.position += buf.len() as u64;
self.maybe_dirty = true;
Ok(buf.len())
} else {
// The regions do not overlap, so write the buffer contents back to the NVM first.
track!(self.flush_write_buf())?;
if self.block_size().is_aligned(self.position) {
self.write_buf_offset = self.position;
self.write_buf.aligned_resize(0);
} else {
// Read the block once so that the existing data before the seek position is not discarded.
let size = self.block_size().as_u16();
self.write_buf_offset = self.block_size().floor_align(self.position);
self.write_buf.aligned_resize(size as usize);
self.inner.seek(SeekFrom::Start(self.write_buf_offset))?;
self.inner.read_exact(&mut self.write_buf)?;
}
self.write(buf)
}
}
fn flush(&mut self) -> io::Result<()> {
track!(self.flush_write_buf())?;
Ok(())
}
}
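// A small sketch of the decision `write` makes above: the incoming write can be
// appended in place only when the current position falls inside, or immediately
// after, the range already covered by the write buffer; anything else forces a
// flush and a fresh, block-aligned buffer.
#[cfg(test)]
mod write_append_sketch {
    fn can_append_in_place(buf_start: u64, buf_len: usize, position: u64) -> bool {
        let buf_end = buf_start + buf_len as u64;
        buf_start <= position && position <= buf_end
    }

    #[test]
    fn append_is_only_possible_inside_or_at_the_end_of_the_buffer() {
        assert!(can_append_in_place(0, 512, 0)); // overwrite from the start
        assert!(can_append_in_place(0, 512, 512)); // append right at the end
        assert!(!can_append_in_place(0, 512, 513)); // would leave a gap -> flush first
        assert!(!can_append_in_place(512, 512, 0)); // position before the buffer -> flush first
    }
}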
#[cfg(test)]
mod tests {
use std::io::{Read, Seek, SeekFrom, Write};
use trackable::result::TestResult;
use super::*;
use nvm::MemoryNvm;
#[test]
fn write_write_flush() -> TestResult {
// Writes to a contiguous region stay in the write buffer until `flush` is called.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.write_all(b"bar"))?;
|
),
ErrorKind::Inconsisten
|
conditional_block
|
nvm_buffer.rs
|
until the write buffer contents are flushed,
// the same fixed value keeps being used.
write_buf_offset: u64,
// Flag used to tell whether the write buffer currently holds unflushed data.
//
// It is set to `true` as soon as any data is written to the write buffer,
// and reset to `false` once the buffered data has been flushed to the inner NVM.
maybe_dirty: bool,
// Read buffer.
//
// Used to align read requests issued by the journal region
// to the block boundaries required by the inner NVM.
read_buf: AlignedBytes,
}
impl<N: NonVolatileMemory> JournalNvmBuffer<N> {
/// Creates a new `JournalNvmBuffer` instance.
///
/// The returned buffer uses `nvm` for the actual reads and writes.
///
/// `JournalNvmBuffer` guarantees that accesses to `nvm` are aligned to the block
/// boundaries `nvm` requires, so callers do not need to take care of alignment themselves.
///
/// Note, however, that on a seek the data up to the next block boundary after the
/// seek position may be overwritten, so some care is required.
pub fn new(nvm: N) -> Self {
let block_size = nvm.block_size();
JournalNvmBuffer {
inner: nvm,
position: 0,
maybe_dirty: false,
write_buf_offset: 0,
write_buf: AlignedBytes::new(0, block_size),
read_buf: AlignedBytes::new(0, block_size),
}
}
#[cfg(test)]
pub fn nvm(&self) -> &N {
&self.inner
}
fn is_dirty_area(&self, offset: u64, length: usize) -> bool {
if !self.maybe_dirty || length == 0 || self.write_buf.is_empty() {
return false;
}
if self.write_buf_offset < offset {
let buf_end = self.write_buf_offset + self.write_buf.len() as u64;
offset < buf_end
} else {
let end = offset + length as u64;
self.write_buf_offset < end
}
}
fn flush_write_buf(&mut self) -> Result<()> {
if self.write_buf.is_empty() || !self.maybe_dirty {
return Ok(());
}
track_io!(self.inner.seek(SeekFrom::Start(self.write_buf_offset)))?;
track_io!(self.inner.write(&self.write_buf))?;
if self.write_buf.len() > self.block_size().as_u16() as usize {
// This branch keeps the trailing alignment bytes (= new_len) in the buffer
// and advances write_buf_offset by write_buf.len() - new_len (= drop_len).
//
// We could instead advance write_buf_offset by the full write_buf.len() that was
// just written out and clear write_buf, but because data can only be written in
// whole blocks, the next write would then have to access the NVM once to fetch
// the whole block again.
// The current implementation keeps the tail block to avoid that extra read.
let new_len = self.block_size().as_u16() as usize;
let drop_len = self.write_buf.len() - new_len;
unsafe {
// `ptr::copy` has memmove semantics, so the source and destination ranges
// are allowed to overlap here (they do whenever drop_len < new_len).
ptr::copy(
self.write_buf.as_ptr().add(drop_len), // src
self.write_buf.as_mut_ptr(), // dst
new_len,
);
}
self.write_buf.truncate(new_len);
self.write_buf_offset += drop_len as u64;
}
self.maybe_dirty = false;
Ok(())
}
fn check_overflow(&self, write_len: usize) -> Result<()> {
let next_position = self.position() + write_len as u64;
track_assert!(
next_position <= self.capacity(),
ErrorKind::InconsistentState,
"self.position={}, write_len={}, self.len={}",
self.position(),
write_len,
self.capacity()
);
Ok(())
}
}
impl<N: NonVolatileMemory> NonVolatileMemory for JournalNvmBuffer<N> {
fn sync(&mut self) -> Result<()> {
track!(self.flush_write_buf())?;
self.inner.sync()
}
fn position(&self) -> u64 {
self.position
}
fn capacity(&self) -> u64 {
self.inner.capacity()
}
fn block_size(&self) -> BlockSize {
self.inner.block_size()
}
fn split(self, _: u64) -> Result<(Self, Self)> {
unreachable!()
}
}
impl<N: NonVolatileMemory> Drop for JournalNvmBuffer<N> {
fn drop(&mut self) {
let _ = self.sync();
}
}
impl<N: NonVolatileMemory> Seek for JournalNvmBuffer<N> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let offset = track!(self.convert_to_offset(pos))?;
self.position = offset;
Ok(offset)
}
}
impl<N: NonVolatileMemory> Read for JournalNvmBuffer<N> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.is_dirty_area(self.position, buf.len()) {
track!(self.flush_write_buf())?;
}
let aligned_start = self.block_size().floor_align(self.position);
let aligned_end = self
.block_size()
.ceil_align(self.position + buf.len() as u64);
self.read_buf
.aligned_resize((aligned_end - aligned_start) as usize);
self.inner.seek(SeekFrom::Start(aligned_start))?;
let inner_read_size = self.inner.read(&mut self.read_buf)?;
let start = (self.position - aligned_start) as usize;
let end = cmp::min(inner_read_size, start + buf.len());
let read_size = end - start;
(&mut buf[..read_size]).copy_from_slice(&self.read_buf[start..end]);
self.position += read_size as u64;
Ok(read_size)
}
}
impl<N: NonVolatileMemory> Write for JournalNvmBuffer<N> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
track!(self.check_overflow(buf.len()))?;
let write_buf_start = self.write_buf_offset;
let write_buf_end = write_buf_start + self.write_buf.len() as u64;
if write_buf_start <= self.position && self.position <= write_buf_end {
// The regions overlap, so the write can be appended from the middle of the buffer
// (i.e., no flush of the write buffer is needed).
let start = (self.position - self.write_buf_offset) as usize;
let end = start + buf.len();
self.write_buf.aligned_resize(end);
(&mut self.write_buf[start..end]).copy_from_slice(buf);
self.position += buf.len() as u64;
self.maybe_dirty = true;
Ok(buf.len())
} else {
// The regions do not overlap, so write the buffer contents back to the NVM first.
track!(self.flush_write_buf())?;
if self.block_size().is_aligned(self.position) {
self.write_buf_offset = self.position;
self.write_buf.aligned_resize(0);
} else {
// Read the block once so that the existing data before the seek position is not discarded.
let size = self.block_size().as_u16();
self.write_buf_offset = self.block_size().floor_align(self.position);
self.write_buf.aligned_resize(size as usize);
self.inner.seek(SeekFrom::Start(self.write_buf_offset))?;
self.inner.read_exact(&mut self.write_buf)?;
}
self.write(buf)
}
}
fn flush(&mut self) -> io::Result<()> {
track!(self.flush_write_buf())?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::io::{Read, Seek, SeekFrom, Write};
use trackable::result::TestResult;
use super::*;
use nvm::MemoryNvm;
#[test]
fn write_write_flush() -> TestResult {
// Writes to a contiguous region stay in the write buffer until `flush` is called.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[3..6], &[0; 3][..]);
track_io!(buffer.f
|
lush(
|
identifier_name
|
|
nvm_buffer.rs
|
// - If a read request is issued for a region that overlaps the range covered by the write buffer:
//   - the write buffer is flushed and synced to the inner NVM before the read is processed.
// - If a write request is issued for a region that does not overlap the range covered by the write buffer:
//   - the current write-buffer data structure cannot represent gaps (i.e., multiple non-contiguous subregions),
//   - so the old buffer contents are flushed first, and a fresh buffer is created to handle that write request.
//
// The write buffer also takes care of aligning write requests issued by the
// journal region to the block boundaries of the inner NVM.
write_buf: AlignedBytes,
// Field that records which position on the inner NVM the start of `write_buf` corresponds to.
//
// It is similar to the `position` field in that it "points to a position on the inner NVM",
// but while `position` is updated on every read, write, or seek operation,
// `write_buf_offset` keeps the same fixed value until the contents of the
// write buffer are flushed.
write_buf_offset: u64,
// Flag used to tell whether the write buffer currently holds unflushed data.
//
// It is set to `true` as soon as any data is written to the write buffer,
// and reset to `false` once the buffered data has been flushed to the inner NVM.
maybe_dirty: bool,
// Read buffer.
//
// Used to align read requests issued by the journal region
// to the block boundaries required by the inner NVM.
read_buf: AlignedBytes,
}
impl<N: NonVolatileMemory> JournalNvmBuffer<N> {
/// Creates a new `JournalNvmBuffer` instance.
///
/// The returned buffer uses `nvm` for the actual reads and writes.
///
/// `JournalNvmBuffer` guarantees that accesses to `nvm` are aligned to the block
/// boundaries `nvm` requires, so callers do not need to take care of alignment themselves.
///
/// Note, however, that on a seek the data up to the next block boundary after the
/// seek position may be overwritten, so some care is required.
pub fn new(nvm: N) -> Self {
let block_size = nvm.block_size();
JournalNvmBuffer {
inner: nvm,
position: 0,
maybe_dirty: false,
write_buf_offset: 0,
write_buf: AlignedBytes::new(0, block_size),
read_buf: AlignedBytes::new(0, block_size),
}
}
#[cfg(test)]
pub fn nvm(&self) -> &N {
&self.inner
}
fn is_dirty_area(&self, offset: u64, length: usize) -> bool {
if !self.maybe_dirty || length == 0 || self.write_buf.is_empty() {
return false;
}
if self.write_buf_offset < offset {
let buf_end = self.write_buf_offset + self.write_buf.len() as u64;
offset < buf_end
} else {
let end = offset + length as u64;
self.write_buf_offset < end
}
}
fn flush_write_buf(&mut self) -> Result<()> {
if self.write_buf.is_empty() || !self.maybe_dirty {
return Ok(());
}
track_io!(self.inner.seek(SeekFrom::Start(self.write_buf_offset)))?;
track_io!(self.inner.write(&self.write_buf))?;
if self.write_buf.len() > self.block_size().as_u16() as usize {
// This branch keeps the trailing alignment bytes (= new_len) in the buffer
// and advances write_buf_offset by write_buf.len() - new_len (= drop_len).
//
// We could instead advance write_buf_offset by the full write_buf.len() that was
// just written out and clear write_buf, but because data can only be written in
// whole blocks, the next write would then have to access the NVM once to fetch
// the whole block again.
// The current implementation keeps the tail block to avoid that extra read.
let new_len = self.block_size().as_u16() as usize;
let drop_len = self.write_buf.len() - new_len;
unsafe {
// `ptr::copy` has memmove semantics, so the source and destination ranges
// are allowed to overlap here (they do whenever drop_len < new_len).
ptr::copy(
self.write_buf.as_ptr().add(drop_len), // src
self.write_buf.as_mut_ptr(), // dst
new_len,
);
}
self.write_buf.truncate(new_len);
self.write_buf_offset += drop_len as u64;
}
self.maybe_dirty = false;
Ok(())
}
fn check_overflow(&self, write_len: usize) -> Result<()> {
let next_position = self.position() + write_len as u64;
track_assert!(
next_position <= self.capacity(),
ErrorKind::InconsistentState,
"self.position={}, write_len={}, self.len={}",
self.position(),
write_len,
self.capacity()
);
Ok(())
}
}
impl<N: NonVolatileMemory> NonVolatileMemory for JournalNvmBuffer<N> {
fn sync(&mut self) -> Result<()> {
track!(self.flush_write_buf())?;
self.inner.sync()
}
fn position(&self) -> u64 {
self.position
}
fn capacity(&self) -> u64 {
self.inner.capacity()
}
fn block_size(&self) -> BlockSize {
self.inner.block_size()
}
fn split(self, _: u64) -> Result<(Self, Self)> {
unreachable!()
}
}
impl<N: NonVolatileMemory> Drop for JournalNvmBuffer<N> {
fn drop(&mut self) {
let _ = self.sync();
}
}
impl<N: NonVolatileMemory> Seek for JournalNvmBuffer<N> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let offset = track!(self.convert_to_offset(pos))?;
self.position = offset;
Ok(offset)
}
}
impl<N: NonVolatileMemory> Read for JournalNvmBuffer<N> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.is_dirty_area(self.position, buf.len()) {
track!(self.flush_write_buf())?;
}
let aligned_start = self.block_size().floor_align(self.position);
let aligned_end = self
.block_size()
.ceil_align(self.position + buf.len() as u64);
self.read_buf
.aligned_resize((aligned_end - aligned_start) as usize);
self.inner.seek(SeekFrom::Start(aligned_start))?;
let inner_read_size = self.inner.read(&mut self.read_buf)?;
let start = (self.position - aligned_start) as usize;
let end = cmp::min(inner_read_size, start + buf.len());
let read_size = end - start;
(&mut buf[..read_size]).copy_from_slice(&self.read_buf[start..end]);
self.position += read_size as u64;
Ok(read_size)
}
}
impl<N: NonVolatileMemory> Write for JournalNvmBuffer<N> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
track!(self.check_overflow(buf.len()))?;
let write_buf_start = self.write_buf_offset;
let write_buf_end = write_buf_start + self.write_buf.len() as u64;
if write_buf_start <= self.position && self.position <= write_buf_end {
// The regions overlap, so the write can be appended from the middle of the buffer
// (i.e., no flush of the write buffer is needed).
let start = (self.position - self.write_buf_offset) as usize;
let end = start + buf.len();
self.write_buf.aligned_resize(end);
(&mut self.write_buf[start..end]).copy_from_slice(buf);
self.position += buf.len() as u64;
self.maybe_dirty = true;
Ok(buf.len())
} else {
// The regions do not overlap, so write the buffer contents back to the NVM first.
track!(self.flush_write_buf())?;
if self.block_size().is_aligned
|
// - The journal region calls this method periodically
|
random_line_split
|
|
nvm_buffer.rs
|
points to a position on the inner NVM", which makes it similar to the `position` field,
// but while `position` is updated on every read, write, or seek operation,
// `write_buf_offset` keeps the same fixed value until the contents of the
// write buffer are flushed.
write_buf_offset: u64,
// Flag used to tell whether the write buffer currently holds unflushed data.
//
// It is set to `true` as soon as any data is written to the write buffer,
// and reset to `false` once the buffered data has been flushed to the inner NVM.
maybe_dirty: bool,
// Read buffer.
//
// Used to align read requests issued by the journal region
// to the block boundaries required by the inner NVM.
read_buf: AlignedBytes,
}
impl<N: NonVolatileMemory> JournalNvmBuffer<N> {
/// Creates a new `JournalNvmBuffer` instance.
///
/// The returned buffer uses `nvm` for the actual reads and writes.
///
/// `JournalNvmBuffer` guarantees that accesses to `nvm` are aligned to the block
/// boundaries `nvm` requires, so callers do not need to take care of alignment themselves.
///
/// Note, however, that on a seek the data up to the next block boundary after the
/// seek position may be overwritten, so some care is required.
pub fn new(nvm: N) -> Self {
let block_size = nvm.block_size();
JournalNvmBuffer {
inner: nvm,
position: 0,
maybe_dirty: false,
write_buf_offset: 0,
write_buf: AlignedBytes::new(0, block_size),
read_buf: AlignedBytes::new(0, block_size),
}
}
#[cfg(test)]
pub fn nvm(&self) -> &N {
&self.inner
}
fn is_dirty_area(&self, offset: u64, length: usize) -> bool {
if !self.maybe_dirty || length == 0 || self.write_buf.is_empty() {
return false;
}
if self.write_buf_offset < offset {
let buf_end = self.write_buf_offset + self.write_buf.len() as u64;
offset < buf_end
} else {
let end = offset + length as u64;
self.write_buf_offset < end
}
}
fn flush_write_buf(&mut self) -> Result<()> {
if self.write_buf.is_empty() || !self.maybe_dirty {
return Ok(());
}
track_io!(self.inner.seek(SeekFrom::Start(self.write_buf_offset)))?;
track_io!(self.inner.write(&self.write_buf))?;
if self.write_buf.len() > self.block_size().as_u16() as usize {
// This branch keeps the trailing alignment bytes (= new_len) in the buffer
// and advances write_buf_offset by write_buf.len() - new_len (= drop_len).
//
// We could instead advance write_buf_offset by the full write_buf.len() that was
// just written out and clear write_buf, but because data can only be written in
// whole blocks, the next write would then have to access the NVM once to fetch
// the whole block again.
// The current implementation keeps the tail block to avoid that extra read.
let new_len = self.block_size().as_u16() as usize;
let drop_len = self.write_buf.len() - new_len;
unsafe {
// `ptr::copy` has memmove semantics, so the source and destination ranges
// are allowed to overlap here (they do whenever drop_len < new_len).
ptr::copy(
self.write_buf.as_ptr().add(drop_len), // src
self.write_buf.as_mut_ptr(), // dst
new_len,
);
}
self.write_buf.truncate(new_len);
self.write_buf_offset += drop_len as u64;
}
self.maybe_dirty = false;
Ok(())
}
fn check_overflow(&self, write_len: usize) -> Result<()> {
let next_position = self.position() + write_len as u64;
track_assert!(
next_position <= self.capacity(),
ErrorKind::InconsistentState,
"self.position={}, write_len={}, self.len={}",
self.position(),
write_len,
self.capacity()
);
Ok(())
}
}
impl<N: NonVolatileMemory> NonVolatileMemory for JournalNvmBuffer<N> {
fn sync(&mut self) -> Result<()> {
track!(self.flush_write_buf())?;
self.inner.sync()
}
fn position(&self) -> u64 {
self.position
}
fn capacity(&self) -> u64 {
self.inner.capacity()
}
fn block_size(&self) -> BlockSize {
self.inner.block_size()
}
fn split(self, _: u64) -> Result<(Self, Self)> {
unreachable!()
}
}
impl<N: NonVolatileMemory> Drop for JournalNvmBuffer<N> {
fn drop(&mut self) {
let _ = self.sync();
}
}
impl<N: NonVolatileMemory> Seek for JournalNvmBuffer<N> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let offset = track!(self.convert_to_offset(pos))?;
self.position = offset;
Ok(offset)
}
}
impl<N: NonVolatileMemory> Read for JournalNvmBuffer<N> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.is_dirty_area(self.position, buf.len()) {
track!(self.flush_write_buf())?;
}
let aligned_start = self.block_size().floor_align(self.position);
let aligned_end = self
.block_size()
.ceil_align(self.position + buf.len() as u64);
self.read_buf
.aligned_resize((aligned_end - aligned_start) as usize);
self.inner.seek(SeekFrom::Start(aligned_start))?;
let inner_read_size = self.inner.read(&mut self.read_buf)?;
let start = (self.position - aligned_start) as usize;
let end = cmp::min(inner_read_size, start + buf.len());
let read_size = end - start;
(&mut buf[..read_size]).copy_from_slice(&self.read_buf[start..end]);
self.position += read_size as u64;
Ok(read_size)
}
}
impl<N: NonVolatileMemory> Write for JournalNvmBuffer<N> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
track!(self.check_overflow(buf.len()))?;
let write_buf_start = self.write_buf_offset;
let write_buf_end = write_buf_start + self.write_buf.len() as u64;
if write_buf_start <= self.position && self.position <= write_buf_end {
// The regions overlap, so the write can be appended from the middle of the buffer
// (i.e., no flush of the write buffer is needed).
let start = (self.position - self.write_buf_offs
|
ite_buf.aligned_resize(end);
(&mut self.write_buf[start..end]).copy_from_slice(buf);
self.position += buf.len() as u64;
self.maybe_dirty = true;
Ok(buf.len())
} else {
// The regions do not overlap, so write the buffer contents back to the NVM first.
track!(self.flush_write_buf())?;
if self.block_size().is_aligned(self.position) {
self.write_buf_offset = self.position;
self.write_buf.aligned_resize(0);
} else {
// Read the block once so that the existing data before the seek position is not discarded.
let size = self.block_size().as_u16();
self.write_buf_offset = self.block_size().floor_align(self.position);
self.write_buf.aligned_resize(size as usize);
self.inner.seek(SeekFrom::Start(self.write_buf_offset))?;
self.inner.read_exact(&mut self.write_buf)?;
}
self.write(buf)
}
}
fn flush(&mut self) -> io::Result<()> {
track!(self.flush_write_buf())?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::io::{Read, Seek, SeekFrom, Write};
use trackable::result::TestResult;
use super::*;
use nvm::MemoryNvm;
#[test]
fn write_write_flush() -> TestResult {
// Writes to a contiguous region stay in the write buffer until `flush` is called.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.write_all(b"bar"))?;
|
et) as usize;
let end = start + buf.len();
self.wr
|
identifier_body
|
bootstrap.py
|
, path, requirements, hgrc, heads):
"""Wraps process_hg_sync to provide logging"""
logger.info('syncing repo: %s' % path)
try:
return process_hg_sync(config, path, requirements, hgrc, heads, create=True)
finally:
logger.info('exiting sync for: %s' % path)
def seqmap(message_handler, events):
'''Process events using the message handler in the order they
arrived in the queue
'''
for config, payload in events:
message_handler(config, payload)
def hgssh():
'''hgssh component of the vcsreplicator bootstrap procedure.'''
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path to config file')
parser.add_argument('hg', help='Path to hg executable for use in bootstrap process')
parser.add_argument('--workers', help='Number of concurrent workers to use for publishing messages', type=int,
default=multiprocessing.cpu_count())
parser.add_argument('--output', help='Output file path for hgssh JSON')
args = parser.parse_args()
config = Config(filename=args.config)
topic = config.get('replicationproducer', 'topic')
# Create consumer to gather partition offsets
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
|
'enable_auto_commit': False, # We don't actually commit but this is just for good measure
}
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
# Needed as missing topic metadata can cause the below call to retrieve
# partition information to fail.
consumer.topics()
partitions = consumer.partitions_for_topic(topic)
if not partitions:
logger.critical('could not get partitions for %s' % topic)
sys.exit(1)
# Gather the initial offsets
topicpartitions = [
TopicPartition(topic, partition_number)
for partition_number in sorted(partitions)
]
offsets_start = consumer.end_offsets(topicpartitions)
logger.info('gathered initial Kafka offsets')
# Mapping of `replicatesync` future to corresponding repo name
replicatesync_futures = {}
with futures.ThreadPoolExecutor(args.workers) as e:
# Create a future which makes a `replicatesync` call
# for each repo on hg.mo
for repo in find_hg_repos(REPOS_DIR):
# Create a future to call `replicatesync` for this repo
replicatesync_args = [
args.hg,
'-R', repo,
'replicatesync',
'--bootstrap',
]
replicatesync_futures.update({
e.submit(subprocess.check_output, replicatesync_args): repo
})
logger.info('calling `replicatesync --bootstrap` on %s' % repo)
# Execute the futures and raise an Exception on fail
for future in futures.as_completed(replicatesync_futures):
repo = replicatesync_futures[future]
exc = future.exception()
if exc:
logger.error('error occurred calling `replicatesync --bootstrap` on %s: %s' % (repo, exc))
raise Exception('error triggering replication of Mercurial repo %s: %s' %
(repo, exc))
logger.info('called `replicatesync --bootstrap` on %s successfully' % repo)
# Gather the final offsets
offsets_end = consumer.end_offsets(topicpartitions)
logger.info('gathered final Kafka offsets')
# Create map of partition numbers to (start, end) offset tuples
offsets_combined = {
int(topicpartition.partition): (offsets_start[topicpartition], offsets_end[topicpartition])
for topicpartition in topicpartitions
}
# Create JSON for processing in ansible and print to stdout
# Convert repo paths into their wire representations
output = {
'offsets': offsets_combined,
'repositories': sorted([
config.get_replication_path_rewrite(repo)
for repo in replicatesync_futures.values()
]),
}
print(json.dumps(output))
logger.info('hgssh bootstrap process complete!')
# Send output to a file if requested
if args.output:
logger.info('writing output to %s' % args.output)
with open(args.output, 'w') as f:
json.dump(output, f)
def hgweb():
'''hgweb component of the vcsreplicator bootstrap procedure. Takes a
vcsreplicator config path on the CLI and takes a JSON data structure
on stdin'''
import argparse
# Parse CLI args
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path of config file to load')
parser.add_argument('input', help='JSON data input (output from the hgssh bootstrap procedure) file path')
parser.add_argument('--workers', help='Number of concurrent workers to use for performing clones', type=int,
default=multiprocessing.cpu_count())
args = parser.parse_args()
logger.info('reading hgssh JSON document')
with open(args.input, 'r') as f:
hgssh_data = json.loads(f.read())
logger.info('JSON document read')
# Convert the JSON keys to integers
hgssh_data['offsets'] = {
int(k): v
for k, v in hgssh_data['offsets'].items()
}
config = Config(filename=args.config)
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'client_id': config.get('consumer', 'client_id'),
'enable_auto_commit': False,
'group_id': config.get('consumer', 'group'),
'max_partition_fetch_bytes': MAX_BUFFER_SIZE,
'value_deserializer': value_deserializer,
}
topic = config.get('consumer', 'topic')
topicpartitions = [
TopicPartition(topic, partition)
for partition, (start_offset, end_offset)
in sorted(hgssh_data['offsets'].items())
# there is no need to do an assignment if the length of the
# bootstrap message range is 0
if start_offset != end_offset
]
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
consumer.topics()
outputdata = collections.defaultdict(list)
# We will remove repos from this set as we replicate them
# Once this is an empty set we are done
repositories_to_clone = set()
for repo in hgssh_data['repositories']:
filterresult = config.filter(repo)
if filterresult.passes_filter:
repositories_to_clone.add(repo)
else:
outputdata[repo].append('filtered by rule %s' % filterresult.rule)
extra_messages = collections.defaultdict(collections.deque) # maps repo names to extra processing messages
clone_futures_repo_mapping = {} # maps cloning futures to repo names
extra_messages_futures_repo_mapping = {} # maps extra messages futures to repo names
# Overwrite the default hglib path so handle_message_main and its derivatives
# use the correct virtualenv
hglib.HGPATH = config.get('programs', 'hg')
# Maps partitions to the list of messages within the bootstrap range
aggregate_messages_by_topicpartition = {
tp.partition: []
for tp in topicpartitions
}
# Gather all the Kafka messages within the bootstrap range for each partition
for topicpartition in topicpartitions:
start_offset, end_offset = hgssh_data['offsets'][topicpartition.partition]
end_offset -= 1
# Assign the consumer to the next partition and move to the start offset
logger.info('assigning the consumer to partition %s' % topicpartition.partition)
consumer.assign([topicpartition])
logger.info('seeking the consumer to offset %s' % start_offset)
consumer.seek(topicpartition, start_offset)
consumer.commit(offsets={
topicpartition: OffsetAndMetadata(start_offset, '')
})
logger.info('partition %s of topic %s moved to offset %s' %
(topicpartition.partition, topicpartition.topic, start_offset))
# Get all the messages we need to process from kafka
for message in consumer:
# Check if the message we are processing is within the range of accepted messages
# If we are in the range, add this message to the list of messages on this partition
# If we are at the end of the range, break from the loop and move on to the next partition
if message.offset <= end_offset:
aggregate_messages_by_topicpartition[message.partition].append(message)
logger.info('message on partition %s, offset %s has been collected' % (message.partition, message.offset))
consumer.commit(offsets={
TopicPartition(topic, message.partition): OffsetAndMetadata(message.offset + 1, ''),
})
if message.offset >= end_offset:
logger.info('finished retrieving messages on partition %s' % message.partition)
break
logger.info('finished retrieving messages from Kafka')
# Process the previously collected messages
with futures.Thread
|
random_line_split
|
|
bootstrap.py
|
type=int,
default=multiprocessing.cpu_count())
parser.add_argument('--output', help='Output file path for hgssh JSON')
args = parser.parse_args()
config = Config(filename=args.config)
topic = config.get('replicationproducer', 'topic')
# Create consumer to gather partition offsets
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'enable_auto_commit': False, # We don't actually commit but this is just for good measure
}
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
# Needed as missing topic metadata can cause the below call to retrieve
# partition information to fail.
consumer.topics()
partitions = consumer.partitions_for_topic(topic)
if not partitions:
logger.critical('could not get partitions for %s' % topic)
sys.exit(1)
# Gather the initial offsets
topicpartitions = [
TopicPartition(topic, partition_number)
for partition_number in sorted(partitions)
]
offsets_start = consumer.end_offsets(topicpartitions)
logger.info('gathered initial Kafka offsets')
# Mapping of `replicatesync` future to corresponding repo name
replicatesync_futures = {}
with futures.ThreadPoolExecutor(args.workers) as e:
# Create a future which makes a `replicatesync` call
# for each repo on hg.mo
for repo in find_hg_repos(REPOS_DIR):
# Create a future to call `replicatesync` for this repo
replicatesync_args = [
args.hg,
'-R', repo,
'replicatesync',
'--bootstrap',
]
replicatesync_futures.update({
e.submit(subprocess.check_output, replicatesync_args): repo
})
logger.info('calling `replicatesync --bootstrap` on %s' % repo)
# Execute the futures and raise an Exception on fail
for future in futures.as_completed(replicatesync_futures):
repo = replicatesync_futures[future]
exc = future.exception()
if exc:
logger.error('error occurred calling `replicatesync --bootstrap` on %s: %s' % (repo, exc))
raise Exception('error triggering replication of Mercurial repo %s: %s' %
(repo, exc))
logger.info('called `replicatesync --bootstrap` on %s successfully' % repo)
# Gather the final offsets
offsets_end = consumer.end_offsets(topicpartitions)
logger.info('gathered final Kafka offsets')
# Create map of partition numbers to (start, end) offset tuples
offsets_combined = {
int(topicpartition.partition): (offsets_start[topicpartition], offsets_end[topicpartition])
for topicpartition in topicpartitions
}
# Create JSON for processing in ansible and print to stdout
# Convert repo paths into their wire representations
output = {
'offsets': offsets_combined,
'repositories': sorted([
config.get_replication_path_rewrite(repo)
for repo in replicatesync_futures.values()
]),
}
print(json.dumps(output))
logger.info('hgssh bootstrap process complete!')
# Send output to a file if requested
if args.output:
logger.info('writing output to %s' % args.output)
with open(args.output, 'w') as f:
json.dump(output, f)
def hgweb():
'''hgweb component of the vcsreplicator bootstrap procedure. Takes a
vcsreplicator config path on the CLI and takes a JSON data structure
on stdin'''
import argparse
# Parse CLI args
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path of config file to load')
parser.add_argument('input', help='JSON data input (output from the hgssh bootstrap procedure) file path')
parser.add_argument('--workers', help='Number of concurrent workers to use for performing clones', type=int,
default=multiprocessing.cpu_count())
args = parser.parse_args()
logger.info('reading hgssh JSON document')
with open(args.input, 'r') as f:
hgssh_data = json.loads(f.read())
logger.info('JSON document read')
# Convert the JSON keys to integers
hgssh_data['offsets'] = {
int(k): v
for k, v in hgssh_data['offsets'].items()
}
config = Config(filename=args.config)
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'client_id': config.get('consumer', 'client_id'),
'enable_auto_commit': False,
'group_id': config.get('consumer', 'group'),
'max_partition_fetch_bytes': MAX_BUFFER_SIZE,
'value_deserializer': value_deserializer,
}
topic = config.get('consumer', 'topic')
topicpartitions = [
TopicPartition(topic, partition)
for partition, (start_offset, end_offset)
in sorted(hgssh_data['offsets'].items())
# there is no need to do an assignment if the length of the
# bootstrap message range is 0
if start_offset != end_offset
]
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
consumer.topics()
outputdata = collections.defaultdict(list)
# We will remove repos from this set as we replicate them
# Once this is an empty set we are done
repositories_to_clone = set()
for repo in hgssh_data['repositories']:
filterresult = config.filter(repo)
if filterresult.passes_filter:
repositories_to_clone.add(repo)
else:
outputdata[repo].append('filtered by rule %s' % filterresult.rule)
extra_messages = collections.defaultdict(collections.deque) # maps repo names to extra processing messages
clone_futures_repo_mapping = {} # maps cloning futures to repo names
extra_messages_futures_repo_mapping = {} # maps extra messages futures to repo names
# Overwrite the default hglib path so handle_message_main and its derivatives
# use the correct virtualenv
hglib.HGPATH = config.get('programs', 'hg')
# Maps partitions to the list of messages within the bootstrap range
aggregate_messages_by_topicpartition = {
tp.partition: []
for tp in topicpartitions
}
# Gather all the Kafka messages within the bootstrap range for each partition
for topicpartition in topicpartitions:
start_offset, end_offset = hgssh_data['offsets'][topicpartition.partition]
end_offset -= 1
# Assign the consumer to the next partition and move to the start offset
logger.info('assigning the consumer to partition %s' % topicpartition.partition)
consumer.assign([topicpartition])
logger.info('seeking the consumer to offset %s' % start_offset)
consumer.seek(topicpartition, start_offset)
consumer.commit(offsets={
topicpartition: OffsetAndMetadata(start_offset, '')
})
logger.info('partition %s of topic %s moved to offset %s' %
(topicpartition.partition, topicpartition.topic, start_offset))
# Get all the messages we need to process from kafka
for message in consumer:
# Check if the message we are processing is within the range of accepted messages
# If we are in the range, add this message to the list of messages on this partition
# If we are at the end of the range, break from the loop and move on to the next partition
if message.offset <= end_offset:
aggregate_messages_by_topicpartition[message.partition].append(message)
logger.info('message on partition %s, offset %s has been collected' % (message.partition, message.offset))
consumer.commit(offsets={
TopicPartition(topic, message.partition): OffsetAndMetadata(message.offset + 1, ''),
})
if message.offset >= end_offset:
logger.info('finished retrieving messages on partition %s' % message.partition)
break
logger.info('finished retrieving messages from Kafka')
# Process the previously collected messages
with futures.ThreadPoolExecutor(args.workers) as e:
for partition, messages in sorted(aggregate_messages_by_topicpartition.items()):
|
logger.info('processing messages for partition %s' % partition)
for message in messages:
payload = message.value
# Ignore heartbeat messages
if payload['name'] == 'heartbeat-1':
continue
if payload['path'] in repositories_to_clone:
# The repository for this message has not been replicated yet. If this
# message is not a repo sync message, or the sync message is not tagged
# with the bootstrap flag, move on to the next message; the assumed
# upcoming hg-repo-sync-2 message will clone the data represented in
# this message anyway.
if payload['name'] != 'hg-repo-sync-2' or not payload['bootstrap']:
continue
logger.info('scheduled clone for %s' % payload['path'])
# Schedule the repo sync
clone_future = e.submit(clone_repo, config, payload['path'],
|
conditional_block
|
|
bootstrap.py
|
, path, requirements, hgrc, heads):
"""Wraps process_hg_sync to provide logging"""
logger.info('syncing repo: %s' % path)
try:
return process_hg_sync(config, path, requirements, hgrc, heads, create=True)
finally:
logger.info('exiting sync for: %s' % path)
def seqmap(message_handler, events):
'''Process events using the message handler in the order they
arrived in the queue
'''
for config, payload in events:
message_handler(config, payload)
def hgssh():
'''hgssh component of the vcsreplicator bootstrap procedure.'''
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path to config file')
parser.add_argument('hg', help='Path to hg executable for use in bootstrap process')
parser.add_argument('--workers', help='Number of concurrent workers to use for publishing messages', type=int,
default=multiprocessing.cpu_count())
parser.add_argument('--output', help='Output file path for hgssh JSON')
args = parser.parse_args()
config = Config(filename=args.config)
topic = config.get('replicationproducer', 'topic')
# Create consumer to gather partition offsets
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'enable_auto_commit': False, # We don't actually commit but this is just for good measure
}
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
# Needed as missing topic metadata can cause the below call to retrieve
# partition information to fail.
consumer.topics()
partitions = consumer.partitions_for_topic(topic)
if not partitions:
logger.critical('could not get partitions for %s' % topic)
sys.exit(1)
# Gather the initial offsets
topicpartitions = [
TopicPartition(topic, partition_number)
for partition_number in sorted(partitions)
]
offsets_start = consumer.end_offsets(topicpartitions)
logger.info('gathered initial Kafka offsets')
# Mapping of `replicatesync` future to corresponding repo name
replicatesync_futures = {}
with futures.ThreadPoolExecutor(args.workers) as e:
# Create a future which makes a `replicatesync` call
# for each repo on hg.mo
for repo in find_hg_repos(REPOS_DIR):
# Create a future to call `replicatesync` for this repo
replicatesync_args = [
args.hg,
'-R', repo,
'replicatesync',
'--bootstrap',
]
replicatesync_futures.update({
e.submit(subprocess.check_output, replicatesync_args): repo
})
logger.info('calling `replicatesync --bootstrap` on %s' % repo)
    # Execute the futures and raise an exception on failure
for future in futures.as_completed(replicatesync_futures):
repo = replicatesync_futures[future]
exc = future.exception()
if exc:
logger.error('error occurred calling `replicatesync --bootstrap` on %s: %s' % (repo, exc))
raise Exception('error triggering replication of Mercurial repo %s: %s' %
(repo, exc))
logger.info('called `replicatesync --bootstrap` on %s successfully' % repo)
# Gather the final offsets
offsets_end = consumer.end_offsets(topicpartitions)
logger.info('gathered final Kafka offsets')
# Create map of partition numbers to (start, end) offset tuples
offsets_combined = {
int(topicpartition.partition): (offsets_start[topicpartition], offsets_end[topicpartition])
for topicpartition in topicpartitions
}
# Create JSON for processing in ansible and print to stdout
# Convert repo paths into their wire representations
output = {
'offsets': offsets_combined,
'repositories': sorted([
config.get_replication_path_rewrite(repo)
for repo in replicatesync_futures.values()
]),
}
print(json.dumps(output))
logger.info('hgssh bootstrap process complete!')
# Send output to a file if requested
if args.output:
logger.info('writing output to %s' % args.output)
with open(args.output, 'w') as f:
json.dump(output, f)
def
|
():
    '''hgweb component of the vcsreplicator bootstrap procedure. Takes a
    vcsreplicator config path and the path to a JSON data file (the output
    of the hgssh bootstrap procedure) on the CLI.'''
import argparse
# Parse CLI args
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path of config file to load')
parser.add_argument('input', help='JSON data input (output from the hgssh bootstrap procedure) file path')
parser.add_argument('--workers', help='Number of concurrent workers to use for performing clones', type=int,
default=multiprocessing.cpu_count())
args = parser.parse_args()
logger.info('reading hgssh JSON document')
with open(args.input, 'r') as f:
hgssh_data = json.loads(f.read())
logger.info('JSON document read')
# Convert the JSON keys to integers
hgssh_data['offsets'] = {
int(k): v
for k, v in hgssh_data['offsets'].items()
}
config = Config(filename=args.config)
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'client_id': config.get('consumer', 'client_id'),
'enable_auto_commit': False,
'group_id': config.get('consumer', 'group'),
'max_partition_fetch_bytes': MAX_BUFFER_SIZE,
'value_deserializer': value_deserializer,
}
topic = config.get('consumer', 'topic')
topicpartitions = [
TopicPartition(topic, partition)
for partition, (start_offset, end_offset)
in sorted(hgssh_data['offsets'].items())
# there is no need to do an assignment if the length of the
# bootstrap message range is 0
if start_offset != end_offset
]
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
consumer.topics()
outputdata = collections.defaultdict(list)
# We will remove repos from this set as we replicate them
# Once this is an empty set we are done
repositories_to_clone = set()
for repo in hgssh_data['repositories']:
filterresult = config.filter(repo)
if filterresult.passes_filter:
repositories_to_clone.add(repo)
else:
outputdata[repo].append('filtered by rule %s' % filterresult.rule)
extra_messages = collections.defaultdict(collections.deque) # maps repo names to extra processing messages
clone_futures_repo_mapping = {} # maps cloning futures to repo names
extra_messages_futures_repo_mapping = {} # maps extra messages futures to repo names
    # Overwrite the default hglib path so handle_message_main and its derivatives
# use the correct virtualenv
hglib.HGPATH = config.get('programs', 'hg')
# Maps partitions to the list of messages within the bootstrap range
aggregate_messages_by_topicpartition = {
tp.partition: []
for tp in topicpartitions
}
# Gather all the Kafka messages within the bootstrap range for each partition
for topicpartition in topicpartitions:
start_offset, end_offset = hgssh_data['offsets'][topicpartition.partition]
end_offset -= 1
# Assign the consumer to the next partition and move to the start offset
logger.info('assigning the consumer to partition %s' % topicpartition.partition)
consumer.assign([topicpartition])
logger.info('seeking the consumer to offset %s' % start_offset)
consumer.seek(topicpartition, start_offset)
consumer.commit(offsets={
topicpartition: OffsetAndMetadata(start_offset, '')
})
logger.info('partition %s of topic %s moved to offset %s' %
(topicpartition.partition, topicpartition.topic, start_offset))
# Get all the messages we need to process from kafka
for message in consumer:
# Check if the message we are processing is within the range of accepted messages
# If we are in the range, add this message to the list of messages on this partition
# If we are at the end of the range, break from the loop and move on to the next partition
if message.offset <= end_offset:
aggregate_messages_by_topicpartition[message.partition].append(message)
logger.info('message on partition %s, offset %s has been collected' % (message.partition, message.offset))
consumer.commit(offsets={
TopicPartition(topic, message.partition): OffsetAndMetadata(message.offset + 1, ''),
})
if message.offset >= end_offset:
logger.info('finished retrieving messages on partition %s' % message.partition)
break
logger.info('finished retrieving messages from Kafka')
# Process the previously collected messages
with futures
|
hgweb
|
identifier_name
|
bootstrap.py
|
, path, requirements, hgrc, heads):
"""Wraps process_hg_sync to provide logging"""
logger.info('syncing repo: %s' % path)
try:
return process_hg_sync(config, path, requirements, hgrc, heads, create=True)
finally:
logger.info('exiting sync for: %s' % path)
def seqmap(message_handler, events):
|
def hgssh():
'''hgssh component of the vcsreplicator bootstrap procedure.'''
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path to config file')
parser.add_argument('hg', help='Path to hg executable for use in bootstrap process')
parser.add_argument('--workers', help='Number of concurrent workers to use for publishing messages', type=int,
default=multiprocessing.cpu_count())
parser.add_argument('--output', help='Output file path for hgssh JSON')
args = parser.parse_args()
config = Config(filename=args.config)
topic = config.get('replicationproducer', 'topic')
# Create consumer to gather partition offsets
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'enable_auto_commit': False, # We don't actually commit but this is just for good measure
}
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
# Needed as missing topic metadata can cause the below call to retrieve
# partition information to fail.
consumer.topics()
partitions = consumer.partitions_for_topic(topic)
if not partitions:
logger.critical('could not get partitions for %s' % topic)
sys.exit(1)
# Gather the initial offsets
topicpartitions = [
TopicPartition(topic, partition_number)
for partition_number in sorted(partitions)
]
offsets_start = consumer.end_offsets(topicpartitions)
logger.info('gathered initial Kafka offsets')
# Mapping of `replicatesync` future to corresponding repo name
replicatesync_futures = {}
with futures.ThreadPoolExecutor(args.workers) as e:
# Create a future which makes a `replicatesync` call
# for each repo on hg.mo
for repo in find_hg_repos(REPOS_DIR):
# Create a future to call `replicatesync` for this repo
replicatesync_args = [
args.hg,
'-R', repo,
'replicatesync',
'--bootstrap',
]
replicatesync_futures.update({
e.submit(subprocess.check_output, replicatesync_args): repo
})
logger.info('calling `replicatesync --bootstrap` on %s' % repo)
    # Execute the futures and raise an exception on failure
for future in futures.as_completed(replicatesync_futures):
repo = replicatesync_futures[future]
exc = future.exception()
if exc:
logger.error('error occurred calling `replicatesync --bootstrap` on %s: %s' % (repo, exc))
raise Exception('error triggering replication of Mercurial repo %s: %s' %
(repo, exc))
logger.info('called `replicatesync --bootstrap` on %s successfully' % repo)
# Gather the final offsets
offsets_end = consumer.end_offsets(topicpartitions)
logger.info('gathered final Kafka offsets')
# Create map of partition numbers to (start, end) offset tuples
offsets_combined = {
int(topicpartition.partition): (offsets_start[topicpartition], offsets_end[topicpartition])
for topicpartition in topicpartitions
}
# Create JSON for processing in ansible and print to stdout
# Convert repo paths into their wire representations
output = {
'offsets': offsets_combined,
'repositories': sorted([
config.get_replication_path_rewrite(repo)
for repo in replicatesync_futures.values()
]),
}
print(json.dumps(output))
logger.info('hgssh bootstrap process complete!')
# Send output to a file if requested
if args.output:
logger.info('writing output to %s' % args.output)
with open(args.output, 'w') as f:
json.dump(output, f)
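# Illustrative note (not part of the original module): the JSON document written
# above has roughly the following shape; the partition numbers, offsets and repo
# paths shown here are made-up placeholders. Note that JSON object keys are
# strings, which is why hgweb() below converts the "offsets" keys back to ints.
#
#   {
#       "offsets": {"0": [12, 34], "1": [5, 5]},
#       "repositories": ["projects/example-repo", "users/example-user/repo"]
#   }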
def hgweb():
    '''hgweb component of the vcsreplicator bootstrap procedure. Takes a
    vcsreplicator config path and the path to a JSON data file (the output
    of the hgssh bootstrap procedure) on the CLI.'''
import argparse
# Parse CLI args
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path of config file to load')
parser.add_argument('input', help='JSON data input (output from the hgssh bootstrap procedure) file path')
parser.add_argument('--workers', help='Number of concurrent workers to use for performing clones', type=int,
default=multiprocessing.cpu_count())
args = parser.parse_args()
logger.info('reading hgssh JSON document')
with open(args.input, 'r') as f:
hgssh_data = json.loads(f.read())
logger.info('JSON document read')
# Convert the JSON keys to integers
hgssh_data['offsets'] = {
int(k): v
for k, v in hgssh_data['offsets'].items()
}
config = Config(filename=args.config)
consumer_config = {
# set this so offsets are committed to Zookeeper
'api_version': (0, 8, 1),
'bootstrap_servers': [
host.strip()
for host in config.get('replicationproducer', 'hosts').split(',')
],
'client_id': config.get('consumer', 'client_id'),
'enable_auto_commit': False,
'group_id': config.get('consumer', 'group'),
'max_partition_fetch_bytes': MAX_BUFFER_SIZE,
'value_deserializer': value_deserializer,
}
topic = config.get('consumer', 'topic')
topicpartitions = [
TopicPartition(topic, partition)
for partition, (start_offset, end_offset)
in sorted(hgssh_data['offsets'].items())
# there is no need to do an assignment if the length of the
# bootstrap message range is 0
if start_offset != end_offset
]
consumer = KafkaConsumer(**consumer_config)
# This call populates topic metadata for all topics in the cluster.
consumer.topics()
outputdata = collections.defaultdict(list)
# We will remove repos from this set as we replicate them
# Once this is an empty set we are done
repositories_to_clone = set()
for repo in hgssh_data['repositories']:
filterresult = config.filter(repo)
if filterresult.passes_filter:
repositories_to_clone.add(repo)
else:
outputdata[repo].append('filtered by rule %s' % filterresult.rule)
extra_messages = collections.defaultdict(collections.deque) # maps repo names to extra processing messages
clone_futures_repo_mapping = {} # maps cloning futures to repo names
extra_messages_futures_repo_mapping = {} # maps extra messages futures to repo names
    # Overwrite the default hglib path so handle_message_main and its derivatives
# use the correct virtualenv
hglib.HGPATH = config.get('programs', 'hg')
# Maps partitions to the list of messages within the bootstrap range
aggregate_messages_by_topicpartition = {
tp.partition: []
for tp in topicpartitions
}
# Gather all the Kafka messages within the bootstrap range for each partition
for topicpartition in topicpartitions:
start_offset, end_offset = hgssh_data['offsets'][topicpartition.partition]
end_offset -= 1
# Assign the consumer to the next partition and move to the start offset
logger.info('assigning the consumer to partition %s' % topicpartition.partition)
consumer.assign([topicpartition])
logger.info('seeking the consumer to offset %s' % start_offset)
consumer.seek(topicpartition, start_offset)
consumer.commit(offsets={
topicpartition: OffsetAndMetadata(start_offset, '')
})
logger.info('partition %s of topic %s moved to offset %s' %
(topicpartition.partition, topicpartition.topic, start_offset))
# Get all the messages we need to process from kafka
for message in consumer:
# Check if the message we are processing is within the range of accepted messages
# If we are in the range, add this message to the list of messages on this partition
# If we are at the end of the range, break from the loop and move on to the next partition
if message.offset <= end_offset:
aggregate_messages_by_topicpartition[message.partition].append(message)
logger.info('message on partition %s, offset %s has been collected' % (message.partition, message.offset))
consumer.commit(offsets={
TopicPartition(topic, message.partition): OffsetAndMetadata(message.offset + 1, ''),
})
if message.offset >= end_offset:
logger.info('finished retrieving messages on partition %s' % message.partition)
break
logger.info('finished retrieving messages from Kafka')
# Process the previously collected messages
with futures
|
'''Process events using the message handler in the order they
arrived in the queue
'''
for config, payload in events:
message_handler(config, payload)
|
identifier_body
|
linked_list.rs
|
<L> {
pub fn empty() -> ListLink<L> {
ListLink {
push_lock: AtomicBool::new(true),
next: Cell::new(None),
prev: Cell::new(None),
_pd: PhantomData,
}
}
pub fn is_in_use(&self) -> bool {
match (self.next.get(), self.prev.get()) {
(Some(_), Some(_)) => true,
(None, None) => false,
_ => unreachable!(),
}
}
fn next(&self) -> Option<NonNull<ListLink<L>>> {
self.next.get()
}
}
impl<L: ListNode> Debug for ListLink<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"ListLink<{}>",
if self.is_in_use() {
"in list"
} else {
"not in list"
}
)
}
}
impl<L: ListNode> Default for ListLink<L> {
fn default() -> Self {
ListLink::empty()
}
}
// SAFETY: ListLink is protected by `push_lock`.
unsafe impl<L: ListNode + Sync> Sync for ListLink<L> {}
#[macro_export(local_inner_macros)]
macro_rules! define_list_node {
($list_name:ident, $elem:ty, $field:ident) => {
struct $list_name;
impl $crate::linked_list::ListNode for $list_name {
type Elem = $elem;
fn elem_to_link(elem: Self::Elem) -> core::ptr::NonNull<ListLink<Self>> {
unsafe {
core::ptr::NonNull::new_unchecked(
&mut ((*$crate::Static::into_nonnull(elem).as_mut()).$field) as *mut _,
)
}
}
fn from_link_to_elem(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> Self::Elem {
let nonnull = Self::from_link_to_nonnull(link);
unsafe { $crate::Static::from_nonnull(nonnull) }
}
fn from_link_to_nonnull(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> core::ptr::NonNull<<Self::Elem as $crate::Static>::Inner> {
let offset =
$crate::linked_list::offset_of!(<Self::Elem as $crate::Static>::Inner, $field);
// SAFETY: It won't be null since link is nonnull.
unsafe {
core::ptr::NonNull::new_unchecked(
(link.as_ptr() as *mut u8).offset(-offset) as *mut _
)
}
}
}
};
}
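// Illustrative usage sketch (not part of the original file), mirroring the
// module-level docs: it assumes a `Thread` type whose `Arc<Thread>` implements
// this crate's `Static` trait and embeds a `link: ListLink<ThreadsNode>` field.
//
//     define_list_node!(ThreadsNode, Arc<Thread>, link);
//
//     let mut threads = List::<ThreadsNode>::new();
//     let thread1 = Arc::new(Thread { id: 1, link: Default::default() });
//     // push_back returns Err(elem) if the link is already in use by another list.
//     let _ = threads.push_back(thread1);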
/// An intrusive linked list.
pub struct List<L: ListNode> {
head: Option<NonNull<ListLink<L>>>,
tail: Option<NonNull<ListLink<L>>>,
_pd: PhantomData<L>,
}
impl<L: ListNode> List<L> {
/// Creates an empty linked list.
pub const fn new() -> List<L> {
List {
head: None,
tail: None,
_pd: PhantomData,
}
}
/// Returns `true` if the list is empty. `O(1)`.
pub fn is_empty(&self) -> bool {
        self.head.is_none()
}
/// Returns the number of elements. `O(n)`.
pub fn len(&self) -> usize {
let mut len = 0;
for _ in self.iter() {
len += 1;
}
len
}
    /// Removes and returns the first element satisfying the predicate `pred`. It
    /// returns `None` if the list is empty or `pred` returned `false` for all
    /// elements. `O(n)`.
pub fn remove_first_if<F>(&mut self, pred: F) -> Option<L::Elem>
where
F: Fn(&<L::Elem as Static>::Inner) -> bool,
{
let mut current = self.head;
while let Some(link) = current {
if pred(unsafe { L::from_link_to_nonnull(link).as_ref() }) {
self.remove(unsafe { link.as_ref() });
return Some(L::from_link_to_elem(link));
}
current = unsafe { link.as_ref().next() };
}
None
}
/// Calls the callback for each element. `O(n)`.
fn walk_links<F, R>(&self, mut f: F) -> Option<R>
where
F: FnMut(NonNull<ListLink<L>>) -> ControlFlow<R>,
{
let mut current = self.head;
while let Some(link) = current {
if let ControlFlow::Break(value) = f(link) {
return Some(value);
}
current = unsafe { link.as_ref().next() };
}
None
}
    /// Inserts an element at the end of the list. Returns `Err(elem)` if another
    /// thread has just inserted the element into a (possibly different) list using
    /// the same link as defined in `L`. `O(1)`.
pub fn push_back(&mut self, elem: L::Elem) -> Result<(), L::Elem> {
unsafe {
let link_ptr = L::elem_to_link(elem);
let link = link_ptr.as_ref();
// Prevent multiple threads from inserting the same link at once.
//
            // Say CPU 1 and CPU 2 are trying to add thread A to their own
            // runqueues simultaneously:
            //
            // CPU 1: runqueue1.push_back(thread_A.clone());
            // CPU 2: runqueue2.push_back(thread_A.clone());
            //
            // In this case, one of the CPUs (CPU 1 or CPU 2) will fail to
            // insert the element.
if !link.push_lock.swap(false, Ordering::SeqCst) {
return Err(L::from_link_to_elem(link_ptr));
}
assert!(
!link.is_in_use(),
"tried to insert an already inserted link to another list"
);
if let Some(tail) = self.tail {
tail.as_ref().next.set(Some(link_ptr));
}
if self.head.is_none()
|
link.prev.set(self.tail);
link.next.set(None);
self.tail = Some(link_ptr);
Ok(())
}
}
/// Pops the element at the beginning of the list. `O(1)`.
pub fn pop_front(&mut self) -> Option<L::Elem> {
match self.head {
Some(head) => unsafe {
self.remove(head.as_ref());
Some(L::from_link_to_elem(head))
},
None => None,
}
}
pub fn is_link_in_list(&mut self, link: &ListLink<L>) -> bool {
let elem_nonnull = unsafe { NonNull::new_unchecked(link as *const _ as *mut _) };
self.walk_links(|link| {
if link == elem_nonnull {
ControlFlow::Break(true)
} else {
ControlFlow::Continue(())
}
})
.unwrap_or(false)
}
/// Removes an element in the list. `O(1)`.
///
/// Caller must make sure that the element is in the list.
pub fn remove(&mut self, link: &ListLink<L>) {
        // Because we don't need access to `self`, we could define this
        // method as `List::remove(elem: L::Elem)`. However, since that would
        // allow simultaneous removals and break the links, we intentionally
        // require `&mut self` to prevent such a race.
// Make sure the element is in the list or this method would mutate other
// lists.
debug_assert!(self.is_link_in_list(link));
match (link.prev.get(), link.next.get()) {
(Some(prev), Some(next)) => unsafe {
next.as_ref().prev.set(Some(prev));
prev.as_ref().next.set(Some(next));
},
(None, Some(next)) => unsafe {
next.as_ref().prev.set(None);
self.head = Some(next);
},
(Some(prev), None) => unsafe {
prev.as_ref().next.set(None);
self.tail = Some(prev);
},
(None, None) => {
self.head = None;
self.tail = None;
}
}
link.prev.set(None);
link.next.set(None);
        // Release push_lock unconditionally; debug_assert! bodies are compiled out in release builds.
        let was_unlocked = link.push_lock.swap(true, Ordering::SeqCst);
        debug_assert!(!was_unlocked);
}
fn iter(&self) -> Iter<'_, L> {
Iter {
current: self.head,
_pd: &PhantomData,
}
}
}
impl<L: ListNode> Default for List<L> {
fn default() -> Self {
Self::new()
}
}
pub struct Iter<'a, L: ListNode> {
current: Option<NonNull<ListLink<L>>>,
_pd: &'a PhantomData<L>,
}
impl<'a, L: ListNode> Iterator for Iter<'a, L> {
type Item = &'a <L::Elem as Static>::Inner;
fn next(&mut self) -> Option<&'a <L::Elem as Static>::Inner> {
self.current.map(|current| unsafe {
self.current = current.as_ref().next();
L::from_link_to_nonnull(current).as_ref()
})
}
}
impl<'a, L: ListNode
|
{
self.head = Some(link_ptr);
}
|
conditional_block
|
linked_list.rs
|
//!
//! let mut threads = List::<ThreadsNode>::new();
//! let thread1 = Arc::new(Thread { id: 1, link: Default::default() });
//! threads.push_back(thread1);
//! ```
//!
use core::cell::Cell;
use core::fmt::{self, Debug, Formatter};
use core::marker::PhantomData;
use core::ops::ControlFlow;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicBool, Ordering};
pub use etc::offset_of;
use crate::Static;
/// A trait represents a container that can be inserted into the linked list.
pub trait ListNode {
type Elem: Static;
fn elem_to_link(elem: Self::Elem) -> NonNull<ListLink<Self>>;
fn from_link_to_elem(link: NonNull<ListLink<Self>>) -> Self::Elem;
fn from_link_to_nonnull(
link: NonNull<ListLink<Self>>,
) -> NonNull<<Self::Elem as Static>::Inner>;
}
/// A link fields of the linked list embedded in a container.
pub struct ListLink<L: ?Sized> {
push_lock: AtomicBool,
next: Cell<Option<NonNull<ListLink<L>>>>,
prev: Cell<Option<NonNull<ListLink<L>>>>,
_pd: PhantomData<L>,
}
impl<L: ListNode> ListLink<L> {
pub fn empty() -> ListLink<L> {
ListLink {
push_lock: AtomicBool::new(true),
next: Cell::new(None),
prev: Cell::new(None),
_pd: PhantomData,
}
}
pub fn is_in_use(&self) -> bool {
match (self.next.get(), self.prev.get()) {
(Some(_), Some(_)) => true,
(None, None) => false,
_ => unreachable!(),
}
}
fn next(&self) -> Option<NonNull<ListLink<L>>> {
self.next.get()
}
}
impl<L: ListNode> Debug for ListLink<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"ListLink<{}>",
if self.is_in_use() {
"in list"
} else {
"not in list"
}
)
}
}
impl<L: ListNode> Default for ListLink<L> {
fn default() -> Self {
ListLink::empty()
}
}
// SAFETY: ListLink is protected by `push_lock`.
unsafe impl<L: ListNode + Sync> Sync for ListLink<L> {}
#[macro_export(local_inner_macros)]
macro_rules! define_list_node {
($list_name:ident, $elem:ty, $field:ident) => {
struct $list_name;
impl $crate::linked_list::ListNode for $list_name {
type Elem = $elem;
fn elem_to_link(elem: Self::Elem) -> core::ptr::NonNull<ListLink<Self>> {
unsafe {
core::ptr::NonNull::new_unchecked(
&mut ((*$crate::Static::into_nonnull(elem).as_mut()).$field) as *mut _,
)
}
}
fn from_link_to_elem(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> Self::Elem {
let nonnull = Self::from_link_to_nonnull(link);
unsafe { $crate::Static::from_nonnull(nonnull) }
}
fn from_link_to_nonnull(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> core::ptr::NonNull<<Self::Elem as $crate::Static>::Inner> {
let offset =
$crate::linked_list::offset_of!(<Self::Elem as $crate::Static>::Inner, $field);
// SAFETY: It won't be null since link is nonnull.
unsafe {
core::ptr::NonNull::new_unchecked(
(link.as_ptr() as *mut u8).offset(-offset) as *mut _
)
}
}
}
};
}
/// An intrusive linked list.
pub struct List<L: ListNode> {
head: Option<NonNull<ListLink<L>>>,
tail: Option<NonNull<ListLink<L>>>,
_pd: PhantomData<L>,
}
impl<L: ListNode> List<L> {
/// Creates an empty linked list.
pub const fn new() -> List<L> {
List {
head: None,
tail: None,
_pd: PhantomData,
}
}
/// Returns `true` if the list is empty. `O(1)`.
pub fn is_empty(&self) -> bool {
        self.head.is_none()
}
/// Returns the number of elements. `O(n)`.
pub fn len(&self) -> usize {
let mut len = 0;
for _ in self.iter() {
len += 1;
}
len
}
    /// Removes and returns the first element satisfying the predicate `pred`. It
    /// returns `None` if the list is empty or `pred` returned `false` for all
    /// elements. `O(n)`.
pub fn remove_first_if<F>(&mut self, pred: F) -> Option<L::Elem>
where
F: Fn(&<L::Elem as Static>::Inner) -> bool,
{
let mut current = self.head;
while let Some(link) = current {
if pred(unsafe { L::from_link_to_nonnull(link).as_ref() }) {
self.remove(unsafe { link.as_ref() });
return Some(L::from_link_to_elem(link));
}
current = unsafe { link.as_ref().next() };
}
None
}
/// Calls the callback for each element. `O(n)`.
fn walk_links<F, R>(&self, mut f: F) -> Option<R>
where
F: FnMut(NonNull<ListLink<L>>) -> ControlFlow<R>,
{
let mut current = self.head;
while let Some(link) = current {
if let ControlFlow::Break(value) = f(link) {
return Some(value);
}
current = unsafe { link.as_ref().next() };
}
None
}
    /// Inserts an element at the end of the list. Returns `Err(elem)` if another
    /// thread has just inserted the element into a (possibly different) list using
    /// the same link as defined in `L`. `O(1)`.
pub fn push_back(&mut self, elem: L::Elem) -> Result<(), L::Elem> {
unsafe {
let link_ptr = L::elem_to_link(elem);
let link = link_ptr.as_ref();
// Prevent multiple threads from inserting the same link at once.
//
            // Say CPU 1 and CPU 2 are trying to add thread A to their own
            // runqueues simultaneously:
            //
            // CPU 1: runqueue1.push_back(thread_A.clone());
            // CPU 2: runqueue2.push_back(thread_A.clone());
            //
            // In this case, one of the CPUs (CPU 1 or CPU 2) will fail to
            // insert the element.
if !link.push_lock.swap(false, Ordering::SeqCst) {
return Err(L::from_link_to_elem(link_ptr));
}
assert!(
!link.is_in_use(),
"tried to insert an already inserted link to another list"
);
if let Some(tail) = self.tail {
tail.as_ref().next.set(Some(link_ptr));
}
if self.head.is_none() {
self.head = Some(link_ptr);
}
link.prev.set(self.tail);
link.next.set(None);
self.tail = Some(link_ptr);
Ok(())
}
}
/// Pops the element at the beginning of the list. `O(1)`.
pub fn pop_front(&mut self) -> Option<L::Elem> {
match self.head {
Some(head) => unsafe {
self.remove(head.as_ref());
Some(L::from_link_to_elem(head))
},
None => None,
}
}
pub fn is_link_in_list(&mut self, link: &ListLink<L>) -> bool {
let elem_nonnull = unsafe { NonNull::new_unchecked(link as *const _ as *mut _) };
self.walk_links(|link| {
if link == elem_nonnull {
ControlFlow::Break(true)
} else {
ControlFlow::Continue(())
}
})
.unwrap_or(false)
}
/// Removes an element in the list. `O(1)`.
///
/// Caller must make sure that the element is in the list.
pub fn remove(&mut self, link: &ListLink<L>) {
        // Because we don't need access to `self`, we could define this
        // method as `List::remove(elem: L::Elem)`. However, since that would
        // allow simultaneous removals and break the links, we intentionally
        // require `&mut self` to prevent such a race.
// Make sure the element is in the list or this method would mutate other
// lists.
debug_assert!(self.is_link_in_list(link));
match (link.prev.get(), link.next.get()) {
(Some(prev), Some(next)) => unsafe {
next.as_ref().prev.set(Some(prev));
prev.as_ref().next.set(Some(next));
},
(None
|
//! link: ListLink<ThreadsNode>,
//! }
//!
//! define_list_node!(ThreadsNode, Arc<Thread>, link);
|
random_line_split
|
|
linked_list.rs
|
Link<L> {
pub fn empty() -> ListLink<L> {
ListLink {
push_lock: AtomicBool::new(true),
next: Cell::new(None),
prev: Cell::new(None),
_pd: PhantomData,
}
}
pub fn is_in_use(&self) -> bool {
match (self.next.get(), self.prev.get()) {
(Some(_), Some(_)) => true,
(None, None) => false,
_ => unreachable!(),
}
}
fn next(&self) -> Option<NonNull<ListLink<L>>> {
self.next.get()
}
}
impl<L: ListNode> Debug for ListLink<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"ListLink<{}>",
if self.is_in_use() {
"in list"
} else {
"not in list"
}
)
}
}
impl<L: ListNode> Default for ListLink<L> {
fn default() -> Self {
ListLink::empty()
}
}
// SAFETY: ListLink is protected by `push_lock`.
unsafe impl<L: ListNode + Sync> Sync for ListLink<L> {}
#[macro_export(local_inner_macros)]
macro_rules! define_list_node {
($list_name:ident, $elem:ty, $field:ident) => {
struct $list_name;
impl $crate::linked_list::ListNode for $list_name {
type Elem = $elem;
fn elem_to_link(elem: Self::Elem) -> core::ptr::NonNull<ListLink<Self>> {
unsafe {
core::ptr::NonNull::new_unchecked(
&mut ((*$crate::Static::into_nonnull(elem).as_mut()).$field) as *mut _,
)
}
}
fn from_link_to_elem(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> Self::Elem {
let nonnull = Self::from_link_to_nonnull(link);
unsafe { $crate::Static::from_nonnull(nonnull) }
}
fn from_link_to_nonnull(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> core::ptr::NonNull<<Self::Elem as $crate::Static>::Inner> {
let offset =
$crate::linked_list::offset_of!(<Self::Elem as $crate::Static>::Inner, $field);
// SAFETY: It won't be null since link is nonnull.
unsafe {
core::ptr::NonNull::new_unchecked(
(link.as_ptr() as *mut u8).offset(-offset) as *mut _
)
}
}
}
};
}
/// An intrusive linked list.
pub struct List<L: ListNode> {
head: Option<NonNull<ListLink<L>>>,
tail: Option<NonNull<ListLink<L>>>,
_pd: PhantomData<L>,
}
impl<L: ListNode> List<L> {
/// Creates an empty linked list.
pub const fn new() -> List<L> {
List {
head: None,
tail: None,
_pd: PhantomData,
}
}
/// Returns `true` if the list is empty. `O(1)`.
pub fn is_empty(&self) -> bool {
        self.head.is_none()
}
/// Returns the number of elements. `O(n)`.
pub fn len(&self) -> usize {
let mut len = 0;
for _ in self.iter() {
len += 1;
}
len
}
    /// Removes and returns the first element satisfying the predicate `pred`. It
    /// returns `None` if the list is empty or `pred` returned `false` for all
    /// elements. `O(n)`.
pub fn remove_first_if<F>(&mut self, pred: F) -> Option<L::Elem>
where
F: Fn(&<L::Elem as Static>::Inner) -> bool,
{
let mut current = self.head;
while let Some(link) = current {
if pred(unsafe { L::from_link_to_nonnull(link).as_ref() }) {
self.remove(unsafe { link.as_ref() });
return Some(L::from_link_to_elem(link));
}
current = unsafe { link.as_ref().next() };
}
None
}
/// Calls the callback for each element. `O(n)`.
fn walk_links<F, R>(&self, mut f: F) -> Option<R>
where
F: FnMut(NonNull<ListLink<L>>) -> ControlFlow<R>,
{
let mut current = self.head;
while let Some(link) = current {
if let ControlFlow::Break(value) = f(link) {
return Some(value);
}
current = unsafe { link.as_ref().next() };
}
None
}
    /// Inserts an element at the end of the list. Returns `Err(elem)` if another
    /// thread has just inserted the element into a (possibly different) list using
    /// the same link as defined in `L`. `O(1)`.
pub fn push_back(&mut self, elem: L::Elem) -> Result<(), L::Elem> {
unsafe {
let link_ptr = L::elem_to_link(elem);
let link = link_ptr.as_ref();
// Prevent multiple threads from inserting the same link at once.
//
            // Say CPU 1 and CPU 2 are trying to add thread A to their own
            // runqueues simultaneously:
            //
            // CPU 1: runqueue1.push_back(thread_A.clone());
            // CPU 2: runqueue2.push_back(thread_A.clone());
            //
            // In this case, one of the CPUs (CPU 1 or CPU 2) will fail to
            // insert the element.
if !link.push_lock.swap(false, Ordering::SeqCst) {
return Err(L::from_link_to_elem(link_ptr));
}
assert!(
!link.is_in_use(),
"tried to insert an already inserted link to another list"
);
if let Some(tail) = self.tail {
tail.as_ref().next.set(Some(link_ptr));
}
if self.head.is_none() {
self.head = Some(link_ptr);
}
link.prev.set(self.tail);
link.next.set(None);
self.tail = Some(link_ptr);
Ok(())
}
}
/// Pops the element at the beginning of the list. `O(1)`.
pub fn pop_front(&mut self) -> Option<L::Elem> {
match self.head {
Some(head) => unsafe {
self.remove(head.as_ref());
Some(L::from_link_to_elem(head))
},
None => None,
}
}
pub fn is_link_in_list(&mut self, link: &ListLink<L>) -> bool {
let elem_nonnull = unsafe { NonNull::new_unchecked(link as *const _ as *mut _) };
self.walk_links(|link| {
if link == elem_nonnull {
ControlFlow::Break(true)
} else {
ControlFlow::Continue(())
}
})
.unwrap_or(false)
}
/// Removes an element in the list. `O(1)`.
///
/// Caller must make sure that the element is in the list.
pub fn remove(&mut self, link: &ListLink<L>) {
        // Because we don't need access to `self`, we could define this
        // method as `List::remove(elem: L::Elem)`. However, since that would
        // allow simultaneous removals and break the links, we intentionally
        // require `&mut self` to prevent such a race.
// Make sure the element is in the list or this method would mutate other
// lists.
debug_assert!(self.is_link_in_list(link));
match (link.prev.get(), link.next.get()) {
(Some(prev), Some(next)) => unsafe {
next.as_ref().prev.set(Some(prev));
prev.as_ref().next.set(Some(next));
},
(None, Some(next)) => unsafe {
next.as_ref().prev.set(None);
self.head = Some(next);
},
(Some(prev), None) => unsafe {
prev.as_ref().next.set(None);
self.tail = Some(prev);
},
(None, None) => {
self.head = None;
self.tail = None;
}
}
link.prev.set(None);
link.next.set(None);
        // Release push_lock unconditionally; debug_assert! bodies are compiled out in release builds.
        let was_unlocked = link.push_lock.swap(true, Ordering::SeqCst);
        debug_assert!(!was_unlocked);
}
fn
|
(&self) -> Iter<'_, L> {
Iter {
current: self.head,
_pd: &PhantomData,
}
}
}
impl<L: ListNode> Default for List<L> {
fn default() -> Self {
Self::new()
}
}
pub struct Iter<'a, L: ListNode> {
current: Option<NonNull<ListLink<L>>>,
_pd: &'a PhantomData<L>,
}
impl<'a, L: ListNode> Iterator for Iter<'a, L> {
type Item = &'a <L::Elem as Static>::Inner;
fn next(&mut self) -> Option<&'a <L::Elem as Static>::Inner> {
self.current.map(|current| unsafe {
self.current = current.as_ref().next();
L::from_link_to_nonnull(current).as_ref()
})
}
}
impl<'a, L: ListNode
|
iter
|
identifier_name
|
view.rs
|
(ImageViewType::Dim2dArray, ImageDimensions::Dim3d { .. }, _, 1)
if flags.array_2d_compatible =>
{
()
}
_ => return Err(ImageViewCreationError::IncompatibleType),
}
let inner =
unsafe { UnsafeImageView::new(image_inner, ty, mipmap_levels, array_layers.clone())? };
Ok(Arc::new(ImageView {
image,
inner,
array_layers,
format,
identity_swizzle: true, // FIXME:
ty,
}))
}
/// Returns the wrapped image that this image view was created from.
pub fn image(&self) -> &I {
&self.image
}
}
/// Error that can happen when creating an image view.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImageViewCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocError),
/// The specified range of array layers was out of range for the image.
ArrayLayersOutOfRange,
/// The specified range of mipmap levels was out of range for the image.
MipMapLevelsOutOfRange,
/// The requested [`ImageViewType`] was not compatible with the image, or with the specified ranges of array layers and mipmap levels.
IncompatibleType,
/// The image was not created with
/// [one of the required usages](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#valid-imageview-imageusage)
/// for image views.
InvalidImageUsage,
}
impl error::Error for ImageViewCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ImageViewCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for ImageViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
ImageViewCreationError::AllocError(err) => "allocating memory failed",
ImageViewCreationError::ArrayLayersOutOfRange => "array layers are out of range",
ImageViewCreationError::MipMapLevelsOutOfRange => "mipmap levels are out of range",
ImageViewCreationError::IncompatibleType =>
"image view type is not compatible with image, array layers or mipmap levels",
ImageViewCreationError::InvalidImageUsage =>
"the usage of the image is not compatible with image views",
}
)
}
}
impl From<OomError> for ImageViewCreationError {
#[inline]
fn from(err: OomError) -> ImageViewCreationError {
ImageViewCreationError::AllocError(DeviceMemoryAllocError::OomError(err))
}
}
/// A low-level wrapper around a `vkImageView`.
pub struct UnsafeImageView {
view: vk::ImageView,
device: Arc<Device>,
}
impl UnsafeImageView {
/// Creates a new view from an image.
///
/// # Safety
/// - The returned `UnsafeImageView` must not outlive `image`.
/// - `image` must have a usage that is compatible with image views.
/// - `ty` must be compatible with the dimensions and flags of the image.
/// - `mipmap_levels` must not be empty, must be within the range of levels of the image, and be compatible with the requested `ty`.
/// - `array_layers` must not be empty, must be within the range of layers of the image, and be compatible with the requested `ty`.
///
/// # Panics
/// Panics if the image is a YcbCr image, since the Vulkano API is not yet flexible enough to
    /// specify the aspect of the image.
pub unsafe fn new(
image: &UnsafeImage,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<UnsafeImageView, OomError> {
let vk = image.device().pointers();
debug_assert!(mipmap_levels.end > mipmap_levels.start);
debug_assert!(mipmap_levels.end <= image.mipmap_levels());
debug_assert!(array_layers.end > array_layers.start);
debug_assert!(array_layers.end <= image.dimensions().array_layers());
let aspect_mask = match image.format().ty() {
FormatTy::Float | FormatTy::Uint | FormatTy::Sint | FormatTy::Compressed => {
vk::IMAGE_ASPECT_COLOR_BIT
}
FormatTy::Depth => vk::IMAGE_ASPECT_DEPTH_BIT,
FormatTy::Stencil => vk::IMAGE_ASPECT_STENCIL_BIT,
FormatTy::DepthStencil => vk::IMAGE_ASPECT_DEPTH_BIT | vk::IMAGE_ASPECT_STENCIL_BIT,
// Not yet supported --> would require changes to ImmutableImage API :-)
FormatTy::Ycbcr => unimplemented!(),
};
let view = {
let infos = vk::ImageViewCreateInfo {
sType: vk::STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
image: image.internal_object(),
viewType: ty.into(),
format: image.format() as u32,
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
levelCount: mipmap_levels.end - mipmap_levels.start,
baseArrayLayer: array_layers.start,
layerCount: array_layers.end - array_layers.start,
},
};
let mut output = MaybeUninit::uninit();
check_errors(vk.CreateImageView(
image.device().internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
Ok(UnsafeImageView {
view,
device: image.device().clone(),
})
}
}
unsafe impl VulkanObject for UnsafeImageView {
type Object = vk::ImageView;
const TYPE: vk::ObjectType = vk::OBJECT_TYPE_IMAGE_VIEW;
#[inline]
fn internal_object(&self) -> vk::ImageView {
self.view
}
}
impl fmt::Debug for UnsafeImageView {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan image view {:?}>", self.view)
}
}
impl Drop for UnsafeImageView {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyImageView(self.device.internal_object(), self.view, ptr::null());
}
}
}
impl PartialEq for UnsafeImageView {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.view == other.view && self.device == other.device
}
}
impl Eq for UnsafeImageView {}
impl Hash for UnsafeImageView {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.view.hash(state);
self.device.hash(state);
}
}
/// The geometry type of an image view.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ImageViewType {
Dim1d,
Dim1dArray,
Dim2d,
Dim2dArray,
Dim3d,
Cubemap,
CubemapArray,
}
impl From<ImageViewType> for vk::ImageViewType {
fn from(image_view_type: ImageViewType) -> Self {
match image_view_type {
ImageViewType::Dim1d => vk::IMAGE_VIEW_TYPE_1D,
ImageViewType::Dim1dArray => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
ImageViewType::Dim2d => vk::IMAGE_VIEW_TYPE_2D,
ImageViewType::Dim2dArray => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
ImageViewType::Dim3d => vk::IMAGE_VIEW_TYPE_3D,
ImageViewType::Cubemap => vk::IMAGE_VIEW_TYPE_CUBE,
ImageViewType::CubemapArray => vk::IMAGE_VIEW_TYPE_CUBE_ARRAY,
}
}
}
/// Trait for types that represent an image view which the GPU can access.
pub unsafe trait ImageViewAbstract {
/// Returns the wrapped image that this image view was created from.
fn image(&self) -> &dyn ImageAccess;
/// Returns the inner unsafe image view object used by this image view.
fn inner(&self) -> &UnsafeImageView;
/// Returns the range of array layers of the wrapped image that this view exposes.
fn array_layers(&self) -> Range<u32>;
/// Returns the format of this view. This can be different from the parent's format.
fn format(&self) -> Format;
/// Returns true if the view doesn't use components swizzling.
///
/// Must be true when the view is used as a framebuffer attachment or TODO: I don't remember
/// the other thing.
fn identity_swizzle(&self) -> bool;
/// Returns the [`ImageViewType`] of this image view.
fn ty(&self) -> ImageViewType;
/// Returns true if the given sampler can be used with this image view.
///
/// This method should check whether the sampler's configuration can be used with the format
/// of the view.
// TODO: return a Result and propagate it when binding to a descriptor set
fn
|
can_be_sampled
|
identifier_name
|
|
view.rs
|
Type::Cubemap, ImageDimensions::Dim2d { .. }, 6, _)
if flags.cube_compatible =>
{
()
}
(ImageViewType::CubemapArray, ImageDimensions::Dim2d { .. }, n, _)
if flags.cube_compatible && n % 6 == 0 =>
{
()
}
(ImageViewType::Dim3d, ImageDimensions::Dim3d { .. }, 1, _) => (),
(ImageViewType::Dim2d, ImageDimensions::Dim3d { .. }, 1, 1)
if flags.array_2d_compatible =>
{
()
}
(ImageViewType::Dim2dArray, ImageDimensions::Dim3d { .. }, _, 1)
if flags.array_2d_compatible =>
{
()
}
_ => return Err(ImageViewCreationError::IncompatibleType),
}
let inner =
unsafe { UnsafeImageView::new(image_inner, ty, mipmap_levels, array_layers.clone())? };
Ok(Arc::new(ImageView {
image,
inner,
array_layers,
format,
identity_swizzle: true, // FIXME:
ty,
}))
}
/// Returns the wrapped image that this image view was created from.
pub fn image(&self) -> &I {
&self.image
}
}
/// Error that can happen when creating an image view.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImageViewCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocError),
/// The specified range of array layers was out of range for the image.
ArrayLayersOutOfRange,
/// The specified range of mipmap levels was out of range for the image.
MipMapLevelsOutOfRange,
/// The requested [`ImageViewType`] was not compatible with the image, or with the specified ranges of array layers and mipmap levels.
IncompatibleType,
/// The image was not created with
/// [one of the required usages](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#valid-imageview-imageusage)
/// for image views.
InvalidImageUsage,
}
impl error::Error for ImageViewCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ImageViewCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for ImageViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
ImageViewCreationError::AllocError(err) => "allocating memory failed",
ImageViewCreationError::ArrayLayersOutOfRange => "array layers are out of range",
ImageViewCreationError::MipMapLevelsOutOfRange => "mipmap levels are out of range",
ImageViewCreationError::IncompatibleType =>
"image view type is not compatible with image, array layers or mipmap levels",
ImageViewCreationError::InvalidImageUsage =>
"the usage of the image is not compatible with image views",
}
)
}
}
impl From<OomError> for ImageViewCreationError {
#[inline]
fn from(err: OomError) -> ImageViewCreationError {
ImageViewCreationError::AllocError(DeviceMemoryAllocError::OomError(err))
}
}
/// A low-level wrapper around a `vkImageView`.
pub struct UnsafeImageView {
view: vk::ImageView,
device: Arc<Device>,
}
impl UnsafeImageView {
/// Creates a new view from an image.
///
/// # Safety
/// - The returned `UnsafeImageView` must not outlive `image`.
/// - `image` must have a usage that is compatible with image views.
/// - `ty` must be compatible with the dimensions and flags of the image.
/// - `mipmap_levels` must not be empty, must be within the range of levels of the image, and be compatible with the requested `ty`.
/// - `array_layers` must not be empty, must be within the range of layers of the image, and be compatible with the requested `ty`.
///
/// # Panics
/// Panics if the image is a YcbCr image, since the Vulkano API is not yet flexible enough to
    /// specify the aspect of the image.
pub unsafe fn new(
image: &UnsafeImage,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<UnsafeImageView, OomError> {
let vk = image.device().pointers();
debug_assert!(mipmap_levels.end > mipmap_levels.start);
debug_assert!(mipmap_levels.end <= image.mipmap_levels());
debug_assert!(array_layers.end > array_layers.start);
debug_assert!(array_layers.end <= image.dimensions().array_layers());
let aspect_mask = match image.format().ty() {
FormatTy::Float | FormatTy::Uint | FormatTy::Sint | FormatTy::Compressed => {
vk::IMAGE_ASPECT_COLOR_BIT
}
FormatTy::Depth => vk::IMAGE_ASPECT_DEPTH_BIT,
FormatTy::Stencil => vk::IMAGE_ASPECT_STENCIL_BIT,
FormatTy::DepthStencil => vk::IMAGE_ASPECT_DEPTH_BIT | vk::IMAGE_ASPECT_STENCIL_BIT,
// Not yet supported --> would require changes to ImmutableImage API :-)
FormatTy::Ycbcr => unimplemented!(),
};
let view = {
let infos = vk::ImageViewCreateInfo {
sType: vk::STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
image: image.internal_object(),
viewType: ty.into(),
format: image.format() as u32,
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
levelCount: mipmap_levels.end - mipmap_levels.start,
baseArrayLayer: array_layers.start,
layerCount: array_layers.end - array_layers.start,
},
};
let mut output = MaybeUninit::uninit();
check_errors(vk.CreateImageView(
image.device().internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
Ok(UnsafeImageView {
view,
device: image.device().clone(),
})
}
}
unsafe impl VulkanObject for UnsafeImageView {
type Object = vk::ImageView;
const TYPE: vk::ObjectType = vk::OBJECT_TYPE_IMAGE_VIEW;
#[inline]
fn internal_object(&self) -> vk::ImageView {
self.view
}
}
impl fmt::Debug for UnsafeImageView {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan image view {:?}>", self.view)
}
}
impl Drop for UnsafeImageView {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyImageView(self.device.internal_object(), self.view, ptr::null());
}
}
}
impl PartialEq for UnsafeImageView {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.view == other.view && self.device == other.device
}
}
impl Eq for UnsafeImageView {}
impl Hash for UnsafeImageView {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.view.hash(state);
self.device.hash(state);
}
}
/// The geometry type of an image view.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ImageViewType {
Dim1d,
Dim1dArray,
Dim2d,
Dim2dArray,
Dim3d,
Cubemap,
CubemapArray,
}
impl From<ImageViewType> for vk::ImageViewType {
fn from(image_view_type: ImageViewType) -> Self {
match image_view_type {
ImageViewType::Dim1d => vk::IMAGE_VIEW_TYPE_1D,
ImageViewType::Dim1dArray => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
ImageViewType::Dim2d => vk::IMAGE_VIEW_TYPE_2D,
ImageViewType::Dim2dArray => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
ImageViewType::Dim3d => vk::IMAGE_VIEW_TYPE_3D,
ImageViewType::Cubemap => vk::IMAGE_VIEW_TYPE_CUBE,
ImageViewType::CubemapArray => vk::IMAGE_VIEW_TYPE_CUBE_ARRAY,
}
}
}
/// Trait for types that represent an image view which the GPU can access.
pub unsafe trait ImageViewAbstract {
/// Returns the wrapped image that this image view was created from.
fn image(&self) -> &dyn ImageAccess;
/// Returns the inner unsafe image view object used by this image view.
fn inner(&self) -> &UnsafeImageView;
/// Returns the range of array layers of the wrapped image that this view exposes.
|
fn array_layers(&self) -> Range<u32>;
/// Returns the format of this view. This can be different from the parent's format.
fn format(&self) -> Format;
|
random_line_split
|
|
view.rs
|
of the required usages](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#valid-imageview-imageusage)
/// for image views.
InvalidImageUsage,
}
impl error::Error for ImageViewCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ImageViewCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for ImageViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
ImageViewCreationError::AllocError(err) => "allocating memory failed",
ImageViewCreationError::ArrayLayersOutOfRange => "array layers are out of range",
ImageViewCreationError::MipMapLevelsOutOfRange => "mipmap levels are out of range",
ImageViewCreationError::IncompatibleType =>
"image view type is not compatible with image, array layers or mipmap levels",
ImageViewCreationError::InvalidImageUsage =>
"the usage of the image is not compatible with image views",
}
)
}
}
impl From<OomError> for ImageViewCreationError {
#[inline]
fn from(err: OomError) -> ImageViewCreationError {
ImageViewCreationError::AllocError(DeviceMemoryAllocError::OomError(err))
}
}
/// A low-level wrapper around a `vkImageView`.
pub struct UnsafeImageView {
view: vk::ImageView,
device: Arc<Device>,
}
impl UnsafeImageView {
/// Creates a new view from an image.
///
/// # Safety
/// - The returned `UnsafeImageView` must not outlive `image`.
/// - `image` must have a usage that is compatible with image views.
/// - `ty` must be compatible with the dimensions and flags of the image.
/// - `mipmap_levels` must not be empty, must be within the range of levels of the image, and be compatible with the requested `ty`.
/// - `array_layers` must not be empty, must be within the range of layers of the image, and be compatible with the requested `ty`.
///
/// # Panics
/// Panics if the image is a YcbCr image, since the Vulkano API is not yet flexible enough to
    /// specify the aspect of the image.
pub unsafe fn new(
image: &UnsafeImage,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<UnsafeImageView, OomError> {
let vk = image.device().pointers();
debug_assert!(mipmap_levels.end > mipmap_levels.start);
debug_assert!(mipmap_levels.end <= image.mipmap_levels());
debug_assert!(array_layers.end > array_layers.start);
debug_assert!(array_layers.end <= image.dimensions().array_layers());
let aspect_mask = match image.format().ty() {
FormatTy::Float | FormatTy::Uint | FormatTy::Sint | FormatTy::Compressed => {
vk::IMAGE_ASPECT_COLOR_BIT
}
FormatTy::Depth => vk::IMAGE_ASPECT_DEPTH_BIT,
FormatTy::Stencil => vk::IMAGE_ASPECT_STENCIL_BIT,
FormatTy::DepthStencil => vk::IMAGE_ASPECT_DEPTH_BIT | vk::IMAGE_ASPECT_STENCIL_BIT,
// Not yet supported --> would require changes to ImmutableImage API :-)
FormatTy::Ycbcr => unimplemented!(),
};
let view = {
let infos = vk::ImageViewCreateInfo {
sType: vk::STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
image: image.internal_object(),
viewType: ty.into(),
format: image.format() as u32,
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
levelCount: mipmap_levels.end - mipmap_levels.start,
baseArrayLayer: array_layers.start,
layerCount: array_layers.end - array_layers.start,
},
};
let mut output = MaybeUninit::uninit();
check_errors(vk.CreateImageView(
image.device().internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
Ok(UnsafeImageView {
view,
device: image.device().clone(),
})
}
}
unsafe impl VulkanObject for UnsafeImageView {
type Object = vk::ImageView;
const TYPE: vk::ObjectType = vk::OBJECT_TYPE_IMAGE_VIEW;
#[inline]
fn internal_object(&self) -> vk::ImageView {
self.view
}
}
impl fmt::Debug for UnsafeImageView {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan image view {:?}>", self.view)
}
}
impl Drop for UnsafeImageView {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyImageView(self.device.internal_object(), self.view, ptr::null());
}
}
}
impl PartialEq for UnsafeImageView {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.view == other.view && self.device == other.device
}
}
impl Eq for UnsafeImageView {}
impl Hash for UnsafeImageView {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.view.hash(state);
self.device.hash(state);
}
}
/// The geometry type of an image view.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ImageViewType {
Dim1d,
Dim1dArray,
Dim2d,
Dim2dArray,
Dim3d,
Cubemap,
CubemapArray,
}
impl From<ImageViewType> for vk::ImageViewType {
fn from(image_view_type: ImageViewType) -> Self {
match image_view_type {
ImageViewType::Dim1d => vk::IMAGE_VIEW_TYPE_1D,
ImageViewType::Dim1dArray => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
ImageViewType::Dim2d => vk::IMAGE_VIEW_TYPE_2D,
ImageViewType::Dim2dArray => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
ImageViewType::Dim3d => vk::IMAGE_VIEW_TYPE_3D,
ImageViewType::Cubemap => vk::IMAGE_VIEW_TYPE_CUBE,
ImageViewType::CubemapArray => vk::IMAGE_VIEW_TYPE_CUBE_ARRAY,
}
}
}
/// Trait for types that represent an image view which the GPU can access.
pub unsafe trait ImageViewAbstract {
/// Returns the wrapped image that this image view was created from.
fn image(&self) -> &dyn ImageAccess;
/// Returns the inner unsafe image view object used by this image view.
fn inner(&self) -> &UnsafeImageView;
/// Returns the range of array layers of the wrapped image that this view exposes.
fn array_layers(&self) -> Range<u32>;
/// Returns the format of this view. This can be different from the parent's format.
fn format(&self) -> Format;
/// Returns true if the view doesn't use components swizzling.
///
/// Must be true when the view is used as a framebuffer attachment or TODO: I don't remember
/// the other thing.
fn identity_swizzle(&self) -> bool;
/// Returns the [`ImageViewType`] of this image view.
fn ty(&self) -> ImageViewType;
/// Returns true if the given sampler can be used with this image view.
///
/// This method should check whether the sampler's configuration can be used with the format
/// of the view.
// TODO: return a Result and propagate it when binding to a descriptor set
fn can_be_sampled(&self, _sampler: &Sampler) -> bool {
true /* FIXME */
}
}
unsafe impl<I> ImageViewAbstract for ImageView<I>
where
I: ImageAccess,
{
#[inline]
fn image(&self) -> &dyn ImageAccess {
&self.image
}
#[inline]
fn inner(&self) -> &UnsafeImageView {
&self.inner
}
#[inline]
fn array_layers(&self) -> Range<u32> {
self.array_layers.clone()
}
#[inline]
fn format(&self) -> Format {
// TODO: remove this default impl
self.format
}
#[inline]
fn identity_swizzle(&self) -> bool {
self.identity_swizzle
}
#[inline]
fn ty(&self) -> ImageViewType {
self.ty
}
}
unsafe impl<T> ImageViewAbstract for T
where
T: SafeDeref,
T::Target: ImageViewAbstract,
{
#[inline]
fn image(&self) -> &dyn ImageAccess {
(**self).image()
}
#[inline]
fn inner(&self) -> &UnsafeImageView {
(**self).inner()
}
#[inline]
fn array_layers(&self) -> Range<u32>
|
{
(**self).array_layers()
}
|
identifier_body
|
|
documentDeltaConnection.ts
|
extends EventEmitterWithErrorHandling<IDocumentDeltaConnectionEvents>
implements IDocumentDeltaConnection, IDisposable
{
static readonly eventsToForward = ["nack", "op", "signal", "pong"];
// WARNING: These are critical events that we can't miss, so registration for them has to be in place at all times!
// Including before handshake is over, and after that (but before DeltaManager had a chance to put its own handlers)
static readonly eventsAlwaysForwarded = ["disconnect", "error"];
/**
     * Last known sequence number of the ordering service at the time of connection.
     * It may lag behind the actual last sequence number (quite a bit, if the container is very active),
     * but it is the best information the client has to figure out how far behind it is, at least
     * for "read" connections. "write" connections may use their own "join" op to obtain similar
     * information, which is likely to be more up-to-date.
*/
public checkpointSequenceNumber: number | undefined;
// Listen for ops sent before we receive a response to connect_document
protected readonly queuedMessages: ISequencedDocumentMessage[] = [];
protected readonly queuedSignals: ISignalMessage[] = [];
/**
* A flag to indicate whether we have our handler attached. If it's attached, we're queueing incoming ops
* to later be retrieved via initialMessages.
*/
private earlyOpHandlerAttached: boolean = false;
private socketConnectionTimeout: ReturnType<typeof setTimeout> | undefined;
private _details: IConnected | undefined;
private trackLatencyTimeout: number | undefined;
// Listeners only needed while the connection is in progress
private readonly connectionListeners: Map<string, (...args: any[]) => void> = new Map();
// Listeners used throughout the lifetime of the DocumentDeltaConnection
private readonly trackedListeners: Map<string, (...args: any[]) => void> = new Map();
protected get hasDetails(): boolean {
return !!this._details;
}
public get disposed() {
assert(
this._disposed || this.socket.connected,
0x244 /* "Socket is closed, but connection is not!" */,
);
return this._disposed;
}
/**
* Flag to indicate whether the DocumentDeltaConnection is expected to still be capable of sending messages.
* After disconnection, we flip this to prevent any stale messages from being emitted.
*/
protected _disposed: boolean = false;
private readonly mc: MonitoringContext;
/**
* @deprecated Implementors should manage their own logger or monitoring context
*/
protected get logger(): ITelemetryLoggerExt {
return this.mc.logger;
}
public get details(): IConnected {
if (!this._details) {
throw new Error("Internal error: calling method before _details is initialized!");
}
return this._details;
}
/**
* @param socket - websocket to be used
* @param documentId - ID of the document
* @param logger - for reporting telemetry events
* @param enableLongPollingDowngrades - allow connection to be downgraded to long-polling on websocket failure
*/
protected constructor(
protected readonly socket: Socket,
public documentId: string,
logger: ITelemetryLoggerExt,
private readonly enableLongPollingDowngrades: boolean = false,
protected readonly connectionId?: string,
) {
super((name, error) => {
this.addPropsToError(error);
logger.sendErrorEvent(
{
eventName: "DeltaConnection:EventException",
name,
},
error,
);
});
this.mc = createChildMonitoringContext({ logger, namespace: "DeltaConnection" });
this.on("newListener", (event, _listener) => {
assert(!this.disposed, 0x20a /* "register for event on disposed object" */);
// Some events are already forwarded - see this.addTrackedListener() calls in initialize().
if (DocumentDeltaConnection.eventsAlwaysForwarded.includes(event)) {
assert(this.trackedListeners.has(event), 0x245 /* "tracked listener" */);
return;
}
if (!DocumentDeltaConnection.eventsToForward.includes(event)) {
throw new Error(`DocumentDeltaConnection: Registering for unknown event: ${event}`);
}
// Whenever listener is added, we should subscribe on same event on socket, so these two things
// should be in sync. This currently assumes that nobody unregisters and registers back listeners,
// and that there are no "internal" listeners installed (like "error" case we skip above)
// Better flow might be to always unconditionally register all handlers on successful connection,
// though some logic (naming assert in initialMessages getter) might need to be adjusted (it becomes noop)
assert(
(this.listeners(event).length !== 0) === this.trackedListeners.has(event),
0x20b /* "mismatch" */,
);
if (!this.trackedListeners.has(event)) {
if (event === "pong") {
// Empty callback for tracking purposes in this class
this.trackedListeners.set("pong", () => {});
const sendPingLoop = () => {
const start = Date.now();
this.socket.volatile?.emit("ping", () => {
this.emit("pong", Date.now() - start);
// Schedule another ping event in 1 minute
this.trackLatencyTimeout = setTimeout(() => {
sendPingLoop();
}, 1000 * 60);
});
};
sendPingLoop();
} else {
this.addTrackedListener(event, (...args: any[]) => {
this.emit(event, ...args);
});
}
}
});
}
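/**
 * A minimal standalone sketch of the latency-probe pattern used for "pong" above, assuming only a
 * socket.io-style client whose emit(event, ack) invokes ack when the server acknowledges. The names
 * measureLatency, report and intervalMs are illustrative; they are not part of this class or of the
 * driver API.
 *
 *   function measureLatency(
 *       socket: { emit(event: string, ack: () => void): void },
 *       report: (latencyMs: number) => void,
 *       intervalMs: number = 60_000,
 *   ): () => void {
 *       let timer: ReturnType<typeof setTimeout> | undefined;
 *       const ping = () => {
 *           const start = Date.now();
 *           socket.emit("ping", () => {
 *               report(Date.now() - start);           // round-trip time in milliseconds
 *               timer = setTimeout(ping, intervalMs); // schedule the next probe
 *           });
 *       };
 *       ping();
 *       return () => {                                // cleanup hook for disconnect
 *           if (timer !== undefined) {
 *               clearTimeout(timer);
 *           }
 *       };
 *   }
 */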
/**
* Get the ID of the client who is sending the message
*
* @returns the client ID
*/
public get clientId(): string {
return this.details.clientId;
}
/**
* Get the mode of the client
*
* @returns the client mode
*/
public get mode(): ConnectionMode {
return this.details.mode;
}
/**
* Get the claims of the client who is sending the message
*
* @returns client claims
*/
public get claims(): ITokenClaims {
return this.details.claims;
}
/**
* Get whether or not this is an existing document
*
* @returns true if the document exists
*/
public get existing(): boolean {
return this.details.existing;
}
/**
* Get the maximum size of a message before chunking is required
*
* @returns the maximum size of a message before chunking is required
*/
public get maxMessageSize(): number {
return this.details.serviceConfiguration.maxMessageSize;
}
/**
* Semver of protocol being used with the service
*/
public get version(): string {
return this.details.version;
}
/**
* Configuration details provided by the service
*/
public get serviceConfiguration(): IClientConfiguration {
return this.details.serviceConfiguration;
}
private checkNotDisposed() {
assert(!this.disposed, 0x20c /* "connection disposed" */);
}
/**
* Get messages sent during the connection
*
* @returns messages sent during the connection
*/
public get initialMessages(): ISequencedDocumentMessage[] {
this.checkNotDisposed();
// If we call this when the earlyOpHandler is not attached, then the queuedMessages may not include the
// latest ops. This could possibly indicate that initialMessages was called twice.
assert(this.earlyOpHandlerAttached, 0x08e /* "Potentially missed initial messages" */);
// We will lose ops and perf will tank as we need to go to storage to become current!
assert(this.listeners("op").length !== 0, 0x08f /* "No op handler is setup!" */);
this.removeEarlyOpHandler();
if (this.queuedMessages.length > 0) {
// Some messages were queued.
// add them to the list of initialMessages to be processed
this.details.initialMessages.push(...this.queuedMessages);
this.details.initialMessages.sort((a, b) => a.sequenceNumber - b.sequenceNumber);
this.queuedMessages.length = 0;
}
return this.details.initialMessages;
}
/**
* Get signals sent during the connection
*
* @returns signals sent during the connection
*/
public get initialSignals(): ISignalMessage[] {
this.checkNotDisposed();
assert(this.listeners("signal").length !== 0, 0x090 /* "No signal handler is setup!" */);
this.removeEarlySignalHandler();
if (this.queuedSignals.length > 0) {
// Some signals were queued.
// add them to the list of initialSignals to be processed
this.details.initialSignals.push(...this.queuedSignals);
this.queuedSignals.length = 0;
}
return this.details.initialSignals;
}
/**
* Get initial client list
*
* @returns initial client list sent during the connection
*/
public get initialClients(): ISignalClient[] {
this.checkNotDisposed();
return this.details.initialClients;
}
protected emitMessages(type: string, messages: IDocumentMessage[][]) {
// Although the implementation here disconnects the socket and does not reuse it,
|
*/
export class DocumentDeltaConnection
|
random_line_split
|
|
documentDeltaConnection.ts
|
() {
assert(
this._disposed || this.socket.connected,
0x244 /* "Socket is closed, but connection is not!" */,
);
return this._disposed;
}
/**
* Flag to indicate whether the DocumentDeltaConnection is expected to still be capable of sending messages.
* After disconnection, we flip this to prevent any stale messages from being emitted.
*/
protected _disposed: boolean = false;
private readonly mc: MonitoringContext;
/**
* @deprecated Implementors should manage their own logger or monitoring context
*/
protected get logger(): ITelemetryLoggerExt {
return this.mc.logger;
}
public get details(): IConnected {
if (!this._details) {
throw new Error("Internal error: calling method before _details is initialized!");
}
return this._details;
}
/**
* @param socket - websocket to be used
* @param documentId - ID of the document
* @param logger - for reporting telemetry events
* @param enableLongPollingDowngrades - allow connection to be downgraded to long-polling on websocket failure
*/
protected constructor(
protected readonly socket: Socket,
public documentId: string,
logger: ITelemetryLoggerExt,
private readonly enableLongPollingDowngrades: boolean = false,
protected readonly connectionId?: string,
) {
super((name, error) => {
this.addPropsToError(error);
logger.sendErrorEvent(
{
eventName: "DeltaConnection:EventException",
name,
},
error,
);
});
this.mc = createChildMonitoringContext({ logger, namespace: "DeltaConnection" });
this.on("newListener", (event, _listener) => {
assert(!this.disposed, 0x20a /* "register for event on disposed object" */);
// Some events are already forwarded - see this.addTrackedListener() calls in initialize().
if (DocumentDeltaConnection.eventsAlwaysForwarded.includes(event)) {
assert(this.trackedListeners.has(event), 0x245 /* "tracked listener" */);
return;
}
if (!DocumentDeltaConnection.eventsToForward.includes(event)) {
throw new Error(`DocumentDeltaConnection: Registering for unknown event: ${event}`);
}
// Whenever listener is added, we should subscribe on same event on socket, so these two things
// should be in sync. This currently assumes that nobody unregisters and registers back listeners,
// and that there are no "internal" listeners installed (like "error" case we skip above)
// Better flow might be to always unconditionally register all handlers on successful connection,
// though some logic (naming assert in initialMessages getter) might need to be adjusted (it becomes noop)
assert(
(this.listeners(event).length !== 0) === this.trackedListeners.has(event),
0x20b /* "mismatch" */,
);
if (!this.trackedListeners.has(event)) {
if (event === "pong") {
// Empty callback for tracking purposes in this class
this.trackedListeners.set("pong", () => {});
const sendPingLoop = () => {
const start = Date.now();
this.socket.volatile?.emit("ping", () => {
this.emit("pong", Date.now() - start);
// Schedule another ping event in 1 minute
this.trackLatencyTimeout = setTimeout(() => {
sendPingLoop();
}, 1000 * 60);
});
};
sendPingLoop();
} else {
this.addTrackedListener(event, (...args: any[]) => {
this.emit(event, ...args);
});
}
}
});
}
/**
* Get the ID of the client who is sending the message
*
* @returns the client ID
*/
public get clientId(): string {
return this.details.clientId;
}
/**
* Get the mode of the client
*
* @returns the client mode
*/
public get mode(): ConnectionMode {
return this.details.mode;
}
/**
* Get the claims of the client who is sending the message
*
* @returns client claims
*/
public get claims(): ITokenClaims {
return this.details.claims;
}
/**
* Get whether or not this is an existing document
*
* @returns true if the document exists
*/
public get existing(): boolean {
return this.details.existing;
}
/**
* Get the maximum size of a message before chunking is required
*
* @returns the maximum size of a message before chunking is required
*/
public get maxMessageSize(): number {
return this.details.serviceConfiguration.maxMessageSize;
}
/**
* Semver of protocol being used with the service
*/
public get version(): string {
return this.details.version;
}
/**
* Configuration details provided by the service
*/
public get serviceConfiguration(): IClientConfiguration {
return this.details.serviceConfiguration;
}
private checkNotDisposed() {
assert(!this.disposed, 0x20c /* "connection disposed" */);
}
/**
* Get messages sent during the connection
*
* @returns messages sent during the connection
*/
public get initialMessages(): ISequencedDocumentMessage[] {
this.checkNotDisposed();
// If we call this when the earlyOpHandler is not attached, then the queuedMessages may not include the
// latest ops. This could possibly indicate that initialMessages was called twice.
assert(this.earlyOpHandlerAttached, 0x08e /* "Potentially missed initial messages" */);
// We will lose ops and perf will tank as we need to go to storage to become current!
assert(this.listeners("op").length !== 0, 0x08f /* "No op handler is setup!" */);
this.removeEarlyOpHandler();
if (this.queuedMessages.length > 0) {
// Some messages were queued.
// add them to the list of initialMessages to be processed
this.details.initialMessages.push(...this.queuedMessages);
this.details.initialMessages.sort((a, b) => a.sequenceNumber - b.sequenceNumber);
this.queuedMessages.length = 0;
}
return this.details.initialMessages;
}
/**
* Get signals sent during the connection
*
* @returns signals sent during the connection
*/
public get initialSignals(): ISignalMessage[] {
this.checkNotDisposed();
assert(this.listeners("signal").length !== 0, 0x090 /* "No signal handler is setup!" */);
this.removeEarlySignalHandler();
if (this.queuedSignals.length > 0) {
// Some signals were queued.
// add them to the list of initialSignals to be processed
this.details.initialSignals.push(...this.queuedSignals);
this.queuedSignals.length = 0;
}
return this.details.initialSignals;
}
/**
* Get initial client list
*
* @returns initial client list sent during the connection
*/
public get initialClients(): ISignalClient[] {
this.checkNotDisposed();
return this.details.initialClients;
}
protected emitMessages(type: string, messages: IDocumentMessage[][]) {
// Although the implementation here disconnects the socket and does not reuse it, other subclasses
// (e.g. OdspDocumentDeltaConnection) may reuse the socket. In these cases, we need to avoid emitting
// on the still-live socket.
if (!this.disposed) {
this.socket.emit(type, this.clientId, messages);
}
}
/**
* Submits a new delta operation to the server
*
* @param message - delta operation to submit
*/
public submit(messages: IDocumentMessage[]): void {
this.checkNotDisposed();
this.emitMessages("submitOp", [messages]);
}
/**
* Submits a new signal to the server
*
* @param message - signal to submit
*/
public submitSignal(message: IDocumentMessage): void {
this.checkNotDisposed();
this.emitMessages("submitSignal", [[message]]);
}
/**
* Disconnect from the websocket and close the websocket too.
*/
private closeSocket(error: IAnyDriverError) {
if (this._disposed) {
// This would be a rare situation due to complexity around socket emitting events.
return;
}
this.closeSocketCore(error);
}
protected closeSocketCore(error: IAnyDriverError) {
this.disconnect(error);
}
/**
* Disconnect from the websocket, permanently disable this DocumentDeltaConnection, and close the socket.
* However, OdspDocumentDeltaConnection differs in dispose in that it does not close the socket there.
* There is no multiplexing here, so we need to close the socket here.
*/
public dispose() {
this.logger.sendTelemetryEvent({
eventName: "ClientClosingDeltaConnection",
driverVersion,
details: JSON.stringify({
...this.getConnectionDetailsProps(),
}),
});
this.disconnect(
createGenericNetworkError(
// pre-0.58 error message: clientClosingConnection
"Client closing delta connection",
{ canRetry: true },
|
disposed
|
identifier_name
|
|
documentDeltaConnection.ts
|
* @param reason - reason for disconnect
*/
protected disconnectCore() {
this.socket.disconnect();
}
protected async initialize(connectMessage: IConnect, timeout: number) {
this.socket.on("op", this.earlyOpHandler);
this.socket.on("signal", this.earlySignalHandler);
this.earlyOpHandlerAttached = true;
// Socket.io's reconnect_attempt event is unreliable, so we track connect_error count instead.
let internalSocketConnectionFailureCount: number = 0;
const isInternalSocketReconnectionEnabled = (): boolean => this.socket.io.reconnection();
const getMaxInternalSocketReconnectionAttempts = (): number =>
isInternalSocketReconnectionEnabled() ? this.socket.io.reconnectionAttempts() : 0;
const getMaxAllowedInternalSocketConnectionFailures = (): number =>
getMaxInternalSocketReconnectionAttempts() + 1;
this._details = await new Promise<IConnected>((resolve, reject) => {
const failAndCloseSocket = (err: IAnyDriverError) => {
try {
this.closeSocket(err);
} catch (failError) {
const normalizedError = this.addPropsToError(failError);
this.logger.sendErrorEvent({ eventName: "CloseSocketError" }, normalizedError);
}
reject(err);
};
const failConnection = (err: IAnyDriverError) => {
try {
this.disconnect(err);
} catch (failError) {
const normalizedError = this.addPropsToError(failError);
this.logger.sendErrorEvent(
{ eventName: "FailConnectionError" },
normalizedError,
);
}
reject(err);
};
// Immediately set the connection timeout.
// Give extra 2 seconds for handshake on top of socket connection timeout.
this.socketConnectionTimeout = setTimeout(() => {
failConnection(this.createErrorObject("orderingServiceHandshakeTimeout"));
}, timeout + 2000);
// Listen for connection issues
this.addConnectionListener("connect_error", (error) => {
internalSocketConnectionFailureCount++;
let isWebSocketTransportError = false;
try {
const description = error?.description;
const context = error?.context;
if (context && typeof context === "object") {
const statusText = context.statusText?.code;
// Self-Signed Certificate ErrorCode Found in error.context
if (statusText === "DEPTH_ZERO_SELF_SIGNED_CERT") {
failAndCloseSocket(
this.createErrorObject("connect_error", error, false),
);
return;
}
} else if (description && typeof description === "object") {
const errorCode = description.error?.code;
// Self-Signed Certificate ErrorCode Found in error.description
if (errorCode === "DEPTH_ZERO_SELF_SIGNED_CERT") {
failAndCloseSocket(
this.createErrorObject("connect_error", error, false),
);
return;
}
if (error.type === "TransportError") {
isWebSocketTransportError = true;
}
// That's a WebSocket. Clear it as we can't log it.
description.target = undefined;
}
} catch (_e) {}
// Handle socket transport downgrading when not offline.
if (
isWebSocketTransportError &&
this.enableLongPollingDowngrades &&
this.socket.io.opts.transports?.[0] !== "polling"
) {
// Downgrade transports to polling upgrade mechanism.
this.socket.io.opts.transports = ["polling", "websocket"];
// Don't alter reconnection behavior if already enabled.
if (!isInternalSocketReconnectionEnabled()) {
// Allow single reconnection attempt using polling upgrade mechanism.
this.socket.io.reconnection(true);
this.socket.io.reconnectionAttempts(1);
}
}
// Allow built-in socket.io reconnection handling.
if (
isInternalSocketReconnectionEnabled() &&
internalSocketConnectionFailureCount <
getMaxAllowedInternalSocketConnectionFailures()
) {
// Reconnection is enabled and maximum reconnect attempts have not been reached.
return;
}
failAndCloseSocket(this.createErrorObject("connect_error", error));
});
// Listen for timeouts
this.addConnectionListener("connect_timeout", () => {
failAndCloseSocket(this.createErrorObject("connect_timeout"));
});
this.addConnectionListener("connect_document_success", (response: IConnected) => {
// If we sent a nonce and the server supports nonces, check that the nonces match
if (
connectMessage.nonce !== undefined &&
response.nonce !== undefined &&
response.nonce !== connectMessage.nonce
) {
return;
}
const requestedMode = connectMessage.mode;
const actualMode = response.mode;
const writingPermitted = response.claims.scopes.includes(ScopeType.DocWrite);
if (writingPermitted) {
// The only time we expect a mismatch in requested/actual is if we lack write permissions
// In this case we will get "read", even if we requested "write"
if (actualMode !== requestedMode) {
failConnection(
this.createErrorObject(
"connect_document_success",
"Connected in a different mode than was requested",
false,
),
);
return;
}
} else {
if (actualMode === "write") {
failConnection(
this.createErrorObject(
"connect_document_success",
"Connected in write mode without write permissions",
false,
),
);
return;
}
}
this.checkpointSequenceNumber = response.checkpointSequenceNumber;
this.removeConnectionListeners();
resolve(response);
});
// Socket can be disconnected while waiting for Fluid protocol messages
// (connect_document_error / connect_document_success), as well as before DeltaManager
// had a chance to register its handlers.
this.addTrackedListener("disconnect", (reason, details) => {
failAndCloseSocket(
this.createErrorObjectWithProps("disconnect", reason, {
socketErrorType: details?.context?.type,
// https://www.rfc-editor.org/rfc/rfc6455#section-7.4
socketCode: details?.context?.code,
}),
);
});
this.addTrackedListener("error", (error) => {
// This includes "Invalid namespace" error, which we consider critical (reconnecting will not help)
const err = this.createErrorObject("error", error, error !== "Invalid namespace");
// Disconnect socket - required if happened before initial handshake
failAndCloseSocket(err);
});
this.addConnectionListener("connect_document_error", (error) => {
// If we sent a nonce and the server supports nonces, check that the nonces match
if (
connectMessage.nonce !== undefined &&
error.nonce !== undefined &&
error.nonce !== connectMessage.nonce
) {
return;
}
// This is not an socket.io error - it's Fluid protocol error.
// In this case fail connection and indicate that we were unable to create connection
failConnection(this.createErrorObject("connect_document_error", error));
});
this.socket.emit("connect_document", connectMessage);
});
assert(!this.disposed, 0x246 /* "checking consistency of socket & _disposed flags" */);
}
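/**
 * For context on the transport downgrade handled in connect_error above: a minimal sketch of a
 * client that starts on websocket and, after a transport error, retries via long-polling with
 * websocket upgrade. This assumes socket.io-client v4 (io(), transports, reconnection and
 * reconnectionAttempts are its documented options); createDowngradableSocket and url are
 * illustrative names, not part of this driver.
 *
 *   import { io } from "socket.io-client";
 *
 *   function createDowngradableSocket(url: string) {
 *       const socket = io(url, {
 *           transports: ["websocket"], // start with websocket only
 *           reconnection: false,       // let the caller decide when to retry
 *       });
 *       socket.on("connect_error", () => {
 *           // Fall back to polling-then-upgrade and allow a single automatic retry.
 *           socket.io.opts.transports = ["polling", "websocket"];
 *           socket.io.reconnection(true);
 *           socket.io.reconnectionAttempts(1);
 *       });
 *       return socket;
 *   }
 */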
private addPropsToError(errorToBeNormalized: unknown) {
const normalizedError = normalizeError(errorToBeNormalized, {
props: {
details: JSON.stringify({
...this.getConnectionDetailsProps(),
}),
},
});
return normalizedError;
}
protected getConnectionDetailsProps() {
return {
disposed: this._disposed,
socketConnected: this.socket?.connected,
clientId: this._details?.clientId,
connectionId: this.connectionId,
};
}
protected earlyOpHandler = (documentId: string, msgs: ISequencedDocumentMessage[]) => {
this.queuedMessages.push(...msgs);
};
protected earlySignalHandler = (msg: ISignalMessage) => {
this.queuedSignals.push(msg);
};
private removeEarlyOpHandler() {
this.socket.removeListener("op", this.earlyOpHandler);
this.earlyOpHandlerAttached = false;
}
private removeEarlySignalHandler() {
this.socket.removeListener("signal", this.earlySignalHandler);
}
private addConnectionListener(event: string, listener: (...args: any[]) => void) {
assert(
!DocumentDeltaConnection.eventsAlwaysForwarded.includes(event),
0x247 /* "Use addTrackedListener instead" */,
);
assert(
!DocumentDeltaConnection.eventsToForward.includes(event),
0x248 /* "should not subscribe to forwarded events" */,
);
this.socket.on(event, listener);
assert(!this.connectionListeners.has(event), 0x20d /* "double connection listener" */);
this.connectionListeners.set(event, listener);
}
protected addTrackedListener(event: string, listener: (...args: any[]) => void)
|
{
this.socket.on(event, listener);
assert(!this.trackedListeners.has(event), 0x20e /* "double tracked listener" */);
this.trackedListeners.set(event, listener);
}
|
identifier_body
|
|
documentDeltaConnection.ts
|
0x20a /* "register for event on disposed object" */);
// Some events are already forwarded - see this.addTrackedListener() calls in initialize().
if (DocumentDeltaConnection.eventsAlwaysForwarded.includes(event)) {
assert(this.trackedListeners.has(event), 0x245 /* "tracked listener" */);
return;
}
if (!DocumentDeltaConnection.eventsToForward.includes(event)) {
throw new Error(`DocumentDeltaConnection: Registering for unknown event: ${event}`);
}
// Whenever listener is added, we should subscribe on same event on socket, so these two things
// should be in sync. This currently assumes that nobody unregisters and registers back listeners,
// and that there are no "internal" listeners installed (like "error" case we skip above)
// Better flow might be to always unconditionally register all handlers on successful connection,
// though some logic (naming assert in initialMessages getter) might need to be adjusted (it becomes noop)
assert(
(this.listeners(event).length !== 0) === this.trackedListeners.has(event),
0x20b /* "mismatch" */,
);
if (!this.trackedListeners.has(event)) {
if (event === "pong") {
// Empty callback for tracking purposes in this class
this.trackedListeners.set("pong", () => {});
const sendPingLoop = () => {
const start = Date.now();
this.socket.volatile?.emit("ping", () => {
this.emit("pong", Date.now() - start);
// Schedule another ping event in 1 minute
this.trackLatencyTimeout = setTimeout(() => {
sendPingLoop();
}, 1000 * 60);
});
};
sendPingLoop();
} else {
this.addTrackedListener(event, (...args: any[]) => {
this.emit(event, ...args);
});
}
}
});
}
/**
* Get the ID of the client who is sending the message
*
* @returns the client ID
*/
public get clientId(): string {
return this.details.clientId;
}
/**
* Get the mode of the client
*
* @returns the client mode
*/
public get mode(): ConnectionMode {
return this.details.mode;
}
/**
* Get the claims of the client who is sending the message
*
* @returns client claims
*/
public get claims(): ITokenClaims {
return this.details.claims;
}
/**
* Get whether or not this is an existing document
*
* @returns true if the document exists
*/
public get existing(): boolean {
return this.details.existing;
}
/**
* Get the maximum size of a message before chunking is required
*
* @returns the maximum size of a message before chunking is required
*/
public get maxMessageSize(): number {
return this.details.serviceConfiguration.maxMessageSize;
}
/**
* Semver of protocol being used with the service
*/
public get version(): string {
return this.details.version;
}
/**
* Configuration details provided by the service
*/
public get serviceConfiguration(): IClientConfiguration {
return this.details.serviceConfiguration;
}
private checkNotDisposed() {
assert(!this.disposed, 0x20c /* "connection disposed" */);
}
/**
* Get messages sent during the connection
*
* @returns messages sent during the connection
*/
public get initialMessages(): ISequencedDocumentMessage[] {
this.checkNotDisposed();
// If we call this when the earlyOpHandler is not attached, then the queuedMessages may not include the
// latest ops. This could possibly indicate that initialMessages was called twice.
assert(this.earlyOpHandlerAttached, 0x08e /* "Potentially missed initial messages" */);
// We will lose ops and perf will tank as we need to go to storage to become current!
assert(this.listeners("op").length !== 0, 0x08f /* "No op handler is setup!" */);
this.removeEarlyOpHandler();
if (this.queuedMessages.length > 0) {
// Some messages were queued.
// add them to the list of initialMessages to be processed
this.details.initialMessages.push(...this.queuedMessages);
this.details.initialMessages.sort((a, b) => a.sequenceNumber - b.sequenceNumber);
this.queuedMessages.length = 0;
}
return this.details.initialMessages;
}
/**
* Get signals sent during the connection
*
* @returns signals sent during the connection
*/
public get initialSignals(): ISignalMessage[] {
this.checkNotDisposed();
assert(this.listeners("signal").length !== 0, 0x090 /* "No signal handler is setup!" */);
this.removeEarlySignalHandler();
if (this.queuedSignals.length > 0) {
// Some signals were queued.
// add them to the list of initialSignals to be processed
this.details.initialSignals.push(...this.queuedSignals);
this.queuedSignals.length = 0;
}
return this.details.initialSignals;
}
/**
* Get initial client list
*
* @returns initial client list sent during the connection
*/
public get initialClients(): ISignalClient[] {
this.checkNotDisposed();
return this.details.initialClients;
}
protected emitMessages(type: string, messages: IDocumentMessage[][]) {
// Although the implementation here disconnects the socket and does not reuse it, other subclasses
// (e.g. OdspDocumentDeltaConnection) may reuse the socket. In these cases, we need to avoid emitting
// on the still-live socket.
if (!this.disposed) {
this.socket.emit(type, this.clientId, messages);
}
}
/**
* Submits a new delta operation to the server
*
* @param message - delta operation to submit
*/
public submit(messages: IDocumentMessage[]): void {
this.checkNotDisposed();
this.emitMessages("submitOp", [messages]);
}
/**
* Submits a new signal to the server
*
* @param message - signal to submit
*/
public submitSignal(message: IDocumentMessage): void {
this.checkNotDisposed();
this.emitMessages("submitSignal", [[message]]);
}
/**
* Disconnect from the websocket and close the websocket too.
*/
private closeSocket(error: IAnyDriverError) {
if (this._disposed)
|
this.closeSocketCore(error);
}
protected closeSocketCore(error: IAnyDriverError) {
this.disconnect(error);
}
/**
* Disconnect from the websocket, permanently disable this DocumentDeltaConnection, and close the socket.
* However, OdspDocumentDeltaConnection differs in dispose in that it does not close the socket there.
* There is no multiplexing here, so we need to close the socket here.
*/
public dispose() {
this.logger.sendTelemetryEvent({
eventName: "ClientClosingDeltaConnection",
driverVersion,
details: JSON.stringify({
...this.getConnectionDetailsProps(),
}),
});
this.disconnect(
createGenericNetworkError(
// pre-0.58 error message: clientClosingConnection
"Client closing delta connection",
{ canRetry: true },
{ driverVersion },
),
);
}
protected disconnect(err: IAnyDriverError) {
// Can't check this.disposed here, as we get here on socket closure,
// so _disposed & socket.connected might be not in sync while processing
// "dispose" event.
if (this._disposed) {
return;
}
if (this.trackLatencyTimeout !== undefined) {
clearTimeout(this.trackLatencyTimeout);
this.trackLatencyTimeout = undefined;
}
// We set the disposed flag as a part of the contract for overriding the disconnect method. This is used by
// DocumentDeltaConnection to determine if emitting messages (ops) on the socket is allowed, which is
// important since OdspDocumentDeltaConnection reuses the socket rather than truly disconnecting it. Note that
// OdspDocumentDeltaConnection may still send disconnect_document which is allowed; this is only intended
// to prevent normal messages from being emitted.
this._disposed = true;
// Remove all listeners listening on the socket. These are listeners on socket and not on this connection
// object. Anyway since we have disposed this connection object, nobody should listen to event on socket
// anymore.
this.removeTrackedListeners();
// Clear the connection/socket before letting the deltaManager/connection manager know about the disconnect.
this.disconnectCore();
// Let user of connection object know about disconnect.
this.emit("disconnect", err);
}
/**
* Disconnect from the websocket.
* @param reason - reason for disconnect
*/
protected disconnectCore() {
this.socket.disconnect();
}
protected async initialize(connectMessage: IConnect, timeout: number) {
this.socket.on("op", this.earlyOpHandler
|
{
// This would be a rare situation due to complexity around socket emitting events.
return;
}
|
conditional_block
|
acc.js
|
();
return saveOwnership();
};
}
// 登记信息保存更新操作
function saveDjxx() {
var result =true;
var djbh = $("#djbh").val();
var djlx = $('input[name="djlx"]').val();
var djd = $('input[name="djd"]').val();
var ywms = $("#ywms").val();
var xmmc = $("#xmmc").val();
$("#djbh1").val(djbh);
$("#djlx1").val(djlx);
$("#djd1").val(djd);
$("#ywms1").val(ywms);
$("#xmmc1").val(xmmc);
$.ajax({
dataType:'json',
url:ctx+"/houseownership/initialreg!saveRegMessage.action?time="+new Date()+"&proc_id="+proc_id,
contentType:"application/x-www-form-urlencoded; charset=GBK",
//表单的序列化操作
data:{"oivo.reg_code":djbh,"oivo.reg_type":djlx,"oivo.reg_station":djd,"oivo.proc_name":ywms},
success:function(data){
if(data){
//alert(data);
top.$.messager.alert('保存成功提示',data.tipMessage,'info',function(){
});
}else {
top.$.messager.alert('保存失败提示',data.errorMessage,'error');
}
},error:function(data){
result = false;
}
});
return result;
}
//保存房地产证附记到缮证表中
function saveFdccfj(){
//$("#fdczfj1").val(fdczfj);
$.ajax({
dataType:'json',
url:ctx+"/houseownership/initialreg!saveCerRemark.action?time="+new Date()+"&proc_id="+proc_id,
contentType:"application/x-www-form-urlencoded; charset=GBK",
//表单的序列化操作
data:{"oivo.excursus":fdczfj},
success:function(data){
if(data){
top.$.messager.alert('保存成功提示',data.tipMessage,'info',function(){
});
}else {
top.$.messager.alert('保存失败提示',data.errorMessage,'error');
}
}
});
};
//获取从受理前置窗口传递的登记信息
function getPreRegMess(){
$.ajax({
dataType: 'json',
url:ctx+"/landuseright/landinitialreg!getRegMessage.action?time="+new Date()+"&proc_id="+proc_id,
success:function(data){
if(data){
$("#djbh").val(data.RegInfo.REG_CODE);
$("#djd").combodict('setValue',data.RegInfo.REG_STATION);
$("#ywms").val(data.RegInfo.PROC_NAME);
//$("#xmmc").val(data.PRO_NAME);
$("#djlx").combodict('setValue',data.RegInfo.REG_TYPE);
$("#fdczfj").text(data.excursus);
/*$("#djbh").val(data.reg_code);
$("#djd").combodict('setValue',data.reg_station);
$("#ywms").val(data.proc_name);
//$("#xmmc").val(data.pro_name);
//$("#djlx").val(data.reg_type);
$("#djlx").combodict('setValue',data.reg_type);
$("#fdczfj").text(data.excursus);*/
}
}
});
}
//保存房屋所有权相关登记信息
function saveOwnership(){
var result = true;
$.ajax({
dataType:'json',
url:ctx+"/houseownership/initialreg!saveOwnership.action?time="+new Date()+"&proc_id="+proc_id,
contentType:"application/x-www-form-urlencoded; charset=GBK",
//表单的序列化操作
data:$("#add_app_form").serialize(),
success:function(data){
if(data){
//alert(data);
top.$.messager.alert('保存成功提示',data.tipMessage,'info',function(){
});
}else {
top.$.messager.alert('保存失败提示',data.errorMessage,'error');
}
},error:function(data){
result = false;
}
});
return result;
}
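//saveDjxx() and saveOwnership() above return "result" before the asynchronous $.ajax call finishes,
//so the error callback can never change the returned value. Below is a sketch of a promise-based
//variant, assuming jQuery 3+ where $.ajax returns a thenable; saveOwnershipAsync is an illustrative
//name and is not called elsewhere in this file.
function saveOwnershipAsync(){
	return $.ajax({
		dataType:'json',
		url:ctx+"/houseownership/initialreg!saveOwnership.action?time="+new Date()+"&proc_id="+proc_id,
		contentType:"application/x-www-form-urlencoded; charset=GBK",
		data:$("#add_app_form").serialize()
	}).then(function(data){
		return !!data;   // resolve to true/false based on the server response
	},function(){
		return false;    // network or server error
	});
}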
//获取房屋所有权登记信息
function getBusownership(){
$.ajax({
dataType: 'json',
url:ctx+"/houseownership/initialreg!getBusownership.action?time="+new Date()+"&proc_id="+proc_id,
success:function(data){
if(data){
//alert(JSON.stringify(data));
$("#fdczh").val(data.cer_no);
$("#djjk").val(data.reg_value);
$("#qdfs").combodict('setValue',data.get_mode);
$("#synx").val(data.lu_term);
if(data.start_date){
var qsrq = data.start_date;
$("#qsrq").datebox('setValue',qsrq.substr(0,10));
}
if(data.end_date){
var zzrq = data.end_date;
$("#zzrq").datebox('setValue',zzrq.substr(0,10));
}
$("#fwxz").combodict('setValue',data.house_attr);
$("#yt").combodict('setValue',data.house_usage);
}
//_init_form_data = $("#add_app_form").serializeJson();
}
});
}
/**********************************************************************************
*Function name: page validation method
*Description: validates required fields and data formats on the page
*Parameters: v_flag - 1 means save; omitted on submit, used to tell save apart from submit
*Return value: obj with result (true = pass, false = fail), message (text), page_name (current page name)
*Author: Joyon
*Created: 2014-03-01
*Change history:
***********************************************************************************/
function validate(v_flag){
//返回结果对象
var result ={
result:false,
message:'',
page_name:'申请表'
}
var message;
if(activName == state1.string0){
var rowlen = $('#table_user').datagrid('getRows').length;
if(rowlen == 0){
message= '请录入申请人!';
result.message=message;
return result;
}
var djbh = $("#djbh").val();
var djlx = $('input[name="djlx"]').val();
var djd = $('input[name="djd"]').val();
var ywms = $("#ywms").val();
var xmmc = $("#xmmc").val();
if($.trim(djlx).length==0){
message= '请选择登记类型!';
result.message=message;
return result;
}
if($.trim(djd).length==0){
message= '请选择登记点!';
result.message=message;
return result;
}
if($.trim(ywms).length==0){
message= '请输入业务描述!';
result.message=message;
return result;
}
/*if($.trim(xmmc).length==0){
message= '请输入项目名称!';
result.message=message;
return result;
}*/
//如果是保存 重新序列化一次 数据初始化变量
if(v_flag){
_init_form_data = "";//$("#main_form").serializeJson();
}
//判断数据项是否己修改 如果己修改 则提示是否保存未保存数据
_cur_form_data = "";//$("#main_form").serializeJson();
var r = equal(_init_form_data,_cur_form_data);
if(!r){
var flag= 0 ;//用来确认 是否用户已经点击放弃保存 未点击 代表是在外面调用 返回false
message = '数据己修改!请先保存后提交!';
if(flag){
}else{
result.message=message;
result.result=false;
}
return result;
}
}else if(activName == state
|
1.string5){
// alert($("#add_app_form").serialize());
var djjk = $("#djjk").val();
var qdfs = $('input[name="get_mode"]').val();
var synx = $("#synx").val();
var qsrq = $('input[name="start_date"]').val();
var zzrq = $('input[name="end_date"]').val();
var fwxz = $('input[name="house_attr"]').val();
var yt = $('input[name="house_usage"]').val();
var fdczfj = $("#fdczfj").val();
// if($.trim(djjk).length==0){
//
// message= '请录入登记价款!';
// result.message=message;
// result.result=false;
// return result;
//
// }
if($.trim(qdfs).length==0){
message= '请选择取得方式!';
|
identifier_body
|
|
acc.js
|
_name'
}, {
title : '申请人类型',
field : 'app_type',formatter : dicts.format.app_type_format
}, {
title : '证件类型',
field : 'app_cer_type',formatter : dicts.format.app_cer_type_format
}, {
title : '证件编号',
field : 'app_cer_no'
}, {
title : '份额',
field : 'app_port'
}, {
title : '地址',
field : 'app_address'
|
field : 'app_tel'
}, {
title : '法定代表人',
field : 'legal_name'
}, {
title : '代理人',
field : 'agent_name'
},
{
title : '代理人证件类型',
field : 'agent_cer_type',
formatter : function(value) {
if(value == '001'){
return '身份证';
};
if(value == '002'){
return '军官证';
}
}
},
{
title : '代理人证件号码',
field : 'agent_cer'
}, {
title : '代理人联系电话',
field : 'agent_tel'
}
] ],
// 表头,添加工具栏。
toolbar : [ {
id : 'user_add',
text : '新增',
iconCls : 'icon-add',
handler : doAdd
}, '-', {
id : 'user_edit',
text : '编辑',
iconCls : 'icon-pencil',
disabled : true,
handler : doEdit
}, '-', {
id : 'user_delete',
text : '删除',
iconCls : 'icon-remove',
disabled : true,
handler : doDelete
}],
onClickRow : function() {
//点击列时激活“编辑”、“删除”按钮
if(activName == state1.string0){
$('#user_edit').linkbutton('enable');
$('#user_delete').linkbutton('enable');
}
},
onLoadSuccess : function() {
//加载完毕禁用“编辑”、“删除”按钮
$('#user_edit').linkbutton('disable');
$('#user_delete').linkbutton('disable');
}
});
//设置权限状态
setState(activName);
// 选择表格中某一行的数据。
function getSelected(func) {
var selectedrow = $('#table_user').datagrid('getSelected');
if (selectedrow) {
row = selectedrow;
// 调用相关函数
func.call(this, selectedrow);
} else {
$.messager.alert('提示:', '请点击选中表格中的某一行.');
}
}
;
// 新增
function doAdd() {
openInTopWindow({
// 窗口元素的id
id : 'add_user_win',
// 窗口iframe的src
src : ctx+'/jsp/common/applicant/addapplicant.jsp?time='+new Date(),
// 关闭时是否销毁窗口。不销毁的话,每次打开窗口都会添加一个新窗口元素。
destroy : true,
// 窗口标题
title : '新增申请人',
// 窗口宽
width : 700,
// 窗口高
height : 400,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
userDataGrid : userDataGrid
};
this.init(proc_id);
}
});
};
// 编辑
function doEdit() {
var row = userDataGrid.datagrid('getSelected');
openInTopWindow({
// 窗口元素的id
id : 'edit_user_win',
// 窗口iframe的src
src : ctx+'/jsp/common/applicant/editapplicant.jsp',
// 关闭时是否销毁窗口。不销毁的话,每次打开窗口都会添加一个新窗口元素。
destroy : true,
// 窗口标题
title : '编辑申请人',
// 窗口宽
width : 700,
// 窗口高
height : 400,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
user : row,
userDataGrid : userDataGrid
};
this.init(row);
}
});
};
// 删除
function doDelete() {
var row = userDataGrid.datagrid('getSelected');
top.$.messager.confirm('确认', '确定要删除申请人名称为[' + row.app_name + ']?', function(
result) {
if (result) {
$.ajax({
url : ctx+"/houseownership/initialreg!deleteApplicant.action?time="+new Date(),
type : 'post',
data : {
applicant_id : row.applicant_id
},
dataType : 'json',
success : function(data) {
if (data.success) {
top.$.messager.alert('提示', data.tipMessage, 'info',
function() {
//alert("删除之后刷新");
userDataGrid.datagrid('reload');
});
} else {
top.$.messager.alert('提示', data.errorMessage, 'error');
}
}
});
}
});
};
/**********************************************************************************
*Function name: dowatch
*Description: view the details of a registration unit
*Parameters: none
*Return value: none
*Author: xuzz
*Created: 2014-03-27
*Change history:
***********************************************************************************/
function dowatch(button){
var row = $('#table_house').datagrid('getSelected');
var obj={};
obj.WHERE_CODE=row.CODE;
obj.REG_UNIT_TYPE=row.TYPE;
//alert(JSON.stringify(row));
openInTopWindow({
// 窗口元素的id
id : 'add_user_win',
// 窗口iframe的src
src : ctx+'/bookmanage/book-manage!home.action?reg_unit_type='+row.TYPE+'&time='+new Date(),
// 关闭时是否销毁窗口。不销毁的话,每次打开窗口都会添加一个新窗口元素。
destroy : true,
// 窗口标题
title : '房地产信息',
// 窗口宽
width : 950,
// 窗口高
height : 600,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
userDataGrid : userDataGrid,
regunit:row
};
this.init(obj);
}
});
}
// 双击表格中某一行的触发的事件
function rowDblclick(rowIndex, row) {
var i = 0;
var props = [];
for ( var p in row) {
props[i++] = p + ' = ' + row[p];
}
alert(props.join(',\n'));
// info(row);
}
;
// 定义流程实例查询
function searchProcint() {
var fields = $("#procinstSearchform").serializeArray();
var o = {};
jQuery.each(fields, function(i, field) {
if (o[this['name']]) {
o[this['name']] = o[this['name']] + "," + this['value'];
} else {
o[this['name']] = this['value'];
}
});
// console.debug(o);
|
}, {
title : '联系电话',
|
random_line_split
|
acc.js
|
_name'
}, {
title : '申请人类型',
field : 'app_type',formatter : dicts.format.app_type_format
}, {
title : '证件类型',
field : 'app_cer_type',formatter : dicts.format.app_cer_type_format
}, {
title : '证件编号',
field : 'app_cer_no'
}, {
title : '份额',
field : 'app_port'
}, {
title : '地址',
field : 'app_address'
}, {
title : '联系电话',
field : 'app_tel'
}, {
title : '法定代表人',
field : 'legal_name'
}, {
title : '代理人',
field : 'agent_name'
},
{
title : '代理人证件类型',
field : 'agent_cer_type',
formatter : function(value) {
if(value == '001'){
return '身份证';
};
if(value == '002'){
return '军官证';
}
}
},
{
title : '代理人证件号码',
field : 'agent_cer'
}, {
title : '代理人联系电话',
field : 'agent_tel'
}
] ],
// 表头,添加工具栏。
toolbar : [ {
id : 'user_add',
text : '新增',
iconCls : 'icon-add',
handler : doAdd
}, '-', {
id : 'user_edit',
text : '编辑',
iconCls : 'icon-pencil',
disabled : true,
handler : doEdit
}, '-', {
id : 'user_delete',
text : '删除',
iconCls : 'icon-remove',
disabled : true,
handler : doDelete
}],
onClickRow : function() {
//点击列时激活“编辑”、“删除”按钮
if(activName == state1.string0){
$('#user_edit').linkbutton('enable');
$('#user_delete').linkbutton('enable');
}
},
onLoadSuccess : function() {
//加载完毕禁用“编辑”、“删除”按钮
$('#user_edit').linkbutton('disable');
$('#user_delete').linkbutton('disable');
}
});
//设置权限状态
setState(activName);
// 选择表格中某一行的数据。
function getSelected(func) {
var selectedrow = $('#table_user').datagrid('getSelected');
if (selectedrow) {
row = selectedrow;
// 调用相关函数
func.call(this, selectedrow);
} else {
$.messager.alert('提示:', '请点击选中表格中的某一行.');
}
}
;
// 新增
function doAdd() {
openInTopWindow({
// 窗口元素的id
id : 'add_user_win',
// 窗口iframe的src
src : ctx+'/jsp/common/applicant/addapplicant.jsp?time='+new Date(),
// 关闭时是否销毁窗口。不销毁的话,每次打开窗口都会添加一个新窗口元素。
destroy : true,
// 窗口标题
title : '新增申请人',
// 窗口宽
width : 700,
// 窗口高
height : 400,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
userDataGrid : userDataGrid
};
this.init(proc_id);
}
});
};
// 编辑
function doEdit() {
var row = userDataGrid.datagrid('getSelected');
openInTopWindow({
// 窗口元素的id
id : 'edit_user_win',
// 窗口iframe的src
src : ctx+'/jsp/common/applicant/editapplicant.jsp',
// 关闭时是否销毁窗口。不销毁的话,每次打开窗口都会添加一个新窗口元素。
destroy : true,
// 窗口标题
title : '编辑申请人',
// 窗口宽
width : 700,
// 窗口高
height : 400,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
user : row,
userDataGrid : userDataGrid
};
this.init(row);
}
});
};
// 删除
function doDelete() {
var row = userDataGrid.datagrid('getSelected');
top.$.messager.confirm('确认', '确定要删除申请人名称为[' + row.app_name + ']?', function(
result) {
if (result) {
$.ajax({
url : ctx+"/houseownership/initialreg!deleteApplicant.action?time="+new Date(),
type : 'post',
data : {
applicant_id : row.applicant_id
},
dataType : 'json',
success : function(data) {
if (data.success) {
top.$.messager.alert('提示', data.tipMessage, 'info',
function() {
//alert("删除之后刷新");
userDataGrid.datagrid('reload');
});
} else {
top.$.messager.alert('提示', data.errorMessage, 'error');
}
}
});
}
});
};
/*********************************************************************************
|
名称: dowatch
*功能说明: 查看登记单元详细信息
*参数说明: 无
*返 回 值: 无
*函数作者: xuzz
*创建日期: 2014-03-27
*修改历史:
***********************************************************************************/
function dowatch(button){
var row = $('#table_house').datagrid('getSelected');
var obj={};
obj.WHERE_CODE=row.CODE;
obj.REG_UNIT_TYPE=row.TYPE;
//alert(JSON.stringify(row));
openInTopWindow({
// 窗口元素的id
id : 'add_user_win',
// 窗口iframe的src
src : ctx+'/bookmanage/book-manage!home.action?reg_unit_type='+row.TYPE+'&time='+new Date(),
// 关闭时是否销毁窗口。不销毁的话,每次打开窗口都会添加一个新窗口元素。
destroy : true,
// 窗口标题
title : '房地产信息',
// 窗口宽
width : 950,
// 窗口高
height : 600,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
userDataGrid : userDataGrid,
regunit:row
};
this.init(obj);
}
});
}
// 双击表格中某一行的触发的事件
function rowDblclick(rowIndex, row) {
var i = 0;
var props = [];
for ( var p in row) {
props[i++] = p + ' = ' + row[p];
}
alert(props.join(',\n'));
// info(row);
}
;
// 定义流程实例查询
function searchProcint() {
var fields = $("#procinstSearchform").serializeArray();
var o = {};
jQuery.each(fields, function(i, field) {
if (o[this['name']]) {
o[this['name']] = o[this['name']] + "," + this['value'];
} else {
o[this['name']] = this['value'];
}
});
// console.debug(o
|
*
*函数
|
identifier_name
|
acc.js
|
窗口高
height : 600,
modal : true,
// 窗口中iframe的window对象的onLoad回调函数设置
onLoad : function() {
// 此处将本窗口window对象赋值为打开的新窗口window对象的openerWindow属性。
// 因此,在新窗口中,可通过openerWindow属性调用本窗口,从而实现多窗口间的交互、传值。
this.openerWindow = window;
// 将参数传入打开窗口对象的parenter属性中,从而实现窗口间传递参数调用
this.args = {
userDataGrid : userDataGrid,
regunit:row
};
this.init(obj);
}
});
}
// 双击表格中某一行的触发的事件
function rowDblclick(rowIndex, row) {
var i = 0;
var props = [];
for ( var p in row) {
props[i++] = p + ' = ' + row[p];
}
alert(props.join(',\n'));
// info(row);
}
;
// 定义流程实例查询
function searchProcint() {
var fields = $("#procinstSearchform").serializeArray();
var o = {};
jQuery.each(fields, function(i, field) {
if (o[this['name']]) {
o[this['name']] = o[this['name']] + "," + this['value'];
} else {
o[this['name']] = this['value'];
}
});
// console.debug(o);
$('#dg_procinst').datagrid('load', o);
}
;
$('#simpleform').form({
dataType : 'json',
url : 'appDelegate/getUserList.run',
success : function(data) {
userDataGrid.datagrid('loadData', data);
}
});
test();
function test() {
var panel = $('.plui-layout').layout('panel', 'north');
panel.panel({
height : 143
});
$('.plui-layout').layout('resize');
}
;
});
function setState(activName) {
if(activName == state1.string5){
$("#djd").combo('disable');
$(".reg").attr("disabled", "disabled");
$("#fdczfj").removeAttr("disabled");
$('#user_edit').linkbutton('disable');
$('#user_delete').linkbutton('disable');
$('#user_add').linkbutton('disable');
};
if (!(activName == state1.string0)&&!(activName == state1.string5)) {
$("#djd").combo('disable');
$("#qdfs").combo('disable');
$("#qsrq").combo('disable');
$("#zzrq").combo('disable');
$("#fwxz").combo('disable');
$("#yt").combo('disable');
$(":input").attr("disabled", "disabled");
$('#user_edit').linkbutton('disable');
$('#user_delete').linkbutton('disable');
$('#user_add').linkbutton('disable');
}
;
// if (activName == state1.string4) {
// $('#user_edit').linkbutton('enable');
// $('#user_delete').linkbutton('enable');
//
// $('#user_add').linkbutton('enable');
//
// }
if(activName != state1.string0){
$(".initreg").css({display:"block"});
$(".remark").css({display:"block"});
//$("#pric").css({display:"block"});
}
if(activName == state1.string0){
//$(".initreg").css({display:"block"});
//$(".remark").css({display:"block"});
$(".tt").css({display:"none"});
}
}
// 获取地址栏参数
function GetQueryString(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if (r != null)
return unescape(r[2]);
return null;
}
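// A minimal modern alternative to the regex-based helper above, assuming a browser that supports
// URLSearchParams; the name getQueryParam is illustrative and is not used elsewhere in this file.
function getQueryParam(name) {
	// URLSearchParams decodes the value, so no unescape() call is needed; returns null if absent.
	return new URLSearchParams(window.location.search).get(name);
}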
//判断执行的是saveDjxx()还是saveFdccfj(activName)
function submit(){
// var result = validate();
// if(!result.result){
// return false;
// }
if(activName == state1.string0){
return saveDjxx();
};
if(activName == state1.string5){
//saveFdccfj();
return saveOwnership();
};
}
// 登记信息保存更新操作
function saveDjxx() {
var result =true;
var djbh = $("#djbh").val();
var djlx = $('input[name="djlx"]').val();
var djd = $('input[name="djd"]').val();
var ywms = $("#ywms").val();
var xmmc = $("#xmmc").val();
$("#djbh1").val(djbh);
$("#djlx1").val(djlx);
$("#djd1").val(djd);
$("#ywms1").val(ywms);
$("#xmmc1").val(xmmc);
$.ajax({
dataType:'json',
url:ctx+"/houseownership/initialreg!saveRegMessage.action?time="+new Date()+"&proc_id="+proc_id,
contentType:"application/x-www-form-urlencoded; charset=GBK",
//表单的序列化操作
data:{"oivo.reg_code":djbh,"oivo.reg_type":djlx,"oivo.reg_station":djd,"oivo.proc_name":ywms},
success:function(data){
if(data){
//alert(data);
top.$.messager.alert('保存成功提示',data.tipMessage,'info',function(){
});
}else {
top.$.messager.alert('保存失败提示',data.errorMessage,'error');
}
},error:function(data){
result = false;
}
});
return result;
}
//保存房地产证附记到缮证表中
function saveFdccfj(){
//$("#fdczfj1").val(fdczfj);
$.ajax({
dataType:'json',
url:ctx+"/houseownership/initialreg!saveCerRemark.action?time="+new Date()+"&proc_id="+proc_id,
contentType:"application/x-www-form-urlencoded; charset=GBK",
//表单的序列化操作
data:{"oivo.excursus":fdczfj},
success:function(data){
if(data){
top.$.messager.alert('保存成功提示',data.tipMessage,'info',function(){
});
}else {
top.$.messager.alert('保存失败提示',data.errorMessage,'error');
}
}
});
};
//获取从受理前置窗口传递的登记信息
function getPreRegMess(){
$.ajax({
dataType: 'json',
url:ctx+"/landuseright/landinitialreg!getRegMessage.action?time="+new Date()+"&proc_id="+proc_id,
success:function(data){
if(data){
$("#djbh").val(data.RegInfo.REG_CODE);
$("#djd").combodict('setValue',data.RegInfo.REG_STATION);
$("#ywms").val(data.RegInfo.PROC_NAME);
//$("#xmmc").val(data.PRO_NAME);
$("#djlx").combodict('setValue',data.RegInfo.REG_TYPE);
$("#fdczfj").text(data.excursus);
/*$("#djbh").val(data.reg_code);
$("#djd").combodict('setValue',data.reg_station);
$("#ywms").val(data.proc_name);
//$("#xmmc").val(data.pro_name);
//$("#djlx").val(data.reg_type);
$("#djlx").combodict('setValue',data.reg_type);
$("#fdczfj").text(data.excursus);*/
}
}
});
}
//保存房屋所有权相关登记信息
function saveOwnership(){
var result = true;
$.ajax({
dataType:'json',
url:ctx+"/houseownership/initialreg!saveOwnership.action?time="+new Date()+"&proc_id="+proc_id,
contentType:"application/x-www-form-urlencoded; charset=GBK",
//表单的序列化操作
data:$("#add_app_form").serialize(),
success:function(data){
if(data){
//alert(data);
top.$.messager.alert('保存成功提示',data.tipMessage,'info',function(){
});
}else {
top.$.messager.alert('保存失败提示',data.errorMessage,'error');
}
},error:function(data){
result = false;
}
});
return result;
}
//获取房屋所有权登记信息
function getBusownership(){
$.ajax({
dataType: 'json',
url:ctx+"/houseownership/initialreg!getBusow
|
nership.action?time="+new Date()+"&proc_id="+proc_id,
success:function(data){
|
conditional_block
|
|
PerceptronClassifier.py
|
self.max_iter = max_iter
self.learnRate = 1
if training_data is not None:
self.fit(training_data, devel_data)
def fit(self, training_data, devel_data=None):
'''
Estimate the parameters for perceptron model. For multi-class perceptron, parameters can be
treated as a T \times D matrix W, where T is the number of labels and D is the number of
features.
'''
# feature_alphabet is a mapping from feature string to it's dimension in the feature space,
# e.g. feature_alphabet['U1=I']=3, which means 'U1=I' is in the third column of W
#
# W = [[ . . 1 . . .],
# ...
# [ . . 1 . . .]]
# ^
# |
# 'U1=I'
self.feature_alphabet = {'None': 0}
self.label_alphabet = {}
# Extract features, build the feature_alphabet, label_alphabet and training instance pairs.
# Each instance consists of a tuple (X, Y) where X is the mapped features (list(int)), and Y is
# the index of the corresponding label.
instances = []
for words, tags in training_data:
L = len(words)
prev = '<s>'
for i in range(L):
# Your code here, extract features and give it into X, convert POStag to index and
# give it to Y
X = self.extract_features(words, i, prev)
Y = len(self.label_alphabet) if tags[i] not in self.label_alphabet.keys() else self.label_alphabet[tags[i]]
instances.append((X, Y))
if tags[i] not in self.label_alphabet.keys():
self.label_alphabet[tags[i]] = len(self.label_alphabet)
prev = tags[i]
# Build a mapping from index to label string to recover POStags.
self.labels = [-1 for k in self.label_alphabet]
for k in self.label_alphabet:
self.labels[self.label_alphabet[k]] = k
self.D, self.T = len(self.feature_alphabet), len(self.label_alphabet)
print('number of features : %d' % self.D)
print('number of labels: %d' % self.T)
# Allocate the weight matrix W
self.W = [[0 for j in range(self.D)] for i in range(self.T)]
self.best_W = copy(self.W)
best_acc = 0
for it in range(self.max_iter):
# The training part,
n_errors = 0
print('training iteration #%d' % it)
for X, Y in instances:
# Your code here, make a prediction and give it to Z
|
# print '初始预测:', Z, self._score(X, Z), 'Y的分数', self._score(X, Y)
# print self.W[Y]
tmp = self._score(X,Y)
# Your code here. If the predict is incorrect, perform the perceptron update
n_errors += 1
for x in X:
self.W[Y][x] =self.W[Y][x] + 1*self.learnRate
# The perceptron update part.
for i in range(self.T):
if self._score(X, i) >= tmp and i!=Y:
for x in X:
self.W[i][x] = self.W[i][x] - 1 * self.learnRate
# print '调整后:',self._predict(X),'正确:',Y,'Y的分数',self._score(X,Y)
print('training error %d' % n_errors)
if devel_data is not None:
# Test accuracy on the development set if provided.
n_corr, n_total = 0, 0
for words, tags in devel_data:
prev = '<s>'
for i in range(len(words)):
Z = self.predict(words, i, prev)
Y = self.label_alphabet[tags[i]]
if Z == Y:
n_corr += 1
n_total += 1
prev = self.labels[Z]
print('accuracy: %f' % (float(n_corr) / n_total))
# print 'W0',self.W[10][:100]
if best_acc < float(n_corr) / n_total:
# If this round is better than before, save it.
best_acc = float(n_corr) / n_total
self.best_W = copy(self.W)
if self.best_W is None:
self.best_W = copy(self.W)
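# The update in the training loop above is the multi-class perceptron rule; as a sketch in
# notation, with \phi(x) the set of active feature indices and \eta the learning rate:
#
#   \hat{y} = \arg\max_{t} \sum_{j \in \phi(x)} W_{t,j}
#   if \hat{y} \neq y:  W_{y,j} \leftarrow W_{y,j} + \eta  and  W_{\hat{y},j} \leftarrow W_{\hat{y},j} - \eta  for all j \in \phi(x)
#
# The loop above is slightly more aggressive: it decrements every label whose score is at least
# score(X, Y), not only the argmax.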
def extract_features(self, words, i, prev_tag=None, add=True):
'''
Extract features from words and prev POS tag, if `add` is True, also insert the feature
string to the feature_alphabet.
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
add: bool
If true, insert the feature to feature_alphabet.
Return
------
mapped_features: list(int)
The list of hashed features.
'''
L = len(words)
context = ['<s>' if i - 2 < 0 else words[i - 2],
'<s>' if i - 1 < 0 else words[i - 1],
words[i],
'<e>' if i + 1 >= L else words[i + 1],
'<e>' if i + 2 >= L else words[i + 2]]
raw_features = ['U1=%s' % context[0],
'U2=%s' % context[1],
'U3=%s' % context[2],
'U4=%s' % context[3],
'U5=%s' % context[4],
'U1,2=%s/%s' % (context[0], context[1]),
'U2,3=%s/%s' % (context[1], context[2]), # Your code here, extract the bigram raw feature,
'U3,4=%s/%s' % (context[2], context[3]), # Your code here, extract the bigram raw feature,
'U4,5=%s/%s' % (context[3], context[4]), # Your code here, extract the bigram raw feature,
]
if prev_tag is not None:
raw_features.append('B=%s' % prev_tag)
mapped_features = []
for f in raw_features:
if add and (f not in self.feature_alphabet):
# Your code here, insert the feature string to the feature_alphabet.
index = len(self.feature_alphabet)
self.feature_alphabet[f] = index
# Your code here, map the string feature to index.
# for item in self.feature_alphabet.values():
# mapped_features[self.feature_alphabet[item]] = 1
if f in self.feature_alphabet:
mapped_features.append(self.feature_alphabet[f])
return mapped_features
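# --- Illustrative worked example (not part of the original file) ------------
# For words = ['I', 'like', 'tea'], i = 1, prev_tag = 'PRP', the raw feature
# strings built above are:
#   U1=<s>, U2=I, U3=like, U4=tea, U5=<e>,
#   U1,2=<s>/I, U2,3=I/like, U3,4=like/tea, U4,5=tea/<e>, B=PRP
# and the returned value is the list of their integer columns in
# feature_alphabet, i.e. the indices of the active columns of W.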
def _score(self, features, t):
'''
Calculate the score for the given features and label t
Parameters
----------
features: list(int)
The hashed features
t: int
The index of label
Return
------
s: int
The score
'''
# Your code here, compute the score.
s=0.0
for x in features:
s += self.W[t][x]
return s
def _predict(self, features):
'''
Find the highest scored label for the given features
Parameters
----------
features: list(int)
The hashed features
Return
------
best_y: int
The highest scored label's index
'''
pred_scores = [self._score(features, y) for y in range(self.T)]
best_score, best_y = -1e5, -1
# Your code here, find the highest scored class from pred_scores
# best_score = pred_scores[0]
# best_y = 0
for index,value in enumerate(pred_scores):
if value > best_score:
best_score = value
best_y = index
# print 'best:',best_score,best_y
# print max([math.fabs(sc - 10) for sc in pred_scores])
return best_y
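# --- Illustrative usage sketch (not part of the original file) --------------
# Greedy left-to-right tagging with predict(), feeding each predicted tag back
# in as prev_tag. The constructor arguments and variable names below are
# assumptions for illustration only:
# clf = PerceptronClassifier(training_data, devel_data)
# prev, tags = '<s>', []
# for i in range(len(words)):
#     y = clf.predict(words, i, prev)
#     prev = clf.labels[y]
#     tags.append(prev)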
def predict(self, words, i, prev_tag=None):
'''
Make prediction on list of words
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
Return
------
y: int
The predicted label's index
'''
X = self.extract_features(words, i, prev_tag, False)
y = self._predict(X)
return y
def greedy_search(words, classifier):
'''
Perform greedy search on the classifier.
Parameters
----------
words: list(str)
The word list
classifier: PerceptronClassifier
The classifier object.
'''
prev = '<s>'
ret = []
for i in range(len(words
|
Z = self._predict(X)
if Z != Y:
|
random_line_split
|
PerceptronClassifier.py
|
.max_iter = max_iter
self.learnRate = 1
if training_data is not None:
self.fit(training_data, devel_data)
def fit(self, training_data, devel_data=None):
'''
Estimate the parameters for perceptron model. For multi-class perceptron, parameters can be
treated as a T \times D matrix W, where T is the number of labels and D is the number of
features.
'''
# feature_alphabet is a mapping from feature string to its dimension in the feature space,
# e.g. feature_alphabet['U1=I']=3, which means 'U1=I' is in the third column of W
#
# W = [[ . . 1 . . .],
# ...
# [ . . 1 . . .]]
# ^
# |
# 'U1=I'
self.feature_alphabet = {'None': 0}
self.label_alphabet = {}
# Extract features, build the feature_alphabet, label_alphabet and training instance pairs.
# Each instance consists of a tuple (X, Y) where X is the mapped features (list(int)), and Y is
# the index of the corresponding label.
instances = []
for words, tags in training_data:
L = len(words)
prev = '<s>'
for i in range(L):
# Your code here: extract the features into X and convert the POS tag
# to its index Y
X = self.extract_features(words, i, prev)
Y = len(self.label_alphabet) if tags[i] not in self.label_alphabet.keys() else self.label_alphabet[tags[i]]
instances.append((X, Y))
if tags[i] not in self.label_alphabet.keys():
self.label_alphabet[tags[i]] = len(self.label_alphabet)
prev = tags[i]
# Build a mapping from index to label string to recover POStags.
self.labels = [-1 for k in self.label_alphabet]
for k in self.label_alphabet:
self.labels[self.label_alphabet[k]] = k
self.D, self.T = len(self.feature_alphabet), len(self.label_alphabet)
print('number of features : %d' % self.D)
print('number of labels: %d' % self.T)
# Allocate the weight matrix W
self.W = [[0 for j in range(self.D)] for i in range(self.T)]
self.best_W = copy(self.W)
best_acc = 0
for it in range(self.max_iter):
# The training part,
n_er
|
# print '调整后:',self._predict(X),'正确:',Y,'Y的分数',self._score(X,Y)
print('training error %d' % n_errors)
if devel_data is not None:
# Test accuracy on the development set if provided.
n_corr, n_total = 0, 0
for words, tags in devel_data:
prev = '<s>'
for i in range(len(words)):
Z = self.predict(words, i, prev)
Y = self.label_alphabet[tags[i]]
if Z == Y:
n_corr += 1
n_total += 1
prev = self.labels[Z]
print('accuracy: %f' % (float(n_corr) / n_total))
# print 'W0',self.W[10][:100]
if best_acc < float(n_corr) / n_total:
# If this round is better than before, save it.
best_acc = float(n_corr) / n_total
self.best_W = copy(self.W)
if self.best_W is None:
self.best_W = copy(self.W)
def extract_features(self, words, i, prev_tag=None, add=True):
'''
Extract features from words and prev POS tag, if `add` is True, also insert the feature
string to the feature_alphabet.
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
add: bool
If true, insert the feature to feature_alphabet.
Return
------
mapped_features: list(int)
The list of hashed features.
'''
L = len(words)
context = ['<s>' if i - 2 < 0 else words[i - 2],
'<s>' if i - 1 < 0 else words[i - 1],
words[i],
'<e>' if i + 1 >= L else words[i + 1],
'<e>' if i + 2 >= L else words[i + 2]]
raw_features = ['U1=%s' % context[0],
'U2=%s' % context[1],
'U3=%s' % context[2],
'U4=%s' % context[3],
'U5=%s' % context[4],
'U1,2=%s/%s' % (context[0], context[1]),
'U2,3=%s/%s' % (context[1], context[2]), # Your code here, extract the bigram raw feature,
'U3,4=%s/%s' % (context[2], context[3]), # Your code here, extract the bigram raw feature,
'U4,5=%s/%s' % (context[3], context[4]), # Your code here, extract the bigram raw feature,
]
if prev_tag is not None:
raw_features.append('B=%s' % prev_tag)
mapped_features = []
for f in raw_features:
if add and (f not in self.feature_alphabet):
# Your code here, insert the feature string to the feature_alphabet.
index = len(self.feature_alphabet)
self.feature_alphabet[f] = index
# Your code here, map the string feature to index.
# for item in self.feature_alphabet.values():
# mapped_features[self.feature_alphabet[item]] = 1
if f in self.feature_alphabet:
mapped_features.append(self.feature_alphabet[f])
return mapped_features
def _score(self, features, t):
'''
Calculate the score for the given features and label t
Parameters
----------
features: list(int)
The hashed features
t: int
The index of label
Return
------
s: int
The score
'''
# Your code here, compute the score.
s=0.0
for x in features:
s += self.W[t][x]
return s
def _predict(self, features):
'''
Find the highest scored label for the given features
Parameters
----------
features: list(int)
The hashed features
Return
------
best_y: int
The highest scored label's index
'''
pred_scores = [self._score(features, y) for y in range(self.T)]
best_score, best_y = -1e5, -1
# Your code here, find the highest scored class from pred_scores
# best_score = pred_scores[0]
# best_y = 0
for index,value in enumerate(pred_scores):
if value > best_score:
best_score = value
best_y = index
# print 'best:',best_score,best_y
# print max([math.fabs(sc - 10) for sc in pred_scores])
return best_y
def predict(self, words, i, prev_tag=None):
'''
Make prediction on list of words
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
Return
------
y: int
The predicted label's index
'''
X = self.extract_features(words, i, prev_tag, False)
y = self._predict(X)
return y
def greedy_search(words, classifier):
'''
Perform greedy search on the classifier.
Parameters
----------
words: list(str)
The word list
classifier: PerceptronClassifier
The classifier object.
'''
prev = '<s>'
ret = []
for i in range(len
|
rors = 0
print('training iteration #%d' % it)
for X, Y in instances:
# Your code here: make a prediction and store it in Z
Z = self._predict(X)
if Z != Y:
# print '初始预测:', Z, self._score(X, Z), 'Y的分数', self._score(X, Y)
# print self.W[Y]
tmp = self._score(X,Y)
# Your code here: if the prediction is incorrect, perform the perceptron update
n_errors += 1
for x in X:
self.W[Y][x] =self.W[Y][x] + 1*self.learnRate
# The perceptron update part.
for i in range(self.T):
if self._score(X, i) >= tmp and i!=Y:
for x in X:
self.W[i][x] = self.W[i][x] - 1 * self.learnRate
|
conditional_block
|
PerceptronClassifier.py
|
.max_iter = max_iter
self.learnRate = 1
if training_data is not None:
self.fit(training_data, devel_data)
def fit(self, training_data, devel_data=None):
'''
Estimate the parameters for perceptron model. For multi-class perceptron, parameters can be
treated as a T \times D matrix W, where T is the number of labels and D is the number of
features.
'''
# feature_alphabet is a mapping from feature string to its dimension in the feature space,
# e.g. feature_alphabet['U1=I']=3, which means 'U1=I' is in the third column of W
#
# W = [[ . . 1 . . .],
# ...
# [ . . 1 . . .]]
# ^
# |
# 'U1=I'
self.feature_alphabet = {'None': 0}
self.label_alphabet = {}
# Extract features, build the feature_alphabet, label_alphabet and training instance pairs.
# Each instance consists of a tuple (X, Y) where X is the mapped features (list(int)), and Y is
# the index of the corresponding label.
instances = []
for words, tags in training_data:
L = len(words)
prev = '<s>'
for i in range(L):
# Your code here: extract the features into X and convert the POS tag
# to its index Y
X = self.extract_features(words, i, prev)
Y = len(self.label_alphabet) if tags[i] not in self.label_alphabet.keys() else self.label_alphabet[tags[i]]
instances.append((X, Y))
if tags[i] not in self.label_alphabet.keys():
self.label_alphabet[tags[i]] = len(self.label_alphabet)
prev = tags[i]
# Build a mapping from index to label string to recover POStags.
self.labels = [-1 for k in self.label_alphabet]
for k in self.label_alphabet:
self.labels[self.label_alphabet[k]] = k
self.D, self.T = len(self.feature_alphabet), len(self.label_alphabet)
print('number of features : %d' % self.D)
print('number of labels: %d' % self.T)
# Allocate the weight matrix W
self.W = [[0 for j in range(self.D)] for i in range(self.T)]
self.best_W = copy(self.W)
best_acc = 0
for it in range(self.max_iter):
# The training part,
n_errors = 0
print('training iteration #%d' % it)
for X, Y in instances:
# Your code here: make a prediction and store it in Z
Z = self._predict(X)
if Z != Y:
# print '初始预测:', Z, self._score(X, Z), 'Y的分数', self._score(X, Y)
# print self.W[Y]
tmp = self._score(X,Y)
# Your code here: if the prediction is incorrect, perform the perceptron update
n_errors += 1
for x in X:
self.W[Y][x] =self.W[Y][x] + 1*self.learnRate
# The perceptron update part.
for i in range(self.T):
if self._score(X, i) >= tmp and i!=Y:
for x in X:
self.W[i][x] = self.W[i][x] - 1 * self.learnRate
# print '调整后:',self._predict(X),'正确:',Y,'Y的分数',self._score(X,Y)
print('training error %d' % n_errors)
if devel_data is not None:
# Test accuracy on the development set if provided.
n_corr, n_total = 0, 0
for words, tags in devel_data:
prev = '<s>'
for i in range(len(words)):
Z = self.predict(words, i, prev)
Y = self.label_alphabet[tags[i]]
if Z == Y:
n_corr += 1
n_total += 1
prev = self.labels[Z]
print('accuracy: %f' % (float(n_corr) / n_total))
# print 'W0',self.W[10][:100]
if best_acc < float(n_corr) / n_total:
# If this round is better than before, save it.
best_acc = float(n_corr) / n_total
self.best_W = copy(self.W)
if self.best_W is None:
self.best_W = copy(self.W)
def extract_features(self, words, i, prev_ta
|
):
'''
Extract features from words and prev POS tag, if `add` is True, also insert the feature
string to the feature_alphabet.
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
add: bool
If true, insert the feature to feature_alphabet.
Return
------
mapped_features: list(int)
The list of hashed features.
'''
L = len(words)
context = ['<s>' if i - 2 < 0 else words[i - 2],
'<s>' if i - 1 < 0 else words[i - 1],
words[i],
'<e>' if i + 1 >= L else words[i + 1],
'<e>' if i + 2 >= L else words[i + 2]]
raw_features = ['U1=%s' % context[0],
'U2=%s' % context[1],
'U3=%s' % context[2],
'U4=%s' % context[3],
'U5=%s' % context[4],
'U1,2=%s/%s' % (context[0], context[1]),
'U2,3=%s/%s' % (context[1], context[2]), # Your code here, extract the bigram raw feature,
'U3,4=%s/%s' % (context[2], context[3]), # Your code here, extract the bigram raw feature,
'U4,5=%s/%s' % (context[3], context[4]), # Your code here, extract the bigram raw feature,
]
if prev_tag is not None:
raw_features.append('B=%s' % prev_tag)
mapped_features = []
for f in raw_features:
if add and (f not in self.feature_alphabet):
# Your code here, insert the feature string to the feature_alphabet.
index = len(self.feature_alphabet)
self.feature_alphabet[f] = index
# Your code here, map the string feature to index.
# for item in self.feature_alphabet.values():
# mapped_features[self.feature_alphabet[item]] = 1
if f in self.feature_alphabet:
mapped_features.append(self.feature_alphabet[f])
return mapped_features
def _score(self, features, t):
'''
Calculate the score for the given features and label t
Parameters
----------
features: list(int)
The hashed features
t: int
The index of label
Return
------
s: int
The score
'''
# Your code here, compute the score.
s=0.0
for x in features:
s += self.W[t][x]
return s
def _predict(self, features):
'''
Find the highest scored label for the given features
Parameters
----------
features: list(int)
The hashed features
Return
------
best_y: int
The highest scored label's index
'''
pred_scores = [self._score(features, y) for y in range(self.T)]
best_score, best_y = -1e5, -1
# Your code here, find the highest scored class from pred_scores
# best_score = pred_scores[0]
# best_y = 0
for index,value in enumerate(pred_scores):
if value > best_score:
best_score = value
best_y = index
# print 'best:',best_score,best_y
# print max([math.fabs(sc - 10) for sc in pred_scores])
return best_y
def predict(self, words, i, prev_tag=None):
'''
Make prediction on list of words
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
Return
------
y: int
The predicted label's index
'''
X = self.extract_features(words, i, prev_tag, False)
y = self._predict(X)
return y
def greedy_search(words, classifier):
'''
Perform greedy search on the classifier.
Parameters
----------
words: list(str)
The word list
classifier: PerceptronClassifier
The classifier object.
'''
prev = '<s>'
ret = []
for i in range(len
|
g=None, add=True
|
identifier_name
|
PerceptronClassifier.py
|
_iter = max_iter
self.learnRate = 1
if training_data is not None:
self.fit(training_data, devel_data)
def fit(self, training_data, devel_data=None):
'''
Estimate the parameters for perceptron model. For multi-class perceptron, parameters can be
treated as a T \times D matrix W, where T is the number of labels and D is the number of
features.
'''
# feature_alphabet is a mapping from feature string to its dimension in the feature space,
# e.g. feature_alphabet['U1=I']=3, which means 'U1=I' is in the third column of W
#
# W = [[ . . 1 . . .],
# ...
# [ . . 1 . . .]]
# ^
# |
# 'U1=I'
self.feature_alphabet = {'None': 0}
self.label_alphabet = {}
# Extract features, build the feature_alphabet, label_alphabet and training instance pairs.
# Each instance consists of a tuple (X, Y) where X is the mapped features (list(int)), and Y is
# the index of the corresponding label.
instances = []
for words, tags in training_data:
L = len(words)
prev = '<s>'
for i in range(L):
# Your code here: extract the features into X and convert the POS tag
# to its index Y
X = self.extract_features(words, i, prev)
Y = len(self.label_alphabet) if tags[i] not in self.label_alphabet.keys() else self.label_alphabet[tags[i]]
instances.append((X, Y))
if tags[i] not in self.label_alphabet.keys():
self.label_alphabet[tags[i]] = len(self.label_alphabet)
prev = tags[i]
# Build a mapping from index to label string to recover POStags.
self.labels = [-1 for k in self.label_alphabet]
for k in self.label_alphabet:
self.labels[self.label_alphabet[k]] = k
self.D, self.T = len(self.feature_alphabet), len(self.label_alphabet)
print('number of features : %d' % self.D)
print('number of labels: %d' % self.T)
# Allocate the weight matrix W
self.W = [[0 for j in range(self.D)] for i in range(self.T)]
self.best_W = copy(self.W)
best_acc = 0
for it in range(self.max_iter):
# The training part,
n_errors = 0
print('training iteration #%d' % it)
for X, Y in instances:
# Your code here: make a prediction and store it in Z
Z = self._predict(X)
if Z != Y:
# print '初始预测:', Z, self._score(X, Z), 'Y的分数', self._score(X, Y)
# print self.W[Y]
tmp = self._score(X,Y)
# Your code here: if the prediction is incorrect, perform the perceptron update
n_errors += 1
for x in X:
self.W[Y][x] =self.W[Y][x] + 1*self.learnRate
# The perceptron update part.
for i in range(self.T):
if self._score(X, i) >= tmp and i!=Y:
for x in X:
self.W[i][x] = self.W[i][x] - 1 * self.learnRate
# print '调整后:',self._predict(X),'正确:',Y,'Y的分数',self._score(X,Y)
print('training error %d' % n_errors)
if devel_data is not None:
# Test accuracy on the development set if provided.
n_corr, n_total = 0, 0
for words, tags in devel_data:
prev = '<s>'
for i in range(len(words)):
Z = self.predict(words, i, prev)
Y = self.label_alphabet[tags[i]]
if Z == Y:
n_corr += 1
n_total += 1
prev = self.labels[Z]
print('accuracy: %f' % (float(n_corr) / n_total))
# print 'W0',self.W[10][:100]
if best_acc < float(n_corr) / n_total:
# If this round is better than before, save it.
best_acc = float(n_corr) / n_total
self.best_W = copy(self.W)
if self.best_W is None:
self.best_W = copy(self.W)
def extract_features(self, words, i, prev_tag=None, add=True):
'''
Extract features from words and prev POS tag, if `add` is True, also insert the feature
string to the feature_alphabet.
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
add: bool
If true, insert the feature to feature_alphabet.
Return
------
mapped_features: list(int)
The list of hashed features.
'''
L = len(words)
context = ['<s>' if i - 2 < 0 else words[i - 2],
'<s>' if i - 1 < 0 else words[i - 1],
words[i],
'<e>' if i + 1 >= L else words[i + 1],
'<e>' if i + 2 >= L else words[i + 2]]
raw_features = ['U1=%s' % context[0],
'U2=%s' % context[1],
'U3=%s' % context[2],
'U4=%s' % context[3],
'U5=%s' % context[4],
'U1,2=%s/%s' % (context[0], context[1]),
'U2,3=%s/%s' % (context[1], context[2]), # Your code here, extract the bigram raw feature,
'U3,4=%s/%s' % (context[2], context[3]), # Your code here, extract the bigram raw feature,
'U4,5=%s/%s' % (context[3], context[4]), # Your code here, extract the bigram raw feature,
]
if prev_tag is not None:
raw_features.append('B=%s' % prev_tag)
mapped_features = []
for f in raw_features:
if add and (f not in self.feature_alphabet):
# Your code here, insert the feature string to the feature_alphabet.
index = len(self.feature_alphabet)
self.feature_alphabet[f] = index
# Your code here, map the string feature to index.
# for item in self.feature_alphabet.values():
# mapped_features[self.feature_alphabet[item]] = 1
if f in self.feature_alphabet:
mapped_features.append(self.feature_alphabet[f])
return mapped_features
def _score(self, features, t):
'''
Calculate the score for the given features and label t
Parameters
----------
features: list(int)
The hashed features
t: int
The index of label
Return
------
s: int
The score
'''
# Your code here, compute the score.
s=0.0
for x in features:
s += self.W[t][x]
return s
def _predict(self, features):
'''
Calcuate score from the give
|
if value > best_score:
best_score = value
best_y = index
# print 'best:',best_score,best_y
# print max([math.fabs(sc - 10) for sc in pred_scores])
return best_y
def predict(self, words, i, prev_t
ag=None):
'''
Make prediction on list of words
Parameters
----------
words: list(str)
The words list
i: int
The position
prev_tag: str
Previous POS tag
Return
------
y: int
The predicted label's index
'''
X = self.extract_features(words, i, prev_tag, False)
y = self._predict(X)
return y
def greedy_search(words, classifier):
'''
Perform greedy search on the classifier.
Parameters
----------
words: list(str)
The word list
classifier: PerceptronClassifier
The classifier object.
'''
prev = '<s>'
ret = []
for i in range
|
n features and label t
Parameters
----------
features: list(int)
The hashed features
t: int
The index of label
Return
------
best_y: int
The highest scored label's index
'''
pred_scores = [self._score(features, y) for y in range(self.T)]
best_score, best_y = -1e5, -1
# Your code here, find the highest scored class from pred_scores
# best_score = pred_scores[0]
# best_y = 0
for index,value in enumerate(pred_scores):
|
identifier_body
|
contSpec.py
|
GexpFile'])
if par['verbose']:
print('(*) Initial Set up...', end="")
# Set up some internal variables
n = len(t)
ns = par['ns'] # discretization of 'tau'
tmin = t[0];
tmax = t[n-1];
# determine frequency window
if par['FreqEnd'] == 1:
smin = np.exp(-np.pi/2) * tmin; smax = np.exp(np.pi/2) * tmax
elif par['FreqEnd'] == 2:
smin = tmin; smax = tmax
elif par['FreqEnd'] == 3:
smin = np.exp(+np.pi/2) * tmin; smax = np.exp(-np.pi/2) * tmax
hs = (smax/smin)**(1./(ns-1))
s = smin * hs**np.arange(ns)
kernMat = getKernMat(s, t)
tic = time.time()
# get an initial guess for Hgs, G0
if par['plateau']:
Hgs, G0 = InitializeH(Gexp, wexp, s, kernMat, np.min(Gexp))
else:
Hgs = InitializeH(Gexp, wexp, s, kernMat)
if par['verbose']:
te = time.time() - tic
print('\t({0:.1f} seconds)\n(*) Building the L-curve ...'.format(te), end="")
tic = time.time()
# Find Optimum Lambda with 'lcurve'
if par['lamC'] == 0:
if par['plateau']:
lamC, lam, rho, eta, logP, Hlam = lcurve(Gexp, wexp, Hgs, kernMat, par, G0)
else:
lamC, lam, rho, eta, logP, Hlam = lcurve(Gexp, wexp, Hgs, kernMat, par)
else:
lamC = par['lamC']
if par['verbose']:
te = time.time() - tic
print('({1:.1f} seconds)\n(*) Extracting CRS, ...\n\t... lamC = {0:0.3e}; '.
format(lamC, te), end="")
tic = time.time()
# Get the best spectrum
if par['plateau']:
H, G0 = getH(lamC, Gexp, wexp, Hgs, kernMat, G0);
print('G0 = {0:0.3e} ...'.format(G0), end="")
else:
H = getH(lamC, Gexp, wexp, Hgs, kernMat);
#----------------------
# Print some datafiles
#----------------------
if par['verbose']:
te = time.time() - tic
print('done ({0:.1f} seconds)\n(*) Writing and Printing, ...'.format(te), end="")
# Save inferred G(t)
if par['plateau']:
K = kernel_prestore(H, kernMat, G0);
np.savetxt('output/H.dat', np.c_[s, H], fmt='%e', header='G0 = {0:0.3e}'.format(G0))
else:
K = kernel_prestore(H, kernMat);
np.savetxt('output/H.dat', np.c_[s, H], fmt='%e')
np.savetxt('output/Gfit.dat', np.c_[t, K], fmt='%e')
# print Hlam, rho-eta, and logP if lcurve has been visited
if par['lamC'] == 0:
if os.path.exists("output/Hlam.dat"):
os.remove("output/Hlam.dat")
fHlam = open('output/Hlam.dat','ab')
for i, lamb in enumerate(lam):
np.savetxt(fHlam, Hlam[:,i])
fHlam.close()
# print logP
np.savetxt('output/logPlam.dat', np.c_[lam, logP])
# print rho-eta
np.savetxt('output/rho-eta.dat', np.c_[lam, rho, eta], fmt='%e')
#------------
# Graphing
#------------
if par['plotting']:
# plot spectrum "H.pdf" with errorbars
plt.clf()
plt.semilogx(s,H,'o-')
plt.xlabel(r'$s$')
plt.ylabel(r'$H(s)$')
# error bounds are only available if lcurve has been implemented
if par['lamC'] == 0:
plam = np.exp(logP); plam = plam/np.sum(plam)
Hm = np.zeros(len(s))
Hm2 = np.zeros(len(s))
cnt = 0
for i in range(len(lam)):
#~ Hm += plam[i]*Hlam[:,i]
#~ Hm2 += plam[i]*Hlam[:,i]**2
# count all spectra within a threshold
if plam[i] > 0.1:
Hm += Hlam[:,i]
Hm2 += Hlam[:,i]**2
cnt += 1
Hm = Hm/cnt
dH = np.sqrt(Hm2/cnt - Hm**2)
plt.semilogx(s,Hm+2.5*dH, c='gray', alpha=0.5)
plt.semilogx(s,Hm-2.5*dH, c='gray', alpha=0.5)
plt.tight_layout()
plt.savefig('output/H.pdf')
#
# plot comparison with input spectrum
#
plt.clf()
if par['plateau']:
K = kernel_prestore(H, kernMat, G0);
else:
K = kernel_prestore(H, kernMat);
plt.loglog(t, Gexp,'o',t, K, 'k-')
plt.xlabel(r'$t$')
plt.ylabel(r'$G(t)$')
plt.tight_layout()
plt.savefig('output/Gfit.pdf')
#
# if lam not explicitly specified then print rho-eta.pdf
#
try:
lam
except NameError:
print("lamC prespecified, so not printing rho-eta.pdf/dat")
else:
plt.clf()
plt.scatter(rho, eta, marker='x')
plt.plot(rho, eta)
rhost = np.exp(np.interp(np.log(lamC), np.log(lam), np.log(rho)))
etast = np.exp(np.interp(np.log(lamC), np.log(lam), np.log(eta)))
plt.plot(rhost, etast, 'o', color='k')
plt.xscale('log')
plt.yscale('log')
#~ print(rhost, etast)
plt.xlabel(r'$\rho$')
plt.ylabel(r'$\eta$')
plt.tight_layout()
plt.savefig('output/rho-eta.pdf')
if par['verbose']:
print('done\n(*) End\n')
return H, lamC
def guiFurnishGlobals(par):
"""Furnish Globals to accelerate interactive plot in jupyter notebooks"""
# plot settings
from matplotlib import rcParams
rcParams['axes.labelsize'] = 14
rcParams['xtick.labelsize'] = 12
rcParams['ytick.labelsize'] = 12
rcParams['legend.fontsize'] = 12
rcParams['lines.linewidth'] = 2
# experimental data
t, Gexp, wG = GetExpData(par['GexpFile'])
n = len(t)
ns = par['ns'] # discretization of 'tau'
tmin = t[0];
tmax = t[n-1];
# determine frequency window
if par['FreqEnd'] == 1:
smin = np.exp(-np.pi/2) * tmin; smax = np.exp(np.pi/2) * tmax
elif par['FreqEnd'] == 2:
smin = tmin; smax = tmax
elif par['FreqEnd'] == 3:
smin = np.exp(+np.pi/2) * tmin; smax = np.exp(-np.pi/2) * tmax
hs = (smax/smin)**(1./(ns-1))
s = smin * hs**np.arange(ns)
kernMat = getKernMat(s,t)
# toggle flags to prevent printing
par['verbose'] = False
par['plotting'] = False
# load lamda, rho, eta
lam, rho, eta = np.loadtxt('output/rho-eta.dat', unpack=True)
plt.clf()
return s, t, kernMat, Gexp, par, lam, rho, eta
#
# Main Driver: This part is not run when contSpec.py is imported as a module
# For example as part of GUI
#
if __name__ == '__main__':
#
|
# Read input parameters from file "inp.dat"
|
random_line_split
|
|
contSpec.py
|
len(Gexp)
ns = len(H)
nl = ns - 2
logPmax = -np.inf # so nothing surprises me!
Hlambda = np.zeros((ns, npoints))
# Error Analysis: Furnish A_matrix
Amat = getAmatrix(len(H))
_, LogDetN = np.linalg.slogdet(Amat)
#
# This is the costliest step
#
for i in reversed(range(len(lam))):
lamb = lam[i]
if par['plateau']:
H, G0 = getH(lamb, Gexp, wexp, H, kernMat, G0)
# rho[i] = np.linalg.norm((1. - kernel_prestore(H, kernMat, G0)/Gexp))
rho[i] = np.linalg.norm(wexp*(1. - kernel_prestore(H, kernMat, G0)/Gexp))
Bmat = getBmatrix(H, kernMat, Gexp, wexp, G0)
else:
H = getH(lamb, Gexp, wexp, H, kernMat)
# rho[i] = np.linalg.norm((1. - kernel_prestore(H,kernMat)/Gexp))
rho[i] = np.linalg.norm(wexp*(1. - kernel_prestore(H, kernMat)/Gexp))
Bmat = getBmatrix(H, kernMat, Gexp, wexp)
eta[i] = np.linalg.norm(np.diff(H, n=2))
Hlambda[:,i] = H
_, LogDetC = np.linalg.slogdet(lamb*Amat + Bmat)
V = rho[i]**2 + lamb * eta[i]**2
# this assumes a prior exp(-lam)
logP[i] = -V + 0.5 * (LogDetN + ns*np.log(lamb) - LogDetC) - lamb
if(logP[i] > logPmax):
logPmax = logP[i]
elif(logP[i] < logPmax - 18):
break
# truncate all to significant lambda
lam = lam[i:]
logP = logP[i:]
eta = eta[i:]
rho = rho[i:]
logP = logP - max(logP)
Hlambda = Hlambda[:,i:]
#
# currently using both schemes to get optimal lamC
# new lamM works better with actual experimental data
#
# lamC = oldLamC(par, lam, rho, eta)
plam = np.exp(logP); plam = plam/np.sum(plam)
lamM = np.exp(np.sum(plam*np.log(lam)))
#
# Dialling in the Smoothness Factor
#
if par['SmFacLam'] > 0:
lamM = np.exp(np.log(lamM) + par['SmFacLam']*(max(np.log(lam)) - np.log(lamM)));
elif par['SmFacLam'] < 0:
lamM = np.exp(np.log(lamM) + par['SmFacLam']*(np.log(lamM) - min(np.log(lam))));
#
# printing this here for now because storing lamC for sometime only
#
if par['plotting']:
plt.clf()
# plt.axvline(x=lamC, c='k', label=r'$\lambda_c$')
plt.axvline(x=lamM, c='gray', label=r'$\lambda_m$')
plt.ylim(-20,1)
plt.plot(lam, logP, 'o-')
plt.xscale('log')
plt.xlabel(r'$\lambda$')
plt.ylabel(r'$\log\,p(\lambda)$')
plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig('output/logP.pdf')
return lamM, lam, rho, eta, logP, Hlambda
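# --- Illustrative sketch (not part of the original file) --------------------
# The loop above scores each candidate lambda with a Laplace-style evidence,
#   logP(lam) = -V(lam) + 0.5*(log|A| + ns*log(lam) - log|lam*A + B|) - lam,
# and lamM is then the posterior-weighted geometric mean of lambda. A minimal
# standalone version of that averaging step (the function name is made up):
import numpy as np
def _sketch_posterior_mean_lambda(lam, logP):
    p = np.exp(logP - np.max(logP))    # stabilise before normalising
    p = p / np.sum(p)
    return np.exp(np.sum(p * np.log(lam)))
# Example with made-up numbers: the result hugs the lambda with the largest logP.
# _sketch_posterior_mean_lambda(np.logspace(-3, 1, 5),
#                               np.array([-40., -10., -1., -8., -30.]))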
def getH(lam, Gexp, wexp, H, kernMat, *argv):
"""Purpose: Given a lambda, this function finds the H_lambda(s) that minimizes V(lambda)
V(lambda) := ||(Gexp - kernel(H)) * (wexp/Gexp)||^2 + lambda * ||L H||^2
Input : lambda = regularization parameter,
Gexp = experimental data,
wexp = weighting factors,
H = guessed H,
kernMat = matrix for faster kernel evaluation
G0 = optional
Output : H_lam, [G0]
Default uses Trust-Region Method with Jacobian supplied by jacobianLM
"""
# send Hplus = [H, G0], on return unpack H and G0
if len(argv) > 0:
Hplus= np.append(H, argv[0])
res_lsq = least_squares(residualLM, Hplus, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
return res_lsq.x[:-1], res_lsq.x[-1]
# send normal H, and collect optimized H back
else:
res_lsq = least_squares(residualLM, H, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
return res_lsq.x
def residualLM(H, lam, Gexp, wexp, kernMat):
"""
%
% HELPER FUNCTION: Gets Residuals r
Input : H = guessed H,
lambda = regularization parameter ,
Gexp = experimental data,
wexp = weighting factors,
kernMat = matrix for faster kernel evaluation
G0 = plateau
Output : a set of n+nl residuals,
the first n correspond to the kernel
the last nl correspond to the smoothness criterion
%"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
nl = ns - 2;
r = np.zeros(n + nl);
# if plateau then unfurl G0
if len(H) > ns:
G0 = H[-1]
H = H[:-1]
# r[0:n] = (1. - kernel_prestore(H, kernMat, G0)/Gexp) # the Gt and
r[0:n] = wexp * (1. - kernel_prestore(H, kernMat, G0)/Gexp) # the Gt and
else:
# r[0:n] = (1. - kernel_prestore(H, kernMat)/Gexp)
r[0:n] = wexp * (1. - kernel_prestore(H, kernMat)/Gexp)
# the curvature constraint is not affected by G0
r[n:n+nl] = np.sqrt(lam) * np.diff(H, n=2) # second derivative
return r
def jacobianLM(H, lam, Gexp, wexp, kernMat):
"""
HELPER FUNCTION for optimization: Get Jacobian J
returns a (n+nl * ns) matrix Jr; (ns + 1) if G0 is also supplied.
Jr_(i, j) = dr_i/dH_j
It uses kernelD, which approximates dK_i/dH_j, where K is the kernel
"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
nl = ns - 2;
# L is a ns*ns tridiagonal matrix with 1 -2 and 1 on its diagonal;
L = np.diag(np.ones(ns-1), 1) + np.diag(np.ones(ns-1),-1) + np.diag(-2. * np.ones(ns))
L = L[1:nl+1,:]
# Furnish the Jacobian Jr (n+ns)*ns matrix
# Kmatrix = np.dot((1./Gexp).reshape(n,1), np.ones((1,ns)));
Kmatrix = np.dot((wexp/Gexp).reshape(n,1), np.ones((1,ns)));
if len(H) > ns:
G0 = H[-1]
H = H[:-1]
Jr = np.zeros((n + nl, ns+1))
Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
# Jr[0:n, ns] = -1./Gexp # column for dr_i/dG0
Jr[0:n, ns] = -wexp/Gexp # column for dr_i/dG0
Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
Jr[n:n+nl, ns] = np.zeros(nl) # column for dr_i/dG0 = 0
else:
Jr = np.zeros((n + nl, ns))
Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
return Jr
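# --- Illustrative sketch (not part of the original file) --------------------
# residualLM()/jacobianLM() stack the weighted data misfit and the
# sqrt(lam)-scaled second-difference penalty into one residual vector, so
# least_squares on r minimises the Tikhonov-style objective
#   V(lam) = ||wexp*(1 - K(H)/Gexp)||^2 + lam*||diff(H, n=2)||^2.
# Toy check with a stand-in linear kernel K(H) = M @ exp(H); M, Htrue and the
# helper name below are made up for illustration:
import numpy as np
from scipy.optimize import least_squares
_M = np.random.default_rng(0).uniform(0.5, 1.5, size=(6, 4))
_Htrue = np.array([0.1, -0.2, 0.3, 0.0])
_Gexp = _M @ np.exp(_Htrue)
_lam = 1e-2
def _sketch_residual(H):
    data = 1.0 - (_M @ np.exp(H)) / _Gexp          # wexp taken as all ones
    smooth = np.sqrt(_lam) * np.diff(H, n=2)
    return np.concatenate([data, smooth])
# least_squares(_sketch_residual, np.zeros(4)).x approximately recovers _Htrue.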
def
|
kernelD
|
identifier_name
|
|
contSpec.py
|
this here for now because storing lamC for sometime only
#
if par['plotting']:
plt.clf()
# plt.axvline(x=lamC, c='k', label=r'$\lambda_c$')
plt.axvline(x=lamM, c='gray', label=r'$\lambda_m$')
plt.ylim(-20,1)
plt.plot(lam, logP, 'o-')
plt.xscale('log')
plt.xlabel(r'$\lambda$')
plt.ylabel(r'$\log\,p(\lambda)$')
plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig('output/logP.pdf')
return lamM, lam, rho, eta, logP, Hlambda
def getH(lam, Gexp, wexp, H, kernMat, *argv):
"""Purpose: Given a lambda, this function finds the H_lambda(s) that minimizes V(lambda)
V(lambda) := ||(Gexp - kernel(H)) * (wexp/Gexp)||^2 + lambda * ||L H||^2
Input : lambda = regularization parameter,
Gexp = experimental data,
wexp = weighting factors,
H = guessed H,
kernMat = matrix for faster kernel evaluation
G0 = optional
Output : H_lam, [G0]
Default uses Trust-Region Method with Jacobian supplied by jacobianLM
"""
# send Hplus = [H, G0], on return unpack H and G0
if len(argv) > 0:
Hplus= np.append(H, argv[0])
res_lsq = least_squares(residualLM, Hplus, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
return res_lsq.x[:-1], res_lsq.x[-1]
# send normal H, and collect optimized H back
else:
res_lsq = least_squares(residualLM, H, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
return res_lsq.x
def residualLM(H, lam, Gexp, wexp, kernMat):
"""
%
% HELPER FUNCTION: Gets Residuals r
Input : H = guessed H,
lambda = regularization parameter ,
Gexp = experimental data,
wexp = weighting factors,
kernMat = matrix for faster kernel evaluation
G0 = plateau
Output : a set of n+nl residuals,
the first n correspond to the kernel
the last nl correspond to the smoothness criterion
%"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
nl = ns - 2;
r = np.zeros(n + nl);
# if plateau then unfurl G0
if len(H) > ns:
G0 = H[-1]
H = H[:-1]
# r[0:n] = (1. - kernel_prestore(H, kernMat, G0)/Gexp) # the Gt and
r[0:n] = wexp * (1. - kernel_prestore(H, kernMat, G0)/Gexp) # the Gt and
else:
# r[0:n] = (1. - kernel_prestore(H, kernMat)/Gexp)
r[0:n] = wexp * (1. - kernel_prestore(H, kernMat)/Gexp)
# the curvature constraint is not affected by G0
r[n:n+nl] = np.sqrt(lam) * np.diff(H, n=2) # second derivative
return r
def jacobianLM(H, lam, Gexp, wexp, kernMat):
"""
HELPER FUNCTION for optimization: Get Jacobian J
returns a (n+nl * ns) matrix Jr; (ns + 1) if G0 is also supplied.
Jr_(i, j) = dr_i/dH_j
It uses kernelD, which approximates dK_i/dH_j, where K is the kernel
"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
nl = ns - 2;
# L is a ns*ns tridiagonal matrix with 1 -2 and 1 on its diagonal;
L = np.diag(np.ones(ns-1), 1) + np.diag(np.ones(ns-1),-1) + np.diag(-2. * np.ones(ns))
L = L[1:nl+1,:]
# Furnish the Jacobian Jr (n+ns)*ns matrix
# Kmatrix = np.dot((1./Gexp).reshape(n,1), np.ones((1,ns)));
Kmatrix = np.dot((wexp/Gexp).reshape(n,1), np.ones((1,ns)));
if len(H) > ns:
G0 = H[-1]
H = H[:-1]
Jr = np.zeros((n + nl, ns+1))
Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
# Jr[0:n, ns] = -1./Gexp # column for dr_i/dG0
Jr[0:n, ns] = -wexp/Gexp # column for dr_i/dG0
Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
Jr[n:n+nl, ns] = np.zeros(nl) # column for dr_i/dG0 = 0
else:
Jr = np.zeros((n + nl, ns))
Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
return Jr
def kernelD(H, kernMat):
"""
Function: kernelD(input)
outputs the (n*ns) dimensional matrix DK(H)(t)
It approximates dK_i/dH_j = K * e(H_j):
Input: H = substituted CRS,
kernMat = matrix for faster kernel evaluation
Output: DK = Jacobian of H
"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
# A n*ns matrix with all the rows = H'
Hsuper = np.dot(np.ones((n,1)), np.exp(H).reshape(1, ns))
DK = kernMat * Hsuper
return DK
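# --- Illustrative check (not part of the original file) ---------------------
# Because the kernel acts on exp(H), dK_i/dH_j = kernMat[i, j]*exp(H[j]), which
# is exactly what kernelD() builds. Finite-difference check against a stand-in
# kernel K(H) = kernMat @ exp(H); the small matrix below is made up:
import numpy as np
_kernMat = np.arange(1.0, 13.0).reshape(4, 3)
_H = np.array([0.2, -0.1, 0.4])
_DK = _kernMat * np.exp(_H)                        # same product as in kernelD
_eps = 1e-6
_fd = np.column_stack([
    (_kernMat @ np.exp(_H + _eps * np.eye(3)[j]) - _kernMat @ np.exp(_H)) / _eps
    for j in range(3)])
# np.max(np.abs(_fd - _DK)) stays within a few times _eps, confirming the Jacobian.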
def getContSpec(par):
"""
This is the main driver routine for computing the continuous spectrum
(*) input : "par" dictionary from "inp.dat" which specifies GexpFile (often 'Gt.dat')
(*) return : H and lambdaC; the latter can be used to directly specify lambdaC as desired
without having to do the entire lcurve calculation again
"""
# read input
if par['verbose']:
print('\n(*) Start\n(*) Loading Data File: {}...'.format(par['GexpFile']))
# t, Gexp = GetExpData(par['GexpFile'])
t, Gexp, wexp = GetExpData(par['GexpFile'])
if par['verbose']:
print('(*) Initial Set up...', end="")
# Set up some internal variables
n = len(t)
ns = par['ns'] # discretization of 'tau'
tmin = t[0];
tmax = t[n-1];
# determine frequency window
if par['FreqEnd'] == 1:
smin = np.exp(-np.pi/2) * tmin; smax = np.exp(np.pi/2) * tmax
elif par['FreqEnd'] == 2:
smin = tmin; smax = tmax
elif par['FreqEnd'] == 3:
smin = np.exp(+np.pi/2) * tmin; smax = np.exp(-np.pi/2) * tmax
hs = (smax/smin)**(1./(ns-1))
s = smin * hs**np.arange(ns)
kernMat = getKernMat(s, t)
tic = time.time()
# get an initial guess for Hgs, G0
if par['plateau']:
Hgs, G0 = InitializeH(Gexp, wexp, s, kernMat, np.min(Gexp))
else:
Hgs = InitializeH(Gexp, wexp, s, kernMat)
if par['verbose']:
te = time.time() - tic
print('\t({0:.1f} seconds)\n(*) Building the L-curve ...'.format(te), end="")
tic = time.time()
# Find Optimum Lambda with 'lcurve'
if par['lamC'] == 0:
if par['plateau']:
|
lamC, lam, rho, eta, logP, Hlam = lcurve(Gexp, wexp, Hgs, kernMat, par, G0)
|
conditional_block
|
|
contSpec.py
|
0]
npoints = int(par['lamDensity'] * (np.log10(par['lam_max']) - np.log10(par['lam_min'])))
hlam = (par['lam_max']/par['lam_min'])**(1./(npoints-1.))
lam = par['lam_min'] * hlam**np.arange(npoints)
eta = np.zeros(npoints)
rho = np.zeros(npoints)
logP = np.zeros(npoints)
H = Hgs.copy()
n = len(Gexp)
ns = len(H)
nl = ns - 2
logPmax = -np.inf # so nothing surprises me!
Hlambda = np.zeros((ns, npoints))
# Error Analysis: Furnish A_matrix
Amat = getAmatrix(len(H))
_, LogDetN = np.linalg.slogdet(Amat)
#
# This is the costliest step
#
for i in reversed(range(len(lam))):
lamb = lam[i]
if par['plateau']:
H, G0 = getH(lamb, Gexp, wexp, H, kernMat, G0)
# rho[i] = np.linalg.norm((1. - kernel_prestore(H, kernMat, G0)/Gexp))
rho[i] = np.linalg.norm(wexp*(1. - kernel_prestore(H, kernMat, G0)/Gexp))
Bmat = getBmatrix(H, kernMat, Gexp, wexp, G0)
else:
H = getH(lamb, Gexp, wexp, H, kernMat)
# rho[i] = np.linalg.norm((1. - kernel_prestore(H,kernMat)/Gexp))
rho[i] = np.linalg.norm(wexp*(1. - kernel_prestore(H, kernMat)/Gexp))
Bmat = getBmatrix(H, kernMat, Gexp, wexp)
eta[i] = np.linalg.norm(np.diff(H, n=2))
Hlambda[:,i] = H
_, LogDetC = np.linalg.slogdet(lamb*Amat + Bmat)
V = rho[i]**2 + lamb * eta[i]**2
# this assumes a prior exp(-lam)
logP[i] = -V + 0.5 * (LogDetN + ns*np.log(lamb) - LogDetC) - lamb
if(logP[i] > logPmax):
logPmax = logP[i]
elif(logP[i] < logPmax - 18):
break
# truncate all to significant lambda
lam = lam[i:]
logP = logP[i:]
eta = eta[i:]
rho = rho[i:]
logP = logP - max(logP)
Hlambda = Hlambda[:,i:]
#
# currently using both schemes to get optimal lamC
# new lamM works better with actual experimental data
#
# lamC = oldLamC(par, lam, rho, eta)
plam = np.exp(logP); plam = plam/np.sum(plam)
lamM = np.exp(np.sum(plam*np.log(lam)))
#
# Dialling in the Smoothness Factor
#
if par['SmFacLam'] > 0:
lamM = np.exp(np.log(lamM) + par['SmFacLam']*(max(np.log(lam)) - np.log(lamM)));
elif par['SmFacLam'] < 0:
lamM = np.exp(np.log(lamM) + par['SmFacLam']*(np.log(lamM) - min(np.log(lam))));
#
# printing this here for now because storing lamC for sometime only
#
if par['plotting']:
plt.clf()
# plt.axvline(x=lamC, c='k', label=r'$\lambda_c$')
plt.axvline(x=lamM, c='gray', label=r'$\lambda_m$')
plt.ylim(-20,1)
plt.plot(lam, logP, 'o-')
plt.xscale('log')
plt.xlabel(r'$\lambda$')
plt.ylabel(r'$\log\,p(\lambda)$')
plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig('output/logP.pdf')
return lamM, lam, rho, eta, logP, Hlambda
def getH(lam, Gexp, wexp, H, kernMat, *argv):
"""Purpose: Given a lambda, this function finds the H_lambda(s) that minimizes V(lambda)
V(lambda) := ||(Gexp - kernel(H)) * (wexp/Gexp)||^2 + lambda * ||L H||^2
Input : lambda = regularization parameter,
Gexp = experimental data,
wexp = weighting factors,
H = guessed H,
kernMat = matrix for faster kernel evaluation
G0 = optional
Output : H_lam, [G0]
Default uses Trust-Region Method with Jacobian supplied by jacobianLM
"""
# send Hplus = [H, G0], on return unpack H and G0
if len(argv) > 0:
Hplus= np.append(H, argv[0])
res_lsq = least_squares(residualLM, Hplus, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
return res_lsq.x[:-1], res_lsq.x[-1]
# send normal H, and collect optimized H back
else:
res_lsq = least_squares(residualLM, H, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
return res_lsq.x
def residualLM(H, lam, Gexp, wexp, kernMat):
"""
%
% HELPER FUNCTION: Gets Residuals r
Input : H = guessed H,
lambda = regularization parameter ,
Gexp = experimental data,
wexp = weighting factors,
kernMat = matrix for faster kernel evaluation
G0 = plateau
Output : a set of n+nl residuals,
the first n correspond to the kernel
the last nl correspond to the smoothness criterion
%"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
nl = ns - 2;
r = np.zeros(n + nl);
# if plateau then unfurl G0
if len(H) > ns:
G0 = H[-1]
H = H[:-1]
# r[0:n] = (1. - kernel_prestore(H, kernMat, G0)/Gexp) # the Gt and
r[0:n] = wexp * (1. - kernel_prestore(H, kernMat, G0)/Gexp) # the Gt and
else:
# r[0:n] = (1. - kernel_prestore(H, kernMat)/Gexp)
r[0:n] = wexp * (1. - kernel_prestore(H, kernMat)/Gexp)
# the curvature constraint is not affected by G0
r[n:n+nl] = np.sqrt(lam) * np.diff(H, n=2) # second derivative
return r
def jacobianLM(H, lam, Gexp, wexp, kernMat):
|
Kmatrix = np.dot((wexp/Gexp).reshape(n,1), np.ones((1,ns)));
if len(H) > ns:
G0 = H[-1]
H = H[:-1]
Jr = np.zeros((n + nl, ns+1))
Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
# Jr[0:n, ns] = -1./Gexp # column for dr_i/dG0
Jr[0:n, ns] = -wexp/Gexp # column for dr_i/dG0
Jr[n
|
"""
HELPER FUNCTION for optimization: Get Jacobian J
returns a (n+nl * ns) matrix Jr; (ns + 1) if G0 is also supplied.
Jr_(i, j) = dr_i/dH_j
It uses kernelD, which approximates dK_i/dH_j, where K is the kernel
"""
n = kernMat.shape[0];
ns = kernMat.shape[1];
nl = ns - 2;
# L is a ns*ns tridiagonal matrix with 1 -2 and 1 on its diagonal;
L = np.diag(np.ones(ns-1), 1) + np.diag(np.ones(ns-1),-1) + np.diag(-2. * np.ones(ns))
L = L[1:nl+1,:]
# Furnish the Jacobian Jr (n+ns)*ns matrix
# Kmatrix = np.dot((1./Gexp).reshape(n,1), np.ones((1,ns)));
|
identifier_body
|
cipher_functions.py
|
def swap_cards(deck_of_crds, index):
'''
(list of int, int) -> NoneType
REQ: len(d_of_crds) >= 1
REQ: 0 < index < len(d_of_crds)
>>> swap_cards([1,2,4,6], 2)
[1,2,6,4]
>>> swap_cards([1,2,4,5,6,7,8,9,10,12,21], 10)
[21, 2, 4, 5, 6, 7, 8, 9, 10, 12, 1]
Return the swap of the card at the given index with the card that
follows it.
If the card at the given index is on the bottom of the deck, swap
that card with the top card.
'''
# check if the given index is the last index in the deck.
if (index == (len(deck_of_crds) -1)):
# Find the value at first and last index
last_value = deck_of_crds.pop(index)
first_value = deck_of_crds.pop(0)
# Swap first and last index values
deck_of_crds.insert(0, last_value)
deck_of_crds.insert(index, first_value)
else:
# Swap any value with the next index value
any_value = deck_of_crds.pop(index)
deck_of_crds.insert(index+1, any_value)
def move_joker_1(deck_of_crds):
'''
(list of int) -> NoneType
REQ: JOKER1 in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> move_joker_1([1,2,4,27,6])
[1,2,4,6,27]
>>> move_joker_1([1,2,4,6,27])
[27,2,4,6,1]
>>> move_joker_1([2,27])
[27,2]
Return the swap of JOKER1 with the card that follows it.
If JOKER1 is on the bottom of the deck, swap JOKER1 with the top card.
'''
if (JOKER1 in deck_of_crds):
# Find what index JOKER1 lies in deck_of_crds
joker_1_index = deck_of_crds.index(JOKER1)
# Using swap_cards, change JOKER1's position to the next card.
swap_cards(deck_of_crds, joker_1_index)
def move_joker_2(deck_of_crds):
'''
(list of int) -> NoneType
REQ: JOKER2 in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> move_joker_2([1,2,4,28,6])
[28,2,4,6,1]
>>> move_joker_2([28,2,4,6,1])
[2,4,28,6,1]
>>> move_joker_2([2,4,28,6,1])
[2,4,6,1,28]
>>> move_joker_2([28,2])
[28,2]
Return the swap of JOKER2, after it has moved two cards down. If
JOKER2 is at the bottom of the deck, then swap it with the top card.
'''
# Find JOKER2's index in deck_of_crds
joker_2_index = deck_of_crds.index(JOKER2)
# Using swap_cards function, swap JOKER2's position with next card
swap_cards(deck_of_crds, joker_2_index)
# Store the first swapped list in a new variable
after_frst_swap = deck_of_crds
# Find JOKER2's index in deck_of_crds, in order to swap it once again.
joker_2_position_2 = deck_of_crds.index(JOKER2)
# Swap the already swapped list, once more in order to move JOKER2
# one more time.
swap_cards(after_frst_swap, joker_2_position_2)
def triple_cut(deck_of_crds):
'''
(list of int) -> NoneType
REQ: Both JOKER1 and JOKER2 are in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> triple_cut([28,27])
[28,27]
>>> triple_cut([28,2,4,6,27])
[28, 2, 4, 6, 27]
>>> triple_cut([2,3,4,28,27,6,5])
[6, 5, 28, 27, 2, 3, 4]
>>> triple_cut([28,27,2,3])
[2,3,28,27]
>>> triple_cut([2,3,4,28,27])
[28,27,2,3,4]
>>> triple_cut([3,4,5,27,8,11,28,13,15])
[13, 15, 27, 8, 11, 28, 3, 4, 5]
>>> triple_cut([27,1,2,3,28,6])
[6, 27, 1, 2, 3, 28]
>>> triple_cut([1,27,3,4,28,9,10])
[9, 10, 27, 3, 4, 28, 1]
Return the list in which everything above the first joker goes at the
bottom of deck of cards, and everything below the second goes to the top.
'''
# Find at which index JOKER1 and JOKER2 lie, in deck of cards
index_joker_1 = deck_of_crds.index(JOKER1)
index_joker_2 = deck_of_crds.index(JOKER2)
counter = 0
# Check if JOKER1 comes before JOKER2
if (index_joker_1 < index_joker_2):
# if True then go through all the index values before JOKER1's idx
while(counter < index_joker_1):
# Store the values before JOKER1
values_bfr_JOKER1 = deck_of_crds.pop(0)
# Insert the values before JOKER1, after JOKER2
deck_of_crds.insert(index_joker_2, values_bfr_JOKER1)
counter +=1
# Find the last index number and store it
last_index_vlue = (len(deck_of_crds) -1)
# Check if JOKER2 occurs at the bottom of the deck of cards
while (index_joker_2 < last_index_vlue):
# If True, then store all the index values before JOKER1
values_bfr_JOKER1 = deck_of_crds.pop()
# Insert all the values before JOKER1, to the top of the card
deck_of_crds.insert(0, values_bfr_JOKER1)
last_index_vlue -= 1
# If JOKER2 occurs before JOKER1
elif (index_joker_1 > index_joker_2):
counter =0
# If true, then go through all the index values before JOKER2's idx
while(counter < index_joker_2):
# store all values before JOKER1.
values_bfr_JOKER1 = deck_of_crds.pop(0)
# Insert those values before JOKER1
deck_of_crds.insert(index_joker_1, values_bfr_JOKER1)
counter +=1
# Find the last index number and store it
last_idx_vlue = (len(deck_of_crds) -1)
while (index_joker_1 < last_idx_vlue):
# Store the values before JOKER1 and place them at top of deck.
values_bfr_JOKER1 = deck_of_crds.pop()
deck_of_crds.insert(0, values_bfr_JOKER1)
last_idx_vlue -= 1
def insert_top_to_bottom(deck_of_crds):
'''(list of int) -> NoneType
REQ: len(deck_of_crds) > 0
REQ: len(deck_of_crds) >= len(of the last card in the deck)
>>> insert_top_to_bottom([1,2,3,4])
[1,2,3,
|
decripted_vlue = chr(decripted_vlue + 65)
return decripted_vlue
|
random_line_split
|
|
cipher_functions.py
|
_joker_1 < last_idx_vlue):
# Store the values before JOKER1 and place them at top of deck.
values_bfr_JOKER1 = deck_of_crds.pop()
deck_of_crds.insert(0, values_bfr_JOKER1)
last_idx_vlue -= 1
def insert_top_to_bottom(deck_of_crds):
'''(list of int) -> NoneType
REQ: len(deck_of_crds) > 0
REQ: len(deck_of_crds) >= len(of the last card in the deck)
>>> insert_top_to_bottom([1,2,3,4])
[1,2,3,4]
>>> insert_top_to_bottom([23,26,27,2,3])
[2,23,26,27,3]
Look at the bottom card of the deck, move that many cards from top of
deck to the bottom, inserting them just above the bottom card. If the
bottom card is JOKER2, use JOKER1 as the number of cards.
'''
index = len(deck_of_crds)
value = deck_of_crds[index -1]
# don't change the deck if the last card's value is greater than
# or equal to the length of the deck
if (value >= index):
pass # dont change anything
# if value is equal to JOKER2, then use JOKER1 value
elif (value == JOKER2):
value = JOKER1
counter = 0
# loop through deck of cards and find
while counter < value:
cards_to_move = deck_of_crds.pop(0)
deck_of_crds.insert(value, cards_to_move)
counter+=1
# IF the last card is not JOKER2, then:
else:
counter = 0
while counter < value:
# Find all the cards that need to be moved from top to bottom.
cards_to_move = deck_of_crds.pop(0)
# Insert those cards into the given index
deck_of_crds.insert(len(deck_of_crds)-1, cards_to_move)
counter+=1
def get_card_at_top_index(deck_of_crds):
'''(list of int) -> int
REQ: len(deck_of_crds) > 0
REQ:
>>> get_card_at_top_index ([1,2,3,4])
2
>>> get_card_at_top_index([1,2,3,23,24,26])
2
>>> get_card_at_top_index([2,3,23,24,26])
23
Using the top card value as an index, return the card in that deck
at that index. If the top card is JOKER2, use JOKER1
as the index.
'''
# find what lies at the first index
first_index = deck_of_crds[0]
# if that value is JOKER2, then:
if (first_index == JOKER2):
    # if JOKER1 is greater than the length of the deck, don't return anything
    if (JOKER1 >= len(deck_of_crds)):
        return
    # otherwise, use JOKER1 as the index, as the docstring describes
    return deck_of_crds[JOKER1]
# If the first value is greater than length of deck, then dont do anything
elif(first_index >= len(deck_of_crds)):
return
# Else, then return the card at the index.
else:
return deck_of_crds[first_index]
def get_next_value(deck_of_crds):
'''(list of int) -> int
REQ: len(deck_of_crds) > 0
REQ: File contains JOKER1 and JOKER2
>>> get_next_value([1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 3, 6, 9,
12, 15, 18, 21, 24, 27, 2, 5, 8, 11, 14, 17, 20, 23, 26])
11
Return the next potential keystream value.
'''
# Call all the functions
move_joker_1(deck_of_crds)
move_joker_2(deck_of_crds)
triple_cut(deck_of_crds)
insert_top_to_bottom(deck_of_crds)
next_keystream_value = get_card_at_top_index(deck_of_crds)
return next_keystream_value
def get_next_keystream_value(deck_of_crds):
'''(list of int) -> int
REQ: len(deck_of_crds) > 0
>>> get_next_keystream_value([1 ,4 ,7 ,10 ,13 ,16 ,19 ,22 ,25 ,28 ,3, 6,
9 ,12 ,15 ,18 ,21 ,24 ,27, 2, 5, 8 ,11 ,14 ,17 ,20 ,23, 26])
11
'''
# RERUN ALL 5 STEPS BY CALLING get_next_value
next_keystream_value = get_next_value(deck_of_crds)
value_is_joker = True
# Run if there is a joker found
while (value_is_joker == True):
# Get a new keystream value
if (next_keystream_value == JOKER1 or next_keystream_value == JOKER2):
next_keystream_value = get_next_keystream_value(deck_of_crds)
return (next_keystream_value)
else:
# If no joker found, then return value
value_is_joker = False
return next_keystream_value
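# --- Illustrative sketch (not part of the original file) --------------------
# Each keystream value shifts one uppercase letter. The real encrypt_letter/
# decrypt_letter are not shown in this excerpt; a plausible pair, assuming a
# simple mod-26 shift over 'A'..'Z', would be:
def _sketch_encrypt_letter(letter, keystream_value):
    return chr((ord(letter) - 65 + keystream_value) % 26 + 65)
def _sketch_decrypt_letter(letter, keystream_value):
    return chr((ord(letter) - 65 - keystream_value) % 26 + 65)
# Round trip with a hypothetical keystream value of 11:
# _sketch_decrypt_letter(_sketch_encrypt_letter('H', 11), 11) == 'H'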
def process_message(deck_of_crds, message, enc_or_dec):
'''(list of int, str, str) -> str
REQ: len(deck_of_crds) > 0
REQ: len of message > 0
REQ: enc_or_dec == 'e' or 'd'
'''
clnd_message = clean_message(message)
holder = ""
# Generate a new keystream value for each character of the cleaned message
for letter in clnd_message:
    # Store the keystream value
    key_stream_value = get_next_keystream_value(deck_of_crds)
    # Check if the user wants to encrypt
    if (enc_or_dec == 'e'):
        # Append the encrypted character
        holder += encrypt_letter(letter, key_stream_value)
    # If the user wants to decrypt
    elif (enc_or_dec == 'd'):
        # Append the decrypted character
        holder += decrypt_letter(letter, key_stream_value)
return holder
def process_messages(deck_of_crds, list_of_messages, enc_or_dec):
'''
REQ: len(deck_of_crds) > 0
REQ: len(list_of_messages) > 0
REQ: enc_or_dec must be strictly equal to 'e' or 'd'
>>> process_messages([1,2,3,4,5,27,28],['Hello','134405584'],'e')
'''
list_of_str = list()
for words in list_of_messages:
list_of_messages = process_message(deck_of_crds, words,
enc_or_dec)
# Add all the encripted or decripted values in one list by appending
list_of_str.append(list_of_messages)
return list_of_str
def read_messages(file):
'''(file open for reading) -> list of str
REQ: One line holds only a single message
REQ: len(file) > 0
Return contents of file as a list of messages, which is a list of str.
This is done by stripping the newline from each line.
'''
list_of_str = list()
hold_changed_file = ''
# Read all the lines in the file
file = file.readlines()
for words in file:
# Remove all the spaces in the file, to read the line as a single str
hold_changed_file = words.strip('\n')
# Store all the single str's into a list of strings.
list_of_str.append(hold_changed_file)
return list_of_str
def read_deck(file):
|
'''(file open for reading) -> list of int
REQ: len(file) > 0
REQ: file must contain at least 1 line, and must all be int
Read and return the contents of the file as a list of int.
'''
counter = 0
# create a empty list
list_of_str = list()
# Read through all the lines in the file
file = file.readlines()
for numbers in file:
# Split all the numbers at the spaces and split them into a list of str
list_of_str += numbers.split()
# hold the list_of_str
hold = list_of_str
# Change the list of str to list of int and return
for counter in range(len(hold)):
hold[counter] = int(hold[counter])
return hold
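# --- Illustrative usage sketch (not part of the original file) --------------
# read_messages() and read_deck() take already-open file objects, so an
# in-memory file is enough for a quick check:
# import io
# read_deck(io.StringIO("1 4 7 10 13\n16 19 22 25 28\n"))   # -> [1, 4, 7, 10, 13, 16, 19, 22, 25, 28]
# read_messages(io.StringIO("HELLO\nWORLD\n"))              # -> ['HELLO', 'WORLD']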
|
identifier_body
|
|
cipher_functions.py
|
_crds, index):
'''
(list of int, int) -> NoneType
REQ: len(d_of_crds) >= 1
REQ: 0 < index < len(d_of_crds)
>>> swap_cards([1,2,4,6], 2)
[1,2,6,4]
>>> swap_cards([1,2,4,5,6,7,8,9,10,12,21], 10)
[21, 2, 4, 5, 6, 7, 8, 9, 10, 12, 1]
Return the swap of the card at the given index with the card that
follows it.
If the card at the given index is on the bottom of the deck, swap
that card with the top card.
'''
# check if the given index is the last index in the deck.
if (index == (len(deck_of_crds) -1)):
# Find the value at first and last index
last_value = deck_of_crds.pop(index)
first_value = deck_of_crds.pop(0)
# Swap first and last index values
deck_of_crds.insert(0, last_value)
deck_of_crds.insert(index, first_value)
else:
# Swap any value with the next index value
any_value = deck_of_crds.pop(index)
deck_of_crds.insert(index+1, any_value)
def move_joker_1(deck_of_crds):
'''
(list of int) -> NoneType
REQ: JOKER1 in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> move_joker_1([1,2,4,27,6])
[1,2,4,6,27]
>>> move_joker_1([1,2,4,6,27])
[27,2,4,6,1]
>>> move_joker_1([2,27])
[27,2]
Return the swap of JOKER1 with the card that follows it.
If JOKER1 is on the bottom of the deck, swap JOKER1 with the top card.
'''
if (JOKER1 in deck_of_crds):
# Find what index JOKER1 lies in deck_of_crds
joker_1_index = deck_of_crds.index(JOKER1)
# Using swap_cards, change JOKER1's position to the next card.
swap_cards(deck_of_crds, joker_1_index)
def move_joker_2(deck_of_crds):
'''
(list of int) -> NoneType
REQ: JOKER2 in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> move_joker_2([1,2,4,28,6])
[28,2,4,6,1]
>>> move_joker_2([28,2,4,6,1])
[2,4,28,6,1]
>>> move_joker_2([2,4,28,6,1])
[2,4,6,1,28]
>>> move_joker_2([28,2])
[28,2]
Return the swap of JOKER2, after it has moved two cards down. If
JOKER2 is at the bottom of the deck, then swap it with the top card.
'''
# Find JOKER2's index in deck_of_crds
joker_2_index = deck_of_crds.index(JOKER2)
# Using swap_cards function, swap JOKER2's position with next card
swap_cards(deck_of_crds, joker_2_index)
# Store the first swapped list in a new variable
after_frst_swap = deck_of_crds
# Find JOKER2's index in deck_of_crds, in order to swap it once again.
joker_2_position_2 = deck_of_crds.index(JOKER2)
# Swap the already swapped list, once more in order to move JOKER2
# one more time.
swap_cards(after_frst_swap, joker_2_position_2)
def triple_cut(deck_of_crds):
'''
(list of int) -> NoneType
REQ: Both JOKER1 and JOKER2 are in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> triple_cut([28,27])
[28,27]
>>> triple_cut([28,2,4,6,27])
[28, 2, 4, 6, 27]
>>> triple_cut([2,3,4,28,27,6,5])
[6, 5, 28, 27, 2, 3, 4]
>>> triple_cut([28,27,2,3])
[2,3,28,27]
>>> triple_cut([2,3,4,28,27])
[28,27,2,3,4]
>>> triple_cut([3,4,5,27,8,11,28,13,15])
[13, 15, 27, 8, 11, 28, 3, 4, 5]
>>> triple_cut([27,1,2,3,28,6])
[6, 27, 1, 2, 3, 28]
>>> triple_cut([1,27,3,4,28,9,10])
[9, 10, 27, 3, 4, 28, 1]
Return the list in which everything above the first joker goes at the
bottom of deck of cards, and everything below the second goes to the top.
'''
# Find at which index JOKER1 and JOKER2 lie, in deck of cards
index_joker_1 = deck_of_crds.index(JOKER1)
index_joker_2 = deck_of_crds.index(JOKER2)
counter = 0
# Check if JOKER1 comes before JOKER2
if (index_joker_1 < index_joker_2):
# if True then go through all the index values before JOKER1's idx
while(counter < index_joker_1):
# Store the values before JOKER1
values_bfr_JOKER1 = deck_of_crds.pop(0)
# Insert the values before JOKER1, after JOKER2
deck_of_crds.insert(index_joker_2, values_bfr_JOKER1)
counter +=1
# Find the last index number and store it
last_index_vlue = (len(deck_of_crds) -1)
# Check if JOKER2 occurs at the bottom of the deck of cards
while (index_joker_2 < last_index_vlue):
# If True, then store all the index values before JOKER1
values_bfr_JOKER1 = deck_of_crds.pop()
# Insert all the values before JOKER1, to the top of the card
deck_of_crds.insert(0, values_bfr_JOKER1)
last_index_vlue -= 1
# If JOKER2 occurs before JOKER1
elif (index_joker_1 > index_joker_2):
counter =0
# If true, then go through all the index values before JOKER2's idx
while(counter < index_joker_2):
# store all values before JOKER1.
|
# Find the last index number and store it
last_idx_vlue = (len(deck_of_crds) -1)
while (index_joker_1 < last_idx_vlue):
# Store the values before JOKER1 and place them at top of deck.
values_bfr_JOKER1 = deck_of_crds.pop()
deck_of_crds.insert(0, values_bfr_JOKER1)
last_idx_vlue -= 1
def insert_top_to_bottom(deck_of_crds):
'''(list of int) -> NoneType
REQ: len(deck_of_crds) > 0
REQ: len(deck_of_crds) >= len(of the last card in the deck)
>>> insert_top_to_bottom([1,2,3,4])
[1,2,3,4]
>>> insert_top_to_bottom([23,26,27,2,3])
[2,23,26,27,
|
values_bfr_JOKER1 = deck_of_crds.pop(0)
# Insert those values before JOKER1
deck_of_crds.insert(index_joker_1, values_bfr_JOKER1)
counter +=1
|
conditional_block
|
cipher_functions.py
|
Type
REQ: JOKER2 in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> move_joker_2([1,2,4,28,6])
[28,2,4,6,1]
>>> move_joker_2([28,2,4,6,1])
[2,4,28,6,1]
>>> move_joker_2([2,4,28,6,1])
[2,4,6,1,28]
>>> move_joker_2([28,2])
[28,2]
Move JOKER2 two cards down in deck_of_crds. If JOKER2 reaches the
bottom of the deck, swap it with the top card.
'''
# Find JOKER2's index in deck_of_crds
joker_2_index = deck_of_crds.index(JOKER2)
# Using swap_cards function, swap JOKER2's position with next card
swap_cards(deck_of_crds, joker_2_index)
# Store the first swapped list in a new variable
after_frst_swap = deck_of_crds
# Find JOKER2's index in deck_of_crds, in order to swap it once again.
joker_2_position_2 = deck_of_crds.index(JOKER2)
# Swap the already swapped list, once more in order to move JOKER2
# one more time.
swap_cards(after_frst_swap, joker_2_position_2)
def triple_cut(deck_of_crds):
'''
(list of int) -> NoneType
REQ: Both JOKER1 and JOKER2 are in deck_of_crds
REQ: len(deck_of_crds) >= 2
>>> triple_cut([28,27])
[28,27]
>>> triple_cut([28,2,4,6,27])
[28, 2, 4, 6, 27]
>>> triple_cut([2,3,4,28,27,6,5])
[6, 5, 28, 27, 2, 3, 4]
>>> triple_cut([28,27,2,3])
[2,3,28,27]
>>> triple_cut([2,3,4,28,27])
[28,27,2,3,4]
>>> triple_cut([3,4,5,27,8,11,28,13,15])
[13, 15, 27, 8, 11, 28, 3, 4, 5]
>>> triple_cut([27,1,2,3,28,6])
[6, 27, 1, 2, 3, 28]
>>> triple_cut([1,27,3,4,28,9,10])
[9, 10, 27, 3, 4, 28, 1]
Rearrange the list so that everything above the first joker goes to the
bottom of the deck of cards, and everything below the second joker goes to the top.
'''
# Find at which index JOKER1 and JOKER2 lie, in deck of cards
index_joker_1 = deck_of_crds.index(JOKER1)
index_joker_2 = deck_of_crds.index(JOKER2)
counter = 0
# Check if JOKER1 comes before JOKER2
if (index_joker_1 < index_joker_2):
# if True then go through all the index values before JOKER1's idx
while(counter < index_joker_1):
# Store the values before JOKER1
values_bfr_JOKER1 = deck_of_crds.pop(0)
# Insert the values before JOKER1, after JOKER2
deck_of_crds.insert(index_joker_2, values_bfr_JOKER1)
counter +=1
# Find the last index number and store it
last_index_vlue = (len(deck_of_crds) -1)
# Check if JOKER2 occurs at the bottom of the deck of cards
while (index_joker_2 < last_index_vlue):
# If True, then store all the index values before JOKER1
values_bfr_JOKER1 = deck_of_crds.pop()
# Insert all the values before JOKER1, to the top of the card
deck_of_crds.insert(0, values_bfr_JOKER1)
last_index_vlue -= 1
# If JOKER2 occurs before JOKER1
elif (index_joker_1 > index_joker_2):
counter =0
# If true, then go through all the index values before JOKER2's idx
while(counter < index_joker_2):
# store all values before JOKER1.
values_bfr_JOKER1 = deck_of_crds.pop(0)
# Insert those values before JOKER1
deck_of_crds.insert(index_joker_1, values_bfr_JOKER1)
counter +=1
# Find the last index number and store it
last_idx_vlue = (len(deck_of_crds) -1)
while (index_joker_1 < last_idx_vlue):
# Store the values before JOKER1 and place them at top of deck.
values_bfr_JOKER1 = deck_of_crds.pop()
deck_of_crds.insert(0, values_bfr_JOKER1)
last_idx_vlue -= 1
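# Editor's note (illustrative sketch, not the submitted implementation): the same triple
# cut can be written with slicing. This version returns a new list instead of mutating the
# deck in place, and assumes the module-level JOKER1/JOKER2 constants used above.
def triple_cut_sliced(deck):
    first = min(deck.index(JOKER1), deck.index(JOKER2))
    second = max(deck.index(JOKER1), deck.index(JOKER2))
    # everything below the second joker, then the joker-bounded middle, then everything above the first joker
    return deck[second + 1:] + deck[first:second + 1] + deck[:first]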
def insert_top_to_bottom(deck_of_crds):
'''(list of int) -> NoneType
REQ: len(deck_of_crds) > 0
REQ: len(deck_of_crds) >= the value of the last card in the deck
>>> insert_top_to_bottom([1,2,3,4])
[1,2,3,4]
>>> insert_top_to_bottom([23,26,27,2,3])
[2,23,26,27,3]
Look at the bottom card of the deck, move that many cards from top of
deck to the bottom, inserting them just above the bottom card. If the
bottom card is JOKER2, use JOKER1 as the number of cards.
'''
index = len(deck_of_crds)
value = deck_of_crds[index -1]
# don't change the deck if the last card's value is greater than
# or equal to the length of the deck of cards
if (value >= index):
pass # don't change anything
# if value is equal to JOKER2, then use JOKER1 value
elif (value == JOKER2):
value = JOKER1
counter = 0
# loop through the deck of cards, moving the top card down, value times
while counter < value:
cards_to_move = deck_of_crds.pop(0)
deck_of_crds.insert(value, cards_to_move)
counter+=1
# If the last card is not JOKER2, then:
else:
counter = 0
while counter < value:
# Find all the cards that need to be moved from top to bottom.
cards_to_move = deck_of_crds.pop(0)
# Insert those cards into the given index
deck_of_crds.insert(len(deck_of_crds)-1, cards_to_move)
counter+=1
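# Editor's note (hedged sketch of the step documented in the docstring above, written as a
# pure function for clarity rather than the in-place version submitted here):
def insert_top_to_bottom_sliced(deck):
    v = deck[-1]
    if v == JOKER2:
        v = JOKER1  # the docstring says JOKER2 at the bottom counts as JOKER1
    if v >= len(deck):
        return list(deck)  # bottom card value too large: leave the deck unchanged
    # move the top v cards to just above the bottom card
    return deck[v:-1] + deck[:v] + [deck[-1]]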
def get_card_at_top_index(deck_of_crds):
'''(list of int) -> int
REQ: len(deck_of_crds) > 0
REQ:
>>> get_card_at_top_index ([1,2,3,4])
2
>>> get_card_at_top_index([1,2,3,23,24,26])
2
>>> get_card_at_top_index([2,3,23,24,26])
23
Using the top card value as an index, return the card in that deck
at that index. If the top card is JOKER2, use JOKER1
as the index.
'''
# find what lies at the first index
first_index = deck_of_crds[0]
# if that value is JOKER2, then:
if (first_index == JOKER2):
# if JOKER1 is greater than or equal to the length of the deck
if (JOKER1 >= len(deck_of_crds)):
# Don't return anything
return
# If the first value is greater than or equal to the length of the deck, then don't do anything
elif(first_index >= len(deck_of_crds)):
return
# Else, then return the card at the index.
else:
return deck_of_crds[first_index]
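# Editor's note (hedged usage sketch): a single keystream step of this cipher plausibly
# chains the helpers above; move_joker_1 is the assumed name of the JOKER1 counterpart of
# move_joker_2 defined earlier in cipher_functions.py, and next_keystream_value is a
# hypothetical wrapper, not part of the original assignment.
def next_keystream_value(deck):
    move_joker_1(deck)
    move_joker_2(deck)
    triple_cut(deck)
    insert_top_to_bottom(deck)
    return get_card_at_top_index(deck)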
def
|
get_next_value
|
identifier_name
|
|
types.rs
|
WITH TIME ZONE
TIME_TZ = 186,
/// TIMESTAMP
TIMESTAMP = 187,
/// TIMESTAMP WITH TIME ZONE
TIMESTAMP_TZ = 188,
/// INTERVAL YEAR TO MONTH
INTERVAL_YM = 189,
/// INTERVAL DAY TO SECOND
INTERVAL_DS = 190,
/// /* */
TIMESTAMP_LTZ = 232,
/// pl/sql representation of named types
PNTY = 241,
// some pl/sql specific types
/// pl/sql 'record' (or %rowtype)
REC = 250,
/// pl/sql 'indexed table'
TAB = 251,
/// pl/sql 'boolean'
BOL = 252,
}
/// The mode in which to create the environment when calling `OCIEnvNlsCreate()`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum CreateMode {
/// The default value, which is non-UTF-16 encoding.
Default = 0,
/// Uses threaded environment. Internal data structures not exposed to the user are protected from concurrent
/// accesses by multiple threads.
Threaded = 1 << 0,
/// Uses object features.
Object = 1 << 1,
/// Uses publish-subscribe notifications.
Events = 1 << 2,
//Shared = 1 << 4,
/// Suppresses the calling of the dynamic callback routine OCIEnvCallback(). The default behavior is to allow
/// calling of OCIEnvCallback() when the environment is created.
/// See Also:
/// "Dynamic Callback Registrations"
NoUcb = 1 << 6,
/// No mutual exclusion (mutex) locking occurs in this mode. All OCI calls done on the environment handle,
/// or on handles derived from the environment handle, must be serialized. `OCI_THREADED` must also be specified
/// when `OCI_ENV_NO_MUTEX` is specified.
EnvNoMutex = 1 << 7,
//SharedExt = 1 << 8,
//AlwaysBlocking = 1 << 10,
//UseLDAP = 1 << 12,
//RegLDAPOnly = 1 << 13,
//UTF16 = 1 << 14,
//AFC_PAD_ON = 1 << 15,
//NewLengthSemantics = 1 << 17,
//NoMutexStmt = 1 << 18,
//MutexEnvOnly = 1 << 19,
/// Suppresses NLS character validation; NLS character validation suppression is on by default beginning with
/// Oracle Database 11g Release 1 (11.1). Use `OCI_ENABLE_NLS_VALIDATION` to enable NLS character validation.
/// See Comments for more information.
SuppressNlsValidation = 1 << 20,
//OCI_MUTEX_TRY = 1 << 21,
/// Turns on N' substitution.
NCharLiteralReplaceOn = 1 << 22,
/// Turns off N' substitution. If neither this mode nor `OCI_NCHAR_LITERAL_REPLACE_ON` is used, the substitution
/// is determined by the environment variable `ORA_NCHAR_LITERAL_REPLACE`, which can be set to `TRUE` or `FALSE`.
/// When it is set to TRUE, the replacement is turned on; otherwise it is turned off, the default setting in OCI.
NCharLiteralReplaceOff = 1 << 23,
/// Enables NLS character validation. See Comments for more information.
EnableNlsValidation = 1 << 24,
}
impl Default for CreateMode {
fn default() -> Self { CreateMode::Default }
}
/// The mode in which to connect to the database server when calling `OCIServerAttach()`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum AttachMode {
/// For encoding, this value tells the server handle to use the setting in the environment handle.
Default = 0,
/// Use connection pooling.
CPool = 1 << 9,
|
/// Specifies the various modes of operation
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum AuthMode {
/// In this mode, the user session context returned can only ever be set with the server context
/// specified in `svchp`. For encoding, the server handle uses the setting in the environment handle.
Default = 0,
/// In this mode, the new user session context can be set in a service handle with a different server handle.
/// This mode establishes the user session context. To create a migratable session, the service handle must already
/// be set with a nonmigratable user session, which becomes the "creator" session of the migratable session. That is,
/// a migratable session must have a nonmigratable parent session.
///
/// `Migrate` should not be used when the session uses connection pool underneath. The session migration and multiplexing
/// happens transparently to the user.
Migrate = 1 << 0,
/// In this mode, you are authenticated for `SYSDBA` access
SysDba = 1 << 1,
/// In this mode, you are authenticated for `SYSOPER` access
SysOper = 1 << 2,
/// This mode can only be used with `SysDba` or `SysOper` to authenticate for certain administration tasks
PrelimAuth = 1 << 3,
//PICache = 1 << 4,
/// Enables statement caching with default size on the given service handle. It is optional to pass this mode
/// if the application is going to explicitly set the size later using `OCI_ATTR_STMTCACHESIZE` on that service handle.
StmtCache = 1 << 6,
//StatelessCall = 1 << 7,
//StatelessTxn = 1 << 8,
//StatelessApp = 1 << 9,
//SysAsm = 1 << 14,
//SysBkp = 1 << 16,
//SysDgd = 1 << 17,
//SysKmt = 1 << 18,
}
impl Default for AuthMode {
fn default() -> Self { AuthMode::Default }
}
/// The Oracle dialect used to parse the SQL of queries. It is recommended to always use the server's native
/// dialect; it is the default dialect when [`prepare`][1] is called without parameters.
///
/// [1]: ../struct.Connection.html#method.prepare
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum Syntax {
/// The syntax depends on the version of the database server.
Native = 1,
/// V7 ORACLE parsing syntax.
V7 = 2,
//V8 = 3,
/// Specifies the statement to be translated according to the SQL translation profile set in the session.
Foreign = u32::MAX as isize,
}
impl Default for Syntax {
fn default() -> Self { Syntax::Native }
}
/// The kinds of statement a statement can turn out to be after it has been prepared.
/// The statement kind determines which parameters the `OCIExecute()` function should be called with.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
#[repr(u16)]
pub enum StatementType {
/// Unknown statement
UNKNOWN = 0,
/// Select statement
SELECT = 1,
/// Update statement
UPDATE = 2,
/// delete statement
DELETE = 3,
/// Insert Statement
INSERT = 4,
/// create statement
CREATE = 5,
/// drop statement
DROP = 6,
/// alter statement
ALTER = 7,
/// begin ... (pl/sql statement)
BEGIN = 8,
/// declare .. (pl/sql statement)
DECLARE = 9,
/// corresponds to kpu call
CALL = 10,
}
/// Character sets supported by the database.
///
/// The documentation nowhere lists the mapping of a character-set name to its numeric value, so the values were
/// obtained with the following SQL script:
/// ```sql
/// select value as name, nls_charset_id(value) as val
/// from v$nls_valid_values
/// where parameter = 'CHARACTERSET'
/// order by nls_charset_id(value)
/// ```
/// http://www.mydul.net/charsets.html
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub enum Charset {
/// Использовать настройки из переменных окружения `NLS_LANG` (для типов
|
}
impl Default for AttachMode {
fn default() -> Self { AttachMode::Default }
}
|
random_line_split
|
types.rs
|
WITH TIME ZONE
TIME_TZ = 186,
/// TIMESTAMP
TIMESTAMP = 187,
/// TIMESTAMP WITH TIME ZONE
TIMESTAMP_TZ = 188,
/// INTERVAL YEAR TO MONTH
INTERVAL_YM = 189,
/// INTERVAL DAY TO SECOND
INTERVAL_DS = 190,
/// /* */
TIMESTAMP_LTZ = 232,
/// pl/sql representation of named types
PNTY = 241,
// some pl/sql specific types
/// pl/sql 'record' (or %rowtype)
REC = 250,
/// pl/sql 'indexed table'
TAB = 251,
/// pl/sql 'boolean'
BOL = 252,
}
/// The mode in which to create the environment when calling `OCIEnvNlsCreate()`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum CreateMode {
/// The default value, which is non-UTF-16 encoding.
Default = 0,
/// Uses threaded environment. Internal data structures not exposed to the user are protected from concurrent
/// accesses by multiple threads.
Threaded = 1 << 0,
/// Uses object features.
Object = 1 << 1,
/// Uses publish-subscribe notifications.
Events = 1 << 2,
//Shared = 1 << 4,
/// Suppresses the calling of the dynamic callback routine OCIEnvCallback(). The default behavior is to allow
/// calling of OCIEnvCallback() when the environment is created.
/// See Also:
/// "Dynamic Callback Registrations"
NoUcb = 1 << 6,
/// No mutual exclusion (mutex) locking occurs in this mode. All OCI calls done on the environment handle,
/// or on handles derived from the environment handle, must be serialized. `OCI_THREADED` must also be specified
/// when `OCI_ENV_NO_MUTEX` is specified.
EnvNoMutex = 1 << 7,
//SharedExt = 1 << 8,
//AlwaysBlocking = 1 << 10,
//UseLDAP = 1 << 12,
//RegLDAPOnly = 1 << 13,
//UTF16 = 1 << 14,
//AFC_PAD_ON = 1 << 15,
//NewLengthSemantics = 1 << 17,
//NoMutexStmt = 1 << 18,
//MutexEnvOnly = 1 << 19,
/// Suppresses NLS character validation; NLS character validation suppression is on by default beginning with
/// Oracle Database 11g Release 1 (11.1). Use `OCI_ENABLE_NLS_VALIDATION` to enable NLS character validation.
/// See Comments for more information.
SuppressNlsValidation = 1 << 20,
//OCI_MUTEX_TRY = 1 << 21,
/// Turns on N' substitution.
NCharLiteralReplaceOn = 1 << 22,
/// Turns off N' substitution. If neither this mode nor `OCI_NCHAR_LITERAL_REPLACE_ON` is used, the substitution
/// is determined by the environment variable `ORA_NCHAR_LITERAL_REPLACE`, which can be set to `TRUE` or `FALSE`.
/// When it is set to TRUE, the replacement is turned on; otherwise it is turned off, the default setting in OCI.
NCharLiteralReplaceOff = 1 << 23,
/// Enables NLS character validation. See Comments for more information.
EnableNlsValidation = 1 << 24,
}
impl Default for CreateMode {
fn default() -> Self { CreateMode::Default }
}
/// The mode in which to connect to the database server when calling `OCIServerAttach()`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum AttachMo
|
/// For encoding, this value tells the server handle to use the setting in the environment handle.
Default = 0,
/// Use connection pooling.
CPool = 1 << 9,
}
impl Default for AttachMode {
fn default() -> Self { AttachMode::Default }
}
/// Specifies the various modes of operation
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum AuthMode {
/// In this mode, the user session context returned can only ever be set with the server context
/// specified in `svchp`. For encoding, the server handle uses the setting in the environment handle.
Default = 0,
/// In this mode, the new user session context can be set in a service handle with a different server handle.
/// This mode establishes the user session context. To create a migratable session, the service handle must already
/// be set with a nonmigratable user session, which becomes the "creator" session of the migratable session. That is,
/// a migratable session must have a nonmigratable parent session.
///
/// `Migrate` should not be used when the session uses connection pool underneath. The session migration and multiplexing
/// happens transparently to the user.
Migrate = 1 << 0,
/// In this mode, you are authenticated for `SYSDBA` access
SysDba = 1 << 1,
/// In this mode, you are authenticated for `SYSOPER` access
SysOper = 1 << 2,
/// This mode can only be used with `SysDba` or `SysOper` to authenticate for certain administration tasks
PrelimAuth = 1 << 3,
//PICache = 1 << 4,
/// Enables statement caching with default size on the given service handle. It is optional to pass this mode
/// if the application is going to explicitly set the size later using `OCI_ATTR_STMTCACHESIZE` on that service handle.
StmtCache = 1 << 6,
//StatelessCall = 1 << 7,
//StatelessTxn = 1 << 8,
//StatelessApp = 1 << 9,
//SysAsm = 1 << 14,
//SysBkp = 1 << 16,
//SysDgd = 1 << 17,
//SysKmt = 1 << 18,
}
impl Default for AuthMode {
fn default() -> Self { AuthMode::Default }
}
/// The Oracle dialect used to parse the SQL of queries. It is recommended to always use the server's native
/// dialect; it is the default dialect when [`prepare`][1] is called without parameters.
///
/// [1]: ../struct.Connection.html#method.prepare
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum Syntax {
/// The syntax depends on the version of the database server.
Native = 1,
/// V7 ORACLE parsing syntax.
V7 = 2,
//V8 = 3,
/// Specifies the statement to be translated according to the SQL translation profile set in the session.
Foreign = u32::MAX as isize,
}
impl Default for Syntax {
fn default() -> Self { Syntax::Native }
}
/// The kinds of statement a statement can turn out to be after it has been prepared.
/// The statement kind determines which parameters the `OCIExecute()` function should be called with.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
#[repr(u16)]
pub enum StatementType {
/// Unknown statement
UNKNOWN = 0,
/// Select statement
SELECT = 1,
/// Update statement
UPDATE = 2,
/// delete statement
DELETE = 3,
/// Insert Statement
INSERT = 4,
/// create statement
CREATE = 5,
/// drop statement
DROP = 6,
/// alter statement
ALTER = 7,
/// begin ... (pl/sql statement)
BEGIN = 8,
/// declare .. (pl/sql statement)
DECLARE = 9,
/// corresponds to kpu call
CALL = 10,
}
/// Character sets supported by the database.
///
/// The documentation nowhere lists the mapping of a character-set name to its numeric value, so the values were
/// obtained with the following SQL script:
/// ```sql
/// select value as name, nls_charset_id(value) as val
/// from v$nls_valid_values
/// where parameter = 'CHARACTERSET'
/// order by nls_charset_id(value)
/// ```
/// http://www.mydul.net/charsets.html
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub enum Charset {
/// Использовать настройки из переменных окружения `NLS_LANG` (для тип
|
de {
|
identifier_name
|
traits.rs
|
From<Pointer<AR::PtrVal>>,
{
}
/// Trait which represents all of the analysis methods an architecture
/// must provide in order to be supported.
pub trait Architecture
where
Self: 'static
+ Copy
+ Debug
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Serialize
+ Send
+ Sync
+ Default,
Self::Register:
Mappable + Debug + Display + FromStr + Send + Sync + Serialize + for<'dw> Deserialize<'dw>,
Self::Word: Bitwise
+ Numerical
+ Popcount<Output = Self::Word>
+ TryInto<u64>
+ Mappable
+ Nameable
+ Ord
+ Bounded
+ Serialize
+ for<'dw> Deserialize<'dw>,
Self::Byte: Bitwise
+ Numerical
+ Popcount<Output = Self::Byte>
+ TryInto<u64>
+ Mappable
+ Nameable
+ Ord
+ Bounded
+ Serialize
+ for<'dw> Deserialize<'dw>,
Self::PtrVal:
PtrNum<Self::Offset> + Mappable + Nameable + Serialize + for<'dw> Deserialize<'dw>,
Self::Offset: Offset<Self::PtrVal>
+ Mappable
+ Nameable
+ Numerical
+ Serialize
+ for<'dw> Deserialize<'dw>,
{
/// The type which represents all possible register names in a given
/// architecture.
///
/// In some architectures, notably AArch32, the program counter is treated
/// as a normal register that can be operated upon. In such architectures,
/// you must either leave off that register from this type, or ensure that
/// it is always in synchronization with the contextualized program counter
/// when tracing code.
///
/// This type is customarily referred to as `RK` in other trait bounds.
type Register;
/// The type which represents a register value.
///
/// In the case that an architecture has multiple widths of registers, then
/// this type must either enumerate all possible register widths, or it
/// must use a representation wide enough to hold all of them and ensure
/// that any unused bits do not affect the results of tracing. It must also
/// ensure that register values intended for one type or width of register
/// do not get set on registers which cannot architecturally contain them
/// without being first converted.
///
/// This type is customarily referred to as `I` in other trait bounds.
type Word;
/// The type which represents a signed register value.
///
/// Assembler syntaxes that accept this particular architecture must
/// allow both signed and unsigned representations of the word type. It is
/// implied that the regular `Word` type is unsigned.
type SignedWord;
/// The type which represents a byte as addressed by memory.
///
/// In most modern architectures, bytes are 8 bits wide, and this should be
/// `u8`. Some exotic architectures are "word-addressed": incrementing an
/// address by one results in skipping more or less than eight bits in the
/// resulting memory. In that case, `Byte` would need to be wider or
/// narrower than 8 bits.
///
/// Note that most processors whose memory buses read or write more than
/// one byte at a time do *not* qualify as word-addressed, as reading the
/// next address still returns a byte even though the memory device it
/// comes from works in wider units of data.
///
/// This type is customarily referred to as `MV` in other trait bounds.
type Byte;
/// The type which represents this architecture's memory addresses.
///
/// An architecture is permitted to have non-numerical memory addresses,
/// such as architectures with separate I/O and memory address spaces. In
/// this case, you would use an enum type with an option for each separate
/// bus, and provide a separate `Offset` type which can be added to any
/// address to get a new one within the same bus.
///
/// This type is customarily referred to as `P` in other trait bounds.
type PtrVal;
/// The type which represents an offset from a given pointer value.
///
/// While architectures are allowed to provide multiple pointer value
/// representations, bundled together in an `enum`, every arm of the enum
/// must be able to support a numerical offset type that can be added to
/// any address to get a new one that many bytes further along.
///
/// This type is customarily referred to as `S` in other trait bounds.
type Offset;
/// Obtain this architecture's name.
fn name(&self) -> ArchName;
/// Inject architectural contexts from user-provided input intended to form
/// a valid contextual pointer.
///
/// Each architecture is allowed to specify its own architectural
/// contexts, which are stored alongside platform contexts in the
/// `Pointer`. This function allows architectures to participate in context
/// parsing.
///
/// After parsing has completed, the context list given should be shortened
/// to exclude the contexts this function has processed, and those parsed
/// contexts should be provided to the `Pointer`. As a practical
/// convention, architectures should only parse contexts at the start or
/// end of a context list.
///
/// TODO: Why does this return `Option<()>`?!
fn parse_architectural_contexts(
contexts: &mut &[&str],
ptr: &mut Pointer<Self::PtrVal>,
) -> Option<()>;
/// Statically disassemble instructions from a given address on a given
/// platform.
///
/// The `L` type parameter is the literal type of the given assembler to be
/// used when disassembling the program. The `IO` type parameter represents
/// an offset into a program image, which may be a wider type than `Offset`
/// (e.g. if bank switching is in use). It is almost always `usize`.
fn disassemble<L>(
&self,
at: &Pointer<Self::PtrVal>,
bus: &Memory<Self>,
) -> Result<Disasm<L, Self::PtrVal, Self::Offset>, Self>
where
L: CompatibleLiteral<Self>;
/// Statically determine the input and output requisites of a given
/// instruction.
///
/// This method allows building a dependency graph of a given block by
/// matching output requisites of a given instruction to input requisites
/// of future instructions.
fn dataflow(
&self,
at: &Pointer<Self::PtrVal>,
bus: &Memory<Self>,
) -> Result<(RequisiteSet<Self>, RequisiteSet<Self>), Self>;
/// Determine what register values or memory addresses are required to be
/// resolved in order for symbolic execution to continue at a given PC.
///
/// This function returns a list of `Prerequisite`s, as well as a flag to
/// indicate if the prerequisite list is complete or not. If the list is
/// incomplete, then after resolving those prerequisites, you must
/// reanalyze the program at the same position with the new state in order
/// to find more prerequisites. Otherwise, you may continue tracing.
///
/// A prerequisite should only be listed if symbolic tracing cannot
/// continue otherwise. If every symbolic value is listed as a prerequisite,
/// then the state space of symbolic tracing will explode far faster than
/// if symbolic execution is occurring. When a value is listed as a
/// prerequisite, the state is said to have been forced into forking. It is
/// permissible to force a fork for the following reasons:
///
/// * The program counter or a context it needs is unresolved
/// * Instruction contents are unresolved in memory
/// * Memory addresses being read or written to are unresolved
/// * The target address of a jump or call is unresolved
/// * Flags or other information necessary to determine if a jump or call
/// is taken or not taken are unresolved
///
/// The `IO` type parameter represents an offset into a program image,
/// which may be a wider type than `Offset` (e.g. if bank switching is in
/// use). It is almost always `usize`.
fn prerequisites(
&self,
at: Self::PtrVal,
bus: &Memory<Self>,
state: &State<Self>,
) -> Result<(RequisiteSet<Self>, bool), Self>;
/// Advance the state of program execution by one instruction, producing a
/// new state and program counter to continue from.
///
/// This function may error if the given state is ambiguous enough to
/// disallow further execution. In order to find out why, you need to ask
/// the `prerequisites` function to get what needs to be fixed about the
/// state. In fact, you should always call it before calling this one.
///
/// A state and program counter that produce an empty prerequisites list
/// for a given program must always cause `trace` to return a valid
/// continuation of the program.
///
/// TODO: There is currently no representation of states that halt the
/// program.
fn trace(
|
&self,
|
random_line_split
|
|
api.py
|
close(self):
# if self.store is not None:
# self.store.close()
#
# def __open(self):
# if self.store is None:
# self.store = pd.HDFStore(self.filename, format='table')
# return self.store
#
# def keys(self):
# self.__open()
# return self.store.keys()
#
# def save(self, key, df, _append=True, **kwargs):
# self.__open()
# if df is None:
# return
# self.store.put(key, df.df(), append=_append, format='table', data_columns=True, **kwargs)
#
# def get(self, key):
# self.__open()
# return self.store.get(key)
#
# def select(self, key, **args):
# self.__open()
# return self.store.select(key, **args)
#
# def attribute(self, key, **kwargs):
# self.__open()
# meta_info = "meta_info"
# if key in self.keys():
# if kwargs:
# self.store.get_storer(key).attrs[meta_info] = kwargs
# else:
# try:
# dic = self.store.get_storer(key).attrs[meta_info]
# return {} if dic is None else dic
# except KeyError:
# return {}
# else:
# return {}
#class BaseDataFrame(object):
# def __init__(self, df):
# self.__df = df
#
# def set_index(self, key, inplace=True):
# if self.__df is None:
# log.error('df is none')
# elif isinstance(self.__df, pd.DataFrame) == False:
# # for debug
# if isinstance(self.__df, int):
# log.error('df is int %d' % self.__df)
# elif isinstance(self.__df, str):
# log.error('df is string %s' % self.__df)
# #raise Exception('df is not DataFrame ' + type(self.__df))
# elif self.__df.empty:
# log.warning('df is empty')
# elif key in self.__df.keys():
# self.__df.set_index(key, inplace=inplace)
#
# def to_json(self, **kwargs):
# return self.__df.to_json(**kwargs)
#
# def __getitem__(self, key):
# return self.__df[key]
#
# def index(self, i=""):
# return self.__df.index if i == "" else self.__df.loc[i]
#
# @property
# def empty(self):
# if self.__df is None:
# return True
# elif isinstance(self.__df, pd.DataFrame) == False:
# return True
# else:
# return self.__df.empty
#
# def df(self):
# return self.__df
#
# @staticmethod
# def format_fields(*fields):
# return ','.join(fields)
#
#
#class InstrumentinfoDataFrame(BaseDataFrame):
# INST_TYPE = 'inst_type' # 证券类别
# MARKET = 'market' # 交易所代码
# SYMBOL = 'symbol' # 证券代码
# NAME = 'name' # 证券名称
# LIST_DATE = 'list_date' # 上市日期
# DELIST_DATE = 'delist_date' # 退市日期
# CNSPELL = 'cnspell' # 拼音简写
# CURRENCY = 'currency' # 交易货币
# STATUS = 'status' # 上市状态,1:上市 3:退市 8:暂停上市
# BUYLOT = 'bylot' # INT 最小买入单位
# SELLLOT = 'selllot' # INT 最大买入单位
# PRICETICK = 'pricetick' # double 最小变动单位
# PRODUCT = 'product' # 合约品种
# UNDERLYING = 'underlying' # 对应标的
# MULTIPLIER = 'multiplier' # int 合约乘数
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(InstrumentinfoDataFrame.SYMBOL)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[self.STATUS,
# self.LIST_DATE,
# self.NAME,
# self.SYMBOL,
# self.MARKET])
#
#
#class CecsuspDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# ANN_DATE = 'ann_date' # string 停牌公告日期
# SUSP_DATE = 'susp_date' # 停牌开始日期
# SUSP_TIME = 'susp_time' # string 停牌开始时间
# RESU_DATE = 'resu_date' # string 复牌日期
# RESU_TIME = 'resu_time' # string 复牌时间
# SUSP_REASON = 'susp_reason' # string 停牌原因
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(CecsuspDataFrame.SYMBOL)
#
#
#class SecrestrictedDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# LIST_DATE = 'list_date' # string 本期解禁流通日期
# LIFTED_REASON = 'lifted_reason' # 本期解禁原因(来源)
# LIFTED_SHARES = 'lifted_shares' # string 本期解禁数量
# LIFTED_RATIO = 'lifted_ratio' # string 可流通占A股总数比例
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(SecrestrictedDataFrame.SYMBOL)
#
#
#class TradecalDataFrame(BaseDataFrame):
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# ISTRADEDAY = 'istradeday' # string 是否交易日
# ISWEEKDAY = 'isweekday' # string 是否工作日
# ISWEEKDAY = 'isweekend' # string 是否周末
# ISHOLIDAY = 'isholiday' # string string 否节假日
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(TradecalDataFrame.TRADE_DATE)
#
#
#class DailyDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# OPEN = 'open' # double 开盘价
# HIGH = 'high' # double 最高价
# LOW = 'low' # double 最低价
# CLOSE = 'close' # double 收盘价
# VOLUME = 'volume' # volume double 成交量
# TURNOVER = 'turnover' # turnover double 成交金额
# VWAP = 'vwap' # double 成交均价
# SETTLE = 'settle' # double 结算价
# OI = 'oi' # double 持仓量
# TRADE_STATUS = 'trade_status' # string 交易状态(”停牌”或者”交易”)
# TRADE_SUSPENSION = '停牌'
# TRADE_TRANSACTION = '交易'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(DailyDataFrame.TRADE_DATE)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[DailyDataFrame.CLOSE,
# DailyDataFrame.TRADE_DATE,
# DailyDataFrame.OPEN,
# DailyDataFrame.HIGH,
# DailyDataFrame.LOW,
# DailyDataFrame.VOLUME,
# DailyDataFrame.TRADE_STATUS])
#
#
#class BarDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# DATE = 'date' # int
# TIME = 'time' # int
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# FREQ = 'freq' # bar 类型
# OPEN = 'open'
# HIGH = 'high'
# LOW = 'low'
|
# TURNOVER = 'turnover'
# VWAP = 'vwap'
# OI = 'oi'
# SETTLE = 'settle'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(BarDataFrame.TRADE_DATE)
#
def singleton(cls):
instance = {}
def geninstance(*args, **kwargs):
if cls not in instance:
instance[cls] = cls(*args, **kwargs)
return instance
|
# CLOSE = 'close'
# VOLUME = 'volume'
|
random_line_split
|
api.py
|
'status' # 上市状态,1:上市 3:退市 8:暂停上市
# BUYLOT = 'bylot' # INT 最小买入单位
# SELLLOT = 'selllot' # INT 最大买入单位
# PRICETICK = 'pricetick' # double 最小变动单位
# PRODUCT = 'product' # 合约品种
# UNDERLYING = 'underlying' # 对应标的
# MULTIPLIER = 'multiplier' # int 合约乘数
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(InstrumentinfoDataFrame.SYMBOL)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[self.STATUS,
# self.LIST_DATE,
# self.NAME,
# self.SYMBOL,
# self.MARKET])
#
#
#class CecsuspDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# ANN_DATE = 'ann_date' # string 停牌公告日期
# SUSP_DATE = 'susp_date' # 停牌开始日期
# SUSP_TIME = 'susp_time' # string 停牌开始时间
# RESU_DATE = 'resu_date' # string 复牌日期
# RESU_TIME = 'resu_time' # string 复牌时间
# SUSP_REASON = 'susp_reason' # string 停牌原因
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(CecsuspDataFrame.SYMBOL)
#
#
#class SecrestrictedDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# LIST_DATE = 'list_date' # string 本期解禁流通日期
# LIFTED_REASON = 'lifted_reason' # 本期解禁原因(来源)
# LIFTED_SHARES = 'lifted_shares' # string 本期解禁数量
# LIFTED_RATIO = 'lifted_ratio' # string 可流通占A股总数比例
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(SecrestrictedDataFrame.SYMBOL)
#
#
#class TradecalDataFrame(BaseDataFrame):
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# ISTRADEDAY = 'istradeday' # string 是否交易日
# ISWEEKDAY = 'isweekday' # string 是否工作日
# ISWEEKDAY = 'isweekend' # string 是否周末
# ISHOLIDAY = 'isholiday' # string string 否节假日
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(TradecalDataFrame.TRADE_DATE)
#
#
#class DailyDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# OPEN = 'open' # double 开盘价
# HIGH = 'high' # double 最高价
# LOW = 'low' # double 最低价
# CLOSE = 'close' # double 收盘价
# VOLUME = 'volume' # volume double 成交量
# TURNOVER = 'turnover' # turnover double 成交金额
# VWAP = 'vwap' # double 成交均价
# SETTLE = 'settle' # double 结算价
# OI = 'oi' # double 持仓量
# TRADE_STATUS = 'trade_status' # string 交易状态(”停牌”或者”交易”)
# TRADE_SUSPENSION = '停牌'
# TRADE_TRANSACTION = '交易'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(DailyDataFrame.TRADE_DATE)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[DailyDataFrame.CLOSE,
# DailyDataFrame.TRADE_DATE,
# DailyDataFrame.OPEN,
# DailyDataFrame.HIGH,
# DailyDataFrame.LOW,
# DailyDataFrame.VOLUME,
# DailyDataFrame.TRADE_STATUS])
#
#
#class BarDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# DATE = 'date' # int
# TIME = 'time' # int
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# FREQ = 'freq' # bar 类型
# OPEN = 'open'
# HIGH = 'high'
# LOW = 'low'
# CLOSE = 'close'
# VOLUME = 'volume'
# TURNOVER = 'turnover'
# VWAP = 'vwap'
# OI = 'oi'
# SETTLE = 'settle'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(BarDataFrame.TRADE_DATE)
#
def singleton(cls):
instance = {}
def geninstance(*args, **kwargs):
if cls not in instance:
instance[cls] = cls(*args, **kwargs)
return instance[cls]
return geninstance
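# Editor's note (descriptive comment on the decorator above, added for clarity): geninstance
# caches one instance per decorated class, so repeated constructor calls return the same
# object; e.g. Api() is Api() evaluates to True for the class defined below.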
@singleton
class Api(object):
__api = DataApi(addr='tcp://data.tushare.org:8910')
def __lazy_login(self):
if self.__login_flag is False:
ret = self.__api.login(self.id, self.token)
if ret[0] is None:
raise Exception('login failed %s - %s' % (self.id, ret))
else:
log.info('%s login success' % self.id)
self.__login_flag = ret
def __init__(self):
self.__login_flag = False
with open('.config.json') as f:
info = json.load(f)
self.id = info.get('id')
self.token = info.get('token')
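# Editor's note (assumed file shape, inferred only from the keys read above; the values are
# placeholders, not real credentials):
# .config.json
# {
#     "id": "<tushare user id>",
#     "token": "<tushare api token>"
# }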
# Get the list of securities listed on the market
def instrumentinfo(self, _fields="", _filter="inst_type=1&status=1&market=SH,SZ"):
self.__lazy_login()
df, msg = self.__api.query(view="jz.instrumentInfo", fields=_fields,
filter=_filter,
data_format='pandas')
log.debug('request jz.instrumentInfo')
return df
# Suspension/resumption data: resu_date is not set
# def secsusp(self, _filter="", _fields=""):
# self.__lazy_login()
# df, msg = self.__api.query(
# view="lb.secSusp",
# fields=_fields,
# filter=_filter,
# data_format='pandas')
# log.debug('request lb.secSusp')
# return CecsuspDataFrame(df)
# Get the list of restricted shares coming off lock-up
# TODO
#def secrestricted(self, _start_date, _end_date, _fields=""):
# self.__lazy_login()
# filters = urlencode({'start_date': TradeCalendar.date2int(_start_date),
# 'end_date': TradeCalendar.date2int(_end_date)})
# df, msg = self.__api.query(
# view="lb.secRestricted",
# fields=_fields,
# filter=filters,
# data_format='pandas')
# log.debug('request lb.secRestricted')
# return df
# Daily trading quotes
def daily(self, symbol, start_date, end_date, freq='1d', fields= "", adjust_mode='post'):
self.__lazy_login()
df, msg = self.__api.daily(
symbol=symbol,
freq=freq,
start_date=start_date,
end_date=end_date,
fields=fields,
adjust_mode=adjust_mode)
log.debug('request daily %s' % msg)
return df
# Trading calendar
def tradecal(self, _fields="trade_date, istradeday"):
self.__lazy_login()
df, msg = self.__api.query(
view="jz.secTradeCal",
fields=_fields)
log.debug('request jz.secTradeCal')
return df
# Subscribe
def subscribe(self, _symbol, _func, _fields):
self.__lazy_login()
sublist, msg = self.__api.subscribe(_symbol, func=_func,
fields=_fields)
log.debug('request subscribe')
return sublist
# bar
def bar(self, _symbol, _trade_date, _freq="5M", _fields=""):
self.__laz
|
y_login()
df, msg = self.__api.bar(symbol=_symbol,
trade_date=_trade_date,
freq=_freq,
|
identifier_body
|
|
api.py
|
_storer(key).attrs[meta_info] = kwargs
# else:
# try:
# dic = self.store.get_storer(key).attrs[meta_info]
# return {} if dic is None else dic
# except KeyError:
# return {}
# else:
# return {}
#class BaseDataFrame(object):
# def __init__(self, df):
# self.__df = df
#
# def set_index(self, key, inplace=True):
# if self.__df is None:
# log.error('df is none')
# elif isinstance(self.__df, pd.DataFrame) == False:
# # for debug
# if isinstance(self.__df, int):
# log.error('df is int %d' % self.__df)
# elif isinstance(self.__df, str):
# log.error('df is string %s' % self.__df)
# #raise Exception('df is not DataFrame ' + type(self.__df))
# elif self.__df.empty:
# log.warning('df is empty')
# elif key in self.__df.keys():
# self.__df.set_index(key, inplace=inplace)
#
# def to_json(self, **kwargs):
# return self.__df.to_json(**kwargs)
#
# def __getitem__(self, key):
# return self.__df[key]
#
# def index(self, i=""):
# return self.__df.index if i == "" else self.__df.loc[i]
#
# @property
# def empty(self):
# if self.__df is None:
# return True
# elif isinstance(self.__df, pd.DataFrame) == False:
# return True
# else:
# return self.__df.empty
#
# def df(self):
# return self.__df
#
# @staticmethod
# def format_fields(*fields):
# return ','.join(fields)
#
#
#class InstrumentinfoDataFrame(BaseDataFrame):
# INST_TYPE = 'inst_type' # 证券类别
# MARKET = 'market' # 交易所代码
# SYMBOL = 'symbol' # 证券代码
# NAME = 'name' # 证券名称
# LIST_DATE = 'list_date' # 上市日期
# DELIST_DATE = 'delist_date' # 退市日期
# CNSPELL = 'cnspell' # 拼音简写
# CURRENCY = 'currency' # 交易货币
# STATUS = 'status' # 上市状态,1:上市 3:退市 8:暂停上市
# BUYLOT = 'bylot' # INT 最小买入单位
# SELLLOT = 'selllot' # INT 最大买入单位
# PRICETICK = 'pricetick' # double 最小变动单位
# PRODUCT = 'product' # 合约品种
# UNDERLYING = 'underlying' # 对应标的
# MULTIPLIER = 'multiplier' # int 合约乘数
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(InstrumentinfoDataFrame.SYMBOL)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[self.STATUS,
# self.LIST_DATE,
# self.NAME,
# self.SYMBOL,
# self.MARKET])
#
#
#class CecsuspDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# ANN_DATE = 'ann_date' # string 停牌公告日期
# SUSP_DATE = 'susp_date' # 停牌开始日期
# SUSP_TIME = 'susp_time' # string 停牌开始时间
# RESU_DATE = 'resu_date' # string 复牌日期
# RESU_TIME = 'resu_time' # string 复牌时间
# SUSP_REASON = 'susp_reason' # string 停牌原因
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(CecsuspDataFrame.SYMBOL)
#
#
#class SecrestrictedDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# LIST_DATE = 'list_date' # string 本期解禁流通日期
# LIFTED_REASON = 'lifted_reason' # 本期解禁原因(来源)
# LIFTED_SHARES = 'lifted_shares' # string 本期解禁数量
# LIFTED_RATIO = 'lifted_ratio' # string 可流通占A股总数比例
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(SecrestrictedDataFrame.SYMBOL)
#
#
#class TradecalDataFrame(BaseDataFrame):
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# ISTRADEDAY = 'istradeday' # string 是否交易日
# ISWEEKDAY = 'isweekday' # string 是否工作日
# ISWEEKDAY = 'isweekend' # string 是否周末
# ISHOLIDAY = 'isholiday' # string string 否节假日
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(TradecalDataFrame.TRADE_DATE)
#
#
#class DailyDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# OPEN = 'open' # double 开盘价
# HIGH = 'high' # double 最高价
# LOW = 'low' # double 最低价
# CLOSE = 'close' # double 收盘价
# VOLUME = 'volume' # volume double 成交量
# TURNOVER = 'turnover' # turnover double 成交金额
# VWAP = 'vwap' # double 成交均价
# SETTLE = 'settle' # double 结算价
# OI = 'oi' # double 持仓量
# TRADE_STATUS = 'trade_status' # string 交易状态(”停牌”或者”交易”)
# TRADE_SUSPENSION = '停牌'
# TRADE_TRANSACTION = '交易'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(DailyDataFrame.TRADE_DATE)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[DailyDataFrame.CLOSE,
# DailyDataFrame.TRADE_DATE,
# DailyDataFrame.OPEN,
# DailyDataFrame.HIGH,
# DailyDataFrame.LOW,
# DailyDataFrame.VOLUME,
# DailyDataFrame.TRADE_STATUS])
#
#
#class BarDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# DATE = 'date' # int
# TIME = 'time' # int
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# FREQ = 'freq' # bar 类型
# OPEN = 'open'
# HIGH = 'high'
# LOW = 'low'
# CLOSE = 'close'
# VOLUME = 'volume'
# TURNOVER = 'turnover'
# VWAP = 'vwap'
# OI = 'oi'
# SETTLE = 'settle'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(BarDataFrame.TRADE_DATE)
#
def singleton(cls):
instance = {}
def geninstance(*args, **kwargs):
if cls not in instance:
instance[cls] = cls(*args, **kwargs)
return instance[cls]
return geninstance
@singleton
class Api(object):
__api = DataApi(addr='tcp://data.tushare.org:8910')
def __lazy_login(self):
if self.__login_flag is False:
ret = self.__api.login(self.id, self.token)
if ret[0] is None:
raise Exception('login failed %s - %s' % (self.id, ret))
else:
log.info('%s login success' % self.id)
self.__login_flag = ret
def __init__(self):
self.__login_flag = False
with open('.config.json') as f:
info = json.load(f)
self.id = info.get('id')
self.token = info.get('token')
# Get the list of securities listed on the market
|
def instrumentinfo(self, _fields="", _filter="inst_type=1&status=1&market=SH,SZ"):
self.__lazy_login()
df, msg = self.__api.query(view="jz.instrumentInfo", fields=_fields,
filter=_filter,
|
conditional_block
|
|
api.py
|
# 上市状态,1:上市 3:退市 8:暂停上市
# BUYLOT = 'bylot' # INT 最小买入单位
# SELLLOT = 'selllot' # INT 最大买入单位
# PRICETICK = 'pricetick' # double 最小变动单位
# PRODUCT = 'product' # 合约品种
# UNDERLYING = 'underlying' # 对应标的
# MULTIPLIER = 'multiplier' # int 合约乘数
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(InstrumentinfoDataFrame.SYMBOL)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[self.STATUS,
# self.LIST_DATE,
# self.NAME,
# self.SYMBOL,
# self.MARKET])
#
#
#class CecsuspDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# ANN_DATE = 'ann_date' # string 停牌公告日期
# SUSP_DATE = 'susp_date' # 停牌开始日期
# SUSP_TIME = 'susp_time' # string 停牌开始时间
# RESU_DATE = 'resu_date' # string 复牌日期
# RESU_TIME = 'resu_time' # string 复牌时间
# SUSP_REASON = 'susp_reason' # string 停牌原因
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(CecsuspDataFrame.SYMBOL)
#
#
#class SecrestrictedDataFrame(BaseDataFrame):
# SYMBOL = 'symbol' # string 证券代码
# LIST_DATE = 'list_date' # string 本期解禁流通日期
# LIFTED_REASON = 'lifted_reason' # 本期解禁原因(来源)
# LIFTED_SHARES = 'lifted_shares' # string 本期解禁数量
# LIFTED_RATIO = 'lifted_ratio' # string 可流通占A股总数比例
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(SecrestrictedDataFrame.SYMBOL)
#
#
#class TradecalDataFrame(BaseDataFrame):
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# ISTRADEDAY = 'istradeday' # string 是否交易日
# ISWEEKDAY = 'isweekday' # string 是否工作日
# ISWEEKDAY = 'isweekend' # string 是否周末
# ISHOLIDAY = 'isholiday' # string string 否节假日
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(TradecalDataFrame.TRADE_DATE)
#
#
#class DailyDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# OPEN = 'open' # double 开盘价
# HIGH = 'high' # double 最高价
# LOW = 'low' # double 最低价
# CLOSE = 'close' # double 收盘价
# VOLUME = 'volume' # volume double 成交量
# TURNOVER = 'turnover' # turnover double 成交金额
# VWAP = 'vwap' # double 成交均价
# SETTLE = 'settle' # double 结算价
# OI = 'oi' # double 持仓量
# TRADE_STATUS = 'trade_status' # string 交易状态(”停牌”或者”交易”)
# TRADE_SUSPENSION = '停牌'
# TRADE_TRANSACTION = '交易'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(DailyDataFrame.TRADE_DATE)
#
# @classmethod
# def fields(self):
# return BaseDataFrame.format_fields(
# *[DailyDataFrame.CLOSE,
# DailyDataFrame.TRADE_DATE,
# DailyDataFrame.OPEN,
# DailyDataFrame.HIGH,
# DailyDataFrame.LOW,
# DailyDataFrame.VOLUME,
# DailyDataFrame.TRADE_STATUS])
#
#
#class BarDataFrame(BaseDataFrame):
# SYMBOL = 'symbol'
# CODE = 'code' # string 交易所原始代码
# DATE = 'date' # int
# TIME = 'time' # int
# TRADE_DATE = 'trade_date' # int YYYYMMDD格式,如20170823
# FREQ = 'freq' # bar 类型
# OPEN = 'open'
# HIGH = 'high'
# LOW = 'low'
# CLOSE = 'close'
# VOLUME = 'volume'
# TURNOVER = 'turnover'
# VWAP = 'vwap'
# OI = 'oi'
# SETTLE = 'settle'
#
# def __init__(self, df):
# BaseDataFrame.__init__(self, df)
# self.set_index(BarDataFrame.TRADE_DATE)
#
def singleton(cls):
instance = {}
def geninstance(*args, **kwargs):
if cls not in instance:
instance[cls] = cls(*args, **kwargs)
return instance[cls]
return geninstance
@singleton
class Api(object):
__api = DataApi(addr='tcp://data.tushare.org:8910')
def __lazy_login(self):
if self.__login_flag is False:
ret = self.__api.login(self.id, self.token)
if ret[0] is None:
raise Exception('login failed %s - %s' % (self.id, ret))
else:
log.info('%s login success' % self.id)
self.__login_flag = ret
def __init__(self):
self.__login_flag = False
with open('.config.json') as f:
info = json.load(f)
self.id = info.get('id')
self.token = info.get('token')
# Get the list of securities listed on the market
def instrumentinfo(self, _fields="", _filter="inst_type=1&status=1&market=SH,SZ"):
self.__lazy_login()
df, msg = self.__api.query(view="jz.instrumentInfo", fields=_fields,
filter=_filter,
data_format='pandas')
log.debug('request jz.instrumentInfo')
return df
# Suspension/resumption data: resu_date is not set
# def secsusp(self, _filter="", _fields=""):
# self.__lazy_login()
# df, msg = self.__api.query(
# view="lb.secSusp",
# fields=_fields,
# filter=_filter,
# data_format='pandas')
# log.debug('request lb.secSusp')
# return CecsuspDataFrame(df)
# Get the list of restricted shares coming off lock-up
# TODO
#def secrestricted(self, _start_date, _end_date, _fields=""):
# self.__lazy_login()
# filters = urlencode({'start_date': TradeCalendar.date2int(_start_date),
# 'end_date': TradeCalendar.date2int(_end_date)})
# df, msg = self.__api.query(
# view="lb.secRestricted",
# fields=_fields,
# filter=filters,
# data_format='pandas')
# log.debug('request lb.secRestricted')
# return df
# Daily trading quotes
def daily(self, symbol, start_date, end_date, freq='1d', fields= "", adjust_mode='post'):
self.__lazy_login()
df, msg = self.__api.daily(
symbol=symbol,
freq=freq,
start_date=start_date,
end_date=end_date,
fields=fields,
adjust_mode=adjust_mode)
log.debug('request daily %s' % msg)
return df
# Trading calendar
def tradecal(self, _fields="trade_date, istradeday"):
self.__lazy_login()
df, msg = self.__api.query(
view="jz.secTradeCal",
fields=_fields)
log.debug('request jz.secTradeCal')
return df
# Subscribe
def subscribe(self, _symbol, _func, _fields):
self.__lazy_login()
sublist, msg = self.__api.subscribe(_symbol, func=_func,
fields=_fields)
log.debug('request subscribe')
return sublist
# bar
def bar(self, _symbol, _trade_date, _freq="5M", _fields=""):
self.__lazy_login()
df, msg = self.__api.bar(symbol=_symbol,
trade_date=_trade_date,
freq=_freq,
fields=_f
|
ields)
|
identifier_name
|
|
ChineseCheckersBoard.py
|
46])
self.squareList[ 57 ].setAdjacent([56,66,67,58,48,47])
self.squareList[ 58 ].setAdjacent([57,67,68,59,49,48])
self.squareList[ 59 ].setAdjacent([58,68,69,60,50,49])
self.squareList[ 60 ].setAdjacent([59,69,70,61,51,50])
self.squareList[ 61 ].setAdjacent([60,70,71,62,52,51])
self.squareList[ 62 ].setAdjacent([61,71,72,63,53,52])
self.squareList[ 63 ].setAdjacent([62,72,73,64,54,53])
self.squareList[ 64 ].setAdjacent([63,73,74,None,55,54])
self.squareList[ 65 ].setAdjacent([None,75,76,66,56,None])
self.squareList[ 66 ].setAdjacent([65,76,77,67,57,56])
self.squareList[ 67 ].setAdjacent([66,77,78,68,58,57])
self.squareList[ 68 ].setAdjacent([67,78,79,69,59,58])
self.squareList[ 69 ].setAdjacent([68,79,80,70,60,59])
self.squareList[ 70 ].setAdjacent([69,80,81,71,61,60])
self.squareList[ 71 ].setAdjacent([70,81,82,72,62,61])
self.squareList[ 72 ].setAdjacent([71,82,83,73,63,62])
self.squareList[ 73 ].setAdjacent([72,83,84,74,64,63])
self.squareList[ 74 ].setAdjacent([73,84,85,None,None,64])
self.squareList[ 75 ].setAdjacent([None,86,87,76,65,None])
self.squareList[ 76 ].setAdjacent([75,87,88,77,66,65])
self.squareList[ 77 ].setAdjacent([76,88,89,78,67,66])
self.squareList[ 78 ].setAdjacent([77,89,90,79,68,67])
self.squareList[ 79 ].setAdjacent([78,90,91,80,69,68])
self.squareList[ 80 ].setAdjacent([79,91,92,81,70,69])
self.squareList[ 81 ].setAdjacent([80,92,93,82,71,70])
self.squareList[ 82 ].setAdjacent([81,93,94,83,72,71])
self.squareList[ 83 ].setAdjacent([82,94,95,84,73,72])
self.squareList[ 84 ].setAdjacent([83,95,96,85,74,73])
self.squareList[ 85 ].setAdjacent([84,96,97,None,None,74])
self.squareList[ 86 ].setAdjacent([None,98,99,87,75,None])
self.squareList[ 87 ].setAdjacent([86,99,100,88,76,75])
self.squareList[ 88 ].setAdjacent([87,100,101,89,77,76])
self.squareList[ 89 ].setAdjacent([88,101,102,90,78,77])
self.squareList[ 90 ].setAdjacent([89,102,103,91,79,78])
self.squareList[ 91 ].setAdjacent([90,103,104,92,80,79])
self.squareList[ 92 ].setAdjacent([91,104,105,93,81,80])
self.squareList[ 93 ].setAdjacent([92,105,106,94,82,81])
self.squareList[ 94 ].setAdjacent([93,106,107,95,83,82])
self.squareList[ 95 ].setAdjacent([94,107,108,96,84,83])
self.squareList[ 96 ].setAdjacent([95,108,109,97,85,84])
self.squareList[ 97 ].setAdjacent([96,109,110,None,None,85])
self.squareList[ 98 ].setAdjacent([None,None,None,99,86,None])
self.squareList[ 99 ].setAdjacent([98,None,None,100,87,86])
self.squareList[ 100 ].setAdjacent([99,None,None,101,88,87])
self.squareList[ 101 ].setAdjacent([100,None,None,102,89,88])
self.squareList[ 102 ].setAdjacent([101,None,111,103,90,89])
self.squareList[ 103 ].setAdjacent([102,111,112,104,91,90])
self.squareList[ 104 ].setAdjacent([103,112,113,105,92,91])
self.squareList[ 105 ].setAdjacent([104,113,114,106,93,92])
self.squareList[ 106 ].setAdjacent([105,114,None,107,94,93])
self.squareList[ 107 ].setAdjacent([106,None,None,108,95,94])
self.squareList[ 108 ].setAdjacent([107,None,None,109,96,95])
self.squareList[ 109 ].setAdjacent([108,None,None,110,97,96])
self.squareList[ 110 ].setAdjacent([109,None,None,None,None,97])
self.squareList[ 111 ].setAdjacent([None,None,115,112,103,102])
self.squareList[ 112 ].setAdjacent([111,115,116,113,104,103])
self.squareList[ 113 ].setAdjacent([112,116,117,114,105,104])
self.squareList[ 114 ].setAdjacent([113,117,None,None,106,105])
self.squareList[ 115 ].setAdjacent([None,None,118,116,112,111])
self.squareList[ 116 ].setAdjacent([115,118,119,117,113,112])
self.squareList[ 117 ].setAdjacent([116,119,None,None,114,113])
self.squareList[ 118 ].setAdjacent([None,None,120,119,116,115])
self.squareList[ 119 ].setAdjacent([118,120,None,None,117,116])
self.squareList[ 120 ].setAdjacent([None,None,None,None,119,118])
def delete(self):
for x in self.squareList:
x.delete()
del self.squareList
def getSquare(self, arrayLoc):
return self.squareList[arrayLoc]
def getSquareOffset(self, arrayLoc):
return self.squareList[arrayLoc-1]
def getState(self, squareNum):
return self.squareList[squareNum].getState()
def getStateOffset(self, squareNum):
return self.squareList[squareNum-1].getState()
def
|
setState
|
identifier_name
|
|
ChineseCheckersBoard.py
|
3,62])
self.squareList[ 73 ].setAdjacent([72,83,84,74,64,63])
self.squareList[ 74 ].setAdjacent([73,84,85,None,None,64])
self.squareList[ 75 ].setAdjacent([None,86,87,76,65,None])
self.squareList[ 76 ].setAdjacent([75,87,88,77,66,65])
self.squareList[ 77 ].setAdjacent([76,88,89,78,67,66])
self.squareList[ 78 ].setAdjacent([77,89,90,79,68,67])
self.squareList[ 79 ].setAdjacent([78,90,91,80,69,68])
self.squareList[ 80 ].setAdjacent([79,91,92,81,70,69])
self.squareList[ 81 ].setAdjacent([80,92,93,82,71,70])
self.squareList[ 82 ].setAdjacent([81,93,94,83,72,71])
self.squareList[ 83 ].setAdjacent([82,94,95,84,73,72])
self.squareList[ 84 ].setAdjacent([83,95,96,85,74,73])
self.squareList[ 85 ].setAdjacent([84,96,97,None,None,74])
self.squareList[ 86 ].setAdjacent([None,98,99,87,75,None])
self.squareList[ 87 ].setAdjacent([86,99,100,88,76,75])
self.squareList[ 88 ].setAdjacent([87,100,101,89,77,76])
self.squareList[ 89 ].setAdjacent([88,101,102,90,78,77])
self.squareList[ 90 ].setAdjacent([89,102,103,91,79,78])
self.squareList[ 91 ].setAdjacent([90,103,104,92,80,79])
self.squareList[ 92 ].setAdjacent([91,104,105,93,81,80])
self.squareList[ 93 ].setAdjacent([92,105,106,94,82,81])
self.squareList[ 94 ].setAdjacent([93,106,107,95,83,82])
self.squareList[ 95 ].setAdjacent([94,107,108,96,84,83])
self.squareList[ 96 ].setAdjacent([95,108,109,97,85,84])
self.squareList[ 97 ].setAdjacent([96,109,110,None,None,85])
self.squareList[ 98 ].setAdjacent([None,None,None,99,86,None])
self.squareList[ 99 ].setAdjacent([98,None,None,100,87,86])
self.squareList[ 100 ].setAdjacent([99,None,None,101,88,87])
self.squareList[ 101 ].setAdjacent([100,None,None,102,89,88])
self.squareList[ 102 ].setAdjacent([101,None,111,103,90,89])
self.squareList[ 103 ].setAdjacent([102,111,112,104,91,90])
self.squareList[ 104 ].setAdjacent([103,112,113,105,92,91])
self.squareList[ 105 ].setAdjacent([104,113,114,106,93,92])
self.squareList[ 106 ].setAdjacent([105,114,None,107,94,93])
self.squareList[ 107 ].setAdjacent([106,None,None,108,95,94])
self.squareList[ 108 ].setAdjacent([107,None,None,109,96,95])
self.squareList[ 109 ].setAdjacent([108,None,None,110,97,96])
self.squareList[ 110 ].setAdjacent([109,None,None,None,None,97])
self.squareList[ 111 ].setAdjacent([None,None,115,112,103,102])
self.squareList[ 112 ].setAdjacent([111,115,116,113,104,103])
self.squareList[ 113 ].setAdjacent([112,116,117,114,105,104])
self.squareList[ 114 ].setAdjacent([113,117,None,None,106,105])
self.squareList[ 115 ].setAdjacent([None,None,118,116,112,111])
self.squareList[ 116 ].setAdjacent([115,118,119,117,113,112])
self.squareList[ 117 ].setAdjacent([116,119,None,None,114,113])
self.squareList[ 118 ].setAdjacent([None,None,120,119,116,115])
self.squareList[ 119 ].setAdjacent([118,120,None,None,117,116])
self.squareList[ 120 ].setAdjacent([None,None,None,None,119,118])
def delete(self):
for x in self.squareList:
x.delete()
del self.squareList
def getSquare(self, arrayLoc):
return self.squareList[arrayLoc]
def getSquareOffset(self, arrayLoc):
return self.squareList[arrayLoc-1]
def getState(self, squareNum):
return self.squareList[squareNum].getState()
def getStateOffset(self, squareNum):
return self.squareList[squareNum-1].getState()
def setState(self, squareNum, newState):
self.squareList[squareNum].setState(newState)
def setStateOffset(self, squareNum, newState):
self.squareList[squareNum-1].setState(newState)
def getAdjacent(self, squareNum):
return self.squareList[squareNum].adjacent
def getAdjacentOffset(self, squareNum):
return self.squareList[squareNum-1].adjacent
def getStates(self):
retList = []
for x in range(121):
retList.append(self.squareList[x].getState())
return retList
def setStates(self, squares):
    for x in range(121):
        self.squareList[x].setState(squares[x])
#---------------------------------------------------------------#
#CheckersSquare: This is the base object for
#a square inside of a Chinese checkers game. By 'square', I mean
#a location onto which a piece can be placed or moved.
#
#A Square has 7 possible states:
# A state of 0 => Unoccupied
# A state of 1-6 => Owned by the corresponding player
#
#A Square also has a corresponding adjacency list. Meaning:
#from a given square there are up to 6 squares adjacent to it,
#and these must be represented in a directed manner. The list for a
#square is laid out in this fashion, where X is the square on the board:
#
# 1 2
# 0 X 3
# 5 4
#
#numbered in a clockwise fashion. If an element of the square's adjacency
#list is None, then there is no square in that direction from
#that square. (A short usage sketch follows the class methods below.)
#----------------------------------------------------------------#
class CheckersSquare:
def __init__(self, tileNu):
self.tileNum = tileNu
self.state = 0  # 0: begins as an unoccupied square
self.adjacent = []
def delete(self):
del self.tileNum
del self.state
del self.adjacent
def setAdjacent(self, adjList):
for x in adjList:
self.adjacent.append(x)
def getAdjacent(self):
    return self.adjacent
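
# A minimal usage sketch, added for illustration and not part of the original file
# (the helper name _adjacency_demo is ours). Square 73's adjacency values are copied
# from the board setup above; direction indices follow the diagram in the comment block:
# 0 = left, 1 = upper-left, 2 = upper-right, 3 = right, 4 = lower-right, 5 = lower-left.
def _adjacency_demo():
    sq = CheckersSquare(73)
    sq.setAdjacent([72, 83, 84, 74, 64, 63])
    assert sq.getAdjacent()[3] == 74   # the square directly to the right of square 73
    assert sq.getAdjacent()[1] == 83   # the square up and to the left of square 73
    assert sq.state == 0               # every square starts out unoccupied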
random_line_split

ChineseCheckersBoard.py
self.squareList[ 62 ].setAdjacent([61,71,72,63,53,52])
self.squareList[ 63 ].setAdjacent([62,72,73,64,54,53])
self.squareList[ 64 ].setAdjacent([63,73,74,None,55,54])
self.squareList[ 65 ].setAdjacent([None,75,76,66,56,None])
self.squareList[ 66 ].setAdjacent([65,76,77,67,57,56])
self.squareList[ 67 ].setAdjacent([66,77,78,68,58,57])
self.squareList[ 68 ].setAdjacent([67,78,79,69,59,58])
self.squareList[ 69 ].setAdjacent([68,79,80,70,60,59])
self.squareList[ 70 ].setAdjacent([69,80,81,71,61,60])
self.squareList[ 71 ].setAdjacent([70,81,82,72,62,61])
self.squareList[ 72 ].setAdjacent([71,82,83,73,63,62])
self.squareList[ 73 ].setAdjacent([72,83,84,74,64,63])
self.squareList[ 74 ].setAdjacent([73,84,85,None,None,64])
self.squareList[ 75 ].setAdjacent([None,86,87,76,65,None])
self.squareList[ 76 ].setAdjacent([75,87,88,77,66,65])
self.squareList[ 77 ].setAdjacent([76,88,89,78,67,66])
self.squareList[ 78 ].setAdjacent([77,89,90,79,68,67])
self.squareList[ 79 ].setAdjacent([78,90,91,80,69,68])
self.squareList[ 80 ].setAdjacent([79,91,92,81,70,69])
self.squareList[ 81 ].setAdjacent([80,92,93,82,71,70])
self.squareList[ 82 ].setAdjacent([81,93,94,83,72,71])
self.squareList[ 83 ].setAdjacent([82,94,95,84,73,72])
self.squareList[ 84 ].setAdjacent([83,95,96,85,74,73])
self.squareList[ 85 ].setAdjacent([84,96,97,None,None,74])
self.squareList[ 86 ].setAdjacent([None,98,99,87,75,None])
self.squareList[ 87 ].setAdjacent([86,99,100,88,76,75])
self.squareList[ 88 ].setAdjacent([87,100,101,89,77,76])
self.squareList[ 89 ].setAdjacent([88,101,102,90,78,77])
self.squareList[ 90 ].setAdjacent([89,102,103,91,79,78])
self.squareList[ 91 ].setAdjacent([90,103,104,92,80,79])
self.squareList[ 92 ].setAdjacent([91,104,105,93,81,80])
self.squareList[ 93 ].setAdjacent([92,105,106,94,82,81])
self.squareList[ 94 ].setAdjacent([93,106,107,95,83,82])
self.squareList[ 95 ].setAdjacent([94,107,108,96,84,83])
self.squareList[ 96 ].setAdjacent([95,108,109,97,85,84])
self.squareList[ 97 ].setAdjacent([96,109,110,None,None,85])
self.squareList[ 98 ].setAdjacent([None,None,None,99,86,None])
self.squareList[ 99 ].setAdjacent([98,None,None,100,87,86])
self.squareList[ 100 ].setAdjacent([99,None,None,101,88,87])
self.squareList[ 101 ].setAdjacent([100,None,None,102,89,88])
self.squareList[ 102 ].setAdjacent([101,None,111,103,90,89])
self.squareList[ 103 ].setAdjacent([102,111,112,104,91,90])
self.squareList[ 104 ].setAdjacent([103,112,113,105,92,91])
self.squareList[ 105 ].setAdjacent([104,113,114,106,93,92])
self.squareList[ 106 ].setAdjacent([105,114,None,107,94,93])
self.squareList[ 107 ].setAdjacent([106,None,None,108,95,94])
self.squareList[ 108 ].setAdjacent([107,None,None,109,96,95])
self.squareList[ 109 ].setAdjacent([108,None,None,110,97,96])
self.squareList[ 110 ].setAdjacent([109,None,None,None,None,97])
self.squareList[ 111 ].setAdjacent([None,None,115,112,103,102])
self.squareList[ 112 ].setAdjacent([111,115,116,113,104,103])
self.squareList[ 113 ].setAdjacent([112,116,117,114,105,104])
self.squareList[ 114 ].setAdjacent([113,117,None,None,106,105])
self.squareList[ 115 ].setAdjacent([None,None,118,116,112,111])
self.squareList[ 116 ].setAdjacent([115,118,119,117,113,112])
self.squareList[ 117 ].setAdjacent([116,119,None,None,114,113])
self.squareList[ 118 ].setAdjacent([None,None,120,119,116,115])
self.squareList[ 119 ].setAdjacent([118,120,None,None,117,116])
self.squareList[ 120 ].setAdjacent([None,None,None,None,119,118])
def delete(self):
for x in self.squareList:
x.delete()
del self.squareList
def getSquare(self, arrayLoc):
return self.squareList[arrayLoc]
def getSquareOffset(self, arrayLoc):
return self.squareList[arrayLoc-1]
def getState(self, squareNum):
return self.squareList[squareNum].getState()
def getStateOffset(self, squareNum):
    return self.squareList[squareNum-1].getState()
def setState(self, squareNum, newState):
self.squareList[squareNum].setState(newState)
def setStateOffset(self, squareNum, newState):
self.squareList[squareNum-1].setState(newState)
def getAdjacent(self, squareNum):
return self.squareList[squareNum].adjacent
def getAdjacentOffset(self, squareNum):
return self.squareList[squareNum-1].adjacent
def getStates(self):
retList = []
for x in range(121):
retList.append(self.squareList[x].getState())
return retList
def setStates(self, squares):
    for x in range(121):
        self.squareList[x].setState(squares[x])
conditional_block

ChineseCheckersBoard.py
self.squareList[ 58 ].setAdjacent([57,67,68,59,49,48])
self.squareList[ 59 ].setAdjacent([58,68,69,60,50,49])
self.squareList[ 60 ].setAdjacent([59,69,70,61,51,50])
self.squareList[ 61 ].setAdjacent([60,70,71,62,52,51])
self.squareList[ 62 ].setAdjacent([61,71,72,63,53,52])
self.squareList[ 63 ].setAdjacent([62,72,73,64,54,53])
self.squareList[ 64 ].setAdjacent([63,73,74,None,55,54])
self.squareList[ 65 ].setAdjacent([None,75,76,66,56,None])
self.squareList[ 66 ].setAdjacent([65,76,77,67,57,56])
self.squareList[ 67 ].setAdjacent([66,77,78,68,58,57])
self.squareList[ 68 ].setAdjacent([67,78,79,69,59,58])
self.squareList[ 69 ].setAdjacent([68,79,80,70,60,59])
self.squareList[ 70 ].setAdjacent([69,80,81,71,61,60])
self.squareList[ 71 ].setAdjacent([70,81,82,72,62,61])
self.squareList[ 72 ].setAdjacent([71,82,83,73,63,62])
self.squareList[ 73 ].setAdjacent([72,83,84,74,64,63])
self.squareList[ 74 ].setAdjacent([73,84,85,None,None,64])
self.squareList[ 75 ].setAdjacent([None,86,87,76,65,None])
self.squareList[ 76 ].setAdjacent([75,87,88,77,66,65])
self.squareList[ 77 ].setAdjacent([76,88,89,78,67,66])
self.squareList[ 78 ].setAdjacent([77,89,90,79,68,67])
self.squareList[ 79 ].setAdjacent([78,90,91,80,69,68])
self.squareList[ 80 ].setAdjacent([79,91,92,81,70,69])
self.squareList[ 81 ].setAdjacent([80,92,93,82,71,70])
self.squareList[ 82 ].setAdjacent([81,93,94,83,72,71])
self.squareList[ 83 ].setAdjacent([82,94,95,84,73,72])
self.squareList[ 84 ].setAdjacent([83,95,96,85,74,73])
self.squareList[ 85 ].setAdjacent([84,96,97,None,None,74])
self.squareList[ 86 ].setAdjacent([None,98,99,87,75,None])
self.squareList[ 87 ].setAdjacent([86,99,100,88,76,75])
self.squareList[ 88 ].setAdjacent([87,100,101,89,77,76])
self.squareList[ 89 ].setAdjacent([88,101,102,90,78,77])
self.squareList[ 90 ].setAdjacent([89,102,103,91,79,78])
self.squareList[ 91 ].setAdjacent([90,103,104,92,80,79])
self.squareList[ 92 ].setAdjacent([91,104,105,93,81,80])
self.squareList[ 93 ].setAdjacent([92,105,106,94,82,81])
self.squareList[ 94 ].setAdjacent([93,106,107,95,83,82])
self.squareList[ 95 ].setAdjacent([94,107,108,96,84,83])
self.squareList[ 96 ].setAdjacent([95,108,109,97,85,84])
self.squareList[ 97 ].setAdjacent([96,109,110,None,None,85])
self.squareList[ 98 ].setAdjacent([None,None,None,99,86,None])
self.squareList[ 99 ].setAdjacent([98,None,None,100,87,86])
self.squareList[ 100 ].setAdjacent([99,None,None,101,88,87])
self.squareList[ 101 ].setAdjacent([100,None,None,102,89,88])
self.squareList[ 102 ].setAdjacent([101,None,111,103,90,89])
self.squareList[ 103 ].setAdjacent([102,111,112,104,91,90])
self.squareList[ 104 ].setAdjacent([103,112,113,105,92,91])
self.squareList[ 105 ].setAdjacent([104,113,114,106,93,92])
self.squareList[ 106 ].setAdjacent([105,114,None,107,94,93])
self.squareList[ 107 ].setAdjacent([106,None,None,108,95,94])
self.squareList[ 108 ].setAdjacent([107,None,None,109,96,95])
self.squareList[ 109 ].setAdjacent([108,None,None,110,97,96])
self.squareList[ 110 ].setAdjacent([109,None,None,None,None,97])
self.squareList[ 111 ].setAdjacent([None,None,115,112,103,102])
self.squareList[ 112 ].setAdjacent([111,115,116,113,104,103])
self.squareList[ 113 ].setAdjacent([112,116,117,114,105,104])
self.squareList[ 114 ].setAdjacent([113,117,None,None,106,105])
self.squareList[ 115 ].setAdjacent([None,None,118,116,112,111])
self.squareList[ 116 ].setAdjacent([115,118,119,117,113,112])
self.squareList[ 117 ].setAdjacent([116,119,None,None,114,113])
self.squareList[ 118 ].setAdjacent([None,None,120,119,116,115])
self.squareList[ 119 ].setAdjacent([118,120,None,None,117,116])
self.squareList[ 120 ].setAdjacent([None,None,None,None,119,118])
def delete(self):
for x in self.squareList:
x.delete()
del self.squareList
def getSquare(self, arrayLoc):
return self.squareList[arrayLoc]
def getSquareOffset(self, arrayLoc):
return self.squareList[arrayLoc-1]
def getState(self, squareNum):
return self.squareList[squareNum].getState()
def getStateOffset(self, squareNum):
    return self.squareList[squareNum-1].getState()
def setState(self, squareNum, newState):
self.squareList[squareNum].setState(newState)
def setStateOffset(self, squareNum, newState):
self.squareList[squareNum-1].setState(newState)
identifier_body

routes.go
swaggerfiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // generated swagger docs
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
type APIServer struct {
ctx context.Context
engine *gin.Engine
srv *http.Server
addr string
port string
addrIPv6 string
}
// NewAPIServer :
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
gin.SetMode(gin.ReleaseMode)
engine := gin.Default()
srv := &http.Server{Addr: addr, Handler: engine}
s := &APIServer{
ctx: ctx,
engine: engine,
srv: srv,
addr: addr,
port: port,
addrIPv6: addrIPv6,
}
s.newRoutes(engine)
return s, nil
}
// Run :
func (a *APIServer) Run() error {
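	// Bind the primary address on a dual-stack listener; the IPv6 address is added below
	// only when it is configured and differs from the primary one.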
dualStackListener := listener.NewDualStackListener()
addr := utils.GetListenAddr(a.addr, a.port)
	if err := dualStackListener.AddListenerWithAddr(addr); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
}
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes registers all HTTP routes on the engine
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// Add the X-Request-Id header to each request
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// OpenAPI documentation
// available at swagger/index.html and swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// Register the HTTP routes
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
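	// Register the same handlers once more under the API service prefix so requests that
	// arrive with that prefix (e.g. via a fronting gateway) resolve identically.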
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// Log-related endpoints
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// SSE real-time log stream
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// BlueKing Monitor collector agent
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log log collection rules
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes registers the metrics-related endpoints
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// Naming convention:
// usage          -> a percentage
// used           -> an absolute amount in use
// overview, info -> plain numeric values
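// For example, cpu_usage below yields a percentage, pods/memory_used an absolute amount,
// and overview a set of plain numbers.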
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // Multi-Pod queries may cover dozens or even hundreds of Pods, so the Pod list is passed via POST
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/namespaces
route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview))
random_line_split