| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
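Each row below is a fill-in-the-middle (FIM) example: `prefix` and `suffix` hold the code surrounding an elided span, `middle` holds the elided span itself, and `fim_type` labels which of the 4 span kinds was removed (`conditional_block`, `identifier_name`, `identifier_body`, `random_line_split`). As a rough, non-authoritative sketch of the schema only - the field names mirror the columns above, while the struct name and the `reassemble` helper are invented for illustration - a row could be modelled like this:

```rust
/// Illustrative model of one dataset row; everything except the field names is hypothetical.
#[derive(Debug, Clone)]
struct FimRow {
    file_name: String, // e.g. "tls.go" or "theme.rs"
    prefix: String,    // code before the elided span
    suffix: String,    // code after the elided span
    middle: String,    // the elided span a model is asked to fill in
    fim_type: String,  // one of the 4 classes, e.g. "conditional_block"
}

impl FimRow {
    /// Reassembling the original source is simply prefix + middle + suffix.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```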
tls.go
|
traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
// Select the config pertaining to the service being processed.
func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {
svcConfigs := make([]model.Config, 0)
for index := range configs {
virtualService := configs[index].Spec.(*v1alpha3.VirtualService)
for _, vsHost := range virtualService.Hosts {
if host.Name(vsHost).Matches(hostname) {
svcConfigs = append(svcConfigs, configs[index])
break
}
}
}
return svcConfigs
}
// hashRuntimeTLSMatchPredicates hashes runtime predicates of a TLS match
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
return strings.Join(match.SniHosts, ",") + "|" + strings.Join(match.DestinationSubnets, ",")
}
func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if !listenPort.Protocol.IsTLS() {
return nil
}
actualWildcard, _ := getActualWildcardAndLocalHost(node)
// TLS matches are composed of runtime and static predicates.
// Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
// Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
// evaluate them. Examples: SNI hosts, source/destination subnets, etc.
//
// A list of matches may contain duplicate runtime matches, but different static matches. For example:
//
// {sni_hosts: A, sourceLabels: X} => destination M
// {sni_hosts: A, sourceLabels: *} => destination N
//
// For a proxy with labels X, we can evaluate the static predicates to get:
// {sni_hosts: A} => destination M
// {sni_hosts: A} => destination N
//
// The matches have the same runtime predicates. Since the second match can never be reached, we only
// want to generate config for the first match.
//
// To achieve this, the function keeps track of which runtime matches we have already generated config for
// and only adds config if we have not already generated config for that set of runtime predicates.
matchHasBeenHandled := make(map[string]bool) // Runtime predicate set -> have we generated config for this set?
// Is there a virtual service with a TLS block that matches us?
hasTLSMatch := false
out := make([]*filterChainOpts, 0)
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tls := range virtualService.Tls
|
networkFilters: buildOutboundNetworkFilters(node, tls.Route, push, listenPort, cfg.ConfigMeta),
})
hasTLSMatch = true
}
matchHasBeenHandled[matchHash] = true
}
}
}
}
// HTTPS or TLS ports without associated virtual service
if !hasTLSMatch {
var sniHosts []string
// In the case of a sidecar config with a user-defined port, if the user-specified port is not the same as the
// service's port, then pick the service port if and only if the service has only one port. If the service
// has multiple ports, then route to a cluster with the listener port (i.e. the sidecar-defined port) - the
// traffic will most likely blackhole.
port := listenPort.Port
if len(service.Ports) == 1 {
port = service.Ports[0].Port
}
clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
statPrefix := clusterName
// If stat name is configured, use it to build the stat prefix.
if len(push.Mesh.OutboundClusterStatName) != 0 {
statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
}
// Use the hostname as the SNI value if and only if we do not have a destination VIP or if the destination is a CIDR.
// In both cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
// target services. If we have a VIP, then we know the destination. There is no need to do an SNI match. It saves us from
// having to generate expensive permutations of the host name just like RDS does.
// NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
// ignore one of the services.
svcListenAddress := service.GetServiceAddressForProxy(node)
if strings.Contains(svcListenAddress, "/") {
// Address is a CIDR, already captured by destinationCIDR parameter.
svcListenAddress = ""
}
if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || svcListenAddress == actualWildcard {
sniHosts = []string{string(service.Hostname)}
}
out = append(out, &filterChainOpts{
sniHosts: sniHosts,
destinationCIDRs: []string{destinationCIDR},
networkFilters: buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
})
}
return out
}
func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if listenPort.Protocol.IsTLS() {
return nil
}
out := make([]*filterChainOpts, 0)
// Very basic TCP: break as soon as we add one network filter with no destination addresses to match.
// This is the terminating condition in the filter chain match list.
defaultRouteAdded := false
TcpLoop:
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tcp := range virtualService.Tcp {
destinationCIDRs := []string{destinationCIDR}
if len(tcp.Match) == 0 {
// implicit match
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
destinationCIDRs: destinationCIDRs,
networkFilters: buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
})
defaultRouteAdded = true
break TcpLoop
}
// Use the service's virtual address first.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
virtualServiceDestinationSubnets := make([]string, 0)
for _, match := range tcp.Match {
if matchTCP(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Scan all the match blocks
|
{
for _, match := range tls.Match {
if matchTLS(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Use the service's CIDRs.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
// destinationCIDR will be empty for services with VIPs
destinationCIDRs := []string{destinationCIDR}
// Only set CIDR match if the listener is bound to an IP.
// If it's bound to a Unix domain socket, then ignore the CIDR matches.
// Unix-domain-socket-bound ports have their Port value set to 0.
if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
destinationCIDRs = match.DestinationSubnets
}
matchHash := hashRuntimeTLSMatchPredicates(match)
if !matchHasBeenHandled[matchHash] {
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
sniHosts: match.SniHosts,
destinationCIDRs: destinationCIDRs,
|
conditional_block
|
tls.go
|
(match *v1alpha3.TLSMatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
// Match by source labels, the listener port where traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
// Select the config pertaining to the service being processed.
func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {
svcConfigs := make([]model.Config, 0)
for index := range configs {
virtualService := configs[index].Spec.(*v1alpha3.VirtualService)
for _, vsHost := range virtualService.Hosts {
if host.Name(vsHost).Matches(hostname) {
svcConfigs = append(svcConfigs, configs[index])
break
}
}
}
return svcConfigs
}
// hashRuntimeTLSMatchPredicates hashes runtime predicates of a TLS match
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
return strings.Join(match.SniHosts, ",") + "|" + strings.Join(match.DestinationSubnets, ",")
}
func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if !listenPort.Protocol.IsTLS() {
return nil
}
actualWildcard, _ := getActualWildcardAndLocalHost(node)
// TLS matches are composed of runtime and static predicates.
// Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
// Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
// evaluate them. Examples: SNI hosts, source/destination subnets, etc.
//
// A list of matches may contain duplicate runtime matches, but different static matches. For example:
//
// {sni_hosts: A, sourceLabels: X} => destination M
// {sni_hosts: A, sourceLabels: *} => destination N
//
// For a proxy with labels X, we can evaluate the static predicates to get:
// {sni_hosts: A} => destination M
// {sni_hosts: A} => destination N
//
// The matches have the same runtime predicates. Since the second match can never be reached, we only
// want to generate config for the first match.
//
// To achieve this, the function keeps track of which runtime matches we have already generated config for
// and only adds config if we have not already generated config for that set of runtime predicates.
matchHasBeenHandled := make(map[string]bool) // Runtime predicate set -> have we generated config for this set?
// Is there a virtual service with a TLS block that matches us?
hasTLSMatch := false
out := make([]*filterChainOpts, 0)
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tls := range virtualService.Tls {
for _, match := range tls.Match {
if matchTLS(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Use the service's CIDRs.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
// destinationCIDR will be empty for services with VIPs
destinationCIDRs := []string{destinationCIDR}
// Only set CIDR match if the listener is bound to an IP.
// If it's bound to a Unix domain socket, then ignore the CIDR matches.
// Unix-domain-socket-bound ports have their Port value set to 0.
if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
destinationCIDRs = match.DestinationSubnets
}
matchHash := hashRuntimeTLSMatchPredicates(match)
if !matchHasBeenHandled[matchHash] {
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
sniHosts: match.SniHosts,
destinationCIDRs: destinationCIDRs,
networkFilters: buildOutboundNetworkFilters(node, tls.Route, push, listenPort, cfg.ConfigMeta),
})
hasTLSMatch = true
}
matchHasBeenHandled[matchHash] = true
}
}
}
}
// HTTPS or TLS ports without associated virtual service
if !hasTLSMatch {
var sniHosts []string
// In the case of a sidecar config with a user-defined port, if the user-specified port is not the same as the
// service's port, then pick the service port if and only if the service has only one port. If the service
// has multiple ports, then route to a cluster with the listener port (i.e. the sidecar-defined port) - the
// traffic will most likely blackhole.
port := listenPort.Port
if len(service.Ports) == 1 {
port = service.Ports[0].Port
}
clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
statPrefix := clusterName
// If stat name is configured, use it to build the stat prefix.
if len(push.Mesh.OutboundClusterStatName) != 0 {
statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
}
// Use the hostname as the SNI value if and only if we do not have a destination VIP or if the destination is a CIDR.
// In both cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
// target services. If we have a VIP, then we know the destination. There is no need to do an SNI match. It saves us from
// having to generate expensive permutations of the host name just like RDS does.
// NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
// ignore one of the services.
svcListenAddress := service.GetServiceAddressForProxy(node)
if strings.Contains(svcListenAddress, "/") {
// Address is a CIDR, already captured by destinationCIDR parameter.
svcListenAddress = ""
}
if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || svcListenAddress == actualWildcard {
sniHosts = []string{string(service.Hostname)}
}
out = append(out, &filterChainOpts{
sniHosts: sniHosts,
destinationCIDRs: []string{destinationCIDR},
networkFilters: buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
})
}
return out
}
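The long comment inside buildSidecarOutboundTLSFilterChainOpts above boils down to one trick: hash only the runtime predicates of each TLS match (SNI hosts and destination subnets, via hashRuntimeTLSMatchPredicates) and emit a filter chain only for the first match with a given hash, because any later match with identical runtime predicates can never be reached by the proxy. A minimal sketch of that bookkeeping, written in Rust to match the rest of this dump rather than in Go, with illustrative names that are not Istio's:

```rust
use std::collections::HashSet;

/// Runtime predicates of a TLS match - the parts only the proxy can evaluate.
struct TlsMatch {
    sni_hosts: Vec<String>,
    destination_subnets: Vec<String>,
}

/// Mirrors hashRuntimeTLSMatchPredicates: join SNI hosts and subnets into one key.
fn runtime_key(m: &TlsMatch) -> String {
    format!("{}|{}", m.sni_hosts.join(","), m.destination_subnets.join(","))
}

/// Keep only the first match per runtime-predicate set; later duplicates are unreachable.
fn dedup_runtime_matches(matches: Vec<TlsMatch>) -> Vec<TlsMatch> {
    let mut seen = HashSet::new();
    matches
        .into_iter()
        .filter(|m| seen.insert(runtime_key(m))) // insert() returns false for repeats
        .collect()
}
```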
func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if listenPort.Protocol.IsTLS() {
return nil
}
out := make([]*filterChainOpts, 0)
// Very basic TCP: break as soon as we add one network filter with no destination addresses to match.
// This is the terminating condition in the filter chain match list.
defaultRouteAdded := false
TcpLoop:
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tcp := range virtualService.Tcp {
destinationCIDRs := []string{destinationCIDR}
if len(tcp.Match) == 0 {
// implicit match
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
|
matchTLS
|
identifier_name
|
|
theme.rs
|
>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
let value = match &*name {
"color" | "border-color" => Value::Color(parse_basic_color(input)?),
"background" | "foreground" => Value::Color(parse_basic_color(input)?),
"border-radius" | "border-width" => {
match input.next()? {
Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
t => return Err(BasicParseError::UnexpectedToken(t).into())
}
}
_ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
};
Ok(Declaration {
property: name.into_owned(),
value: value,
important: input.try(cssparser::parse_important).is_ok()
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
type Prelude = ();
type AtRule = Declaration;
type Error = CustomParseError;
}
fn css_color(name: &str) -> Option<Color> {
Some(hex(match name {
"transparent" => return Some(Color { data: 0 }),
"black" => 0x000000,
"silver" => 0xc0c0c0,
"gray" | "grey" => 0x808080,
"white" => 0xffffff,
"maroon" => 0x800000,
"red" => 0xff0000,
"purple" => 0x800080,
"fuchsia" => 0xff00ff,
"green" => 0x008000,
"lime" => 0x00ff00,
"olive" => 0x808000,
"yellow" => 0xffff00,
"navy" => 0x000080,
"blue" => 0x0000ff,
"teal" => 0x008080,
"aqua" => 0x00ffff,
_ => return None,
}))
}
fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
Ok(match input.next()? {
Token::Ident(s) => match css_color(&s) {
Some(color) => color,
None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
},
Token::IDHash(hash) | Token::Hash(hash) => {
match hash.len() {
6 | 8 => {
let mut x = match u32::from_str_radix(&hash, 16) {
Ok(x) => x,
Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
};
if hash.len() == 6 {
x |= 0xFF000000;
}
Color { data: x }
},
_ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
}
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
})
}
fn parse(s: &str) -> Vec<Rule> {
let mut input = ParserInput::new(s);
let mut parser = Parser::new(&mut input);
let rule_parser = RuleParser::new();
let rules = {
let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
rule_list_parser.collect::<Vec<_>>()
};
for rule in &rules {
match *rule {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", parser.slice(e.span.clone()));
}
}
}
rules.into_iter().filter_map(|rule| rule.ok()).collect()
}
const fn
|
hex
|
identifier_name
|
|
theme.rs
|
self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.insert(pseudo_class.into());
self
}
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
let value = match &*name {
"color" | "border-color" => Value::Color(parse_basic_color(input)?),
"background" | "foreground" => Value::Color(parse_basic_color(input)?),
"border-radius" | "border-width" => {
match input.next()? {
Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
t => return Err(BasicParseError::UnexpectedToken(t).into())
}
}
_ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
};
Ok(Declaration {
property: name.into_owned(),
value: value,
important: input.try(cssparser::parse_important).is_ok()
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
type Prelude = ();
type AtRule = Declaration;
type Error = CustomParseError;
}
fn css_color(name: &str) -> Option<Color> {
Some(hex(match name {
"transparent" => return Some(Color { data: 0 }),
"black" => 0x000000,
"silver" => 0xc0c0c0,
"gray" | "grey" => 0x808080,
"white" => 0xffffff,
"maroon" => 0x800000,
"red" => 0xff0000,
"purple" => 0x800080,
"fuchsia" => 0xff00ff,
"green" => 0x008000,
"lime" => 0x00ff00,
"olive" => 0x808000,
"yellow" => 0xffff00,
"navy" => 0x000080,
"blue" => 0x0000ff,
"teal" => 0x008080,
"aqua" => 0x00ffff,
_ => return None,
}))
}
fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
Ok(match input.next()? {
Token::Ident(s) => match css_color(&s) {
Some(color) => color,
None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
},
Token::IDHash(hash) | Token::Hash(hash) => {
match hash.len() {
6 | 8 =>
|
{
let mut x = match u32::from_str_radix(&hash, 16) {
Ok(x) => x,
Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
};
if hash.len() == 6 {
x |= 0xFF000000;
}
Color { data: x }
}
|
conditional_block
|
|
theme.rs
|
<Value> {
let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();
for rule in self.all_rules().iter().rev() {
let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();
if matching_selectors.len() > 0 {
if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
let highest_specifity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
matches.push((decl.important, highest_specifity, decl.value.clone()));
}
}
}
matches.sort_by_key(|x| (x.0, x.1));
matches.last().map(|x| x.2.clone())
}
pub fn color(&self, property: &str, query: &Selector) -> Color {
let default = Color { data: 0 };
self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
}
pub fn uint(&self, property: &str, query: &Selector) -> u32 {
self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
}
}
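The `get` lookup above resolves competing declarations by collecting `(important, specificity, value)` tuples, sorting by the `(important, specificity)` key, and taking the last element. Because `false < true` for `bool`, any `!important` declaration outranks any specificity, and ties fall back to the higher specificity. A small self-contained check of that ordering; `Spec` is a stand-in for the crate's `Specificity` and the data is invented:

```rust
// Stand-in for Specificity([u8; 3]); the derives give lexicographic ordering.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Spec([u8; 3]);

fn main() {
    // (important, specificity, value) triples, as collected by `get`.
    let mut matches = vec![
        (false, Spec([0, 2, 1]), "high specificity, not important"),
        (true,  Spec([0, 0, 1]), "low specificity, !important"),
    ];
    matches.sort_by_key(|x| (x.0, x.1));
    // The last element wins: the !important declaration, despite lower specificity.
    assert_eq!(matches.last().unwrap().2, "low specificity, !important");
}
```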
#[derive(Clone, Debug)]
pub struct Rule {
pub selectors: Vec<Selector>,
pub declarations: Vec<Declaration>,
}
#[derive(Clone, Debug)]
pub enum SelectorRelation {
Ancestor(Selector),
Parent(Selector),
}
impl<T: Into<String>> From<T> for Selector {
fn from(t: T) -> Self {
Selector::new(Some(t.into()))
}
}
/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);
impl Add<Self> for Specificity {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Specificity([
self.0[0] + rhs.0[0],
self.0[1] + rhs.0[1],
|
}
#[derive(Clone, Debug, Default)]
pub struct Selector {
pub element: Option<String>,
pub classes: HashSet<String>,
pub pseudo_classes: HashSet<String>,
pub relation: Option<Box<SelectorRelation>>,
}
impl Selector {
pub fn new<S: Into<String>>(element: Option<S>) -> Self {
Selector {
element: element.map(|s| s.into()),
classes: HashSet::new(),
pseudo_classes: HashSet::new(),
relation: None,
}
}
fn specificity(&self) -> Specificity {
let s = Specificity([
0,
(self.classes.len() + self.pseudo_classes.len()) as u8,
if self.element.is_some() { 1 } else { 0 }
]);
if let Some(ref relation) = self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.insert(pseudo_class.into());
self
}
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error
|
self.0[2] + rhs.0[2],
])
}
|
random_line_split
|
theme.rs
|
<Value> {
let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();
for rule in self.all_rules().iter().rev() {
let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();
if matching_selectors.len() > 0 {
if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
let highest_specifity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
matches.push((decl.important, highest_specifity, decl.value.clone()));
}
}
}
matches.sort_by_key(|x| (x.0, x.1));
matches.last().map(|x| x.2.clone())
}
pub fn color(&self, property: &str, query: &Selector) -> Color {
let default = Color { data: 0 };
self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
}
pub fn uint(&self, property: &str, query: &Selector) -> u32 {
self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
}
}
#[derive(Clone, Debug)]
pub struct Rule {
pub selectors: Vec<Selector>,
pub declarations: Vec<Declaration>,
}
#[derive(Clone, Debug)]
pub enum SelectorRelation {
Ancestor(Selector),
Parent(Selector),
}
impl<T: Into<String>> From<T> for Selector {
fn from(t: T) -> Self {
Selector::new(Some(t.into()))
}
}
/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);
impl Add<Self> for Specificity {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Specificity([
self.0[0] + rhs.0[0],
self.0[1] + rhs.0[1],
self.0[2] + rhs.0[2],
])
}
}
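As the doc comment says, `Specificity` wraps `[u8; 3]` and the derived `Ord` compares it lexicographically, so IDs outrank classes and pseudo-classes, which outrank elements; the `Add` impl lets a selector with a parent or ancestor relation accumulate the related selector's specificity. Two illustrative assertions under those definitions (the concrete numbers are made up):

```rust
use std::ops::Add;

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);

impl Add for Specificity {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        Specificity([self.0[0] + rhs.0[0], self.0[1] + rhs.0[1], self.0[2] + rhs.0[2]])
    }
}

fn main() {
    // A single class (middle slot) beats any number of bare elements (last slot).
    assert!(Specificity([0, 1, 0]) > Specificity([0, 0, 3]));
    // A "parent > child" selector sums both sides, e.g. element + class.
    assert_eq!(Specificity([0, 0, 1]) + Specificity([0, 1, 0]), Specificity([0, 1, 1]));
}
```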
#[derive(Clone, Debug, Default)]
pub struct Selector {
pub element: Option<String>,
pub classes: HashSet<String>,
pub pseudo_classes: HashSet<String>,
pub relation: Option<Box<SelectorRelation>>,
}
impl Selector {
pub fn new<S: Into<String>>(element: Option<S>) -> Self {
Selector {
element: element.map(|s| s.into()),
classes: HashSet::new(),
pseudo_classes: HashSet::new(),
relation: None,
}
}
fn specificity(&self) -> Specificity {
let s = Specificity([
0,
(self.classes.len() + self.pseudo_classes.len()) as u8,
if self.element.is_some() { 1 } else { 0 }
]);
if let Some(ref relation) = self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self
|
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self
|
{
self.pseudo_classes.insert(pseudo_class.into());
self
}
|
identifier_body
|
lib.rs
|
_lib_handle.is_null()
|
else{
Ok( DyLib(shared_lib_handle) )
}
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr());
if _fn.is_null() {
Err("Function name could not be found.".to_string())
}
else{
Ok(_fn as *mut () )
}
}}
pub fn get_error()->String{unsafe{
let error = dlerror();
if error.is_null(){
return "No Error".to_string();
}
else{
// dlerror() returns a pointer into libc's internal buffer; borrow it instead of taking ownership via CString::from_raw.
std::ffi::CStr::from_ptr(error).to_string_lossy().into_owned()
}
}}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if dlclose(shared_lib_handle.0) != 0{
println!("Could not properly close shared library.");
}
}}
}
#[cfg(target_os = "windows")]
pub mod dynamic_lib_loading{
use std::os::raw::{c_int, c_void};
extern "C" {
fn LoadLibraryA( path: *const i8 ) -> *mut c_void;
fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void;
fn FreeLibrary( lib: *mut c_void ) -> c_int;
fn GetLastError() -> u32;
}
//TODO
//This is temporary should be replaced by windows enums
pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */
pub struct DyLib(*mut c_void);
pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{
let _path = lib_path.to_string() + "\0";
let lib = LoadLibraryA( _path.as_ptr() as *const i8);
if lib.is_null(){
let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError());
return Err(s);
}
Ok(DyLib(lib as *mut c_void))
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let fn_name = name.to_string() + "\0";
let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut ();
if function.is_null(){
let s = format!("Could not get function \n{:?}", GetLastError());
return Err(s);
}
Ok(function)
}}
pub fn get_error()->String{
"Windows version has not been implemented".to_string()
}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if FreeLibrary(shared_lib_handle.0 as _) == 0{
println!("Could not properly close shared library.");
println!("{}", format!("{:?}", GetLastError()));
}
}}
}
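The one-line // Example comments above hint at how a looked-up symbol becomes callable; here is a slightly fuller, hedged usage sketch of this module's API (open_lib, get_fn, close_lib). The crate name `mylib`, the library path, and the symbol name are placeholders, and the transmute is only sound if the real symbol actually has the assumed `fn() -> i32` signature:

```rust
use mylib::dynamic_lib_loading::{close_lib, get_fn, open_lib, RTLD_LAZY}; // `mylib` is a placeholder crate name

fn main() -> Result<(), String> {
    // Hypothetical shared library and exported symbol.
    let lib = open_lib("./libgame.so", RTLD_LAZY)?;
    let sym = get_fn(&lib, "update_and_render")?; // *mut ()
    // The caller asserts the symbol really is `fn() -> i32`; otherwise this is UB.
    let update_and_render: fn() -> i32 = unsafe { std::mem::transmute(sym) };
    println!("update_and_render returned {}", update_and_render());
    close_lib(&lib);
    Ok(())
}
```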
pub mod render_tools{
#[derive(PartialEq, Clone, Debug)]
pub enum RenderType{
Image,
Rectangle,
String,
PrintString,
Empty,
}
impl Default for RenderType{
fn default()->Self{ RenderType::Empty }
}
#[derive(Default)]
pub struct RenderStruct{
pub rendertype : RenderType,
pub x: f32,
pub y: f32,
pub width: f32,
pub height: f32,
pub alpha : f32,
//rect related things
pub filled: bool,
pub color: [f32;3],
//image related things
pub color_buffer: Vec<u8>,
pub rgba_type: RGBA,
pub new_width: Option<f32>,// NOTE Testing out using a fractional new width
pub new_height: Option<f32>,// NOTE Testing out using a fractional new height
//Strings
pub char_buffer: String,
pub font_size: u32
}
#[derive(Default)]
pub struct RenderInstructions{
pub buffer: Vec<RenderStruct>,
}
//TODO
//This is a BAD name.... do better
#[derive(Clone, Copy, PartialEq)]
pub enum RGBA{
U8rgba,
U8argb,
U8rgb,
Empty,
//More maybe ... maybe not
}
impl RenderInstructions{
pub fn clear(&mut self){
self.buffer.clear();
}
pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle,
x: rect[0], y:rect[1], width: rect[2], height: rect[3],
alpha: color[3], filled: filled, color: _color, .. Default::default()});
}
pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){
//TODO
//should size be optional
//shouldn't a good text size be chosen automatically
//TODO
//should color be optional
//shouldn't a good text color be chosen automatically
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y,
alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size, .. Default::default()} );
}
pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){
//TODO
//should x and y be options? Often I want to just draw the image wherever and have it
//automagically look good with corresponding text
self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y, width: bmp.width as f32, height: bmp.height as f32,
new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(), .. Default::default()} );
}
pub fn println(&mut self, string: &str){
let buffer = "> ".to_string() + string;
self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString,
alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19, .. Default::default()} );
}
}
impl Default for RGBA{
fn default()->Self{ RGBA::Empty }
}
#[derive(Clone)]
pub struct Bitmap{
//NOTE BMP should be 400 x 400 to start off.
pub width: i32,
pub height: i32,
pub rgba_type: RGBA,
pub buffer: Vec<u8>,
}
impl Bitmap{
pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{
let _w = w as usize;
let _h = h as usize;
let v = match rgba_type{
RGBA::U8rgba=>{ vec![0u8; _w*_h*4] },
RGBA::U8argb=>{ vec![0u8; _w*_h*4] },
RGBA::U8rgb=>{ vec![0u8; _w*_h*3] },
_=>{ panic!("Not supported"); } //TODO Clean up
};
Bitmap{
width: w,
height: h,
rgba_type: rgba_type,
buffer: v,
}
}
}
pub struct BitmapContainer{
pub initialized : bool,
pub bmp: Option<Bitmap>,
}
}
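render_tools is a retained command list: each draw_* call pushes a RenderStruct into RenderInstructions.buffer, and a backend is expected to walk that buffer each frame, draw it, and then clear it. A hedged usage sketch against the API shown above; the crate name, coordinates, and colors are invented:

```rust
use mylib::render_tools::RenderInstructions; // `mylib` is a placeholder crate name

fn main() {
    let mut ri = RenderInstructions::default();
    // Filled dark-grey rectangle at (10, 10), 200 x 60, fully opaque.
    ri.draw_rect([10.0, 10.0, 200.0, 60.0], [0.2, 0.2, 0.2, 1.0], true);
    // White 19px string drawn on top of it.
    ri.draw_string("hello", [1.0, 1.0, 1.0, 1.0], 19, 16.0, 20.0);
    // Debug text routed through the PrintString path.
    ri.println("frame ready");
    assert_eq!(ri.buffer.len(), 3);
    // A backend would draw everything in ri.buffer here, then:
    ri.clear();
}
```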
pub mod memory_tools{
//TODO play around with this maybe
//use std::alloc;
use std::any::TypeId;
const _FIXED_CHAR_BUFFER_SIZE : usize = 128;
#[derive(Copy, Clone)]
pub struct TinyString{
//NOTE
//currently struct vars are public for debugging purposes. They should not be public.
//NOTE
//This should prob be a general tool
pub buffer
|
{
println!("{:?}", get_error());
Err(format!("Shared lib is null! {} Check file path/name.", lib_path))
}
|
conditional_block
|
lib.rs
|
}}
pub fn new(gs: &mut GlobalStorage)->DyArray<T>{
DyArray::<T>::with_capacity(gs, 5)
}
pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{
let ptr = gs.alloc_multi_empty::<T>( size );
DyArray{
ptr: ptr,
length: 0,
capacity: size,
phantom: std::marker::PhantomData
}
}
pub fn get(&self, index: usize)->&T{unsafe{
if index > self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{
if index > self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
//TODO
//Convert Global and Local storage into a general storage storage thing where storage can be
//either a fixed or dynamic array.
pub struct GlobalStorage{
pub storage: Vec<u8>,
pub storage_filled: Vec<bool>, //TODO space footprint improvement: use bits in a u8
reference: [TinyString;100], //This is fixed size because I really want to stop myself from over populating the global space
stored_ptr: Vec<Ptr<GlobalStorage>>,
}
impl GlobalStorage{
pub fn new()->GlobalStorage{
GlobalStorage{
storage: Vec::with_capacity(1028*1028*4), //This is still prob too small
storage_filled: Vec::with_capacity(1028*1028*4),
//TODO
//reference needs to store the ptr index TODO
reference: [TinyString::new(); 100],
stored_ptr: Vec::new(),
}
}
pub fn alloc<T: 'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{
let size = std::mem::size_of::<T>();
let src = (&v as *const T) as *const u8;
let cursor = self.storage.len();
for i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
self.storage.push(*src.offset(i as isize));
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
}}
pub fn alloc_multi_empty<T: 'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{
let size = std::mem::size_of::<T>() * multiples;
let cursor = self.storage.len();
for _i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
self.storage.push(0);
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
}
pub fn realloc<T>(&mut self, ptr: Ptr<GlobalStorage>, index_back: usize, additional_space: usize)->Ptr<GlobalStorage>{
//TODO
//SLOW SPEED UP
let cursor = self.storage.len();
let index_front = ptr.ptr;
for i in index_front..index_back{
let temp = self.storage[i];
self.storage_filled[i] = false;
self.storage.push(temp);
self.storage_filled.push(true);
}
for _i in 0..additional_space{
self.storage.push(0);
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: ptr.type_hash, backend_storage: self as *mut _};
}
pub unsafe fn write_to<T: 'static>(&mut self, v: T, at_index: usize)->Result<(),String>{
let size = std::mem::size_of::<T>();
let src = (&v as *const T) as *const u8;
if at_index >= self.storage.len() {
return Err("Writing outside the bounds of memory allocated to global storage".to_string());
}
let cursor = at_index;
for i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
if !self.storage_filled[cursor+i] { panic!("Storage has not allocated this memory.") }
self.storage[cursor+i] = *src.offset(i as isize);
}
return Ok(());
}
pub fn store<T: 'static>(&mut self, v: T, name: &str)->Result<(), String>{
if name.len() > _FIXED_CHAR_BUFFER_SIZE {
return Err(format!("storage name is too damn long. Name should be {} chars long.", _FIXED_CHAR_BUFFER_SIZE));
}
let cursor = self.stored_ptr.len();
for it in self.reference.iter() {
if it.is_samestr(name){
return Err(format!("Global Storage name collision: {}", name));
}
}
self.reference[cursor].copystr( name );
let ptr = self.alloc(v);
self.stored_ptr.push(ptr);
return Ok(());
}
pub fn get<T: 'static>(&mut self, name: &str)->Result<&mut T, String>{
let mut isgood = false;
let mut ptr_index = 0;
for (i, it) in self.reference.iter().enumerate() {
if it.is_samestr(name){
ptr_index = i;
isgood = true;
}
}
if isgood == false { return Err(format!("Name not found in Global Storage: {}", name)); }
let ptr = &self.stored_ptr[ptr_index];
return Ok(ptr.deref_mut::<T>());
}
//
}
pub struct LocalStorage{
//NOTE
//This seems to be a good idea when it comes to interactive panels
//However I'm not sure about the usefulness elsewhere.
//
//
//Why should the local buffer be fixed sized. This doesn't really make sense.
pub interactive: bool,
pub storage: GlobalStorage,
}
impl LocalStorage{
pub fn new()->LocalStorage{
LocalStorage{
interactive: false,
storage: GlobalStorage::new(),
}
}
}
}
pub mod interaction_tools{
pub enum KeyboardEnum{
Rightarrow,
Leftarrow,
Uparrow,
Downarrow,
Enter,
Default
}
impl Default for KeyboardEnum{
fn default()->Self{ KeyboardEnum::Default }
}
pub enum ButtonStatus{
Up,
Down,
Default
}
impl Default for ButtonStatus{
fn default()->Self{ ButtonStatus::Default }
}
#[derive(Default)]
pub struct InteractiveInfo{
//TODO add some frame info
pub infocus: bool,
pub mouse_x: f32,
pub mouse_y: f32,
pub text_key_pressed: char,
pub keyboard_key: Vec<KeyboardEnum>,
pub keyboard_key_status: Vec<ButtonStatus>,
pub frames: u64,
}
}
#[test]
fn globalstorage_alloc_and_store(){
//TODO rename
use memory_tools::GlobalStorage;
let mut gls = GlobalStorage::new();
{
let mut a = [10u8; 4];
gls.store(a, "a");
}
let mut b = [10u8; 4];
assert_eq!(b, *gls.get::<[u8;4]>("a").unwrap());
}
#[test]
fn globalstorage_vec(){
//TODO rename
use memory_tools::{GlobalStorage, DyArray};
let mut gls = GlobalStorage::new();
let mut dy = DyArray::<u32>::new(&mut gls);
dy.push(12);
dy.push(123);
dy.push(1231);
//let a = dy.get(0);
//assert_eq!(12, *a);
println!("print test");
let a = dy.get(1);
assert_eq!(123, *a);
let a = dy.get(2);
assert_eq!(1231, *a);
/* Did not compile because of a typo - `dy,get(1)` used a comma instead of a dot - not a rustc bug.
assert_eq!(12, *(dy.get(0)));
assert_eq!(123, *(dy.get(1)));
assert_eq!(1231, *(dy.get(2)));
*/
}
#[test]
fn
|
global_storage_vec2
|
identifier_name
|
|
lib.rs
|
_lib_handle.is_null(){
println!("{:?}", get_error());
Err(format!("Shared lib is null! {} Check file path/name.", lib_path))
}
else{
Ok( DyLib(shared_lib_handle) )
}
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr());
if _fn.is_null() {
Err("Function name could not be found.".to_string())
}
else{
Ok(_fn as *mut () )
}
}}
pub fn get_error()->String{unsafe{
let error = dlerror();
if error.is_null(){
return "No Error".to_string();
}
else{
// dlerror() returns a pointer into libc's internal buffer; borrow it instead of taking ownership via CString::from_raw.
std::ffi::CStr::from_ptr(error).to_string_lossy().into_owned()
}
}}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if dlclose(shared_lib_handle.0) != 0{
println!("Could not properly close shared library.");
}
}}
}
#[cfg(target_os = "windows")]
pub mod dynamic_lib_loading{
use std::os::raw::{c_int, c_void};
extern "C" {
fn LoadLibraryA( path: *const i8 ) -> *mut c_void;
fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void;
fn FreeLibrary( lib: *mut c_void ) -> c_int;
fn GetLastError() -> u32;
}
//TODO
//This is temporary should be replaced by windows enums
pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */
pub struct DyLib(*mut c_void);
pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{
let _path = lib_path.to_string() + "\0";
let lib = LoadLibraryA( _path.as_ptr() as *const i8);
if lib.is_null(){
let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError());
return Err(s);
}
Ok(DyLib(lib as *mut c_void))
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let fn_name = name.to_string() + "\0";
let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut ();
if function.is_null(){
let s = format!("Could not get function \n{:?}", GetLastError());
return Err(s);
}
Ok(function)
}}
pub fn get_error()->String{
"Windows version has not been implemented".to_string()
}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if FreeLibrary(shared_lib_handle.0 as _) == 0{
println!("Could not properly close shared library.");
println!("{}", format!("{:?}", GetLastError()));
}
}}
}
pub mod render_tools{
#[derive(PartialEq, Clone, Debug)]
pub enum RenderType{
Image,
Rectangle,
String,
PrintString,
Empty,
}
impl Default for RenderType{
fn default()->Self{ RenderType::Empty }
}
#[derive(Default)]
pub struct RenderStruct{
pub rendertype : RenderType,
pub x: f32,
pub y: f32,
pub width: f32,
pub height: f32,
pub alpha : f32,
//rect related things
pub filled: bool,
pub color: [f32;3],
|
        pub new_width: Option<f32>,// NOTE Testing out using a fractional new width
        pub new_height: Option<f32>,// NOTE Testing out using a fractional new height
        //Strings
pub char_buffer: String,
pub font_size: u32
}
#[derive(Default)]
pub struct RenderInstructions{
pub buffer: Vec<RenderStruct>,
}
//TODO
//This is a BAD name.... do better
#[derive(Clone, Copy, PartialEq)]
pub enum RGBA{
U8rgba,
U8argb,
U8rgb,
Empty,
//More maybe ... maybe not
}
impl RenderInstructions{
pub fn clear(&mut self){
self.buffer.clear();
}
pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle,
x: rect[0], y:rect[1], width: rect[2], height: rect[3],
alpha: color[3], filled: filled, color: _color, .. Default::default()});
}
pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){
            //TODO
            //should size be optional
            //shouldn't a good text size be chosen automatically
            //TODO
            //should color be optional
            //shouldn't a good text color be chosen automatically
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y,
alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size, .. Default::default()} );
}
pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){
            //TODO
            //should x and y be options? Often I want to just draw the image wherever and have it
            //automagically look good with corresponding text
self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y, width: bmp.width as f32, height: bmp.height as f32,
new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(), .. Default::default()} );
}
pub fn println(&mut self, string: &str){
let buffer = "> ".to_string() + string;
self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString,
alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19, .. Default::default()} );
}
}
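    //Illustrative usage sketch for RenderInstructions (the values are arbitrary and
    //not taken from any real frame loop):
    //
    //  let mut ri = RenderInstructions::default();
    //  ri.draw_rect([10.0, 10.0, 100.0, 40.0], [0.2, 0.4, 0.8, 1.0], true);
    //  ri.draw_string("hello", [1.0, 1.0, 1.0, 1.0], 19, 12.0, 14.0);
    //  ri.println("frame rendered");
    //  //...hand ri.buffer to the renderer, then start the next frame with...
    //  ri.clear();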
impl Default for RGBA{
fn default()->Self{ RGBA::Empty }
}
#[derive(Clone)]
pub struct Bitmap{
//NOTE BMP should be 400 x 400 to start off.
pub width: i32,
pub height: i32,
pub rgba_type: RGBA,
pub buffer: Vec<u8>,
}
impl Bitmap{
pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{
let _w = w as usize;
let _h = h as usize;
let v = match rgba_type{
RGBA::U8rgba=>{ vec![0u8; _w*_h*4] },
RGBA::U8argb=>{ vec![0u8; _w*_h*4] },
RGBA::U8rgb=>{ vec![0u8; _w*_h*3] },
_=>{ panic!("Not supported"); } //TODO Clean up
};
Bitmap{
width: w,
height: h,
rgba_type: rgba_type,
buffer: v,
}
}
}
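    //Illustrative sketch combining Bitmap with RenderInstructions::draw_bmp
    //(400 x 400 matches the note above; no pixel data is filled in here):
    //
    //  let bmp = Bitmap::new(400, 400, RGBA::U8rgba);
    //  let mut ri = RenderInstructions::default();
    //  ri.draw_bmp(&bmp, 0.0, 0.0, 1.0, None, None);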
pub struct BitmapContainer{
pub initialized : bool,
pub bmp: Option<Bitmap>,
}
}
pub mod memory_tools{
//TODO play around with this maybe
//use std::alloc;
use std::any::TypeId;
const _FIXED_CHAR_BUFFER_SIZE : usize = 128;
#[derive(Copy, Clone)]
pub struct TinyString{
//NOTE
        //currently struct vars are public for debugging purposes. They should not be public.
        //NOTE
        //This should probably be a general tool
pub buffer: [
|
//image related things
pub color_buffer: Vec<u8>,
pub rgba_type: RGBA,
|
random_line_split
|
map_loader.rs
|
(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
Err(msg) => panic!(msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
|
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
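  // For example (layer names are illustrative), sort_layers turns
  //   ["ground", "inventories", "objects"]
  // into
  //   ["inventories", "ground", "objects"]
  // so the inventories layer is available before the layers that follow it.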
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the resulting LoadedLayers.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
/// Possibly Increments the ZLevel based on layer properties
fn increment_z_by_layer(&mut self, layer: &Layer) {
let z_inc = layer.get_z_inc().unwrap_or(0);
if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
ents.push(ent);
Ok(ents)
})
}
/// ## Loading
|
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
|
random_line_split
|
map_loader.rs
|
(file: String, lazy: &LazyUpdate) {
lazy.exec_mut(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
Err(msg) => panic!(msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the resulting LoadedLayers.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
/// Possibly Increments the ZLevel based on layer properties
fn increment_z_by_layer(&mut self, layer: &Layer) {
let z_inc = layer.get_z_inc().unwrap_or(0);
if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
|
load_it
|
identifier_name
|
|
map_loader.rs
|
world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
Err(msg) => panic!(msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the resulting LoadedLayers.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
/// Possibly Increments the ZLevel based on layer properties
fn increment_z_by_layer(&mut self, layer: &Layer) {
let z_inc = layer.get_z_inc().unwrap_or(0);
if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) =>
|
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
ents.push(ent);
Ok(ents)
})
}
/// ##
|
{
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
|
conditional_block
|
map_loader.rs
|
}
pub struct MapLoader<'a> {
loaded_maps: HashMap<String, Tiledmap>,
pub z_level: ZLevel,
pub world: &'a mut World,
pub origin: V2,
pub layer_group: Option<String>,
pub sprite: Option<Entity>,
}
impl<'a> MapLoader<'a> {
pub fn load_it(file: String, lazy: &LazyUpdate) {
lazy.exec_mut(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
Err(msg) => panic!(msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the resulting LoadedLayers.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
/// Possibly Increments the ZLevel based on layer properties
fn increment_z_by_layer(&mut self, layer: &Layer) {
let z_inc = layer.get_z_inc().unwrap_or(0);
if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let x
|
{
let other_tops = other.top_level_entities.into_iter();
let other_groups = other.groups.into_iter();
self.top_level_entities.extend(other_tops);
self.groups.extend(other_groups);
}
|
identifier_body
|
|
main.go
|
.Execute()
}
func checkArgs(_ *types.Event) error {
if len(plugin.AuthToken) == 0 {
return fmt.Errorf("authentication token is empty")
}
if len(plugin.Team) == 0 {
return fmt.Errorf("team is empty")
}
return nil
}
// eventPriority func reads the configured priority and returns the corresponding alert.Px value
// check.Annotations override Entity.Annotations
func eventPriority() alert.Priority {
switch plugin.Priority {
case "P5":
return alert.P5
case "P4":
return alert.P4
case "P3":
return alert.P3
case "P2":
return alert.P2
case "P1":
return alert.P1
default:
return alert.P3
}
}
func parseActions(event *types.Event) (output []string) {
if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" {
output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",")
return output
}
return output
}
// parseEventKeyTags func returns a string, a string, and a []string with event data
// first string contains the custom template string to use in the message
// second string contains Entity.Name/Check.Name to use in the alias
// []string contains Entity.Name, Check.Name, Entity.Namespace, and Entity.EntityClass to use as tags in Opsgenie
func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) {
alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name)
title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event)
if err != nil {
return "", "", []string{}
}
tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass)
return trim(title, plugin.MessageLimit), alias, tags
}
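// Example for parseEventKeyTags (illustrative values only): an event for entity
// "webserver01" and check "check-cpu" in namespace "default" with entity class
// "agent" yields alias "webserver01/check-cpu" and tags
// ["webserver01", "check-cpu", "default", "agent"]; the title is the rendered
// plugin.MessageTemplate, trimmed to plugin.MessageLimit.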
// parseDescription func returns a string with the custom template string to use in the description
func parseDescription(event *types.Event) (description string) {
description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event)
if err != nil {
return ""
}
// allow newlines to get expanded
description = strings.Replace(description, `\n`, "\n", -1)
return trim(description, plugin.DescriptionLimit)
}
// parseDetails func returns a map[string]string with check information for the details field
func parseDetails(event *types.Event) map[string]string {
details := make(map[string]string)
details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions)
details["status"] = fmt.Sprintf("%d", event.Check.Status)
details["interval"] = fmt.Sprintf("%d", event.Check.Interval)
// only if true
if plugin.FullDetails {
details["output"] = event.Check.Output
details["command"] = event.Check.Command
details["proxy_entity_name"] = event.Check.ProxyEntityName
details["state"] = event.Check.State
details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl)
details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences)
details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark)
details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers)
if event.Entity.EntityClass == "agent" {
details["arch"] = event.Entity.System.GetArch()
details["os"] = event.Entity.System.GetOS()
details["platform"] = event.Entity.System.GetPlatform()
details["platform_family"] = event.Entity.System.GetPlatformFamily()
details["platform_version"] = event.Entity.System.GetPlatformVersion()
}
}
// only if true
if plugin.WithAnnotations {
if event.Check.Annotations != nil {
for key, value := range event.Check.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
checkKey := fmt.Sprintf("%s_annotation_%s", "check", key)
details[checkKey] = value
}
}
}
if event.Entity.Annotations != nil {
for key, value := range event.Entity.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key)
details[entityKey] = value
}
}
}
}
// only if true
if plugin.WithLabels {
if event.Check.Labels != nil {
for key, value := range event.Check.Labels {
checkKey := fmt.Sprintf("%s_label_%s", "check", key)
details[checkKey] = value
}
}
if event.Entity.Labels != nil {
for key, value := range event.Entity.Labels {
entityKey := fmt.Sprintf("%s_label_%s", "entity", key)
details[entityKey] = value
}
}
}
if plugin.SensuDashboard != "disabled" {
details["sensuDashboard"] = fmt.Sprintf("source: %s/%s/events/%s/%s \n", plugin.SensuDashboard, event.Entity.Namespace, event.Entity.Name, event.Check.Name)
}
return details
}
// switchOpsgenieRegion func returns the OpsGenie API URL for the configured region
func switchOpsgenieRegion() client.ApiUrl {
var region client.ApiUrl
apiRegionLowCase := strings.ToLower(plugin.APIRegion)
switch apiRegionLowCase {
case "eu":
region = client.API_URL_EU
case "us":
region = client.API_URL
default:
region = client.API_URL
}
return region
}
func executeHandler(event *types.Event) error {
alertClient, err := alert.NewClient(&client.Config{
ApiKey: plugin.AuthToken,
OpsGenieAPIURL: switchOpsgenieRegion(),
})
if err != nil {
return fmt.Errorf("failed to create opsgenie client: %s", err)
}
if event.Check.Status != 0 {
return createIncident(alertClient, event)
}
	// check if event has an alert
hasAlert, _ := getAlert(alertClient, event)
// close incident if status == 0
if hasAlert != notFound && event.Check.Status == 0 {
return closeAlert(alertClient, event, hasAlert)
}
return nil
}
// createIncident func creates an alert in OpsGenie
func createIncident(alertClient *alert.Client, event *types.Event) error {
var (
note string
err error
)
if plugin.IncludeEventInNote {
note, err = getNote(event)
if err != nil {
return err
}
}
teams := []alert.Responder{
{Type: alert.EscalationResponder, Name: plugin.Team},
{Type: alert.ScheduleResponder, Name: plugin.Team},
}
title, alias, tags := parseEventKeyTags(event)
actions := parseActions(event)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
createResult, err := alertClient.Create(ctx, &alert.CreateAlertRequest{
Message: title,
Alias: alias,
Description: parseDescription(event),
Responders: teams,
Actions: actions,
Tags: tags,
Details: parseDetails(event),
Entity: event.Entity.Name,
Source: source,
Priority: eventPriority(),
Note: note,
})
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Create request ID: " + createResult.RequestId)
}
return nil
}
// getAlert func gets an alert using an alias.
func getAlert(alertClient *alert.Client, event *types.Event) (string, error) {
	_, alias, _ := parseEventKeyTags(event)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	fmt.Printf("Checking for alert %s \n", alias)
	getResult, err := alertClient.Get(ctx, &alert.GetAlertRequest{
		IdentifierType:  alert.ALIAS,
		IdentifierValue: alias,
	})
})
if err != nil {
return notFound, nil
}
fmt.Printf("ID: %s, Message: %s, Count: %d \n", getResult.Id, getResult.Message, getResult.Count)
return getResult.Id, nil
}
// closeAlert func close an alert if status == 0
func closeAlert(alertClient *alert.Client, event *types.Event, alertid string) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
closeResult, err := alertClient.Close(ctx, &alert.CloseAlertRequest{
IdentifierType: alert.ALERTID,
IdentifierValue: alertid,
Source: source,
Note: "Closed Automatically",
})
	if err != nil {
		fmt.Printf("[ERROR] Not Closed: %s \n", err)
		return err
	}
	fmt.Printf("Close request ID %s for alert %s \n", closeResult.RequestId, alertid)
	return nil
}
// getNote func creates a note with the whole event in JSON format
func getNote(event *types.Event) (string, error)
|
{
eventJSON, err := json.Marshal(event)
if err != nil {
return "", err
}
return fmt.Sprintf("Event data update:\n\n%s", eventJSON), nil
}
|
identifier_body
|
|
main.go
|
Usage: "The OpsGenie API authentication token, use default from OPSGENIE_AUTHTOKEN env var",
Value: &plugin.AuthToken,
},
{
Path: "team",
Env: "OPSGENIE_TEAM",
Argument: "team",
Shorthand: "t",
Default: "",
Usage: "The OpsGenie Team, use default from OPSGENIE_TEAM env var",
Value: &plugin.Team,
},
{
Path: "priority",
Env: "OPSGENIE_PRIORITY",
Argument: "priority",
Shorthand: "p",
Default: "P3",
Usage: "The OpsGenie Alert Priority, use default from OPSGENIE_PRIORITY env var",
|
},
{
Path: "sensuDashboard",
Env: "OPSGENIE_SENSU_DASHBOARD",
Argument: "sensuDashboard",
Shorthand: "s",
Default: "disabled",
Usage: "The OpsGenie Handler will use it to create a source Sensu Dashboard URL. Use OPSGENIE_SENSU_DASHBOARD. Example: http://sensu-dashboard.example.local/c/~/n",
Value: &plugin.SensuDashboard,
},
{
Path: "messageTemplate",
Env: "OPSGENIE_MESSAGE_TEMPLATE",
Argument: "messageTemplate",
Shorthand: "m",
Default: "{{.Entity.Name}}/{{.Check.Name}}",
Usage: "The template for the message to be sent",
Value: &plugin.MessageTemplate,
},
{
Path: "messageLimit",
Env: "OPSGENIE_MESSAGE_LIMIT",
Argument: "messageLimit",
Shorthand: "l",
Default: 130,
Usage: "The maximum length of the message field",
Value: &plugin.MessageLimit,
},
{
Path: "descriptionTemplate",
Env: "OPSGENIE_DESCRIPTION_TEMPLATE",
Argument: "descriptionTemplate",
Shorthand: "d",
Default: "{{.Check.Output}}",
Usage: "The template for the description to be sent",
Value: &plugin.DescriptionTemplate,
},
{
Path: "descriptionLimit",
Env: "OPSGENIE_DESCRIPTION_LIMIT",
Argument: "descriptionLimit",
Shorthand: "L",
Default: 15000,
Usage: "The maximum length of the description field",
Value: &plugin.DescriptionLimit,
},
{
Path: "includeEventInNote",
Env: "",
Argument: "includeEventInNote",
Shorthand: "i",
Default: false,
Usage: "Include the event JSON in the payload sent to OpsGenie",
Value: &plugin.IncludeEventInNote,
},
{
Path: "withAnnotations",
Env: "",
Argument: "withAnnotations",
Shorthand: "w",
Default: false,
Usage: "Include the event.metadata.Annotations in details to send to OpsGenie",
Value: &plugin.WithAnnotations,
},
{
Path: "withLabels",
Env: "",
Argument: "withLabels",
Shorthand: "W",
Default: false,
Usage: "Include the event.metadata.Labels in details to send to OpsGenie",
Value: &plugin.WithLabels,
},
{
Path: "fullDetails",
Env: "",
Argument: "fullDetails",
Shorthand: "F",
Default: false,
			Usage:     "Include more details to send to OpsGenie, like proxy_entity_name, occurrences and agent details (arch and os)",
Value: &plugin.FullDetails,
},
}
)
func main() {
handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler)
handler.Execute()
}
func checkArgs(_ *types.Event) error {
if len(plugin.AuthToken) == 0 {
return fmt.Errorf("authentication token is empty")
}
if len(plugin.Team) == 0 {
return fmt.Errorf("team is empty")
}
return nil
}
// eventPriority func reads the configured priority and returns the corresponding alert.Px value
// check.Annotations override Entity.Annotations
func eventPriority() alert.Priority {
switch plugin.Priority {
case "P5":
return alert.P5
case "P4":
return alert.P4
case "P3":
return alert.P3
case "P2":
return alert.P2
case "P1":
return alert.P1
default:
return alert.P3
}
}
func parseActions(event *types.Event) (output []string) {
if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" {
output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",")
return output
}
return output
}
// parseEventKeyTags func returns a string, a string, and a []string with event data
// first string contains the custom template string to use in the message
// second string contains Entity.Name/Check.Name to use in the alias
// []string contains Entity.Name, Check.Name, Entity.Namespace, and Entity.EntityClass to use as tags in Opsgenie
func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) {
alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name)
title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event)
if err != nil {
return "", "", []string{}
}
tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass)
return trim(title, plugin.MessageLimit), alias, tags
}
// parseDescription func returns a string with the custom template string to use in the description
func parseDescription(event *types.Event) (description string) {
description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event)
if err != nil {
return ""
}
// allow newlines to get expanded
description = strings.Replace(description, `\n`, "\n", -1)
return trim(description, plugin.DescriptionLimit)
}
// parseDetails func returns a map[string]string with check information for the details field
func parseDetails(event *types.Event) map[string]string {
details := make(map[string]string)
details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions)
details["status"] = fmt.Sprintf("%d", event.Check.Status)
details["interval"] = fmt.Sprintf("%d", event.Check.Interval)
// only if true
if plugin.FullDetails {
details["output"] = event.Check.Output
details["command"] = event.Check.Command
details["proxy_entity_name"] = event.Check.ProxyEntityName
details["state"] = event.Check.State
details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl)
details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences)
details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark)
details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers)
if event.Entity.EntityClass == "agent" {
details["arch"] = event.Entity.System.GetArch()
details["os"] = event.Entity.System.GetOS()
details["platform"] = event.Entity.System.GetPlatform()
details["platform_family"] = event.Entity.System.GetPlatformFamily()
details["platform_version"] = event.Entity.System.GetPlatformVersion()
}
}
// only if true
if plugin.WithAnnotations {
if event.Check.Annotations != nil {
for key, value := range event.Check.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
checkKey := fmt.Sprintf("%s_annotation_%s", "check", key)
details[checkKey] = value
}
}
}
if event.Entity.Annotations != nil {
for key, value := range event.Entity.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key)
details[entityKey] = value
}
}
}
}
// only if true
if plugin.WithLabels {
if event.Check.Labels != nil {
for key, value := range event.Check.Labels {
checkKey := fmt.Sprintf("%s_label_%s", "check", key)
details[checkKey] = value
}
}
if event.Entity.Labels != nil {
for key, value := range event.Entity.Labels {
entityKey := fmt.Sprintf("%s_label_%s", "entity", key)
details[entityKey] = value
}
}
}
if plugin.SensuDashboard != "disabled" {
details["sensuDashboard"]
|
Value: &plugin.Priority,
|
random_line_split
|
main.go
|
Usage: "The OpsGenie API authentication token, use default from OPSGENIE_AUTHTOKEN env var",
Value: &plugin.AuthToken,
},
{
Path: "team",
Env: "OPSGENIE_TEAM",
Argument: "team",
Shorthand: "t",
Default: "",
Usage: "The OpsGenie Team, use default from OPSGENIE_TEAM env var",
Value: &plugin.Team,
},
{
Path: "priority",
Env: "OPSGENIE_PRIORITY",
Argument: "priority",
Shorthand: "p",
Default: "P3",
Usage: "The OpsGenie Alert Priority, use default from OPSGENIE_PRIORITY env var",
Value: &plugin.Priority,
},
{
Path: "sensuDashboard",
Env: "OPSGENIE_SENSU_DASHBOARD",
Argument: "sensuDashboard",
Shorthand: "s",
Default: "disabled",
Usage: "The OpsGenie Handler will use it to create a source Sensu Dashboard URL. Use OPSGENIE_SENSU_DASHBOARD. Example: http://sensu-dashboard.example.local/c/~/n",
Value: &plugin.SensuDashboard,
},
{
Path: "messageTemplate",
Env: "OPSGENIE_MESSAGE_TEMPLATE",
Argument: "messageTemplate",
Shorthand: "m",
Default: "{{.Entity.Name}}/{{.Check.Name}}",
Usage: "The template for the message to be sent",
Value: &plugin.MessageTemplate,
},
{
Path: "messageLimit",
Env: "OPSGENIE_MESSAGE_LIMIT",
Argument: "messageLimit",
Shorthand: "l",
Default: 130,
Usage: "The maximum length of the message field",
Value: &plugin.MessageLimit,
},
{
Path: "descriptionTemplate",
Env: "OPSGENIE_DESCRIPTION_TEMPLATE",
Argument: "descriptionTemplate",
Shorthand: "d",
Default: "{{.Check.Output}}",
Usage: "The template for the description to be sent",
Value: &plugin.DescriptionTemplate,
},
{
Path: "descriptionLimit",
Env: "OPSGENIE_DESCRIPTION_LIMIT",
Argument: "descriptionLimit",
Shorthand: "L",
Default: 15000,
Usage: "The maximum length of the description field",
Value: &plugin.DescriptionLimit,
},
{
Path: "includeEventInNote",
Env: "",
Argument: "includeEventInNote",
Shorthand: "i",
Default: false,
Usage: "Include the event JSON in the payload sent to OpsGenie",
Value: &plugin.IncludeEventInNote,
},
{
Path: "withAnnotations",
Env: "",
Argument: "withAnnotations",
Shorthand: "w",
Default: false,
Usage: "Include the event.metadata.Annotations in details to send to OpsGenie",
Value: &plugin.WithAnnotations,
},
{
Path: "withLabels",
Env: "",
Argument: "withLabels",
Shorthand: "W",
Default: false,
Usage: "Include the event.metadata.Labels in details to send to OpsGenie",
Value: &plugin.WithLabels,
},
{
Path: "fullDetails",
Env: "",
Argument: "fullDetails",
Shorthand: "F",
Default: false,
			Usage:     "Include more details to send to OpsGenie, like proxy_entity_name, occurrences and agent details (arch and os)",
Value: &plugin.FullDetails,
},
}
)
func main() {
handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler)
handler.Execute()
}
func checkArgs(_ *types.Event) error {
if len(plugin.AuthToken) == 0 {
return fmt.Errorf("authentication token is empty")
}
if len(plugin.Team) == 0 {
return fmt.Errorf("team is empty")
}
return nil
}
// eventPriority func reads the configured priority and returns the corresponding alert.Px value
// check.Annotations override Entity.Annotations
func eventPriority() alert.Priority {
switch plugin.Priority {
case "P5":
return alert.P5
case "P4":
return alert.P4
case "P3":
return alert.P3
case "P2":
return alert.P2
case "P1":
return alert.P1
default:
return alert.P3
}
}
func parseActions(event *types.Event) (output []string) {
if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" {
output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",")
return output
}
return output
}
// parseEventKeyTags func returns a string, a string, and a []string with event data
// first string contains the custom template string to use in the message
// second string contains Entity.Name/Check.Name to use in the alias
// []string contains Entity.Name, Check.Name, Entity.Namespace, and Entity.EntityClass to use as tags in Opsgenie
func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) {
alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name)
title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event)
if err != nil {
return "", "", []string{}
}
tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass)
return trim(title, plugin.MessageLimit), alias, tags
}
// parseDescription func returns a string with the custom template string to use in the description
func parseDescription(event *types.Event) (description string) {
description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event)
if err != nil {
return ""
}
// allow newlines to get expanded
description = strings.Replace(description, `\n`, "\n", -1)
return trim(description, plugin.DescriptionLimit)
}
// parseDetails func returns a map[string]string with check information for the details field
func parseDetails(event *types.Event) map[string]string {
details := make(map[string]string)
details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions)
details["status"] = fmt.Sprintf("%d", event.Check.Status)
details["interval"] = fmt.Sprintf("%d", event.Check.Interval)
// only if true
if plugin.FullDetails
|
// only if true
if plugin.WithAnnotations {
if event.Check.Annotations != nil {
for key, value := range event.Check.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
checkKey := fmt.Sprintf("%s_annotation_%s", "check", key)
details[checkKey] = value
}
}
}
if event.Entity.Annotations != nil {
for key, value := range event.Entity.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key)
details[entityKey] = value
}
}
}
}
// only if true
if plugin.WithLabels {
if event.Check.Labels != nil {
for key, value := range event.Check.Labels {
checkKey := fmt.Sprintf("%s_label_%s", "check", key)
details[checkKey] = value
}
}
if event.Entity.Labels != nil {
for key, value := range event.Entity.Labels {
entityKey := fmt.Sprintf("%s_label_%s", "entity", key)
details[entityKey] = value
}
}
}
if plugin.SensuDashboard != "disabled" {
details["sensu
|
{
details["output"] = event.Check.Output
details["command"] = event.Check.Command
details["proxy_entity_name"] = event.Check.ProxyEntityName
details["state"] = event.Check.State
details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl)
details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences)
details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark)
details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers)
if event.Entity.EntityClass == "agent" {
details["arch"] = event.Entity.System.GetArch()
details["os"] = event.Entity.System.GetOS()
details["platform"] = event.Entity.System.GetPlatform()
details["platform_family"] = event.Entity.System.GetPlatformFamily()
details["platform_version"] = event.Entity.System.GetPlatformVersion()
}
}
|
conditional_block
|
main.go
|
Default: 15000,
Usage: "The maximum length of the description field",
Value: &plugin.DescriptionLimit,
},
{
Path: "includeEventInNote",
Env: "",
Argument: "includeEventInNote",
Shorthand: "i",
Default: false,
Usage: "Include the event JSON in the payload sent to OpsGenie",
Value: &plugin.IncludeEventInNote,
},
{
Path: "withAnnotations",
Env: "",
Argument: "withAnnotations",
Shorthand: "w",
Default: false,
Usage: "Include the event.metadata.Annotations in details to send to OpsGenie",
Value: &plugin.WithAnnotations,
},
{
Path: "withLabels",
Env: "",
Argument: "withLabels",
Shorthand: "W",
Default: false,
Usage: "Include the event.metadata.Labels in details to send to OpsGenie",
Value: &plugin.WithLabels,
},
{
Path: "fullDetails",
Env: "",
Argument: "fullDetails",
Shorthand: "F",
Default: false,
			Usage:     "Include more details to send to OpsGenie, like proxy_entity_name, occurrences and agent details (arch and os)",
Value: &plugin.FullDetails,
},
}
)
func main() {
handler := sensu.NewGoHandler(&plugin.PluginConfig, options, checkArgs, executeHandler)
handler.Execute()
}
func checkArgs(_ *types.Event) error {
if len(plugin.AuthToken) == 0 {
return fmt.Errorf("authentication token is empty")
}
if len(plugin.Team) == 0 {
return fmt.Errorf("team is empty")
}
return nil
}
// eventPriority func reads the configured priority and returns the corresponding alert.Px value
// check.Annotations override Entity.Annotations
func eventPriority() alert.Priority {
switch plugin.Priority {
case "P5":
return alert.P5
case "P4":
return alert.P4
case "P3":
return alert.P3
case "P2":
return alert.P2
case "P1":
return alert.P1
default:
return alert.P3
}
}
func parseActions(event *types.Event) (output []string) {
if event.Check.Annotations != nil && event.Check.Annotations["opsgenie_actions"] != "" {
output = strings.Split(event.Check.Annotations["opsgenie_actions"], ",")
return output
}
return output
}
// parseEventKeyTags func returns a string, a string, and a []string with event data
// first string contains the custom template string to use in the message
// second string contains Entity.Name/Check.Name to use in the alias
// []string contains Entity.Name, Check.Name, Entity.Namespace, and Entity.EntityClass to use as tags in Opsgenie
func parseEventKeyTags(event *types.Event) (title string, alias string, tags []string) {
alias = fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name)
title, err := templates.EvalTemplate("title", plugin.MessageTemplate, event)
if err != nil {
return "", "", []string{}
}
tags = append(tags, event.Entity.Name, event.Check.Name, event.Entity.Namespace, event.Entity.EntityClass)
return trim(title, plugin.MessageLimit), alias, tags
}
// parseDescription func returns a string with the custom template string to use in the description
func parseDescription(event *types.Event) (description string) {
description, err := templates.EvalTemplate("description", plugin.DescriptionTemplate, event)
if err != nil {
return ""
}
// allow newlines to get expanded
description = strings.Replace(description, `\n`, "\n", -1)
return trim(description, plugin.DescriptionLimit)
}
// parseDetails func returns a map[string]string with check information for the details field
func parseDetails(event *types.Event) map[string]string {
details := make(map[string]string)
details["subscriptions"] = fmt.Sprintf("%v", event.Check.Subscriptions)
details["status"] = fmt.Sprintf("%d", event.Check.Status)
details["interval"] = fmt.Sprintf("%d", event.Check.Interval)
// only if true
if plugin.FullDetails {
details["output"] = event.Check.Output
details["command"] = event.Check.Command
details["proxy_entity_name"] = event.Check.ProxyEntityName
details["state"] = event.Check.State
details["ttl"] = fmt.Sprintf("%d", event.Check.Ttl)
details["occurrences"] = fmt.Sprintf("%d", event.Check.Occurrences)
details["occurrences_watermark"] = fmt.Sprintf("%d", event.Check.OccurrencesWatermark)
details["handlers"] = fmt.Sprintf("%v", event.Check.Handlers)
if event.Entity.EntityClass == "agent" {
details["arch"] = event.Entity.System.GetArch()
details["os"] = event.Entity.System.GetOS()
details["platform"] = event.Entity.System.GetPlatform()
details["platform_family"] = event.Entity.System.GetPlatformFamily()
details["platform_version"] = event.Entity.System.GetPlatformVersion()
}
}
// only if true
if plugin.WithAnnotations {
if event.Check.Annotations != nil {
for key, value := range event.Check.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
checkKey := fmt.Sprintf("%s_annotation_%s", "check", key)
details[checkKey] = value
}
}
}
if event.Entity.Annotations != nil {
for key, value := range event.Entity.Annotations {
if !strings.Contains(key, "sensu.io/plugins/sensu-opsgenie-handler/config") {
entityKey := fmt.Sprintf("%s_annotation_%s", "entity", key)
details[entityKey] = value
}
}
}
}
	// only when WithLabels is enabled
if plugin.WithLabels {
if event.Check.Labels != nil {
for key, value := range event.Check.Labels {
checkKey := fmt.Sprintf("%s_label_%s", "check", key)
details[checkKey] = value
}
}
if event.Entity.Labels != nil {
for key, value := range event.Entity.Labels {
entityKey := fmt.Sprintf("%s_label_%s", "entity", key)
details[entityKey] = value
}
}
}
if plugin.SensuDashboard != "disabled" {
details["sensuDashboard"] = fmt.Sprintf("source: %s/%s/events/%s/%s \n", plugin.SensuDashboard, event.Entity.Namespace, event.Entity.Name, event.Check.Name)
}
return details
}
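// Illustrative only: with FullDetails, WithAnnotations and WithLabels disabled and
// SensuDashboard set to "disabled", the returned map contains just "subscriptions",
// "status" and "interval"; enabling the flags layers in check output/command/state,
// "check_annotation_*"/"entity_annotation_*" and "check_label_*"/"entity_label_*" keys,
// plus a "sensuDashboard" link.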
// switchOpsgenieRegion func returns the OpsGenie API URL for the configured region (defaults to the US endpoint)
func switchOpsgenieRegion() client.ApiUrl {
var region client.ApiUrl
apiRegionLowCase := strings.ToLower(plugin.APIRegion)
switch apiRegionLowCase {
case "eu":
region = client.API_URL_EU
case "us":
region = client.API_URL
default:
region = client.API_URL
}
return region
}
func executeHandler(event *types.Event) error {
alertClient, err := alert.NewClient(&client.Config{
ApiKey: plugin.AuthToken,
OpsGenieAPIURL: switchOpsgenieRegion(),
})
if err != nil {
return fmt.Errorf("failed to create opsgenie client: %s", err)
}
if event.Check.Status != 0 {
return createIncident(alertClient, event)
}
	// check if the event already has an open alert
hasAlert, _ := getAlert(alertClient, event)
	// close the alert if the check has recovered (status == 0)
if hasAlert != notFound && event.Check.Status == 0 {
return closeAlert(alertClient, event, hasAlert)
}
return nil
}
// createIncident func creates an alert in OpsGenie
func createIncident(alertClient *alert.Client, event *types.Event) error {
var (
note string
err error
)
if plugin.IncludeEventInNote {
note, err = getNote(event)
if err != nil {
return err
}
}
teams := []alert.Responder{
{Type: alert.EscalationResponder, Name: plugin.Team},
{Type: alert.ScheduleResponder, Name: plugin.Team},
}
title, alias, tags := parseEventKeyTags(event)
actions := parseActions(event)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
createResult, err := alertClient.Create(ctx, &alert.CreateAlertRequest{
Message: title,
Alias: alias,
Description: parseDescription(event),
Responders: teams,
Actions: actions,
Tags: tags,
Details: parseDetails(event),
Entity: event.Entity.Name,
Source: source,
Priority: eventPriority(),
Note: note,
})
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Create request ID: " + createResult.RequestId)
}
return nil
}
// getAlert func gets an alert using an alias.
func
|
getAlert
|
identifier_name
|
|
pages.py
|
):
"""
Base class for all pages in the test site.
"""
# Get the server port from the environment
# (set by the test runner script)
SERVER_PORT = os.environ.get("SERVER_PORT", 8005)
def is_browser_on_page(self):
title = self.name.lower().replace('_', ' ')
return title in self.browser.title.lower()
@property
def url(self):
return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html")
@property
def output(self):
"""
Return the contents of the "#output" div on the page.
The fixtures are configured to update this div when the user
interacts with the page.
"""
text_list = self.q(css='#output').text
if len(text_list) < 1:
return None
return text_list[0]
class ButtonPage(SitePage):
"""
Page for testing button interactions.
"""
name = "button"
def click_button(self):
"""
Click the button on the page, which should cause the JavaScript
to update the #output div.
"""
self.q(css='div#fixture input').first.click()
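# Minimal usage sketch (assumes a bok-choy style page object that takes a browser
# instance, and that the fixture site is being served on SERVER_PORT):
#
#     page = ButtonPage(browser)
#     page.visit()                     # opens http://localhost:<SERVER_PORT>/button.html
#     page.click_button()
#     assert page.output is not None   # the #output div was updated by the click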
class TextFieldPage(SitePage):
"""
Page for testing text field interactions.
"""
name = "text_field"
def
|
(self, text):
"""
Input `text` into the text field on the page.
"""
self.q(css='#fixture input').fill(text)
class SelectPage(SitePage):
"""
Page for testing select input interactions.
"""
name = "select"
def select_car(self, car_value):
"""
Select the car with ``car_value`` in the drop-down list.
"""
self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
def is_car_selected(self, car):
"""
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
"""
return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
class CheckboxPage(SitePage):
"""
Page for testing checkbox interactions.
"""
name = "checkbox"
def toggle_pill(self, pill_name):
"""
Toggle the box for the pill with `pill_name` (red or blue).
"""
self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
class AlertPage(SitePage):
"""
Page for testing alert handling.
"""
name = "alert"
def confirm(self):
"""
Click the ``Confirm`` button and confirm the dialog.
"""
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click()
def cancel(self):
"""
Click the ``Confirm`` button and cancel the dialog.
"""
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click()
def dismiss(self):
"""
Click the ``Alert`` button and confirm the alert.
"""
with self.handle_alert():
self.q(css='button#alert').first.click()
class SelectorPage(SitePage):
"""
Page for testing retrieval of information by CSS selectors.
"""
name = "selector"
@property
def num_divs(self):
"""
Count the number of div.test elements.
"""
return len(self.q(css='div.test').results)
@property
def div_text_list(self):
"""
Return list of text for each div.test element.
"""
return self.q(css='div.test').text
@property
def div_value_list(self):
"""
Return list of values for each div.test element.
"""
return self.q(css='div.test').attrs('value')
@property
def div_html_list(self):
"""
Return list of html for each div.test element.
"""
return self.q(css='div.test').html
def ids_of_outer_divs_with_inner_text(self, child_text):
"""
Return a list of the ids of outer divs with
the specified text in a child element.
"""
return self.q(css='div.outer').filter(
lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id')
class DelayPage(SitePage):
"""
Page for testing elements that appear after a delay.
"""
name = "delay"
def trigger_output(self):
"""
Wait for click handlers to be installed,
then click a button and retrieve the output that appears
after a delay.
"""
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
def make_broken_promise(self):
"""
Make a promise that will not be fulfilled.
Should raise a `BrokenPromise` exception.
"""
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill()
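# Hypothetical test usage (assumes BrokenPromise can be imported from bok_choy.promise):
#
#     page = DelayPage(browser)
#     page.visit()
#     try:
#         page.make_broken_promise()
#     except BrokenPromise:
#         pass  # expected: div#not_present never appears within the 3 tries above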
class SlowPage(SitePage):
"""
Page that loads its elements slowly.
"""
name = "slow"
def is_browser_on_page(self):
return self.q(css='div#ready').is_present()
class NextPage(SitePage):
"""
Page that loads another page after a delay.
"""
name = "next_page"
def is_browser_on_page(self):
return self.q(css='#next').is_present()
def load_next(self, page, delay_sec):
"""
        Load the given `page` after waiting for `delay_sec` seconds.
"""
time.sleep(delay_sec)
page.visit()
@js_defined('$')
class FocusedPage(SitePage):
"""
Page that has a link to a focusable element.
"""
name = "focused"
@wait_for_js
def focus_on_main_content(self):
"""
Give focus to the element with the ``main-content`` ID.
"""
self.browser.execute_script("$('#main-content').focus()")
class VisiblePage(SitePage):
"""
Page that has some elements visible and others invisible.
"""
name = "visible"
def is_visible(self, name):
"""
Return a boolean indicating whether the given item is visible.
"""
return self.q(css="div.{}".format(name)).first.visible
def is_invisible(self, name):
"""
Return a boolean indicating whether the given element is present, but not visible.
"""
return self.q(css="div.{}".format(name)).first.invisible
@js_defined('test_var1', 'test_var2')
class JavaScriptPage(SitePage):
"""
Page for testing asynchronous JavaScript.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@wait_for_js
def reload_and_trigger_output(self):
"""
Reload the page, wait for JS, then trigger the output.
"""
self.browser.refresh()
self.wait_for_js() # pylint: disable=no-member
self.q(css='div#fixture button').first.click()
@js_defined('something.SomethingThatDoesntExist')
class JavaScriptUndefinedPage(SitePage):
"""
Page for testing asynchronous JavaScript, where the
javascript that we wait for is never defined.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@requirejs('main')
class RequireJSPage(SitePage):
"""
Page for testing asynchronous JavaScript loaded with RequireJS.
"""
name = "requirejs"
@property
@wait_for_js
def output(self):
"""
Wait for scripts to finish and then return the contents of the
``#output`` div on the page.
"""
return super(RequireJSPage, self).output
class AjaxNoJQueryPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax_no_jquery"
class AjaxPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax"
def click_button(self):
"""
Click the button on the page, which triggers an ajax
call that updates the #output div.
"""
self.q(css='div#fixture button').first.click()
class WaitsPage(SitePage):
"""
Page for testing wait helpers.
"""
name = "wait"
def is_button_output_present(self):
"""
Click button and wait until output id appears in DOM.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_presence('div#output', 'Button Output is Available')
def is_class_absent(self):
"""
        Click the button and wait until the playing class disappears from the DOM.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped')
def is_button_output_visible(self):
"""
Click button and wait until output is displayed.
"""
self.wait_for_element
|
enter_text
|
identifier_name
|
pages.py
|
):
"""
Base class for all pages in the test site.
"""
# Get the server port from the environment
# (set by the test runner script)
SERVER_PORT = os.environ.get("SERVER_PORT", 8005)
def is_browser_on_page(self):
title = self.name.lower().replace('_', ' ')
return title in self.browser.title.lower()
@property
def url(self):
return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html")
@property
def output(self):
"""
Return the contents of the "#output" div on the page.
The fixtures are configured to update this div when the user
interacts with the page.
"""
text_list = self.q(css='#output').text
if len(text_list) < 1:
return None
return text_list[0]
class ButtonPage(SitePage):
"""
Page for testing button interactions.
"""
name = "button"
def click_button(self):
"""
Click the button on the page, which should cause the JavaScript
to update the #output div.
"""
self.q(css='div#fixture input').first.click()
class TextFieldPage(SitePage):
"""
Page for testing text field interactions.
"""
name = "text_field"
def enter_text(self, text):
"""
Input `text` into the text field on the page.
"""
self.q(css='#fixture input').fill(text)
class SelectPage(SitePage):
"""
Page for testing select input interactions.
"""
name = "select"
def select_car(self, car_value):
"""
Select the car with ``car_value`` in the drop-down list.
"""
self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
def is_car_selected(self, car):
"""
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
"""
return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
class CheckboxPage(SitePage):
"""
Page for testing checkbox interactions.
"""
name = "checkbox"
def toggle_pill(self, pill_name):
"""
Toggle the box for the pill with `pill_name` (red or blue).
"""
self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
class AlertPage(SitePage):
"""
Page for testing alert handling.
"""
name = "alert"
def confirm(self):
|
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click()
def cancel(self):
"""
Click the ``Confirm`` button and cancel the dialog.
"""
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click()
def dismiss(self):
"""
Click the ``Alert`` button and confirm the alert.
"""
with self.handle_alert():
self.q(css='button#alert').first.click()
class SelectorPage(SitePage):
"""
Page for testing retrieval of information by CSS selectors.
"""
name = "selector"
@property
def num_divs(self):
"""
Count the number of div.test elements.
"""
return len(self.q(css='div.test').results)
@property
def div_text_list(self):
"""
Return list of text for each div.test element.
"""
return self.q(css='div.test').text
@property
def div_value_list(self):
"""
Return list of values for each div.test element.
"""
return self.q(css='div.test').attrs('value')
@property
def div_html_list(self):
"""
Return list of html for each div.test element.
"""
return self.q(css='div.test').html
def ids_of_outer_divs_with_inner_text(self, child_text):
"""
Return a list of the ids of outer divs with
the specified text in a child element.
"""
return self.q(css='div.outer').filter(
lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id')
class DelayPage(SitePage):
"""
Page for testing elements that appear after a delay.
"""
name = "delay"
def trigger_output(self):
"""
Wait for click handlers to be installed,
then click a button and retrieve the output that appears
after a delay.
"""
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
def make_broken_promise(self):
"""
Make a promise that will not be fulfilled.
Should raise a `BrokenPromise` exception.
"""
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill()
class SlowPage(SitePage):
"""
Page that loads its elements slowly.
"""
name = "slow"
def is_browser_on_page(self):
return self.q(css='div#ready').is_present()
class NextPage(SitePage):
"""
Page that loads another page after a delay.
"""
name = "next_page"
def is_browser_on_page(self):
return self.q(css='#next').is_present()
def load_next(self, page, delay_sec):
"""
        Load the given `page` after waiting for `delay_sec` seconds.
"""
time.sleep(delay_sec)
page.visit()
@js_defined('$')
class FocusedPage(SitePage):
"""
Page that has a link to a focusable element.
"""
name = "focused"
@wait_for_js
def focus_on_main_content(self):
"""
Give focus to the element with the ``main-content`` ID.
"""
self.browser.execute_script("$('#main-content').focus()")
class VisiblePage(SitePage):
"""
Page that has some elements visible and others invisible.
"""
name = "visible"
def is_visible(self, name):
"""
Return a boolean indicating whether the given item is visible.
"""
return self.q(css="div.{}".format(name)).first.visible
def is_invisible(self, name):
"""
Return a boolean indicating whether the given element is present, but not visible.
"""
return self.q(css="div.{}".format(name)).first.invisible
@js_defined('test_var1', 'test_var2')
class JavaScriptPage(SitePage):
"""
Page for testing asynchronous JavaScript.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@wait_for_js
def reload_and_trigger_output(self):
"""
Reload the page, wait for JS, then trigger the output.
"""
self.browser.refresh()
self.wait_for_js() # pylint: disable=no-member
self.q(css='div#fixture button').first.click()
@js_defined('something.SomethingThatDoesntExist')
class JavaScriptUndefinedPage(SitePage):
"""
Page for testing asynchronous JavaScript, where the
javascript that we wait for is never defined.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@requirejs('main')
class RequireJSPage(SitePage):
"""
Page for testing asynchronous JavaScript loaded with RequireJS.
"""
name = "requirejs"
@property
@wait_for_js
def output(self):
"""
Wait for scripts to finish and then return the contents of the
``#output`` div on the page.
"""
return super(RequireJSPage, self).output
class AjaxNoJQueryPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax_no_jquery"
class AjaxPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax"
def click_button(self):
"""
Click the button on the page, which triggers an ajax
call that updates the #output div.
"""
self.q(css='div#fixture button').first.click()
class WaitsPage(SitePage):
"""
Page for testing wait helpers.
"""
name = "wait"
def is_button_output_present(self):
"""
Click button and wait until output id appears in DOM.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_presence('div#output', 'Button Output is Available')
def is_class_absent(self):
"""
        Click the button and wait until the playing class disappears from the DOM.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped')
def is_button_output_visible(self):
"""
Click button and wait until output is displayed.
"""
self.wait_for_element_presence
|
"""
Click the ``Confirm`` button and confirm the dialog.
"""
|
random_line_split
|
pages.py
|
):
"""
Base class for all pages in the test site.
"""
# Get the server port from the environment
# (set by the test runner script)
SERVER_PORT = os.environ.get("SERVER_PORT", 8005)
def is_browser_on_page(self):
title = self.name.lower().replace('_', ' ')
return title in self.browser.title.lower()
@property
def url(self):
return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html")
@property
def output(self):
"""
Return the contents of the "#output" div on the page.
The fixtures are configured to update this div when the user
interacts with the page.
"""
text_list = self.q(css='#output').text
if len(text_list) < 1:
|
return text_list[0]
class ButtonPage(SitePage):
"""
Page for testing button interactions.
"""
name = "button"
def click_button(self):
"""
Click the button on the page, which should cause the JavaScript
to update the #output div.
"""
self.q(css='div#fixture input').first.click()
class TextFieldPage(SitePage):
"""
Page for testing text field interactions.
"""
name = "text_field"
def enter_text(self, text):
"""
Input `text` into the text field on the page.
"""
self.q(css='#fixture input').fill(text)
class SelectPage(SitePage):
"""
Page for testing select input interactions.
"""
name = "select"
def select_car(self, car_value):
"""
Select the car with ``car_value`` in the drop-down list.
"""
self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
def is_car_selected(self, car):
"""
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
"""
return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
class CheckboxPage(SitePage):
"""
Page for testing checkbox interactions.
"""
name = "checkbox"
def toggle_pill(self, pill_name):
"""
Toggle the box for the pill with `pill_name` (red or blue).
"""
self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
class AlertPage(SitePage):
"""
Page for testing alert handling.
"""
name = "alert"
def confirm(self):
"""
Click the ``Confirm`` button and confirm the dialog.
"""
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click()
def cancel(self):
"""
Click the ``Confirm`` button and cancel the dialog.
"""
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click()
def dismiss(self):
"""
Click the ``Alert`` button and confirm the alert.
"""
with self.handle_alert():
self.q(css='button#alert').first.click()
class SelectorPage(SitePage):
"""
Page for testing retrieval of information by CSS selectors.
"""
name = "selector"
@property
def num_divs(self):
"""
Count the number of div.test elements.
"""
return len(self.q(css='div.test').results)
@property
def div_text_list(self):
"""
Return list of text for each div.test element.
"""
return self.q(css='div.test').text
@property
def div_value_list(self):
"""
Return list of values for each div.test element.
"""
return self.q(css='div.test').attrs('value')
@property
def div_html_list(self):
"""
Return list of html for each div.test element.
"""
return self.q(css='div.test').html
def ids_of_outer_divs_with_inner_text(self, child_text):
"""
Return a list of the ids of outer divs with
the specified text in a child element.
"""
return self.q(css='div.outer').filter(
lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id')
class DelayPage(SitePage):
"""
Page for testing elements that appear after a delay.
"""
name = "delay"
def trigger_output(self):
"""
Wait for click handlers to be installed,
then click a button and retrieve the output that appears
after a delay.
"""
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
def make_broken_promise(self):
"""
Make a promise that will not be fulfilled.
Should raise a `BrokenPromise` exception.
"""
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill()
class SlowPage(SitePage):
"""
Page that loads its elements slowly.
"""
name = "slow"
def is_browser_on_page(self):
return self.q(css='div#ready').is_present()
class NextPage(SitePage):
"""
Page that loads another page after a delay.
"""
name = "next_page"
def is_browser_on_page(self):
return self.q(css='#next').is_present()
def load_next(self, page, delay_sec):
"""
        Load the given `page` after waiting for `delay_sec` seconds.
"""
time.sleep(delay_sec)
page.visit()
@js_defined('$')
class FocusedPage(SitePage):
"""
Page that has a link to a focusable element.
"""
name = "focused"
@wait_for_js
def focus_on_main_content(self):
"""
Give focus to the element with the ``main-content`` ID.
"""
self.browser.execute_script("$('#main-content').focus()")
class VisiblePage(SitePage):
"""
Page that has some elements visible and others invisible.
"""
name = "visible"
def is_visible(self, name):
"""
Return a boolean indicating whether the given item is visible.
"""
return self.q(css="div.{}".format(name)).first.visible
def is_invisible(self, name):
"""
Return a boolean indicating whether the given element is present, but not visible.
"""
return self.q(css="div.{}".format(name)).first.invisible
@js_defined('test_var1', 'test_var2')
class JavaScriptPage(SitePage):
"""
Page for testing asynchronous JavaScript.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@wait_for_js
def reload_and_trigger_output(self):
"""
Reload the page, wait for JS, then trigger the output.
"""
self.browser.refresh()
self.wait_for_js() # pylint: disable=no-member
self.q(css='div#fixture button').first.click()
@js_defined('something.SomethingThatDoesntExist')
class JavaScriptUndefinedPage(SitePage):
"""
Page for testing asynchronous JavaScript, where the
javascript that we wait for is never defined.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@requirejs('main')
class RequireJSPage(SitePage):
"""
Page for testing asynchronous JavaScript loaded with RequireJS.
"""
name = "requirejs"
@property
@wait_for_js
def output(self):
"""
Wait for scripts to finish and then return the contents of the
``#output`` div on the page.
"""
return super(RequireJSPage, self).output
class AjaxNoJQueryPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax_no_jquery"
class AjaxPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax"
def click_button(self):
"""
Click the button on the page, which triggers an ajax
call that updates the #output div.
"""
self.q(css='div#fixture button').first.click()
class WaitsPage(SitePage):
"""
Page for testing wait helpers.
"""
name = "wait"
def is_button_output_present(self):
"""
Click button and wait until output id appears in DOM.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_presence('div#output', 'Button Output is Available')
def is_class_absent(self):
"""
        Click the button and wait until the playing class disappears from the DOM.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped')
def is_button_output_visible(self):
"""
Click button and wait until output is displayed.
"""
self.wait_for_element_presence
|
return None
|
conditional_block
|
pages.py
|
):
"""
Base class for all pages in the test site.
"""
# Get the server port from the environment
# (set by the test runner script)
SERVER_PORT = os.environ.get("SERVER_PORT", 8005)
def is_browser_on_page(self):
title = self.name.lower().replace('_', ' ')
return title in self.browser.title.lower()
@property
def url(self):
return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html")
@property
def output(self):
"""
Return the contents of the "#output" div on the page.
The fixtures are configured to update this div when the user
interacts with the page.
"""
text_list = self.q(css='#output').text
if len(text_list) < 1:
return None
return text_list[0]
class ButtonPage(SitePage):
"""
Page for testing button interactions.
"""
name = "button"
def click_button(self):
"""
Click the button on the page, which should cause the JavaScript
to update the #output div.
"""
self.q(css='div#fixture input').first.click()
class TextFieldPage(SitePage):
"""
Page for testing text field interactions.
"""
name = "text_field"
def enter_text(self, text):
"""
Input `text` into the text field on the page.
"""
self.q(css='#fixture input').fill(text)
class SelectPage(SitePage):
"""
Page for testing select input interactions.
"""
name = "select"
def select_car(self, car_value):
"""
Select the car with ``car_value`` in the drop-down list.
"""
self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
def is_car_selected(self, car):
"""
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
"""
return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
class CheckboxPage(SitePage):
"""
Page for testing checkbox interactions.
"""
name = "checkbox"
def toggle_pill(self, pill_name):
"""
Toggle the box for the pill with `pill_name` (red or blue).
"""
self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
class AlertPage(SitePage):
"""
Page for testing alert handling.
"""
name = "alert"
def confirm(self):
"""
Click the ``Confirm`` button and confirm the dialog.
"""
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click()
def cancel(self):
"""
Click the ``Confirm`` button and cancel the dialog.
"""
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click()
def dismiss(self):
"""
Click the ``Alert`` button and confirm the alert.
"""
with self.handle_alert():
self.q(css='button#alert').first.click()
class SelectorPage(SitePage):
"""
Page for testing retrieval of information by CSS selectors.
"""
name = "selector"
@property
def num_divs(self):
"""
Count the number of div.test elements.
"""
return len(self.q(css='div.test').results)
@property
def div_text_list(self):
"""
Return list of text for each div.test element.
"""
return self.q(css='div.test').text
@property
def div_value_list(self):
"""
Return list of values for each div.test element.
"""
return self.q(css='div.test').attrs('value')
@property
def div_html_list(self):
"""
Return list of html for each div.test element.
"""
return self.q(css='div.test').html
def ids_of_outer_divs_with_inner_text(self, child_text):
"""
Return a list of the ids of outer divs with
the specified text in a child element.
"""
return self.q(css='div.outer').filter(
lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id')
class DelayPage(SitePage):
"""
Page for testing elements that appear after a delay.
"""
name = "delay"
def trigger_output(self):
"""
Wait for click handlers to be installed,
then click a button and retrieve the output that appears
after a delay.
"""
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
def make_broken_promise(self):
"""
Make a promise that will not be fulfilled.
Should raise a `BrokenPromise` exception.
"""
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill()
class SlowPage(SitePage):
"""
Page that loads its elements slowly.
"""
name = "slow"
def is_browser_on_page(self):
return self.q(css='div#ready').is_present()
class NextPage(SitePage):
"""
Page that loads another page after a delay.
"""
name = "next_page"
def is_browser_on_page(self):
return self.q(css='#next').is_present()
def load_next(self, page, delay_sec):
"""
        Load the given `page` after waiting for `delay_sec` seconds.
"""
time.sleep(delay_sec)
page.visit()
@js_defined('$')
class FocusedPage(SitePage):
"""
Page that has a link to a focusable element.
"""
name = "focused"
@wait_for_js
def focus_on_main_content(self):
"""
Give focus to the element with the ``main-content`` ID.
"""
self.browser.execute_script("$('#main-content').focus()")
class VisiblePage(SitePage):
"""
Page that has some elements visible and others invisible.
"""
name = "visible"
def is_visible(self, name):
"""
Return a boolean indicating whether the given item is visible.
"""
return self.q(css="div.{}".format(name)).first.visible
def is_invisible(self, name):
"""
Return a boolean indicating whether the given element is present, but not visible.
"""
return self.q(css="div.{}".format(name)).first.invisible
@js_defined('test_var1', 'test_var2')
class JavaScriptPage(SitePage):
"""
Page for testing asynchronous JavaScript.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@wait_for_js
def reload_and_trigger_output(self):
|
@js_defined('something.SomethingThatDoesntExist')
class JavaScriptUndefinedPage(SitePage):
"""
Page for testing asynchronous JavaScript, where the
javascript that we wait for is never defined.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@requirejs('main')
class RequireJSPage(SitePage):
"""
Page for testing asynchronous JavaScript loaded with RequireJS.
"""
name = "requirejs"
@property
@wait_for_js
def output(self):
"""
Wait for scripts to finish and then return the contents of the
``#output`` div on the page.
"""
return super(RequireJSPage, self).output
class AjaxNoJQueryPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax_no_jquery"
class AjaxPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax"
def click_button(self):
"""
Click the button on the page, which triggers an ajax
call that updates the #output div.
"""
self.q(css='div#fixture button').first.click()
class WaitsPage(SitePage):
"""
Page for testing wait helpers.
"""
name = "wait"
def is_button_output_present(self):
"""
Click button and wait until output id appears in DOM.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_presence('div#output', 'Button Output is Available')
def is_class_absent(self):
"""
        Click the button and wait until the playing class disappears from the DOM.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped')
def is_button_output_visible(self):
"""
Click button and wait until output is displayed.
"""
self.wait_for_element
|
"""
Reload the page, wait for JS, then trigger the output.
"""
self.browser.refresh()
self.wait_for_js() # pylint: disable=no-member
self.q(css='div#fixture button').first.click()
|
identifier_body
|
Train_Trader.py
|
', bias_initializer = 'random_uniform') (h5)
model = Model(inputs = S, outputs = [P,V])
rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1)
model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms)
return model
# --------------------------------- PREPROCESS IMAGE ---------------------------------
def preprocess(image):
image = image[np.newaxis, :]
image = image[:, :, :, np.newaxis]
return image
# Initialize a new model using buildmodel() or use load_model to resume training an already trained model
model = buildmodel()
# Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
model._make_predict_function()
graph = tf.get_default_graph()
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output)
a_t[0] = 1 # Index 0 = no flap, 1 = flap # Output of network represents probability of flap
# --------------------------------- INITIALIZE THREADS ---------------------------------
game_state = []
for i in range(0,THREADS):
#game_state.append(game.GameState(30000))
game_state.append(game.PlayGame())
# --------------------------------- RUN PROCESS ---------------------------------
def runprocess(thread_id, s_t):
global T
global a_t
global model
global myCount
global score
global logCnt
global playLog
global actionList
t = 0
t_start = t
terminal = False
r_t = 0
r_store = []
state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
output_store = []
critic_store = []
s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])
while t-t_start < t_max and terminal == False:
t += 1
T += 1
intermediate_output = 0
with graph.as_default():
predictedChoice = model.predict(s_t)[0]
intermediate_output = intermediate_layer_model.predict(s_t)
randomChoice = np.random.rand()
a_t = [0 , 1] if randomChoice < predictedChoice else [1 , 0] # stochastic action
# a_t = [0,1] if 0.5 < y[0] else [1,0] # deterministic action
# x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action)
x_t, r_t, terminal = game_state[thread_id].nextStep(a_t)
x_t = preprocess(x_t)
# LOG GAME STEP
if thread_id == 0:
score = score + r_t
print("score", score)
action = game_state[0].getActionTaken()
actionList.append(action)
if terminal == True:
print("------------------------------------------------------------------------------------")
print("ENDSCORE:", score)
profit = game_state[0].getProfit()
meanAction = np.mean(actionList)
playLog.loc[logCnt] = score, profit, meanAction
logCnt += 1
playLog.to_csv("playLog.csv", index=True)
score = 0
actionList = []
# SPITOUT IMAGE EVERY GAME STEP
if 1==0:
if thread_id == 0:
mat = x_t
mat = mat[0, :, :, 0]
myCount += 1
# SAVE TO CSV
#fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv"
#np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",")
#PLOT
plt.imshow(mat, cmap='hot')
#plt.show()
fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png"
plt.savefig(fileName2)
if terminal == True:
game_state[thread_id].startGame()
with graph.as_default():
critic_reward = model.predict(s_t)[1]
y = 0 if a_t[0] == 1 else 1
r_store = np.append(r_store, r_t)
state_store = np.append(state_store, s_t, axis = 0)
output_store = np.append(output_store, y)
critic_store = np.append(critic_store, critic_reward)
s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = "+ str(intermediate_output))
if terminal == False:
r_store[len(r_store)-1] = critic_store[len(r_store)-1]
else:
r_store[len(r_store)-1] = -1
s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)
for i in range(2,len(r_store)+1):
r_store[len(r_store)-i] = r_store[len(r_store)-i] + GAMMA*r_store[len(r_store)-i + 1]
return s_t, state_store, output_store, r_store, critic_store
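# Worked example of the backward pass above (hypothetical numbers; GAMMA is assumed to be
# defined in the elided header of this script):
#   before the loop: r_store = [0.1, 0.1, V]    # V = critic bootstrap when non-terminal
#   after the loop:  r_store = [0.1 + GAMMA*(0.1 + GAMMA*V), 0.1 + GAMMA*V, V]
# i.e. each entry becomes the discounted n-step return from that timestep.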
# Function to decrease the learning rate linearly after every epoch. With decay = 3.2e-8,
# the rate reaches 0 after LEARNING_RATE / decay = 7e-4 / 3.2e-8 ≈ 21,875 epochs (roughly 20,000)
def step_decay(epoch):
global lrate
decay = 3.2e-8
lrate = LEARNING_RATE - epoch*decay
lrate = max(lrate, 0)
return lrate
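# Illustrative values of the schedule above (pure arithmetic, no new behaviour):
#   step_decay(0)     -> 7.0e-4
#   step_decay(10000) -> 7.0e-4 - 10000*3.2e-8 = 3.8e-4
#   step_decay(21875) -> 0.0, and the max(lrate, 0) clamp keeps it there afterwards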
class actorthread(threading.Thread):
def __init__(self,thread_id, s_t):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.next_state = s_t
def run(self):
global episode_output
global episode_r
global episode_critic
global episode_state
threadLock.acquire()
self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state)
self.next_state = self.next_state.reshape(self.next_state.shape[1], self.next_state.shape[2], self.next_state.shape[3])
episode_r = np.append(episode_r, r_store)
episode_output = np.append(episode_output, output_store)
episode_state = np.append(episode_state, state_store, axis = 0)
episode_critic = np.append(episode_critic, critic_store)
threadLock.release()
states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4))
# Initializing state of each thread
for i in range(0, len(game_state)):
image = game_state[i].getChartData()
#image = game_state[i].getCurrentFrame()
image = preprocess(image)
state = np.concatenate((image, image, image, image), axis=3)
states = np.append(states, state, axis = 0)
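# Each thread's initial state stacks the same preprocessed frame four times to fill the
# IMAGE_CHANNELS history; inside runprocess the newest frame is then shifted in with
# np.append(x_t, s_t[:, :, :, :3], axis=3).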
cnt = 0
trainingLog = pd.DataFrame(columns=['update', 'reward_mean', 'loss', "lrate"])
while True:
threadLock = threading.Lock()
threads = []
for i in range(0,THREADS):
threads.append(actorthread(i,states[i]))
states = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, 4))
for i in range(0,THREADS):
threads[i].start()
    #thread.join() ensures that all threads finish execution before proceeding further
for i in range(0,THREADS):
threads[i].join()
for i in range(0,THREADS):
state = threads[i].next_state
state = state.reshape(1, state.shape[0], state.shape[1], state.shape[2])
states = np.append(states, state, axis = 0)
e_mean = np.mean(episode_r)
#advantage calculation for each action taken
advantage = episode_r - episode_critic
print("backpropagating")
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate]
weights = {'o_P':advantage, 'o_V':np.ones(len(advantage))}
#backpropagation
history = model.fit(episode_state, [episode_output, episode_r], epochs = EPISODE + 1, batch_size = len(episode_output), callbacks = callbacks_list, sample_weight = weights, initial_epoch = EPISODE)
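    # Note on the fit call above (no new behaviour): the per-sample weight for the 'o_P'
    # head is the advantage (episode_r - episode_critic), so the policy log-loss gradient
    # is scaled by how much better the observed return was than the critic's estimate,
    # while the 'o_V' head uses uniform weights of 1.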
episode_r = []
episode_output = []
episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
episode_critic = []
# LOG SAVER
trainingLog.loc[cnt] = EPISODE, e_mean, history.history['loss'],lrate
cnt += 1
if cnt % 1 == 0:
trainingLog.to_csv("trainingLog.csv", index=True)
if EPISODE % 50 == 0:
|
model.save("saved_models/model_updates" + str(EPISODE))
|
conditional_block
|
|
Train_Trader.py
|
# Discount value
BETA = 0.01 # Regularisation coefficient
IMAGE_ROWS = 84
IMAGE_COLS = 84
IMAGE_CHANNELS = 4
LEARNING_RATE = 7e-4
EPISODE = 0
THREADS = 8
t_max = 5
const = 1e-5
T = 0
episode_r = []
episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
episode_output = []
episode_critic = []
ACTIONS = 2
a_t = np.zeros(ACTIONS)
lrate = 0
# --------------------------------- LOSS FUNCTION FOR POLICY OUTPUT ---------------------------------
def logloss(y_true, y_pred): # Policy loss
print("logloss:--------------------------------")
return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1)
# BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) # regularisation term
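# Equivalently (no new behaviour): since y_true is 0 or 1, the expression above reduces to
#   -log(y_pred + const)      when y_true == 1
#   -log(1 - y_pred + const)  when y_true == 0
# i.e. binary cross-entropy with const = 1e-5 added for numerical stability.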
# --------------------------------- LOSS FUNCTION FOR CRITIC OUTPUT ---------------------------------
def sumofsquares(y_true, y_pred): #critic loss
print("sumofsquares:--------------------------------")
return K.sum(K.square(y_pred - y_true), axis=-1)
# --------------------------------- BUILD MODEL ---------------------------------
def buildmodel():
print("Model building begins")
model = Sequential()
keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)
S = Input(shape = (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, ), name = 'Input')
h0 = Convolution2D(16, kernel_size = (8,8), strides = (4,4), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(S)
h1 = Convolution2D(32, kernel_size = (4,4), strides = (2,2), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(h0)
h2 = Flatten()(h1)
h3 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h2)
h4 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3)
h5 = Dense(512, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h4)
P = Dense(1, name = 'o_P', activation = 'sigmoid', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5)
V = Dense(1, name = 'o_V', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5)
model = Model(inputs = S, outputs = [P,V])
rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1)
model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms)
return model
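# Shape of the model built above: two conv layers (16 filters 8x8 stride 4, then 32 filters
# 4x4 stride 2) -> flatten -> dense 256 -> dense 256 -> dense 512, feeding two heads:
# o_P (1 unit, sigmoid, action probability) and o_V (1 unit, linear, state value),
# trained with the custom losses above weighted 1.0 and 0.5 respectively.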
# --------------------------------- PREPROCESS IMAGE ---------------------------------
def preprocess(image):
image = image[np.newaxis, :]
image = image[:, :, :, np.newaxis]
return image
# Initialize a new model using buildmodel() or use load_model to resume training an already trained model
model = buildmodel()
# Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
model._make_predict_function()
graph = tf.get_default_graph()
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output)
a_t[0] = 1 # Index 0 = no flap, 1 = flap # Output of network represents probability of flap
# --------------------------------- INITIALIZE THREADS ---------------------------------
game_state = []
for i in range(0,THREADS):
#game_state.append(game.GameState(30000))
game_state.append(game.PlayGame())
# --------------------------------- RUN PROCESS ---------------------------------
def runprocess(thread_id, s_t):
global T
global a_t
global model
global myCount
global score
global logCnt
global playLog
global actionList
t = 0
t_start = t
terminal = False
r_t = 0
r_store = []
state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
output_store = []
critic_store = []
s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])
while t-t_start < t_max and terminal == False:
t += 1
T += 1
intermediate_output = 0
with graph.as_default():
predictedChoice = model.predict(s_t)[0]
intermediate_output = intermediate_layer_model.predict(s_t)
randomChoice = np.random.rand()
a_t = [0 , 1] if randomChoice < predictedChoice else [1 , 0] # stochastic action
# a_t = [0,1] if 0.5 < y[0] else [1,0] # deterministic action
# x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action)
x_t, r_t, terminal = game_state[thread_id].nextStep(a_t)
x_t = preprocess(x_t)
# LOG GAME STEP
if thread_id == 0:
score = score + r_t
print("score", score)
action = game_state[0].getActionTaken()
actionList.append(action)
if terminal == True:
print("------------------------------------------------------------------------------------")
print("ENDSCORE:", score)
profit = game_state[0].getProfit()
meanAction = np.mean(actionList)
playLog.loc[logCnt] = score, profit, meanAction
logCnt += 1
playLog.to_csv("playLog.csv", index=True)
score = 0
actionList = []
# SPITOUT IMAGE EVERY GAME STEP
|
mat = mat[0, :, :, 0]
myCount += 1
# SAVE TO CSV
#fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv"
#np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",")
#PLOT
plt.imshow(mat, cmap='hot')
#plt.show()
fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png"
plt.savefig(fileName2)
if terminal == True:
game_state[thread_id].startGame()
with graph.as_default():
critic_reward = model.predict(s_t)[1]
y = 0 if a_t[0] == 1 else 1
r_store = np.append(r_store, r_t)
state_store = np.append(state_store, s_t, axis = 0)
output_store = np.append(output_store, y)
critic_store = np.append(critic_store, critic_reward)
s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = "+ str(intermediate_output))
if terminal == False:
r_store[len(r_store)-1] = critic_store[len(r_store)-1]
else:
r_store[len(r_store)-1] = -1
s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)
for i in range(2,len(r_store)+1):
r_store[len(r_store)-i] = r_store[len(r_store)-i] + GAMMA*r_store[len(r_store)-i + 1]
return s_t, state_store, output_store, r_store, critic_store
# Function to decrease the learning rate linearly after every epoch. With decay = 3.2e-8,
# the rate reaches 0 after LEARNING_RATE / decay = 7e-4 / 3.2e-8 ≈ 21,875 epochs (roughly 20,000)
def step_decay(epoch):
global lrate
decay = 3.2e-8
lrate = LEARNING_RATE - epoch*decay
lrate = max(lrate, 0)
return lrate
class actorthread(threading.Thread):
def __init__(self,thread_id, s_t):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.next_state = s_t
def run(self):
global episode_output
global episode_r
global episode_critic
global episode_state
threadLock.acquire()
self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state)
self.next_state = self.next_state.reshape
|
if 1==0:
if thread_id == 0:
mat = x_t
|
random_line_split
|
Train_Trader.py
|
# Discount value
BETA = 0.01 # Regularisation coefficient
IMAGE_ROWS = 84
IMAGE_COLS = 84
IMAGE_CHANNELS = 4
LEARNING_RATE = 7e-4
EPISODE = 0
THREADS = 8
t_max = 5
const = 1e-5
T = 0
episode_r = []
episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
episode_output = []
episode_critic = []
ACTIONS = 2
a_t = np.zeros(ACTIONS)
lrate = 0
# --------------------------------- LOSS FUNCTION FOR POLICY OUTPUT ---------------------------------
def logloss(y_true, y_pred): # Policy loss
print("logloss:--------------------------------")
return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1)
# BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) # regularisation term
# --------------------------------- LOSS FUNCTION FOR CRITIC OUTPUT ---------------------------------
def sumofsquares(y_true, y_pred): #critic loss
print("sumofsquares:--------------------------------")
return K.sum(K.square(y_pred - y_true), axis=-1)
# --------------------------------- BUILD MODEL ---------------------------------
def buildmodel():
print("Model building begins")
model = Sequential()
keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)
S = Input(shape = (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, ), name = 'Input')
h0 = Convolution2D(16, kernel_size = (8,8), strides = (4,4), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(S)
h1 = Convolution2D(32, kernel_size = (4,4), strides = (2,2), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(h0)
h2 = Flatten()(h1)
h3 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h2)
h4 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3)
h5 = Dense(512, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h4)
P = Dense(1, name = 'o_P', activation = 'sigmoid', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5)
V = Dense(1, name = 'o_V', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5)
model = Model(inputs = S, outputs = [P,V])
rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1)
model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms)
return model
# --------------------------------- PREPROCESS IMAGE ---------------------------------
def preprocess(image):
image = image[np.newaxis, :]
image = image[:, :, :, np.newaxis]
return image
# Initialize a new model using buildmodel() or use load_model to resume training an already trained model
model = buildmodel()
# Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
model._make_predict_function()
graph = tf.get_default_graph()
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output)
a_t[0] = 1 # Index 0 = no flap, 1 = flap # Output of network represents probability of flap
# --------------------------------- INITIALIZE THREADS ---------------------------------
game_state = []
for i in range(0,THREADS):
#game_state.append(game.GameState(30000))
game_state.append(game.PlayGame())
# --------------------------------- RUN PROCESS ---------------------------------
def runprocess(thread_id, s_t):
global T
global a_t
global model
global myCount
global score
global logCnt
global playLog
global actionList
t = 0
t_start = t
terminal = False
r_t = 0
r_store = []
state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
output_store = []
critic_store = []
s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])
while t-t_start < t_max and terminal == False:
t += 1
T += 1
intermediate_output = 0
with graph.as_default():
predictedChoice = model.predict(s_t)[0]
intermediate_output = intermediate_layer_model.predict(s_t)
randomChoice = np.random.rand()
a_t = [0 , 1] if randomChoice < predictedChoice else [1 , 0] # stochastic action
# a_t = [0,1] if 0.5 < y[0] else [1,0] # deterministic action
# x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action)
x_t, r_t, terminal = game_state[thread_id].nextStep(a_t)
x_t = preprocess(x_t)
# LOG GAME STEP
if thread_id == 0:
score = score + r_t
print("score", score)
action = game_state[0].getActionTaken()
actionList.append(action)
if terminal == True:
print("------------------------------------------------------------------------------------")
print("ENDSCORE:", score)
profit = game_state[0].getProfit()
meanAction = np.mean(actionList)
playLog.loc[logCnt] = score, profit, meanAction
logCnt += 1
playLog.to_csv("playLog.csv", index=True)
score = 0
actionList = []
# SPITOUT IMAGE EVERY GAME STEP
if 1==0:
if thread_id == 0:
mat = x_t
mat = mat[0, :, :, 0]
myCount += 1
# SAVE TO CSV
#fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv"
#np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",")
#PLOT
plt.imshow(mat, cmap='hot')
#plt.show()
fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png"
plt.savefig(fileName2)
if terminal == True:
game_state[thread_id].startGame()
with graph.as_default():
critic_reward = model.predict(s_t)[1]
y = 0 if a_t[0] == 1 else 1
r_store = np.append(r_store, r_t)
state_store = np.append(state_store, s_t, axis = 0)
output_store = np.append(output_store, y)
critic_store = np.append(critic_store, critic_reward)
s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = "+ str(intermediate_output))
if terminal == False:
r_store[len(r_store)-1] = critic_store[len(r_store)-1]
else:
r_store[len(r_store)-1] = -1
s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)
for i in range(2,len(r_store)+1):
r_store[len(r_store)-i] = r_store[len(r_store)-i] + GAMMA*r_store[len(r_store)-i + 1]
return s_t, state_store, output_store, r_store, critic_store
# Function to decrease the learning rate linearly after every epoch. With decay = 3.2e-8,
# the rate reaches 0 after LEARNING_RATE / decay = 7e-4 / 3.2e-8 ≈ 21,875 epochs (roughly 20,000)
def step_decay(epoch):
global lrate
decay = 3.2e-8
lrate = LEARNING_RATE - epoch*decay
lrate = max(lrate, 0)
return lrate
class actorthread(threading.Thread):
def __init__(self,thread_id, s_t):
|
def run(self):
global episode_output
global episode_r
global episode_critic
global episode_state
threadLock.acquire()
self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state)
self.next_state = self.next
|
threading.Thread.__init__(self)
self.thread_id = thread_id
self.next_state = s_t
|
identifier_body
|
Train_Trader.py
|
# Discount value
BETA = 0.01 # Regularisation coefficient
IMAGE_ROWS = 84
IMAGE_COLS = 84
IMAGE_CHANNELS = 4
LEARNING_RATE = 7e-4
EPISODE = 0
THREADS = 8
t_max = 5
const = 1e-5
T = 0
episode_r = []
episode_state = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
episode_output = []
episode_critic = []
ACTIONS = 2
a_t = np.zeros(ACTIONS)
lrate = 0
# --------------------------------- LOSS FUNCTION FOR POLICY OUTPUT ---------------------------------
def logloss(y_true, y_pred): # Policy loss
print("logloss:--------------------------------")
return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1)
# BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) # regularisation term
# --------------------------------- LOSS FUNCTION FOR CRITIC OUTPUT ---------------------------------
def sumofsquares(y_true, y_pred): #critic loss
print("sumofsquares:--------------------------------")
return K.sum(K.square(y_pred - y_true), axis=-1)
# --------------------------------- BUILD MODEL ---------------------------------
def buildmodel():
print("Model building begins")
model = Sequential()
keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)
S = Input(shape = (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, ), name = 'Input')
h0 = Convolution2D(16, kernel_size = (8,8), strides = (4,4), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(S)
h1 = Convolution2D(32, kernel_size = (4,4), strides = (2,2), activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform')(h0)
h2 = Flatten()(h1)
h3 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h2)
h4 = Dense(256, activation = 'relu', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h3)
h5 = Dense(512, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h4)
P = Dense(1, name = 'o_P', activation = 'sigmoid', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5)
V = Dense(1, name = 'o_V', kernel_initializer = 'random_uniform', bias_initializer = 'random_uniform') (h5)
model = Model(inputs = S, outputs = [P,V])
rms = RMSprop(lr = LEARNING_RATE, rho = 0.99, epsilon = 0.1)
model.compile(loss = {'o_P': logloss, 'o_V': sumofsquares}, loss_weights = {'o_P': 1., 'o_V' : 0.5}, optimizer = rms)
return model
# --------------------------------- PREPROCESS IMAGE ---------------------------------
def preprocess(image):
image = image[np.newaxis, :]
image = image[:, :, :, np.newaxis]
return image
# Initialize a new model using buildmodel() or use load_model to resume training an already trained model
model = buildmodel()
# Model = load_model("saved_models/model_updates3900", custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})
model._make_predict_function()
graph = tf.get_default_graph()
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('o_P').output)
a_t[0] = 1 # Index 0 = no flap, 1 = flap # Output of network represents probability of flap
# --------------------------------- INITIALIZE THREADS ---------------------------------
game_state = []
for i in range(0,THREADS):
#game_state.append(game.GameState(30000))
game_state.append(game.PlayGame())
# --------------------------------- RUN PROCESS ---------------------------------
def runprocess(thread_id, s_t):
global T
global a_t
global model
global myCount
global score
global logCnt
global playLog
global actionList
t = 0
t_start = t
terminal = False
r_t = 0
r_store = []
state_store = np.zeros((0, IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS))
output_store = []
critic_store = []
s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])
while t-t_start < t_max and terminal == False:
t += 1
T += 1
intermediate_output = 0
with graph.as_default():
predictedChoice = model.predict(s_t)[0]
intermediate_output = intermediate_layer_model.predict(s_t)
randomChoice = np.random.rand()
a_t = [0 , 1] if randomChoice < predictedChoice else [1 , 0] # stochastic action
# a_t = [0,1] if 0.5 < y[0] else [1,0] # deterministic action
# x_t (next frame), r_t (0.1 if alive, +1.5 if it passes the pipe, -1 if it dies) and the input is a_t (action)
x_t, r_t, terminal = game_state[thread_id].nextStep(a_t)
x_t = preprocess(x_t)
# LOG GAME STEP
if thread_id == 0:
score = score + r_t
print("score", score)
action = game_state[0].getActionTaken()
actionList.append(action)
if terminal == True:
print("------------------------------------------------------------------------------------")
print("ENDSCORE:", score)
profit = game_state[0].getProfit()
meanAction = np.mean(actionList)
playLog.loc[logCnt] = score, profit, meanAction
logCnt += 1
playLog.to_csv("playLog.csv", index=True)
score = 0
actionList = []
# SPITOUT IMAGE EVERY GAME STEP
if 1==0: # disabled debug block - change to 1==1 to dump game frames below
if thread_id == 0:
mat = x_t
mat = mat[0, :, :, 0]
myCount += 1
# SAVE TO CSV
#fileName = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".csv"
#np.savetxt(fileName, mat, fmt='%2.0f', delimiter=",")
#PLOT
plt.imshow(mat, cmap='hot')
#plt.show()
fileName2 = "C:/Users/Treebeard/PycharmProjects/A3C_Keras_FlappyBird/spitout/img_" + str(myCount) + ".png"
plt.savefig(fileName2)
if terminal == True:
game_state[thread_id].startGame()
with graph.as_default():
critic_reward = model.predict(s_t)[1]
y = 0 if a_t[0] == 1 else 1
r_store = np.append(r_store, r_t)
state_store = np.append(state_store, s_t, axis = 0)
output_store = np.append(output_store, y)
critic_store = np.append(critic_store, critic_reward)
s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)
print("Frame = " + str(T) + ", Updates = " + str(EPISODE) + ", Thread = " + str(thread_id) + ", Output = "+ str(intermediate_output))
if terminal == False:
r_store[len(r_store)-1] = critic_store[len(r_store)-1]
else:
r_store[len(r_store)-1] = -1
s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)
for i in range(2,len(r_store)+1):
r_store[len(r_store)-i] = r_store[len(r_store)-i] + GAMMA*r_store[len(r_store)-i + 1]
return s_t, state_store, output_store, r_store, critic_store
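# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The block above rewrites the stored rewards as n-step discounted returns, bootstrapping
# from the critic's value estimate when the rollout did not end in a terminal state.
# A standalone version of that backward pass (gamma=0.99 is an assumption here; the
# script's GAMMA constant is defined outside this excerpt):
def _discounted_returns_example(rewards, bootstrap_value, terminal, gamma=0.99):
    returns = list(rewards)
    # the last entry becomes either the terminal penalty or the critic's bootstrap value,
    # exactly as r_store[-1] is overwritten above
    returns[-1] = -1 if terminal else bootstrap_value
    for i in range(len(returns) - 2, -1, -1):
        returns[i] = returns[i] + gamma * returns[i + 1]
    return returns
# e.g. _discounted_returns_example([0.1, 0.1, 0.1], 0.5, terminal=False) -> [~0.689, 0.595, 0.5]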
# Function to decrease the learning rate after every epoch. With this decay the learning rate reaches 0 after roughly 20,000 epochs (7e-4 / 3.2e-8 is about 21,875).
def step_decay(epoch):
global lrate
decay = 3.2e-8
lrate = LEARNING_RATE - epoch*decay
lrate = max(lrate, 0)
return lrate
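# --- Illustrative check (added for clarity; not part of the original script) ---
# step_decay() is a linear schedule with the (epoch) -> lr signature that a
# keras.callbacks.LearningRateScheduler callback expects; how the script applies it is
# outside this excerpt. A quick sanity check of the schedule values:
def _step_decay_check():
    # illustrative only - never called by the script
    for epoch in (0, 10000, 21875, 30000):
        print(epoch, step_decay(epoch))  # 7e-4, 3.8e-4, 0.0, 0.0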
class actorthread(threading.Thread):
def
|
(self,thread_id, s_t):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.next_state = s_t
def run(self):
global episode_output
global episode_r
global episode_critic
global episode_state
threadLock.acquire()
self.next_state, state_store, output_store, r_store, critic_store = runprocess(self.thread_id, self.next_state)
self.next_state = self.next
|
__init__
|
identifier_name
|
code.py
|
)
scores = cross_val_score(knn, X_train, d_y_train, cv = 3) #3 folds
cv_score = np.mean(scores)
print('k = {}, accuracy on validation set = {:.3f}'.format(k, cv_score))
cv_scores.append((k, cv_score))
#selecting k that gave the best accuracy on validation set
best_k = max(cv_scores, key = lambda x:x[-1])
print('best_k: {} with validation accuracy of {}'.format(best_k[0], best_k[1]))
#using best parameter k to train KNN on training data
knn_model = KNeighborsClassifier(n_neighbors = best_k[0], n_jobs = -1)
knn_model.fit(X_train, d_y_train)
#evaluate fitted KNN on test data
knn_y_pred = knn_model.predict(X_test)
test_accuracy = accuracy_score(d_y_test, knn_y_pred)
print(test_accuracy)
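# --- Illustrative alternative (added for clarity; not what the script does) ---
# The manual loop above is a one-parameter grid search; with scikit-learn it could also
# be written as below (the candidate k values are an assumption, since the loop header
# is outside this excerpt):
# from sklearn.model_selection import GridSearchCV
# grid = GridSearchCV(KNeighborsClassifier(n_jobs=-1), {'n_neighbors': [1, 3, 5, 7, 9]}, cv=3)
# grid.fit(X_train, d_y_train)
# print(grid.best_params_, grid.best_score_)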
"""## Model 2: CNN
"""
#tune hyperparameters, here we tune batch size, dropout rate and learning rate
settings = []
for batch in [64, 100, 128]:
for drop in [0, 0.5, 0.8]:
for lr in [0.1, 0.01, 0.001]:
print("batch :", batch)
print("drop:", drop)
print('learning rate:', lr)
model = Sequential()
#convolution 1
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
activation='relu'))
#pooling 1
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 2
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 2
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 3
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 4
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(drop))
#fully connected network 1
model.add(Dense(500, activation='relu'))
#fully connected network 2, 26 because 26 different letters in total
model.add(Dense(26, activation='softmax'))
#early stopping to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
adam = Adam(lr = lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
model.fit(x_train,
d_y_train,
batch_size = batch,
epochs = 5,
verbose = 1,
validation_data = (x_val, d_y_val),
shuffle = True,
callbacks = callback_lists)
loss, acc = model.evaluate(x_val, d_y_val)
settings.append((batch, drop, lr, acc))
#print best accuracy
best_accuracy = max(settings, key = lambda x:x[-1])
print(best_accuracy) #lr = 0.001
best_batch, best_drop, best_lr = best_accuracy[:-1]
print(best_batch, best_drop, best_lr)
#using tuned parameters to train model
model = Sequential()
#convolution 1, activation
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
padding='same',
activation='relu'))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same'))
#convolution 2, activation
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3, activation
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4, activation
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(best_drop))
#fully connected network 1
model.add(Dense(500,activation='relu'))
#fully connected network 2
model.add(Dense(26, activation='softmax'))
#early stopping, to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
#optimizer
adam = Adam(lr = best_lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
#training
history = model.fit(x_train,d_y_train,
batch_size = best_batch,
epochs = 12,
validation_data = (x_val, d_y_val),
verbose = 1,
shuffle = True,
callbacks = callback_lists)
#plotting loss and accuracy of training and validation sets
#accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#save our model
model.save('my_model1.h5')
#make predictions of testing sets and see how accurate those predictions are
loss,acc = model.evaluate(x_test, d_y_test)
print(loss,acc)
"""We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2.
# Task 2
"""
# Commented out IPython magic to ensure Python compatibility.
# import libraries used for task 2
# %pylab inline --no-import-all
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import cv2
from keras.utils import plot_model
from skimage.util import random_noise
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from google.colab import drive
# load testing-dataset
test = np.load('test-dataset.npy')
print(test.shape)
# see what images are like before denoise
plt.imshow(test[-1])
plt.show()
# denoise all images and see what they are like now
from scipy import ndimage
import matplotlib.pyplot as plt
testing_filtered = []
for i in range(len(test)):
new_image = ndimage.median_filter(test[i], 2)
testing_filtered.append(ndimage.median_filter(new_image, 3))
plt.imshow(testing_filtered[-1])
plt.show()
#define a function to split the images
def
|
(data):
testing_cropped = []
for i in range(len(data)):
#threshold each image and find contours
img = (data[i]).astype('uint8')
_, threshold =
|
image_crop
|
identifier_name
|
code.py
|
nn_model.fit(X_train, d_y_train)
#evaluate fitted KNN on test data
knn_y_pred = knn_model.predict(X_test)
test_accuracy = accuracy_score(d_y_test, knn_y_pred)
print(test_accuracy)
"""## Model 2: CNN
"""
#tune hyperparameters, here we tune batch size, dropout rate and learning rate
settings = []
for batch in [64, 100, 128]:
for drop in [0, 0.5, 0.8]:
for lr in [0.1, 0.01, 0.001]:
print("batch :", batch)
print("drop:", drop)
print('learning rate:', lr)
model = Sequential()
#convolution 1
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
activation='relu'))
#pooling 1
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 2
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 2
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 3
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 4
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(drop))
#fully connected network 1
model.add(Dense(500, activation='relu'))
#fully connected network 2, 26 because 26 different letters in total
model.add(Dense(26, activation='softmax'))
#early stopping to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
adam = Adam(lr = lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
model.fit(x_train,
d_y_train,
batch_size = batch,
epochs = 5,
verbose = 1,
validation_data = (x_val, d_y_val),
shuffle = True,
callbacks = callback_lists)
loss, acc = model.evaluate(x_val, d_y_val)
settings.append((batch, drop, lr, acc))
#print best accuracy
best_accuracy = max(settings, key = lambda x:x[-1])
print(best_accuracy) #lr = 0.001
best_batch, best_drop, best_lr = best_accuracy[:-1]
print(best_batch, best_drop, best_lr)
#using tuned parameters to train model
model = Sequential()
#convolution 1, activation
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
padding='same',
activation='relu'))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same'))
#convolution 2, activation
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3, activation
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4, activation
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(best_drop))
#fully connected network 1
model.add(Dense(500,activation='relu'))
#fully connected network 2
model.add(Dense(26, activation='softmax'))
#early stopping, to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
#optimizer
adam = Adam(lr = best_lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
#training
history = model.fit(x_train,d_y_train,
batch_size = best_batch,
epochs = 12,
validation_data = (x_val, d_y_val),
verbose = 1,
shuffle = True,
callbacks = callback_lists)
#plotting loss and accuracy of training and validation sets
#accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#save our model
model.save('my_model1.h5')
#make predictions of testing sets and see how accurate those predictions are
loss,acc = model.evaluate(x_test, d_y_test)
print(loss,acc)
"""We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2.
# Task 2
"""
# Commented out IPython magic to ensure Python compatibility.
# import libraries used for task 2
# %pylab inline --no-import-all
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import cv2
from keras.utils import plot_model
from skimage.util import random_noise
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from google.colab import drive
# load testing-dataset
test = np.load('test-dataset.npy')
print(test.shape)
# see what images are like before denoise
plt.imshow(test[-1])
plt.show()
# denoise all images and see what they are like now
from scipy import ndimage
import matplotlib.pyplot as plt
testing_filtered = []
for i in range(len(test)):
new_image = ndimage.median_filter(test[i], 2)
testing_filtered.append(ndimage.median_filter(new_image, 3))
plt.imshow(testing_filtered[-1])
plt.show()
#define a function to split the images
def image_crop(data):
|
testing_cropped = []
for i in range(len(data)):
#threshold each image and find contours
img = (data[i]).astype('uint8')
_, threshold = cv2.threshold(img.copy(), 10, 255, 0)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
bboxes = []
#creating bounding boxes from contours
for f in range(len(contours)):
bboxes.append(cv2.boundingRect(contours[f]))
split = []
to_remove = []
#threshold to remove small w and h bboxes, and split those with w >= 30
for j in range(len(bboxes)):
if (bboxes[j][2] < 20) and (bboxes[j][3] < 17):
|
identifier_body
|
|
code.py
|
_train)
#evaluate fitted KNN on test data
knn_y_pred = knn_model.predict(X_test)
test_accuracy = accuracy_score(d_y_test, knn_y_pred)
print(test_accuracy)
"""## Model 2: CNN
"""
#tune hyperparameters, here we tune batch size, dropout rate and learning rate
settings = []
for batch in [64, 100, 128]:
for drop in [0, 0.5, 0.8]:
for lr in [0.1, 0.01, 0.001]:
print("batch :", batch)
print("drop:", drop)
print('learning rate:', lr)
model = Sequential()
#convolution 1
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
activation='relu'))
#pooling 1
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 2
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 2
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 3
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 4
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(drop))
#fully connected network 1
model.add(Dense(500, activation='relu'))
#fully connected network 2, 26 because 26 different letters in total
model.add(Dense(26, activation='softmax'))
#early stopping to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
adam = Adam(lr = lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
model.fit(x_train,
d_y_train,
batch_size = batch,
epochs = 5,
verbose = 1,
validation_data = (x_val, d_y_val),
shuffle = True,
callbacks = callback_lists)
loss, acc = model.evaluate(x_val, d_y_val)
settings.append((batch, drop, lr, acc))
#print best accuracy
best_accuracy = max(settings, key = lambda x:x[-1])
print(best_accuracy) #lr = 0.001
best_batch, best_drop, best_lr = best_accuracy[:-1]
print(best_batch, best_drop, best_lr)
#using tuned parameters to train model
model = Sequential()
#convolution 1, activation
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
padding='same',
activation='relu'))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same'))
#convolution 2, activation
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3, activation
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4, activation
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(best_drop))
#fully connected network 1
model.add(Dense(500,activation='relu'))
#fully connected network 2
model.add(Dense(26, activation='softmax'))
#early stopping, to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
#optimizer
adam = Adam(lr = best_lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
#training
history = model.fit(x_train,d_y_train,
batch_size = best_batch,
epochs = 12,
validation_data = (x_val, d_y_val),
verbose = 1,
shuffle = True,
callbacks = callback_lists)
#plotting loss and accuracy of training and validation sets
#accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#save our model
model.save('my_model1.h5')
#make predictions of testing sets and see how accurate those predictions are
loss,acc = model.evaluate(x_test, d_y_test)
print(loss,acc)
"""We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2.
# Task 2
"""
# Commented out IPython magic to ensure Python compatibility.
# import libraries used for task 2
# %pylab inline --no-import-all
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import cv2
from keras.utils import plot_model
from skimage.util import random_noise
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from google.colab import drive
# load testing-dataset
test = np.load('test-dataset.npy')
print(test.shape)
# see what images are like before denoise
plt.imshow(test[-1])
plt.show()
# denoise all images and see what they are like now
from scipy import ndimage
import matplotlib.pyplot as plt
testing_filtered = []
for i in range(len(test)):
new_image = ndimage.median_filter(test[i], 2)
testing_filtered.append(ndimage.median_filter(new_image, 3))
plt.imshow(testing_filtered[-1])
plt.show()
#define a function to split the images
def image_crop(data):
testing_cropped = []
for i in range(len(data)):
#threshold each image and find contours
img = (data[i]).astype('uint8')
_, threshold = cv2.threshold(img.copy(), 10, 255, 0)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
bboxes = []
#creating bounding boxes from contours
for f in range(len(contours)):
bboxes.append(cv2.boundingRect(contours[f]))
split = []
to_remove = []
#threshold to remove small w and h bboxes, and split those with w >= 30
for j in range(len(bboxes)):
if (bboxes[j][2] < 20) and (bboxes[j][3] < 17):
|
to_remove.append(bboxes[j])
|
conditional_block
|
|
code.py
|
'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
adam = Adam(lr = lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
model.fit(x_train,
d_y_train,
batch_size = batch,
epochs = 5,
verbose = 1,
validation_data = (x_val, d_y_val),
shuffle = True,
callbacks = callback_lists)
loss, acc = model.evaluate(x_val, d_y_val)
settings.append((batch, drop, lr, acc))
#print best accuracy
best_accuracy = max(settings, key = lambda x:x[-1])
print(best_accuracy) #lr = 0.001
best_batch, best_drop, best_lr = best_accuracy[:-1]
print(best_batch, best_drop, best_lr)
#using tuned parameters to train model
model = Sequential()
#convolution 1, activation
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
padding='same',
activation='relu'))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same'))
#convolution 2, activation
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3, activation
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4, activation
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(best_drop))
#fully connected network 1
model.add(Dense(500,activation='relu'))
#fully connected network 2
model.add(Dense(26, activation='softmax'))
#early stopping, to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
#optimizer
adam = Adam(lr = best_lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
#training
history = model.fit(x_train,d_y_train,
batch_size = best_batch,
epochs = 12,
validation_data = (x_val, d_y_val),
verbose = 1,
shuffle = True,
callbacks = callback_lists)
#plotting loss and accuracy of training and validation sets
#accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#save our model
model.save('my_model1.h5')
#make predictions of testing sets and see how accurate those predictions are
loss,acc = model.evaluate(x_test, d_y_test)
print(loss,acc)
"""We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2.
# Task 2
"""
# Commented out IPython magic to ensure Python compatibility.
# import libraries used for task 2
# %pylab inline --no-import-all
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import cv2
from keras.utils import plot_model
from skimage.util import random_noise
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from google.colab import drive
# load testing-dataset
test = np.load('test-dataset.npy')
print(test.shape)
# see what images are like before denoise
plt.imshow(test[-1])
plt.show()
# denoise all images and see what they are like now
from scipy import ndimage
import matplotlib.pyplot as plt
testing_filtered = []
for i in range(len(test)):
new_image = ndimage.median_filter(test[i], 2)
testing_filtered.append(ndimage.median_filter(new_image, 3))
plt.imshow(testing_filtered[-1])
plt.show()
#define a function to split the images
def image_crop(data):
testing_cropped = []
for i in range(len(data)):
#threshold each image and find contours
img = (data[i]).astype('uint8')
_, threshold = cv2.threshold(img.copy(), 10, 255, 0)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
bboxes = []
#creating bounding boxes from contours
for f in range(len(contours)):
bboxes.append(cv2.boundingRect(contours[f]))
split = []
to_remove = []
#threshold to remove small w and h bboxes, and split those with w >= 30
for j in range(len(bboxes)):
if (bboxes[j][2] < 20) and (bboxes[j][3] < 17):
to_remove.append(bboxes[j])
if bboxes[j][2] >= 30:
split.append(j)
#modifying bboxes to get half w and move x to (x + w/2)
for g in split:
bboxes[g] = (bboxes[g][0], bboxes[g][1], int(bboxes[g][2]/2), bboxes[g][3])
modified_bboxes = bboxes[g]
modified_bboxes = (int(bboxes[g][0]) + int(bboxes[g][2]), int(bboxes[g][1]),
int(bboxes[g][2]), int(bboxes[g][3]))
bboxes.append(modified_bboxes)
#removing bboxes with small w and h
for b in to_remove:
bboxes.remove(b)
#sorting bboxes
bboxes = sorted(np.array(bboxes), key = lambda x: x[0])
cut = []
for h in range(len(bboxes)):
images = img[bboxes[h][1]:bboxes[h][1]+bboxes[h][3],
bboxes[h][0]:bboxes[h][0]+bboxes[h][2]]
if images[0].shape > np.max(3):
cut.append(images)
cropped = []
#reshaping the cut images to be able to use CNN
for image_split in cut:
crop = image_split.reshape((image_split.shape[0],image_split.shape[1],1))
crop = np.array(tf.image.resize_with_crop_or_pad(crop, 28, 28))
img_cropped = crop.reshape(28,28)
cropped.append(img_cropped)
testing_cropped.append(cropped)
return np.array(testing_cropped)
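# --- Illustrative note (added for clarity; not part of the original script) ---
# The splitting step above halves any bounding box that is at least 30 px wide: a box
# (x=10, y=2, w=40, h=20) becomes (10, 2, 20, 20) plus a second box shifted right by the
# new width, (30, 2, 20, 20), so two touching letters end up as two separate 28x28 crops.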
testing_cropped = image_crop(testing_filtered)
print(len(testing_cropped)) #10000 images
# let's see an example letter from testing_cropped dataset
plt.imshow(testing_cropped[420][0])
plt.show()
#most of images are separated into 4 letters, but still many are into 3 or 5 letters
l=[]
for i in range(len(testing_cropped)):
l.append(len(testing_cropped[i]))
plt.hist(l)
plt.show()
#make 5 predictions which have highest probability scores by using our CNN model
block_size = 55
predictions = []
top1 = []
top2 = []
top3 = []
top4 = []
top5 = []
final = []
for i in range(10000):
crops_number = (len(testing_cropped[i]))
|
for sample in testing_cropped[i]:
imbw = sample > threshold_local(sample, block_size, method = 'mean')
|
random_line_split
|
|
longestCommonSubsequence.py
|
lines to be transcribed
|
imageIndex = 1
lineIndex = 4
#which input files do you want to read in - they need to be of the form transcribe$i.txt
#right now this can only handle 3 or so files - the multi-string LCS problem is NP-hard. Future work might be to make this
#code more scalable - probably with a branch and bound approach
inputFiles = [1,2,3,4,5]
def load_file(fname):
currentImage = 0
currentLine = -1
individual_transcriptions = {}
with open(fname,"rb") as f:
for l in f.readlines():
if l == "\n":
currentImage += 1
currentLine = -1
continue
currentLine += 1
individual_transcriptions[(currentImage,currentLine)] = l[:-1]
return individual_transcriptions
def lcs(lines):
#find the length of each string
stringLength = [len(l)+1 for l in lines]
#record the length of the longest common subsequence - assumes a unique LCS
#(there may be more than one LCS; this has come up in practice, but the strings are
#usually similar enough that it doesn't matter which one is chosen)
dynamicMatrix = np.zeros(stringLength)
#keep track of the time needed to do the calculation - mainly just because it is NP-hard
#want to know how big the input is you can handle
time1 = datetime.datetime.now()
#the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence
traceMatrices = [np.zeros(stringLength) for l in lines]
#transcribed = lines[(imageIndex,lineIndex)]
#need to wrap so that numpy is happy
#index for iterating over all tuples of characters - one from each string
currentIndex = [[1,] for w in lines]
#dynamic programming approach - basically just filling in a matrix as we go
while True:
characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))]
#if we have a match across all strings
if min(characters) == max(characters):
#diagonal move
newIndex = [[i[0]-1,] for i in currentIndex]
#the longest common previous subsequence is a diagonal move backwards
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1
else:
#either an up or sideways move
#find which is the maximum - assume unique
maxLength = -1
axis = None
#find out where the previous LCS is - either up or sideways
for j in range(len(currentIndex)):
#move backwards along this axis
newIndex = deepcopy(currentIndex)
newIndex[j][0] += -1
if dynamicMatrix[newIndex][0] > maxLength:
maxLength = dynamicMatrix[newIndex][0]
axis = j
newIndex = deepcopy(currentIndex)
newIndex[axis][0] += -1
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex]
#iterate to the next tuple of characters
for j in range(0,len(currentIndex)):
currentIndex[j][0] += 1
if currentIndex[j][0] == (len(lines[j])+1):
currentIndex[j][0] = 1
else:
break
if currentIndex == [[1,] for l in lines]:
break
#check to see if the last tuple of characters is a match
lastCharacter = [t[-1] for t in lines]
s = [[] for t in lines]
lcs_length = 0
if min(lastCharacter) == max(lastCharacter):
lcs_length += 1
for i,w in enumerate(lines):
s[i].append(len(w)-1)
#read out the LCS by travelling backwards (up, left or diagonal) through the matrix
endPoint = [[-1,] for j in lines]
cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))]
while cell != [[0,] for j in range(len(lines))]:
newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))]
#if we have a diagonal move - this corresponds to a point in the LCS
allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)])
if allChange:
lcs_length += 1
for j in range(len(lines)):
s[j].append(newcell[j][0])
cell = newcell
#print out how long this took
time2 = datetime.datetime.now()
# use the first string to actually create the LCS
lcs_string = ""
for i,c in enumerate(lines[0]):
if i in s[0]:
lcs_string += c
# print time2-time1
#
#print out the LCS in green, all other characters in red
results = [[] for l in lines]
at_lcs = [None for l in lines]
agreement = []
s = [sorted(s_temp) for s_temp in s]
LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))]
LCSsequences = [[LCStuples[0]]]
for i in range(1,len(s[0])):
max_character_jump = max([(s[j][i] - s[j][i-1]) for j in range(len(lines))])
if max_character_jump > 1:
LCSsequences.append([])
LCSsequences[-1].append(LCStuples[i])
segments = {}
lcs_string = ""
for j in range(len(lines)):
currentIndex = 0
results = []
for sequenceIndex,nextSequence in enumerate(LCSsequences):
firstLCSChacter = nextSequence[0][j]
lastLCSCharacter = nextSequence[-1][j]
l = lines[j][currentIndex:firstLCSChacter]
if l != "":
if not (2*sequenceIndex) in segments:
segments[2*sequenceIndex] = [l]
else:
segments[2*sequenceIndex].append(l)
# now extract the LCS - we only need to do this once, since every string agrees on it
if j == 0:
l = lines[0][firstLCSChacter:lastLCSCharacter+1]
segments[2*sequenceIndex+1] = l[:]
lcs_string += l
currentIndex = lastLCSCharacter + 1
l = lines[j][currentIndex:]
if l != "":
if not (2*(sequenceIndex+1)) in segments:
segments[2*(sequenceIndex+1)] = [l]
else:
segments[2*(sequenceIndex+1)].append(l)
# results.append((l,-sequenceIndex-2))
# segments.add(-sequenceIndex-2)
return lcs_string,segments
from termcolor import colored
for j,tran in enumerate(lines):
print s[j]
for i,c in enumerate(tran):
if i in s[j]:
now_at_csl = True
#print colored(c,'green'),
else:
now_at_csl = False
#print colored(c,'red'),
if now_at_csl != at_lcs[j]:
results[j].append("")
if j == 0:
agreement.append(now_at_csl)
at_lcs[j] = now_at_csl
results[j][-1] += c
#print
return lcs_string,agreement,results
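# --- Illustrative sketch (added for clarity; not part of the original script) ---
# lcs() above generalises the textbook two-string dynamic programme to an arbitrary
# number of strings, which is where the NP-hard blow-up comes from. For reference, the
# two-string version of the same diagonal/up/sideways recurrence:
def _lcs2_example(a, b):
    # L[i][j] = length of the LCS of a[:i] and b[:j]
    L = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                L[i][j] = L[i - 1][j - 1] + 1            # diagonal move = matching character
            else:
                L[i][j] = max(L[i - 1][j], L[i][j - 1])  # up or sideways move
    # trace back through the table to read out the subsequence itself
    out, i, j = [], len(a), len(b)
    while i > 0 and j > 0:
        if a[i - 1] == b[j - 1]:
            out.append(a[i - 1])
            i -= 1
            j -= 1
        elif L[i - 1][j] >= L[i][j - 1]:
            i -= 1
        else:
            j -= 1
    return "".join(reversed(out))
# e.g. _lcs2_example("transcribe", "transcript") == "transcri"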
transcriptions = {}
#read in the files - right now just set up to work on Greg's computer
for i in inputFiles:
fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt"
individual_transcriptions = load_file(fname)
for key,line in individual_transcriptions.items():
if not(key in transcriptions):
transcriptions[key] = [line]
else:
transcriptions[key].append(line)
gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt"
gold_transcriptions = load_file(gold_fname)
X = []
Y = []
Xstd = []
Ystd = []
for imageIndex in range(3):
for lineIndex in range(5):
a = []
c = []
for l in transcriptions[(imageIndex,lineIndex)]:
lcs_string,segments= lcs([gold_transcriptions[(imageIndex,lineIndex)],l])
accuracy = len(lcs_string)/float(len(l))
completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)]))
a.append(accuracy)
c.append(completeness)
X.append(np.mean(a))
Y.append(np.mean(c))
Xstd.append(np.std(a,ddof=1))
Ystd.append(np.std(c,ddof=1))
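# --- Illustrative worked example (added for clarity; not part of the original script) ---
# accuracy     = |LCS| / |volunteer transcription|  (how much of what was typed is correct)
# completeness = |LCS| / |gold transcription|       (how much of the gold line was recovered)
# e.g. gold = "hello", transcription = "helo": the LCS is "helo", so
# accuracy = 4/4 = 1.0 and completeness = 4/5 = 0.8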
print X
print np.mean(X)
print np.mean(Y)
plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".")
plt.xlabel("Accuracy (w/ standard dev.)")
plt.ylabel("Completeness (w/ standard dev.)")
plt.xlim((0,
|
#give the index of the image and the line
|
random_line_split
|
longestCommonSubsequence.py
|
to be transcribed
#give the index of the image and the line
imageIndex = 1
lineIndex = 4
#which input files do you want to read in - they need to be of the form transcribe$i.txt
#right now this can only handle 3 or so files - the multi-string LCS problem is NP-hard. Future work might be to make this
#code more scalable - probably with a branch and bound approach
inputFiles = [1,2,3,4,5]
def load_file(fname):
currentImage = 0
currentLine = -1
individual_transcriptions = {}
with open(fname,"rb") as f:
for l in f.readlines():
if l == "\n":
currentImage += 1
currentLine = -1
continue
currentLine += 1
individual_transcriptions[(currentImage,currentLine)] = l[:-1]
return individual_transcriptions
def
|
(lines):
#find the length of each string
stringLength = [len(l)+1 for l in lines]
#record the length of the longest common subsequence - assumes a unique LCS
#(there may be more than one LCS; this has come up in practice, but the strings are
#usually similar enough that it doesn't matter which one is chosen)
dynamicMatrix = np.zeros(stringLength)
#keep track of the time needed to do the calculation - mainly just because it is NP-hard
#want to know how big the input is you can handle
time1 = datetime.datetime.now()
#the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence
traceMatrices = [np.zeros(stringLength) for l in lines]
#transcribed = lines[(imageIndex,lineIndex)]
#need to wrap so that numpy is happy
#index for iterating over all tuples of characters - one from each string
currentIndex = [[1,] for w in lines]
#dynamic programming approach - basically just filling in a matrix as we go
while True:
characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))]
#if we have a match across all strings
if min(characters) == max(characters):
#diagonal move
newIndex = [[i[0]-1,] for i in currentIndex]
#the longest common previous subsequence is a diagonal move backwards
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1
else:
#either an up or sideways move
#find which is the maximum - assume unique
maxLength = -1
axis = None
#find out where the previous LCS is - either up or sideways
for j in range(len(currentIndex)):
#move backwards along this axis
newIndex = deepcopy(currentIndex)
newIndex[j][0] += -1
if dynamicMatrix[newIndex][0] > maxLength:
maxLength = dynamicMatrix[newIndex][0]
axis = j
newIndex = deepcopy(currentIndex)
newIndex[axis][0] += -1
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex]
#iterate to the next tuple of characters
for j in range(0,len(currentIndex)):
currentIndex[j][0] += 1
if currentIndex[j][0] == (len(lines[j])+1):
currentIndex[j][0] = 1
else:
break
if currentIndex == [[1,] for l in lines]:
break
#check to see if the last tuple of characters is a match
lastCharacter = [t[-1] for t in lines]
s = [[] for t in lines]
lcs_length = 0
if min(lastCharacter) == max(lastCharacter):
lcs_length += 1
for i,w in enumerate(lines):
s[i].append(len(w)-1)
#read out the LCS by travelling backwards (up, left or diagonal) through the matrix
endPoint = [[-1,] for j in lines]
cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))]
while cell != [[0,] for j in range(len(lines))]:
newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))]
#if we have a diagonal move - this corresponds to a point in the LCS
allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)])
if allChange:
lcs_length += 1
for j in range(len(lines)):
s[j].append(newcell[j][0])
cell = newcell
#print out how long this took
time2 = datetime.datetime.now()
# use the first string to actually create the LCS
lcs_string = ""
for i,c in enumerate(lines[0]):
if i in s[0]:
lcs_string += c
# print time2-time1
#
#print out the LCS in green, all other characters in red
results = [[] for l in lines]
at_lcs = [None for l in lines]
agreement = []
s = [sorted(s_temp) for s_temp in s]
LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))]
LCSsequences = [[LCStuples[0]]]
for i in range(1,len(s[0])):
max_character_jump = max([(s[j][i] - s[j][i-1]) for j in range(len(lines))])
if max_character_jump > 1:
LCSsequences.append([])
LCSsequences[-1].append(LCStuples[i])
segments = {}
lcs_string = ""
for j in range(len(lines)):
currentIndex = 0
results = []
for sequenceIndex,nextSequence in enumerate(LCSsequences):
firstLCSChacter = nextSequence[0][j]
lastLCSCharacter = nextSequence[-1][j]
l = lines[j][currentIndex:firstLCSChacter]
if l != "":
if not (2*sequenceIndex) in segments:
segments[2*sequenceIndex] = [l]
else:
segments[2*sequenceIndex].append(l)
# now extract the LCS - we only need to do this once, since every string agrees on it
if j == 0:
l = lines[0][firstLCSChacter:lastLCSCharacter+1]
segments[2*sequenceIndex+1] = l[:]
lcs_string += l
currentIndex = lastLCSCharacter + 1
l = lines[j][currentIndex:]
if l != "":
if not (2*(sequenceIndex+1)) in segments:
segments[2*(sequenceIndex+1)] = [l]
else:
segments[2*(sequenceIndex+1)].append(l)
# results.append((l,-sequenceIndex-2))
# segments.add(-sequenceIndex-2)
return lcs_string,segments
from termcolor import colored
for j,tran in enumerate(lines):
print s[j]
for i,c in enumerate(tran):
if i in s[j]:
now_at_csl = True
#print colored(c,'green'),
else:
now_at_csl = False
#print colored(c,'red'),
if now_at_csl != at_lcs[j]:
results[j].append("")
if j == 0:
agreement.append(now_at_csl)
at_lcs[j] = now_at_csl
results[j][-1] += c
#print
return lcs_string,agreement,results
transcriptions = {}
#read in the files - right now just set up to work on Greg's computer
for i in inputFiles:
fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt"
individual_transcriptions = load_file(fname)
for key,line in individual_transcriptions.items():
if not(key in transcriptions):
transcriptions[key] = [line]
else:
transcriptions[key].append(line)
gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt"
gold_transcriptions = load_file(gold_fname)
X = []
Y = []
Xstd = []
Ystd = []
for imageIndex in range(3):
for lineIndex in range(5):
a = []
c = []
for l in transcriptions[(imageIndex,lineIndex)]:
lcs_string,segments= lcs([gold_transcriptions[(imageIndex,lineIndex)],l])
accuracy = len(lcs_string)/float(len(l))
completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)]))
a.append(accuracy)
c.append(completeness)
X.append(np.mean(a))
Y.append(np.mean(c))
Xstd.append(np.std(a,ddof=1))
Ystd.append(np.std(c,ddof=1))
print X
print np.mean(X)
print np.mean(Y)
plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".")
plt.xlabel("Accuracy (w/ standard dev.)")
plt.ylabel("Completeness (w/ standard dev.)")
plt.xlim((
|
lcs
|
identifier_name
|
longestCommonSubsequence.py
|
to be transcribed
#give the index of the image and the line
imageIndex = 1
lineIndex = 4
#which input files do you want to read in - they need to be of the form transcribe$i.txt
#right now this can only handle 3 or so files - the multi-string LCS problem is NP-hard. Future work might be to make this
#code more scalable - probably with a branch and bound approach
inputFiles = [1,2,3,4,5]
def load_file(fname):
|
def lcs(lines):
#find the length of each string
stringLength = [len(l)+1 for l in lines]
#record the length of the longest common subsequence - assumes a unique LCS
#(there may be more than one LCS; this has come up in practice, but the strings are
#usually similar enough that it doesn't matter which one is chosen)
dynamicMatrix = np.zeros(stringLength)
#keep track of the time needed to do the calculation - mainly just because it is NP-hard
#want to know how big the input is you can handle
time1 = datetime.datetime.now()
#the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence
traceMatrices = [np.zeros(stringLength) for l in lines]
#transcribed = lines[(imageIndex,lineIndex)]
#need to wrap so that numpy is happy
#index for iterating over all tuples of characters - one from each string
currentIndex = [[1,] for w in lines]
#dynamic programming approach - basically just filling in a matrix as we go
while True:
characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))]
#if we have a match across all strings
if min(characters) == max(characters):
#diagonal move
newIndex = [[i[0]-1,] for i in currentIndex]
#the longest common previous subsequence is a diagonal move backwards
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1
else:
#either an up or sideways move
#find which is the maximum - assume unique
maxLength = -1
axis = None
#find out where the previous LCS is - either up or sideways
for j in range(len(currentIndex)):
#move backwards along this axis
newIndex = deepcopy(currentIndex)
newIndex[j][0] += -1
if dynamicMatrix[newIndex][0] > maxLength:
maxLength = dynamicMatrix[newIndex][0]
axis = j
newIndex = deepcopy(currentIndex)
newIndex[axis][0] += -1
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex]
#iterate to the next tuple of characters
for j in range(0,len(currentIndex)):
currentIndex[j][0] += 1
if currentIndex[j][0] == (len(lines[j])+1):
currentIndex[j][0] = 1
else:
break
if currentIndex == [[1,] for l in lines]:
break
#check to see if the last tuple of characters is a match
lastCharacter = [t[-1] for t in lines]
s = [[] for t in lines]
lcs_length = 0
if min(lastCharacter) == max(lastCharacter):
lcs_length += 1
for i,w in enumerate(lines):
s[i].append(len(w)-1)
#read out the LCS by travelling backwards (up, left or diagonal) through the matrix
endPoint = [[-1,] for j in lines]
cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))]
while cell != [[0,] for j in range(len(lines))]:
newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))]
#if we have a diagonal move - this corresponds to a point in the LCS
allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)])
if allChange:
lcs_length += 1
for j in range(len(lines)):
s[j].append(newcell[j][0])
cell = newcell
#print out how long this took
time2 = datetime.datetime.now()
# use the first string to actually create the LCS
lcs_string = ""
for i,c in enumerate(lines[0]):
if i in s[0]:
lcs_string += c
# print time2-time1
#
#print out the LCS in green, all other characters in red
results = [[] for l in lines]
at_lcs = [None for l in lines]
agreement = []
s = [sorted(s_temp) for s_temp in s]
LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))]
LCSsequences = [[LCStuples[0]]]
for i in range(1,len(s[0])):
max_character_jump = max([(s[j][i] - s[j][i-1]) for j in range(len(lines))])
if max_character_jump > 1:
LCSsequences.append([])
LCSsequences[-1].append(LCStuples[i])
segments = {}
lcs_string = ""
for j in range(len(lines)):
currentIndex = 0
results = []
for sequenceIndex,nextSequence in enumerate(LCSsequences):
firstLCSChacter = nextSequence[0][j]
lastLCSCharacter = nextSequence[-1][j]
l = lines[j][currentIndex:firstLCSChacter]
if l != "":
if not (2*sequenceIndex) in segments:
segments[2*sequenceIndex] = [l]
else:
segments[2*sequenceIndex].append(l)
# now extract the LCS - we only need to do this once, since every string agrees on it
if j == 0:
l = lines[0][firstLCSChacter:lastLCSCharacter+1]
segments[2*sequenceIndex+1] = l[:]
lcs_string += l
currentIndex = lastLCSCharacter + 1
l = lines[j][currentIndex:]
if l != "":
if not (2*(sequenceIndex+1)) in segments:
segments[2*(sequenceIndex+1)] = [l]
else:
segments[2*(sequenceIndex+1)].append(l)
# results.append((l,-sequenceIndex-2))
# segments.add(-sequenceIndex-2)
return lcs_string,segments
from termcolor import colored
for j,tran in enumerate(lines):
print s[j]
for i,c in enumerate(tran):
if i in s[j]:
now_at_csl = True
#print colored(c,'green'),
else:
now_at_csl = False
#print colored(c,'red'),
if now_at_csl != at_lcs[j]:
results[j].append("")
if j == 0:
agreement.append(now_at_csl)
at_lcs[j] = now_at_csl
results[j][-1] += c
#print
return lcs_string,agreement,results
transcriptions = {}
#read in the files - right now just set up to work on Greg's computer
for i in inputFiles:
fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt"
individual_transcriptions = load_file(fname)
for key,line in individual_transcriptions.items():
if not(key in transcriptions):
transcriptions[key] = [line]
else:
transcriptions[key].append(line)
gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt"
gold_transcriptions = load_file(gold_fname)
X = []
Y = []
Xstd = []
Ystd = []
for imageIndex in range(3):
for lineIndex in range(5):
a = []
c = []
for l in transcriptions[(imageIndex,lineIndex)]:
lcs_string,segments= lcs([gold_transcriptions[(imageIndex,lineIndex)],l])
accuracy = len(lcs_string)/float(len(l))
completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)]))
a.append(accuracy)
c.append(completeness)
X.append(np.mean(a))
Y.append(np.mean(c))
Xstd.append(np.std(a,ddof=1))
Ystd.append(np.std(c,ddof=1))
print X
print np.mean(X)
print np.mean(Y)
plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".")
plt.xlabel("Accuracy (w/ standard dev.)")
plt.ylabel("Completeness (w/ standard dev.)")
plt.xlim((0
|
currentImage = 0
currentLine = -1
individual_transcriptions = {}
with open(fname,"rb") as f:
for l in f.readlines():
if l == "\n":
currentImage += 1
currentLine = -1
continue
currentLine += 1
individual_transcriptions[(currentImage,currentLine)] = l[:-1]
return individual_transcriptions
|
identifier_body
|
longestCommonSubsequence.py
|
to be transcribed
#give the index of the image and the line
imageIndex = 1
lineIndex = 4
#which input files do you want to read in - they need to be of the form transcribe$i.txt
#right now this can only handle 3 or so files - the multi-string LCS problem is NP-hard. Future work might be to make this
#code more scalable - probably with a branch and bound approach
inputFiles = [1,2,3,4,5]
def load_file(fname):
currentImage = 0
currentLine = -1
individual_transcriptions = {}
with open(fname,"rb") as f:
for l in f.readlines():
if l == "\n":
currentImage += 1
currentLine = -1
continue
currentLine += 1
individual_transcriptions[(currentImage,currentLine)] = l[:-1]
return individual_transcriptions
def lcs(lines):
#find the length of each string
stringLength = [len(l)+1 for l in lines]
#record the length of the longest common subsequence - assumes a unique LCS
#(there may be more than one LCS; this has come up in practice, but the strings are
#usually similar enough that it doesn't matter which one is chosen)
dynamicMatrix = np.zeros(stringLength)
#keep track of the time needed to do the calculation - mainly just because it is NP-hard
#want to know how big the input is you can handle
time1 = datetime.datetime.now()
#the following is the dynamic programming approach as shown on the Wikipedia page for longest common subsequence
traceMatrices = [np.zeros(stringLength) for l in lines]
#transcribed = lines[(imageIndex,lineIndex)]
#need to wrap so that numpy is happy
#index for iterating over all tuples of characters - one from each string
currentIndex = [[1,] for w in lines]
#dynamic programming approach - basically just filling in a matrix as we go
while True:
characters = [lines[j][currentIndex[j][0]-1] for j in range(len(lines))]
#if we have a match across all strings
if min(characters) == max(characters):
#diagonal move
newIndex = [[i[0]-1,] for i in currentIndex]
#the longest common previous subsequence is a diagonal move backwards
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex] + 1
else:
#either an up or sideways move
#find which is the maximum - assume unique
maxLength = -1
axis = None
#find out where the previous LCS is - either up or sideways
for j in range(len(currentIndex)):
#move backwards along this axis
newIndex = deepcopy(currentIndex)
newIndex[j][0] += -1
if dynamicMatrix[newIndex][0] > maxLength:
maxLength = dynamicMatrix[newIndex][0]
axis = j
newIndex = deepcopy(currentIndex)
newIndex[axis][0] += -1
for j in range(len(newIndex)):
traceMatrices[j][currentIndex] = newIndex[j]
dynamicMatrix[currentIndex] = dynamicMatrix[newIndex]
#iterate to the next tuple of characters
for j in range(0,len(currentIndex)):
currentIndex[j][0] += 1
if currentIndex[j][0] == (len(lines[j])+1):
currentIndex[j][0] = 1
else:
|
if currentIndex == [[1,] for l in lines]:
break
#check to see if the last tuple of characters is a match
lastCharacter = [t[-1] for t in lines]
s = [[] for t in lines]
lcs_length = 0
if min(lastCharacter) == max(lastCharacter):
lcs_length += 1
for i,w in enumerate(lines):
s[i].append(len(w)-1)
#read out the LCS by travelling backwards (up, left or diagonal) through the matrix
endPoint = [[-1,] for j in lines]
cell = [[int(traceMatrices[j][endPoint]),] for j in range(len(lines))]
while cell != [[0,] for j in range(len(lines))]:
newcell = [[int(traceMatrices[j][cell][0]),] for j in range(len(lines))]
#if we have a diagonal move - this corresponds to a point in the LCS
allChange = not(False in [a!=b for (a,b) in zip(cell,newcell)])
if allChange:
lcs_length += 1
for j in range(len(lines)):
s[j].append(newcell[j][0])
cell = newcell
#print out how long this took
time2 = datetime.datetime.now()
# use the first string to actually create the LCS
lcs_string = ""
for i,c in enumerate(lines[0]):
if i in s[0]:
lcs_string += c
# print time2-time1
#
#print out the LCS in green, all other characters in red
results = [[] for l in lines]
at_lcs = [None for l in lines]
agreement = []
s = [sorted(s_temp) for s_temp in s]
LCStuples = [[s[j][i] for j in range(len(lines))] for i in range(len(s[0]))]
LCSsequences = [[LCStuples[0]]]
for i in range(1,len(s[0])):
max_character_jump = max([(s[j][i] - s[j][i-1]) for j in range(len(lines))])
if max_character_jump > 1:
LCSsequences.append([])
LCSsequences[-1].append(LCStuples[i])
segments = {}
lcs_string = ""
for j in range(len(lines)):
currentIndex = 0
results = []
for sequenceIndex,nextSequence in enumerate(LCSsequences):
firstLCSChacter = nextSequence[0][j]
lastLCSCharacter = nextSequence[-1][j]
l = lines[j][currentIndex:firstLCSChacter]
if l != "":
if not (2*sequenceIndex) in segments:
segments[2*sequenceIndex] = [l]
else:
segments[2*sequenceIndex].append(l)
# now extract the LCS - we only need to do this once, since every string agrees on it
if j == 0:
l = lines[0][firstLCSChacter:lastLCSCharacter+1]
segments[2*sequenceIndex+1] = l[:]
lcs_string += l
currentIndex = lastLCSCharacter + 1
l = lines[j][currentIndex:]
if l != "":
if not (2*(sequenceIndex+1)) in segments:
segments[2*(sequenceIndex+1)] = [l]
else:
segments[2*(sequenceIndex+1)].append(l)
# results.append((l,-sequenceIndex-2))
# segments.add(-sequenceIndex-2)
return lcs_string,segments
from termcolor import colored
for j,tran in enumerate(lines):
print s[j]
for i,c in enumerate(tran):
if i in s[j]:
now_at_csl = True
#print colored(c,'green'),
else:
now_at_csl = False
#print colored(c,'red'),
if now_at_csl != at_lcs[j]:
results[j].append("")
if j == 0:
agreement.append(now_at_csl)
at_lcs[j] = now_at_csl
results[j][-1] += c
#print
return lcs_string,agreement,results
transcriptions = {}
#read in the files - right now just set up to work on Greg's computer
for i in inputFiles:
fname = base_directory+"/Databases/transcribe/transcribe"+str(i)+".txt"
individual_transcriptions = load_file(fname)
for key,line in individual_transcriptions.items():
if not(key in transcriptions):
transcriptions[key] = [line]
else:
transcriptions[key].append(line)
gold_fname = base_directory+"/Dropbox/goldTranscriptions.txt"
gold_transcriptions = load_file(gold_fname)
X = []
Y = []
Xstd = []
Ystd = []
for imageIndex in range(3):
for lineIndex in range(5):
a = []
c = []
for l in transcriptions[(imageIndex,lineIndex)]:
lcs_string,segments= lcs([gold_transcriptions[(imageIndex,lineIndex)],l])
accuracy = len(lcs_string)/float(len(l))
completeness = len(lcs_string)/float(len(gold_transcriptions[(imageIndex,lineIndex)]))
a.append(accuracy)
c.append(completeness)
X.append(np.mean(a))
Y.append(np.mean(c))
Xstd.append(np.std(a,ddof=1))
Ystd.append(np.std(c,ddof=1))
print X
print np.mean(X)
print np.mean(Y)
plt.errorbar(X,Y,xerr=Xstd,yerr=Ystd,fmt=".")
plt.xlabel("Accuracy (w/ standard dev.)")
plt.ylabel("Completeness (w/ standard dev.)")
plt.xlim((0
|
break
|
conditional_block
|
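The Python record above scores each crowd transcription against a gold line by computing a longest common subsequence and then taking accuracy = len(LCS)/len(transcription) and completeness = len(LCS)/len(gold). As an illustrative sketch only (pairwise rather than the record's multi-line variant, and with every identifier and sample string invented for the example), the same scoring idea in Go could look like this:

package main

import "fmt"

// lcsLength returns the length of the longest common subsequence of a and b
// using the standard dynamic-programming table with two rolling rows.
func lcsLength(a, b string) int {
	ar, br := []rune(a), []rune(b)
	prev := make([]int, len(br)+1)
	curr := make([]int, len(br)+1)
	for i := 1; i <= len(ar); i++ {
		for j := 1; j <= len(br); j++ {
			switch {
			case ar[i-1] == br[j-1]:
				curr[j] = prev[j-1] + 1
			case prev[j] >= curr[j-1]:
				curr[j] = prev[j]
			default:
				curr[j] = curr[j-1]
			}
		}
		prev, curr = curr, prev
	}
	return prev[len(br)]
}

func main() {
	gold := "the quick brown fox"     // stand-in for a gold transcription line
	candidate := "teh quick brwn fox" // stand-in for a volunteer transcription
	l := lcsLength(gold, candidate)
	accuracy := float64(l) / float64(len([]rune(candidate)))   // how much of the candidate is supported by the gold line
	completeness := float64(l) / float64(len([]rune(gold)))    // how much of the gold line the candidate recovered
	fmt.Printf("lcs=%d accuracy=%.2f completeness=%.2f\n", l, accuracy, completeness)
}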
messages.go
|
(br BaudrateIdentification) int {
switch rune(br) {
case '0':
return 300
case 'A', '1':
return 600
case 'B', '2':
return 1200
case 'C', '3':
return 2400
case 'D', '4':
return 4800
case 'E', '5':
return 9600
case 'F', '6':
return 19200
}
|
AckModeDataReadOut = AcknowledgeMode(byte('0'))
AckModeProgramming = AcknowledgeMode(byte('1'))
AckModeBinary = AcknowledgeMode(byte('2'))
AckModeReserved = AcknowledgeMode(byte('3'))
AckModeManufacture = AcknowledgeMode(byte('6'))
AckModeIllegalMode = AcknowledgeMode(byte(' '))
)
const (
CR = byte(0x0D)
LF = byte(0x0A)
FrontBoundaryChar = byte('(')
RearBoundaryChar = byte(')')
UnitSeparator = byte('*')
StartChar = byte('/')
RequestCommandChar = byte('?')
EndChar = byte('!')
StxChar = byte(0x02)
EtxChar = byte(0x03)
SeqDelChar = byte('\\')
)
// ValidTestDataMessage can be used for testing.
const ValidTestDataMessage = string(StxChar) +
`1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" +
string(EndChar) +
string(CR) + string(LF) +
string(EtxChar) +
string(Bcc(0))
func ValidAddressChar(b byte) bool {
switch b {
case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidValueChar(b byte) bool {
switch b {
case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidUnitChar(b byte) bool {
return ValidAddressChar(b)
}
// AcknowledgeModeFromByte returns the acknowledge mode from the given byte value.
func AcknowledgeModeFromByte(a byte) AcknowledgeMode {
switch a {
case 0, 1, 2:
return AcknowledgeMode(a)
case 3, 4, 5:
return AckModeReserved
}
switch {
case 6 <= a && a <= 9:
case 'A' <= a && a <= 'Z':
return AckModeManufacture
}
return AckModeIllegalMode
}
var (
ErrCRFound = errors.New("End CR found")
ErrNotImplementedYet = errors.New("not implemented yet")
ErrFormatError = errors.New("format error")
ErrFormatNoChars = errors.New("no chars found")
ErrEmptyDataLine = errors.New("empty data line found")
ErrUnexpectedEOF = errors.New("unexpected end of file")
ErrNoBlockEndChar = errors.New("no block end char found")
ErrNoStartChar = errors.New("no StartChar found")
ErrAddressTooLong = errors.New("field too long")
ErrValueTooLong = errors.New("field too long")
ErrUnitTooLong = errors.New("field too long")
ErrIdentificationTooLong = errors.New("identification field too long")
)
func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) {
var b byte
var err error
var res *[]DataSet
var bcc = Bcc(0)
if verbose {
log.Println("Starting ParseDataMessage")
}
// Consume all bytes till a start of message is found.
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrUnexpectedEOF
}
if b == StxChar {
break
}
}
if verbose {
log.Println("Found StxChar")
}
// Get the datasets.
res, err = ParseDataBlock(r, &bcc)
if err != nil {
return nil, err
}
_, err = ParseDataMessageEnd(r, &bcc)
if err != nil {
return nil, err
}
return &DataMessage{
DataSets: res,
bcc: bcc,
}, nil
}
// ParseDataMessageEnd parses the end of a datamessage.
// ! CR LF ETX BCC
func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) {
var b byte
var err error
if verbose {
log.Println("Starting ParseDataMessageEnd")
}
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EndChar {
if verbose {
log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b)
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != CR {
if verbose {
log.Println("ParseDataMessageEnd, error parsing CR")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != LF {
if verbose {
log.Println("ParseDataMessageEnd, error parsing LF")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EtxChar {
if verbose {
log.Println("ParseDataMessageEnd, error parsing EtxChar")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
if verbose {
log.Println("ParseDataMessageEnd, error parsing Bcc")
}
return nil, err
}
return &DataMessage{
bcc: *bcc,
}, nil
}
// ParseDataBlock parses til no valid data lines can be parsed.
func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) {
var err error
var res []DataSet
if verbose {
log.Println("Starting ParseDataBlock")
}
for {
var ds []DataSet
ds, err = ParseDataLine(r, bcc)
if err != nil {
if len(res) <= 0 {
return nil, ErrEmptyDataLine
}
return &res, nil
}
res = append(res, ds...)
}
}
// ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occurred.
// func ParseDataMessage(r io.Reader) (*DataMessage, error) {
// return nil, ErrNotImplementedYet
// }
// ParseDataLine parses DataSets until a CR LF has been detected.
// Data lines consist of one or more datasets.
func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) {
var b byte
var err error
var ds *DataSet
var res []DataSet
if verbose {
log.Println("Starting ParseDataLine")
}
for {
ds, err = ParseDataSet(r, bcc)
if err != nil {
r.UnreadByte()
return nil, ErrFormatError
}
res = append(res, *ds)
// Test if the next two chars are CR LF
b, err = r.ReadByte()
if err == nil && b == CR {
bcc.Digest(b)
b, err = r.ReadByte()
if err == nil && b == LF {
bcc.Digest(b)
return res, nil
}
// Error, CR not followed by LF
return nil, ErrFormatError
}
r.UnreadByte()
}
}
// ParseDataSet reads bytes from r till a new complete dataset has been read or an error occurred.
// A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit".
// Each of these fields is optional and may thus be equal to the empty string.
// Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')'
// Ignores CR and LF and reads up to the first !
func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) {
// read chars til Front boundary.
var b byte
var err error
var va [100]byte
var v = va[:0]
res := &DataSet{}
// Read the address till FrontBoundaryChar == (
if verbose {
log.Println("Starting ParseDataSet")
}
if verbose {
log.Println("Scanning for Address")
}
ScanAddress:
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrFormatNoChars
}
switch b {
case CR, LF:
r.UnreadByte()
return nil, ErrCRFound
case FrontBoundaryChar:
bcc.Digest(b)
break ScanAddress
default:
bcc.Digest(b)
if !ValidAddressChar(b
|
return 0
}
const (
|
random_line_split
|
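The messages.go record above (repeated below with different holes) parses a meter readout in the style of IEC 62056-21 / IEC 1107: an STX byte, one or more data lines of address(value*unit) sets, then '!' CR LF ETX and a block-check character. A minimal sketch of driving ParseDataMessage with the ValidTestDataMessage constant follows; the package name and import path are invented for the example, and only identifiers that appear in the record (ParseDataMessage, ValidTestDataMessage, the DataSets field) are relied on.

package main

import (
	"bufio"
	"fmt"
	"log"
	"strings"

	readout "example.com/readout" // hypothetical import path for the package defined in messages.go
)

func main() {
	// Wrap the sample readout in a bufio.Reader, as ParseDataMessage expects.
	r := bufio.NewReader(strings.NewReader(readout.ValidTestDataMessage))

	msg, err := readout.ParseDataMessage(r)
	if err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	if msg.DataSets != nil {
		fmt.Printf("parsed %d data sets\n", len(*msg.DataSets))
	}
}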
messages.go
|
(br BaudrateIdentification) int {
switch rune(br) {
case '0':
return 300
case 'A', '1':
return 600
case 'B', '2':
return 1200
case 'C', '3':
return 2400
case 'D', '4':
return 4800
case 'E', '5':
return 9600
case 'F', '6':
return 19200
}
return 0
}
const (
AckModeDataReadOut = AcknowledgeMode(byte('0'))
AckModeProgramming = AcknowledgeMode(byte('1'))
AckModeBinary = AcknowledgeMode(byte('2'))
AckModeReserved = AcknowledgeMode(byte('3'))
AckModeManufacture = AcknowledgeMode(byte('6'))
AckModeIllegalMode = AcknowledgeMode(byte(' '))
)
const (
CR = byte(0x0D)
LF = byte(0x0A)
FrontBoundaryChar = byte('(')
RearBoundaryChar = byte(')')
UnitSeparator = byte('*')
StartChar = byte('/')
RequestCommandChar = byte('?')
EndChar = byte('!')
StxChar = byte(0x02)
EtxChar = byte(0x03)
SeqDelChar = byte('\\')
)
// ValidTestDataMessage can be used for testing.
const ValidTestDataMessage = string(StxChar) +
`1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" +
string(EndChar) +
string(CR) + string(LF) +
string(EtxChar) +
string(Bcc(0))
func ValidAddressChar(b byte) bool {
switch b {
case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidValueChar(b byte) bool {
switch b {
case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidUnitChar(b byte) bool {
return ValidAddressChar(b)
}
// AcknowledgeModeFromByte returns the acknowledge mode from the given byte value.
func AcknowledgeModeFromByte(a byte) AcknowledgeMode {
switch a {
case 0, 1, 2:
return AcknowledgeMode(a)
case 3, 4, 5:
return AckModeReserved
}
switch {
case 6 <= a && a <= 9:
case 'A' <= a && a <= 'Z':
return AckModeManufacture
}
return AckModeIllegalMode
}
var (
ErrCRFound = errors.New("End CR found")
ErrNotImplementedYet = errors.New("not implemented yet")
ErrFormatError = errors.New("format error")
ErrFormatNoChars = errors.New("no chars found")
ErrEmptyDataLine = errors.New("empty data line found")
ErrUnexpectedEOF = errors.New("unexpected end of file")
ErrNoBlockEndChar = errors.New("no block end char found")
ErrNoStartChar = errors.New("no StartChar found")
ErrAddressTooLong = errors.New("field too long")
ErrValueTooLong = errors.New("field too long")
ErrUnitTooLong = errors.New("field too long")
ErrIdentificationTooLong = errors.New("identification field too long")
)
func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) {
var b byte
var err error
var res *[]DataSet
var bcc = Bcc(0)
if verbose {
log.Println("Starting ParseDataMessage")
}
// Consume all bytes till a start of message is found.
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrUnexpectedEOF
}
if b == StxChar {
break
}
}
if verbose {
log.Println("Found StxChar")
}
// Get the datasets.
res, err = ParseDataBlock(r, &bcc)
if err != nil {
return nil, err
}
_, err = ParseDataMessageEnd(r, &bcc)
if err != nil {
return nil, err
}
return &DataMessage{
DataSets: res,
bcc: bcc,
}, nil
}
// ParseDataMessageEnd parses the end of a datamessage.
// ! CR LF ETX BCC
func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) {
var b byte
var err error
if verbose {
log.Println("Starting ParseDataMessageEnd")
}
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EndChar {
if verbose {
log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b)
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != CR {
if verbose {
log.Println("ParseDataMessageEnd, error parsing CR")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil
|
if b != LF {
if verbose {
log.Println("ParseDataMessageEnd, error parsing LF")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EtxChar {
if verbose {
log.Println("ParseDataMessageEnd, error parsing EtxChar")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
if verbose {
log.Println("ParseDataMessageEnd, error parsing Bcc")
}
return nil, err
}
return &DataMessage{
bcc: *bcc,
}, nil
}
// ParseDataBlock parses til no valid data lines can be parsed.
func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) {
var err error
var res []DataSet
if verbose {
log.Println("Starting ParseDataBlock")
}
for {
var ds []DataSet
ds, err = ParseDataLine(r, bcc)
if err != nil {
if len(res) <= 0 {
return nil, ErrEmptyDataLine
}
return &res, nil
}
res = append(res, ds...)
}
}
// ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occurred.
// func ParseDataMessage(r io.Reader) (*DataMessage, error) {
// return nil, ErrNotImplementedYet
// }
// ParseDataLine parses DataSets until a CR LF has been detected.
// Data lines consist of one or more datasets.
func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) {
var b byte
var err error
var ds *DataSet
var res []DataSet
if verbose {
log.Println("Starting ParseDataLine")
}
for {
ds, err = ParseDataSet(r, bcc)
if err != nil {
r.UnreadByte()
return nil, ErrFormatError
}
res = append(res, *ds)
// Test if the next two chars are CR LF
b, err = r.ReadByte()
if err == nil && b == CR {
bcc.Digest(b)
b, err = r.ReadByte()
if err == nil && b == LF {
bcc.Digest(b)
return res, nil
}
// Error, CR not followed by LF
return nil, ErrFormatError
}
r.UnreadByte()
}
}
// ParseDataSet reads bytes from r till a new complete dataset has been read or an error occurred.
// A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit".
// Each of these fields is optional and may thus be equal to the empty string.
// Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')'
// Ignores CR and LF and reads up to the first !
func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) {
// read chars til Front boundary.
var b byte
var err error
var va [100]byte
var v = va[:0]
res := &DataSet{}
// Read the address till FrontBoundaryChar == (
if verbose {
log.Println("Starting ParseDataSet")
}
if verbose {
log.Println("Scanning for Address")
}
ScanAddress:
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrFormatNoChars
}
switch b {
case CR, LF:
r.UnreadByte()
return nil, ErrCRFound
case FrontBoundaryChar:
bcc.Digest(b)
break ScanAddress
default:
bcc.Digest(b)
if !ValidAddress
|
{
return nil, err
}
|
conditional_block
|
messages.go
|
(br BaudrateIdentification) int
|
const (
AckModeDataReadOut = AcknowledgeMode(byte('0'))
AckModeProgramming = AcknowledgeMode(byte('1'))
AckModeBinary = AcknowledgeMode(byte('2'))
AckModeReserved = AcknowledgeMode(byte('3'))
AckModeManufacture = AcknowledgeMode(byte('6'))
AckModeIllegalMode = AcknowledgeMode(byte(' '))
)
const (
CR = byte(0x0D)
LF = byte(0x0A)
FrontBoundaryChar = byte('(')
RearBoundaryChar = byte(')')
UnitSeparator = byte('*')
StartChar = byte('/')
RequestCommandChar = byte('?')
EndChar = byte('!')
StxChar = byte(0x02)
EtxChar = byte(0x03)
SeqDelChar = byte('\\')
)
// ValidTestDataMessage can be used for testing.
const ValidTestDataMessage = string(StxChar) +
`1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" +
string(EndChar) +
string(CR) + string(LF) +
string(EtxChar) +
string(Bcc(0))
func ValidAddressChar(b byte) bool {
switch b {
case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidValueChar(b byte) bool {
switch b {
case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidUnitChar(b byte) bool {
return ValidAddressChar(b)
}
// AcknowledgeModeFromByte returns the acknowledge mode from the given byte value.
func AcknowledgeModeFromByte(a byte) AcknowledgeMode {
switch a {
case 0, 1, 2:
return AcknowledgeMode(a)
case 3, 4, 5:
return AckModeReserved
}
switch {
case 6 <= a && a <= 9:
case 'A' <= a && a <= 'Z':
return AckModeManufacture
}
return AckModeIllegalMode
}
var (
ErrCRFound = errors.New("End CR found")
ErrNotImplementedYet = errors.New("not implemented yet")
ErrFormatError = errors.New("format error")
ErrFormatNoChars = errors.New("no chars found")
ErrEmptyDataLine = errors.New("empty data line found")
ErrUnexpectedEOF = errors.New("unexpected end of file")
ErrNoBlockEndChar = errors.New("no block end char found")
ErrNoStartChar = errors.New("no StartChar found")
ErrAddressTooLong = errors.New("field too long")
ErrValueTooLong = errors.New("field too long")
ErrUnitTooLong = errors.New("field too long")
ErrIdentificationTooLong = errors.New("identification field too long")
)
func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) {
var b byte
var err error
var res *[]DataSet
var bcc = Bcc(0)
if verbose {
log.Println("Starting ParseDataMessage")
}
// Consume all bytes till a start of message is found.
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrUnexpectedEOF
}
if b == StxChar {
break
}
}
if verbose {
log.Println("Found StxChar")
}
// Get the datasets.
res, err = ParseDataBlock(r, &bcc)
if err != nil {
return nil, err
}
_, err = ParseDataMessageEnd(r, &bcc)
if err != nil {
return nil, err
}
return &DataMessage{
DataSets: res,
bcc: bcc,
}, nil
}
// ParseDataMessageEnd parses the end of a datamessage.
// ! CR LF ETX BCC
func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) {
var b byte
var err error
if verbose {
log.Println("Starting ParseDataMessageEnd")
}
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EndChar {
if verbose {
log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b)
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != CR {
if verbose {
log.Println("ParseDataMessageEnd, error parsing CR")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != LF {
if verbose {
log.Println("ParseDataMessageEnd, error parsing LF")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EtxChar {
if verbose {
log.Println("ParseDataMessageEnd, error parsing EtxChar")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
if verbose {
log.Println("ParseDataMessageEnd, error parsing Bcc")
}
return nil, err
}
return &DataMessage{
bcc: *bcc,
}, nil
}
// ParseDataBlock parses til no valid data lines can be parsed.
func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) {
var err error
var res []DataSet
if verbose {
log.Println("Starting ParseDataBlock")
}
for {
var ds []DataSet
ds, err = ParseDataLine(r, bcc)
if err != nil {
if len(res) <= 0 {
return nil, ErrEmptyDataLine
}
return &res, nil
}
res = append(res, ds...)
}
}
// ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occurred.
// func ParseDataMessage(r io.Reader) (*DataMessage, error) {
// return nil, ErrNotImplementedYet
// }
// ParseDataLine parses DataSets until a CR LF has been detected.
// Data lines consist of one or more datasets.
func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) {
var b byte
var err error
var ds *DataSet
var res []DataSet
if verbose {
log.Println("Starting ParseDataLine")
}
for {
ds, err = ParseDataSet(r, bcc)
if err != nil {
r.UnreadByte()
return nil, ErrFormatError
}
res = append(res, *ds)
// Test if the next two chars are CR LF
b, err = r.ReadByte()
if err == nil && b == CR {
bcc.Digest(b)
b, err = r.ReadByte()
if err == nil && b == LF {
bcc.Digest(b)
return res, nil
}
// Error, CR not followed by LF
return nil, ErrFormatError
}
r.UnreadByte()
}
}
// ParseDataSet reads bytes from r till a new complete dataset has been read or an error occurred.
// A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit".
// Each of these fields is optional and may thus be equal to the empty string.
// Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')'
// Ignores CR and LF and reads up to the first !
func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) {
// read chars til Front boundary.
var b byte
var err error
var va [100]byte
var v = va[:0]
res := &DataSet{}
// Read the address till FrontBoundaryChar == (
if verbose {
log.Println("Starting ParseDataSet")
}
if verbose {
log.Println("Scanning for Address")
}
ScanAddress:
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrFormatNoChars
}
switch b {
case CR, LF:
r.UnreadByte()
return nil, ErrCRFound
case FrontBoundaryChar:
bcc.Digest(b)
break ScanAddress
default:
bcc.Digest(b)
if !ValidAddress
|
{
switch rune(br) {
case '0':
return 300
case 'A', '1':
return 600
case 'B', '2':
return 1200
case 'C', '3':
return 2400
case 'D', '4':
return 4800
case 'E', '5':
return 9600
case 'F', '6':
return 19200
}
return 0
}
|
identifier_body
|
messages.go
|
(br BaudrateIdentification) int {
switch rune(br) {
case '0':
return 300
case 'A', '1':
return 600
case 'B', '2':
return 1200
case 'C', '3':
return 2400
case 'D', '4':
return 4800
case 'E', '5':
return 9600
case 'F', '6':
return 19200
}
return 0
}
const (
AckModeDataReadOut = AcknowledgeMode(byte('0'))
AckModeProgramming = AcknowledgeMode(byte('1'))
AckModeBinary = AcknowledgeMode(byte('2'))
AckModeReserved = AcknowledgeMode(byte('3'))
AckModeManufacture = AcknowledgeMode(byte('6'))
AckModeIllegalMode = AcknowledgeMode(byte(' '))
)
const (
CR = byte(0x0D)
LF = byte(0x0A)
FrontBoundaryChar = byte('(')
RearBoundaryChar = byte(')')
UnitSeparator = byte('*')
StartChar = byte('/')
RequestCommandChar = byte('?')
EndChar = byte('!')
StxChar = byte(0x02)
EtxChar = byte(0x03)
SeqDelChar = byte('\\')
)
// ValidTestDataMessage can be used for testing.
const ValidTestDataMessage = string(StxChar) +
`1.1.1.1(12*kWh)` + `1.1.1.2(12*kWh)` + "\r\n" +
string(EndChar) +
string(CR) + string(LF) +
string(EtxChar) +
string(Bcc(0))
func ValidAddressChar(b byte) bool {
switch b {
case FrontBoundaryChar, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func
|
(b byte) bool {
switch b {
case FrontBoundaryChar, UnitSeparator, RearBoundaryChar, StartChar, EndChar:
return false
default:
return true
}
}
func ValidUnitChar(b byte) bool {
return ValidAddressChar(b)
}
// AcknowledgeModeFromByte returns the acknowledge mode from the given byte value.
func AcknowledgeModeFromByte(a byte) AcknowledgeMode {
switch a {
case 0, 1, 2:
return AcknowledgeMode(a)
case 3, 4, 5:
return AckModeReserved
}
switch {
case 6 <= a && a <= 9:
case 'A' <= a && a <= 'Z':
return AckModeManufacture
}
return AckModeIllegalMode
}
var (
ErrCRFound = errors.New("End CR found")
ErrNotImplementedYet = errors.New("not implemented yet")
ErrFormatError = errors.New("format error")
ErrFormatNoChars = errors.New("no chars found")
ErrEmptyDataLine = errors.New("empty data line found")
ErrUnexpectedEOF = errors.New("unexpected end of file")
ErrNoBlockEndChar = errors.New("no block end char found")
ErrNoStartChar = errors.New("no StartChar found")
ErrAddressTooLong = errors.New("field too long")
ErrValueTooLong = errors.New("field too long")
ErrUnitTooLong = errors.New("field too long")
ErrIdentificationTooLong = errors.New("identification field too long")
)
func ParseDataMessage(r *bufio.Reader) (*DataMessage, error) {
var b byte
var err error
var res *[]DataSet
var bcc = Bcc(0)
if verbose {
log.Println("Starting ParseDataMessage")
}
// Consume all bytes till a start of message is found.
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrUnexpectedEOF
}
if b == StxChar {
break
}
}
if verbose {
log.Println("Found StxChar")
}
// Get the datasets.
res, err = ParseDataBlock(r, &bcc)
if err != nil {
return nil, err
}
_, err = ParseDataMessageEnd(r, &bcc)
if err != nil {
return nil, err
}
return &DataMessage{
DataSets: res,
bcc: bcc,
}, nil
}
// ParseDataMessageEnd parses the end of a datamessage.
// ! CR LF ETX BCC
func ParseDataMessageEnd(r *bufio.Reader, bcc *Bcc) (*DataMessage, error) {
var b byte
var err error
if verbose {
log.Println("Starting ParseDataMessageEnd")
}
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EndChar {
if verbose {
log.Printf("ParseDataMessageEnd, error parsing EndChar, found %d", b)
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != CR {
if verbose {
log.Println("ParseDataMessageEnd, error parsing CR")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != LF {
if verbose {
log.Println("ParseDataMessageEnd, error parsing LF")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
return nil, err
}
if b != EtxChar {
if verbose {
log.Println("ParseDataMessageEnd, error parsing EtxChar")
}
return nil, ErrFormatError
}
bcc.Digest(b)
b, err = r.ReadByte()
if err != nil {
if verbose {
log.Println("ParseDataMessageEnd, error parsing Bcc")
}
return nil, err
}
return &DataMessage{
bcc: *bcc,
}, nil
}
// ParseDataBlock parses til no valid data lines can be parsed.
func ParseDataBlock(r *bufio.Reader, bcc *Bcc) (*[]DataSet, error) {
var err error
var res []DataSet
if verbose {
log.Println("Starting ParseDataBlock")
}
for {
var ds []DataSet
ds, err = ParseDataLine(r, bcc)
if err != nil {
if len(res) <= 0 {
return nil, ErrEmptyDataLine
}
return &res, nil
}
res = append(res, ds...)
}
}
// ParseDataMessage reads bytes from r till a new complete datamessage has been read or an error occurred.
// func ParseDataMessage(r io.Reader) (*DataMessage, error) {
// return nil, ErrNotImplementedYet
// }
// ParseDataLine parses DataSets until a CR LF has been detected.
// Data lines consist of one or more datasets.
func ParseDataLine(r *bufio.Reader, bcc *Bcc) ([]DataSet, error) {
var b byte
var err error
var ds *DataSet
var res []DataSet
if verbose {
log.Println("Starting ParseDataLine")
}
for {
ds, err = ParseDataSet(r, bcc)
if err != nil {
r.UnreadByte()
return nil, ErrFormatError
}
res = append(res, *ds)
// Test if the next two chars are CR LF
b, err = r.ReadByte()
if err == nil && b == CR {
bcc.Digest(b)
b, err = r.ReadByte()
if err == nil && b == LF {
bcc.Digest(b)
return res, nil
}
// Error, CR not followed by LF
return nil, ErrFormatError
}
r.UnreadByte()
}
}
// ParseDataSet reads bytes from r till a new complete dataset has been read or an error occurred.
// A data message contains a list of data sets. Each data set consists of 3 fields "address", "value", and "unit".
// Each of these fields is optional and may thus be equal to the empty string.
// Data set ::= Address '(' Value(optional) ('*' unit)(optional) ')'
// Ignores CR and LF and reads up to the first !
func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) {
// read chars til Front boundary.
var b byte
var err error
var va [100]byte
var v = va[:0]
res := &DataSet{}
// Read the address till FrontBoundaryChar == (
if verbose {
log.Println("Starting ParseDataSet")
}
if verbose {
log.Println("Scanning for Address")
}
ScanAddress:
for {
b, err = r.ReadByte()
if err != nil {
return nil, ErrFormatNoChars
}
switch b {
case CR, LF:
r.UnreadByte()
return nil, ErrCRFound
case FrontBoundaryChar:
bcc.Digest(b)
break ScanAddress
default:
bcc.Digest(b)
if !ValidAddressChar
|
ValidValueChar
|
identifier_name
|
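Since the hole in the record above targets the ValidValueChar identifier, it helps to spell out how the three character-class helpers differ: ValidValueChar additionally rejects the '*' unit separator, while ValidUnitChar simply delegates to ValidAddressChar. A table-style check in Go might look like the sketch below; the package name, import path, and test name are invented, and only the helpers defined in the record are relied on.

package readout_test

import (
	"testing"

	readout "example.com/readout" // hypothetical import path for the package defined in messages.go
)

// TestCharacterClasses spot-checks the boundary characters rejected by the
// data-set field validators.
func TestCharacterClasses(t *testing.T) {
	for _, b := range []byte{'(', ')', '/', '!'} {
		if readout.ValidAddressChar(b) {
			t.Errorf("ValidAddressChar(%q) = true, want false", b)
		}
		if readout.ValidValueChar(b) {
			t.Errorf("ValidValueChar(%q) = true, want false", b)
		}
	}
	// '*' separates value and unit, so it is legal in an address or unit
	// but not inside a value.
	if !readout.ValidAddressChar('*') {
		t.Error("ValidAddressChar('*') = false, want true")
	}
	if readout.ValidValueChar('*') {
		t.Error("ValidValueChar('*') = true, want false")
	}
}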
media_sessions.rs
|
(
&self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder)
}
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session with the new session specified by `id`.
/// Clear all outstanding notifications, if the active session has changed.
/// If the updated active session_id has changed, return old active id.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session() {
state.clone()
} else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media
|
register_notification
|
identifier_name
|
|
media_sessions.rs
|
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder)
|
}
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session with the new session specified by `id`.
/// Clear all outstanding notifications, if the active session has changed.
/// If the updated active session_id has changed, return old active id.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session() {
state.clone()
} else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
|
random_line_split
|
|
media_sessions.rs
|
: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder)
}
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session with the new session specified by `id`.
/// Clear all outstanding notifications, if the active session has changed.
/// If the updated active session_id has changed, return old active id.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session()
|
else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
|
{
state.clone()
}
|
conditional_block
|
media_sessions.rs
|
fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
let mut metadata = fidl_media_types::Metadata::new_empty();
let mut property1 = fidl_media_types::Property::new_empty();
property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string();
let sample_title = "This is a sample title".to_string();
property1.value = sample_title.clone();
metadata.properties = vec![property1];
metadata
}
fn create_player_status() -> fidl_media::PlayerStatus {
let mut player_status = fidl_media::PlayerStatus::new_empty();
let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty();
// Playback started at beginning of media.
timeline_fn.subject_time = 0;
// Monotonic clock time at beginning of media (nanos).
timeline_fn.reference_time = 500000000;
// Playback rate = 1, normal playback.
timeline_fn.subject_delta = 1;
timeline_fn.reference_delta = 1;
player_status.player_state = Some(fidl_media::PlayerState::Playing);
player_status.duration = Some(123456789);
player_status.shuffle_on = Some(true);
player_status.timeline_function = Some(timeline_fn);
player_status
}
#[test]
/// Test that retrieving a notification value correctly gets the current state.
/// 1) Query with an unsupported `event_id`.
/// 2) Query with a supported Event ID, with default state.
/// 3) Query with all supported Event IDs.
fn test_get_notification_value() {
let exec = fasync::Executor::new_with_fake_time().expect("executor should build");
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let media_sessions = MediaSessionsInner::new();
let (session_proxy, _) =
create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy.");
let mut media_state = MediaState::new(session_proxy);
// 1. Unsupported ID.
let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged;
let res = media_state.session_info().get_notification_value(&unsupported_id);
assert!(res.is_err());
// 2. Supported ID, `media_state` contains default values.
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged);
assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped));
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged);
assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX));
// 3.
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let mut info = fidl_media::SessionInfoDelta::new_empty();
info.metadata = Some(create_metadata());
info.player_status = Some(create_player_status());
media_state.update_session_info(info);
let expected_play_status = fidl_avrcp::PlaybackStatus::Playing;
let expected_pas = ValidPlayerApplicationSettings::new(
None,
Some(fidl_avrcp::RepeatStatusMode::Off),
Some(fidl_avrcp::ShuffleMode::AllTrackShuffle),
None,
);
// Supported = PAS, Playback, Track, TrackPos
let valid_events = media_sessions.get_supported_notification_events();
let expected_values: Vec<Notification> = vec![
Notification::new(None, None, None, Some(expected_pas), None, None, None),
Notification::new(Some(expected_play_status), None, None, None, None, None, None),
Notification::new(None, Some(0), None, None, None, None, None),
Notification::new(None, None, Some(55), None, None, None, None),
];
for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) {
assert_eq!(
media_state.session_info().get_notification_value(&event_id).expect("Should work"),
expected_v.clone()
);
}
}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests registering a notification works as expected.
/// 1. Normal case, insertion of a supported notification.
/// 2. Normal case, insertion of a supported notification, with eviction.
/// 3. Normal case, insertion of a supported notification, with change in state,
/// so that `update_notification_responders()` correctly updates inserted notif.
/// 4. Error case, insertion of an unsupported notification.
fn test_register_notification() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests insertion/updating of a new MediaSession into the state map.
/// 1. Test branch where MediaSession already exists, so this is just an update.
/// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it.
fn test_create_or_update_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests that updating any outstanding responders behaves as expected.
fn test_update_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests updating the active session_id correctly changes the currently
/// playing active media session, as well as clears any outstanding notifications
/// if a new MediaSession becomes the active session.
fn test_update_active_session_id() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests sending PlayerChanged response to all outstanding responders behaves
/// as expected, and removes all entries in the Notifications map.
fn test_clear_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests removing a session from the map.
/// Tests clear_session clears all notifications if the MediaSession is the currently
/// active session.
fn test_clear_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests clearing the active session_id.
fn test_clear_active_session_id()
|
{}
|
identifier_body
|
|
zhtta.rs
|
Server {
let (notify_tx, notify_rx) = channel();
let www_dir_path = Path::new(www_dir);
os::change_dir(&www_dir_path);
WebServer {
ip:ip,
port: port,
www_dir_path: www_dir_path,
request_queue_arc: Arc::new(Mutex::new(Vec::new())),
stream_map_arc: Arc::new(Mutex::new(HashMap::new())),
visitor_count:Arc::new(Mutex::new(0)),
thread_sema: Arc::new(Semaphore::new(5)),
cache: Arc::new(RwLock::new(HashMap::new())),
cache_len: Arc::new(Mutex::new(0)),
notify_rx: notify_rx,
notify_tx: notify_tx,
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice());
let www_dir_path_str = self.www_dir_path.clone();
let request_queue_arc = self.request_queue_arc.clone();
let notify_tx = self.notify_tx.clone();
let stream_map_arc = self.stream_map_arc.clone();
let visitor_count=self.visitor_count.clone();
Thread::spawn(move|| {
let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap();
let mut acceptor = listener.listen().unwrap();
println!("{} listening on {} (serving from: {}).",
SERVER_NAME, addr, www_dir_path_str.as_str().unwrap());
for stream_raw in acceptor.incoming() { //for each stream/connection
let (queue_tx, queue_rx) = channel();//build up a channel for sub thread
queue_tx.send(request_queue_arc.clone());//send the request queue over the channel so it can be received inside the child thread
let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver
let stream_map_arc = stream_map_arc.clone();
let visitor_count=visitor_count.clone();
println!("outer thread:{}",*visitor_count.lock().unwrap());
// Spawn a task to handle the connection.
Thread::spawn(move|| {
let mut vc= visitor_count.lock().unwrap(); // Done
*vc+=1;
println!("inner thread:{}",*vc);
let request_queue_arc = queue_rx.recv().unwrap();//
let mut stream = match stream_raw {
Ok(s) => {s}
Err(e) => { panic!("Error getting the listener stream! {}", e) }
};
let peer_name = WebServer::get_peer_name(&mut stream);
debug!("Got connection from {}", peer_name);
let mut buf: [u8;500] = [0;500];
stream.read(&mut buf);
let request_str = match str::from_utf8(&buf){
Ok(s) => s,
Err(e)=> panic!("Error reading from the listener stream! {}", e),
};
debug!("Request:\n{}", request_str);
//WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);
let req_group: Vec<&str> = request_str.splitn(3, ' ').collect();
if req_group.len() > 2 {
let path_str = ".".to_string() + req_group[1];
let mut path_obj = os::getcwd().unwrap();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{}]", path_str);
if path_str.as_slice().eq("./") {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream,*vc);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, &path_obj);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, &path_obj);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
if std::fs::metadata(&path_obj).unwrap().len()> CacheLowerBounder{
WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);}
else{
debug!("small file, do it without enqueue!");
let mut file_reader = File::open(&path_obj).unwrap();
stream.write(HTTP_OK.as_bytes());
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
}
}
}
}
});
}
});
}
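// Sketch (not part of the original zhtta.rs): the small-file fast path in listen()
// above could be factored into a helper like this one. It only reuses calls already
// present in this file (File, BufferedReader, write/write_all) and assumes the same
// pre-1.0 std::old_io APIs used throughout.
fn serve_small_file(mut stream: std::old_io::net::tcp::TcpStream, path: &Path) {
    let file_reader = File::open(path).unwrap();
    let _ = stream.write(HTTP_OK.as_bytes());
    let mut reader = BufferedReader::new(file_reader);
    for line in reader.lines().filter_map(|result| result.ok()) {
        let _ = stream.write_all(line.as_bytes());
    }
}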
fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) {
let mut stream = stream;
let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path"));
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// Done
fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream,visitor_count:usize)
|
// TODO: Streaming file.
// TODO: Application-layer file caching.
fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) {
let mut stream = stream;
let mut cache_str=String::new();
let mut counter=0;
let mut local_cache=cache.clone();
let mut is_modified=false;
{
let metadata=std::fs::metadata(path).unwrap();
let modify_time=metadata.modified();
let read_hash=local_cache.read().unwrap();
if read_hash.contains_key(path)
{
let tuple=read_hash.get(path).unwrap();
let time = tuple.2;
is_modified= (time !=modify_time);
}
}
if is_modified{
debug!("It is modified, delete from cache!");
let mut write_hash=local_cache.write().unwrap();
write_hash.remove(path);
}
//let mut local_cache=cache.clone();
{
debug!("updating counter...");
let read_hash=local_cache.read().unwrap();
for (key,value) in read_hash.iter(){
let mut counter=value.1.lock().unwrap();
*counter+=1;
}
if read_hash.contains_key(path){
debug!("Reading cached file:{}",path.display());
let mut pair=read_hash.get(path).unwrap();
{
*pair.1.lock().unwrap()=0;
}
stream.write(HTTP_OK.as_bytes());
let _ = stream.write_all(pair.0.as_bytes());
return;
}
else{
debug!("reading from disk!");
let mut file_reader = File::open(path).unwrap();
stream.write(HTTP_OK.as_bytes());
/*let mut buf:[u8;1048576]=[0;1048576];
loop{
let size=match file_reader.read(&mut buf){
Err(why) =>0,
Ok(size) =>size,
};
let str_buf=String::from_utf8_lossy(&buf[0..size]);
let _=stream.write(str_buf.as_bytes());
cache_str.push_str(str_buf.as_slice());
debug!("read siez:{}",size);
if(size<1048576){
break;
}
}*/
//better solution
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
cache_str.push_str(line.as_slice());
}
}
}
let file_size=std::fs::metadata(path).unwrap().len();
if(file_size<CacheLowerBounder){
debug!("file size:{}, don't cache this file(too small)",file_size);
return;
}
else if (file_size>CacheUpperBounder){
debug!("file size:{}, don't cache this file(too large)",file_size);
return;
}
debug!("updating cache....");
{
let mut write_hash=local_cache.write().unwrap();
let time=std::fs::metadata(path).unwrap().modified();
write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time));
}
*cache_len.lock().unwrap()+=1;
{
let mut write
|
{
let mut stream = stream;
let response: String =
format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n",
HTTP_OK, COUNTER_STYLE,
unsafe { visitor_count } );
debug!("Responding to counter request");
stream.write(response.as_bytes());
}
|
identifier_body
|
zhtta.rs
|
Server {
let (notify_tx, notify_rx) = channel();
let www_dir_path = Path::new(www_dir);
os::change_dir(&www_dir_path);
WebServer {
ip:ip,
port: port,
www_dir_path: www_dir_path,
request_queue_arc: Arc::new(Mutex::new(Vec::new())),
stream_map_arc: Arc::new(Mutex::new(HashMap::new())),
visitor_count:Arc::new(Mutex::new(0)),
thread_sema: Arc::new(Semaphore::new(5)),
cache: Arc::new(RwLock::new(HashMap::new())),
cache_len: Arc::new(Mutex::new(0)),
notify_rx: notify_rx,
notify_tx: notify_tx,
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice());
let www_dir_path_str = self.www_dir_path.clone();
let request_queue_arc = self.request_queue_arc.clone();
let notify_tx = self.notify_tx.clone();
let stream_map_arc = self.stream_map_arc.clone();
let visitor_count=self.visitor_count.clone();
Thread::spawn(move|| {
let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap();
let mut acceptor = listener.listen().unwrap();
println!("{} listening on {} (serving from: {}).",
SERVER_NAME, addr, www_dir_path_str.as_str().unwrap());
for stream_raw in acceptor.incoming() { //for each stream/connection
let (queue_tx, queue_rx) = channel();//build up a channel for sub thread
queue_tx.send(request_queue_arc.clone());//send the request queue over the channel so it can be received inside the child thread
let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver
let stream_map_arc = stream_map_arc.clone();
let visitor_count=visitor_count.clone();
println!("outer thread:{}",*visitor_count.lock().unwrap());
// Spawn a task to handle the connection.
Thread::spawn(move|| {
let mut vc= visitor_count.lock().unwrap(); // Done
*vc+=1;
println!("inner thread:{}",*vc);
let request_queue_arc = queue_rx.recv().unwrap();//
let mut stream = match stream_raw {
Ok(s) => {s}
Err(e) => { panic!("Error getting the listener stream! {}", e) }
};
let peer_name = WebServer::get_peer_name(&mut stream);
debug!("Got connection from {}", peer_name);
let mut buf: [u8;500] = [0;500];
stream.read(&mut buf);
let request_str = match str::from_utf8(&buf){
Ok(s) => s,
Err(e)=> panic!("Error reading from the listener stream! {}", e),
};
debug!("Request:\n{}", request_str);
//WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);
let req_group: Vec<&str> = request_str.splitn(3, ' ').collect();
if req_group.len() > 2 {
let path_str = ".".to_string() + req_group[1];
let mut path_obj = os::getcwd().unwrap();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{}]", path_str);
if path_str.as_slice().eq("./") {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream,*vc);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, &path_obj);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, &path_obj);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
if std::fs::metadata(&path_obj).unwrap().len()> CacheLowerBounder{
WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);}
else{
debug!("small file, do it without enqueue!");
let mut file_reader = File::open(&path_obj).unwrap();
stream.write(HTTP_OK.as_bytes());
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
}
|
});
}
});
}
fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) {
let mut stream = stream;
let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path"));
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// Done
fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream,visitor_count:usize) {
let mut stream = stream;
let response: String =
format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n",
HTTP_OK, COUNTER_STYLE,
unsafe { visitor_count } );
debug!("Responding to counter request");
stream.write(response.as_bytes());
}
// TODO: Streaming file.
// TODO: Application-layer file caching.
fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) {
let mut stream = stream;
let mut cache_str=String::new();
let mut counter=0;
let mut local_cache=cache.clone();
let mut is_modified=false;
{
let metadata=std::fs::metadata(path).unwrap();
let modify_time=metadata.modified();
let read_hash=local_cache.read().unwrap();
if read_hash.contains_key(path)
{
let tuple=read_hash.get(path).unwrap();
let time = tuple.2;
is_modified= (time !=modify_time);
}
}
if is_modified{
debug!("It is modified, delete from cache!");
let mut write_hash=local_cache.write().unwrap();
write_hash.remove(path);
}
//let mut local_cache=cache.clone();
{
debug!("updating counter...");
let read_hash=local_cache.read().unwrap();
for (key,value) in read_hash.iter(){
let mut counter=value.1.lock().unwrap();
*counter+=1;
}
if read_hash.contains_key(path){
debug!("Reading cached file:{}",path.display());
let mut pair=read_hash.get(path).unwrap();
{
*pair.1.lock().unwrap()=0;
}
stream.write(HTTP_OK.as_bytes());
let _ = stream.write_all(pair.0.as_bytes());
return;
}
else{
debug!("reading from disk!");
let mut file_reader = File::open(path).unwrap();
stream.write(HTTP_OK.as_bytes());
/*let mut buf:[u8;1048576]=[0;1048576];
loop{
let size=match file_reader.read(&mut buf){
Err(why) =>0,
Ok(size) =>size,
};
let str_buf=String::from_utf8_lossy(&buf[0..size]);
let _=stream.write(str_buf.as_bytes());
cache_str.push_str(str_buf.as_slice());
debug!("read siez:{}",size);
if(size<1048576){
break;
}
}*/
//better solution
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
cache_str.push_str(line.as_slice());
}
}
}
let file_size=std::fs::metadata(path).unwrap().len();
if(file_size<CacheLowerBounder){
debug!("file size:{}, don't cache this file(too small)",file_size);
return;
}
else if (file_size>CacheUpperBounder){
debug!("file size:{}, don't cache this file(too large)",file_size);
return;
}
debug!("updating cache....");
{
let mut write_hash=local_cache.write().unwrap();
let time=std::fs::metadata(path).unwrap().modified();
write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time));
}
*cache_len.lock().unwrap()+=1;
{
let mut write
|
}
}
}
|
random_line_split
|
zhtta.rs
|
());
return;
}
else{
debug!("reading from disk!");
let mut file_reader = File::open(path).unwrap();
stream.write(HTTP_OK.as_bytes());
/*let mut buf:[u8;1048576]=[0;1048576];
loop{
let size=match file_reader.read(&mut buf){
Err(why) =>0,
Ok(size) =>size,
};
let str_buf=String::from_utf8_lossy(&buf[0..size]);
let _=stream.write(str_buf.as_bytes());
cache_str.push_str(str_buf.as_slice());
debug!("read siez:{}",size);
if(size<1048576){
break;
}
}*/
//better solution
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
cache_str.push_str(line.as_slice());
}
}
}
let file_size=std::fs::metadata(path).unwrap().len();
if(file_size<CacheLowerBounder){
debug!("file size:{}, don't cache this file(too small)",file_size);
return;
}
else if (file_size>CacheUpperBounder){
debug!("file size:{}, don't cache this file(too large)",file_size);
return;
}
debug!("updating cache....");
{
let mut write_hash=local_cache.write().unwrap();
let time=std::fs::metadata(path).unwrap().modified();
write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time));
}
*cache_len.lock().unwrap()+=1;
{
let mut write_hash=local_cache.write().unwrap();
let mut to_be_replaced : Path=Path::new("./");
if *cache_len.lock().unwrap()>5{
let mut max_num=0;
//let read_hash=local_cache.write().unwrap();
let mut tmp: &Path=&Path::new("./");
for (key,value) in write_hash.iter(){
let num=*value.1.lock().unwrap();
if num>=max_num{
max_num=num;
tmp=key;
}
}
to_be_replaced=tmp.clone();
}else
{
return;
}
debug!("least recently used is:{}",to_be_replaced.display());
write_hash.remove(&to_be_replaced);
}
}
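// Sketch (not from the original source): the eviction step above removes the cache
// entry whose usage counter is largest, i.e. the entry that has gone the longest
// without being served from cache. The same policy, isolated over the cache's
// value type (String, Mutex<usize>, u64):
fn pick_eviction_victim(cache: &HashMap<Path, (String, Mutex<usize>, u64)>) -> Option<Path> {
    let mut victim: Option<Path> = None;
    let mut max_age = 0;
    for (key, value) in cache.iter() {
        let age = *value.1.lock().unwrap();
        if age >= max_age {
            max_age = age;
            victim = Some(key.clone());
        }
    }
    victim
}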
// TODO: Server-side gashing.
fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) {
//scan the .shtml page for the SSI exec tag, extract the embedded command, run it
//through the gash shell (../main), and splice its output into the served page
let mut stream = stream;
let mut file =match File::open(path)
{
Err(why) => panic!("Coundn't open file:{}",why),
Ok(file) => file,
};
let mut s= String::new();
s=match file.read_to_string(){
Err(why) => panic!("Couldn't read file:{}",why),
Ok(content) => content,
};
let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect();
let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect();
let cmd=cmd_mix[0].to_string();
let mut args =Vec::new();
args.push("-c");
args.push(&cmd);
let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){
Err(why) => panic!("Couldn't do command {}",why),
Ok(cmd) => cmd,
};
let mut stdout=gash_command.stdout.unwrap();
let mut output=String::new();
stdout.read_to_string(&mut output);
stream.write(HTTP_OK.as_bytes());
stream.write(str_vec[0].as_bytes());
stream.write(output.as_bytes());
stream.write(cmd_mix[1].as_bytes());
//WebServer::respond_with_static_file(stream, path);
}
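// Sketch (not part of the original): the SSI handling above splits the page around
// the <!--#exec cmd="..." --> markers. The same extraction, written against current
// std string APIs rather than the pre-1.0 split_str used above, returning
// (text before the tag, embedded command, text after the tag):
fn split_ssi(page: &str) -> Option<(&str, &str, &str)> {
    let open = "<!--#exec cmd=\"";
    let close = "\" -->";
    let start = page.find(open)?;
    let cmd_start = start + open.len();
    let cmd_end = cmd_start + page[cmd_start..].find(close)?;
    Some((&page[..start], &page[cmd_start..cmd_end], &page[cmd_end + close.len()..]))
}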
fn get_file_size(path: &Path) ->u64 {
let metadata=std::fs::metadata(path).unwrap();
return metadata.len()
}
// TODO: Smarter Scheduling.
fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_tx, stream_rx) = channel();
stream_tx.send(stream);
let stream = match stream_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the stream channel! {}", e),
};
let local_stream_map = stream_map_arc.clone();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut local_stream_map = local_stream_map.lock().unwrap();
local_stream_map.insert(peer_name.clone(), stream);
}
// Enqueue the HTTP request.
// TOCHECK: it was ~path_obj.clone(); make sure in which order ~ and clone() are executed
let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() };
let (req_tx, req_rx) = channel();
req_tx.send(req);
debug!("Waiting for queue mutex lock.");
let local_req_queue = req_queue_arc.clone();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut local_req_queue = local_req_queue.lock().unwrap();
let req: HTTP_Request = match req_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the request channel! {}", e),
};
//REORDER the queue in order of the request size
local_req_queue.push(req);
local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path)));
debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len());
notify_chan.send(()); // Send incoming notification to responder task.
}
}
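// Sketch (not in the original): the sort above keeps the request queue ordered by
// file size, i.e. shortest-job-first. The same ordering on a plain Vec of
// (name, size) pairs, just to make the scheduling policy explicit:
fn order_shortest_first(mut jobs: Vec<(String, u64)>) -> Vec<(String, u64)> {
    jobs.sort_by(|a, b| a.1.cmp(&b.1));
    jobs
}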
// TODO: Smarter Scheduling.
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
// Receiver<> cannot be sent to another task, so this one has to stay the main task, which can access self.notify_rx.
let (request_tx, request_rx) = channel();
loop {
self.notify_rx.recv(); // wait for a new request to be enqueued; this is where the infinite loop blocks
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut req_queue = req_queue_get.lock().unwrap();
if req_queue.len() > 0 {
self.thread_sema.acquire();
let req = req_queue.remove(0);
debug!("A new request dequeued, now the length of queue is {}.", req_queue.len());
request_tx.send(req);
}
}
let request = match request_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the request channel! {}", e),
};
// Get stream from hashmap.
let (stream_tx, stream_rx) = channel();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut stream_map = stream_map_get.lock().unwrap();
let stream = stream_map.remove(&request.peer_name).expect("no option tcpstream");
stream_tx.send(stream);
}
// TODO: Spawn more tasks to respond to the dequeued requests concurrently. You may need a semaphore to control the concurrency.
let stream = match stream_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the stream channel! {}", e),
};
let sema=self.thread_sema.clone();
let cache_len=self.cache_len.clone();
let mut cache=self.cache.clone();
Thread::spawn(move||{
debug!("Processing....");
WebServer::respond_with_static_file(stream, &request.path,cache,cache_len);
debug!("finishing request for{}",request.path.display());
debug!("=====Terminated connection from [{}].=====", request.peer_name);
sema.release();
});
}
}
fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{
match stream.peer_name(){
Ok(s) => {format!("{}:{}", s.ip, s.port)}
Err(e) => {panic!("Error while getting the stream name! {}", e)}
}
}
}
fn get_args() -> (String, usize, String) {
fn pr
|
int_usage(p
|
identifier_name
|
|
zhtta.rs
|
debug!("updating cache....");
{
let mut write_hash=local_cache.write().unwrap();
let time=std::fs::metadata(path).unwrap().modified();
write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time));
}
*cache_len.lock().unwrap()+=1;
{
let mut write_hash=local_cache.write().unwrap();
let mut to_be_replaced : Path=Path::new("./");
if *cache_len.lock().unwrap()>5{
let mut max_num=0;
//let read_hash=local_cache.write().unwrap();
let mut tmp: &Path=&Path::new("./");
for (key,value) in write_hash.iter(){
let num=*value.1.lock().unwrap();
if num>=max_num{
max_num=num;
tmp=key;
}
}
to_be_replaced=tmp.clone();
}else
{
return;
}
debug!("least recently used is:{}",to_be_replaced.display());
write_hash.remove(&to_be_replaced);
}
}
// TODO: Server-side gashing.
fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) {
//scan the .shtml page for the SSI exec tag, extract the embedded command, run it
//through the gash shell (../main), and splice its output into the served page
let mut stream = stream;
let mut file =match File::open(path)
{
Err(why) => panic!("Coundn't open file:{}",why),
Ok(file) => file,
};
let mut s= String::new();
s=match file.read_to_string(){
Err(why) => panic!("Couldn't read file:{}",why),
Ok(content) => content,
};
let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect();
let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect();
let cmd=cmd_mix[0].to_string();
let mut args =Vec::new();
args.push("-c");
args.push(&cmd);
let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){
Err(why) => panic!("Couldn't do command {}",why),
Ok(cmd) => cmd,
};
let mut stdout=gash_command.stdout.unwrap();
let mut output=String::new();
stdout.read_to_string(&mut output);
stream.write(HTTP_OK.as_bytes());
stream.write(str_vec[0].as_bytes());
stream.write(output.as_bytes());
stream.write(cmd_mix[1].as_bytes());
//WebServer::respond_with_static_file(stream, path);
}
fn get_file_size(path: &Path) ->u64 {
let metadata=std::fs::metadata(path).unwrap();
return metadata.len()
}
// TODO: Smarter Scheduling.
fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_tx, stream_rx) = channel();
stream_tx.send(stream);
let stream = match stream_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the stream channel! {}", e),
};
let local_stream_map = stream_map_arc.clone();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut local_stream_map = local_stream_map.lock().unwrap();
local_stream_map.insert(peer_name.clone(), stream);
}
// Enqueue the HTTP request.
// TOCHECK: it was ~path_obj.clone(); make sure in which order ~ and clone() are executed
let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() };
let (req_tx, req_rx) = channel();
req_tx.send(req);
debug!("Waiting for queue mutex lock.");
let local_req_queue = req_queue_arc.clone();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut local_req_queue = local_req_queue.lock().unwrap();
let req: HTTP_Request = match req_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the request channel! {}", e),
};
//REORDER the queue in order of the request size
local_req_queue.push(req);
local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path)));
debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len());
notify_chan.send(()); // Send incoming notification to responder task.
}
}
// TODO: Smarter Scheduling.
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
// Receiver<> cannot be sent to another task, so this one has to stay the main task, which can access self.notify_rx.
let (request_tx, request_rx) = channel();
loop {
self.notify_rx.recv(); // wait for a new request to be enqueued; this is where the infinite loop blocks
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut req_queue = req_queue_get.lock().unwrap();
if req_queue.len() > 0 {
self.thread_sema.acquire();
let req = req_queue.remove(0);
debug!("A new request dequeued, now the length of queue is {}.", req_queue.len());
request_tx.send(req);
}
}
let request = match request_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the request channel! {}", e),
};
// Get stream from hashmap.
let (stream_tx, stream_rx) = channel();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut stream_map = stream_map_get.lock().unwrap();
let stream = stream_map.remove(&request.peer_name).expect("no option tcpstream");
stream_tx.send(stream);
}
// TODO: Spawn more tasks to respond to the dequeued requests concurrently. You may need a semaphore to control the concurrency.
let stream = match stream_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the stream channel! {}", e),
};
let sema=self.thread_sema.clone();
let cache_len=self.cache_len.clone();
let mut cache=self.cache.clone();
Thread::spawn(move||{
debug!("Processing....");
WebServer::respond_with_static_file(stream, &request.path,cache,cache_len);
debug!("finishing request for{}",request.path.display());
debug!("=====Terminated connection from [{}].=====", request.peer_name);
sema.release();
});
}
}
fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{
match stream.peer_name(){
Ok(s) => {format!("{}:{}", s.ip, s.port)}
Err(e) => {panic!("Error while getting the stream name! {}", e)}
}
}
}
fn get_args() -> (String, usize, String) {
fn print_usage(program: &str) {
println!("Usage: {} [options]", program);
println!("--ip \tIP address, \"{}\" by default.", IP);
println!("--port \tport number, \"{}\" by default.", PORT);
println!("--www \tworking directory, \"{}\" by default", WWW_DIR);
println!("-h --help \tUsage");
}
/* Begin processing program arguments and initiate the parameters. */
let args = os::args();
let program = args[0].clone();
let opts = [
getopts::optopt("", "ip", "The IP address to bind to", "IP"),
getopts::optopt("", "port", "The Port to bind to", "PORT"),
getopts::optopt("", "www", "The www directory", "WWW_DIR"),
getopts::optflag("h", "help", "Display help"),
];
let matches = match getopts::getopts(args.tail(), &opts) {
Ok(m) => { m }
Err(f) => { panic!(f.to_err_msg()) }
};
if matches.opt_present("h") || matches.opt_present("help") {
print_usage(program.as_slice());
unsafe { libc::exit(1); }
}
let ip_str = if matches.opt_present("ip") {
|
matches.opt_str("ip").expect("invalid ip address?").to_owned()
} e
|
conditional_block
|
|
176_main_448.py
|
.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875,
0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151,
2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581,
0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333,
1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393,
1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273,
2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412,
1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727,
2.164772727, 1.352982955, 2.645833333, 1.831730769, 1.630993151, 1.5875, 3.217905405,
0.804476351, 2.705965909, 1.725543478, 1.630993151, 2.429846939, 1.777052239, 0.922965116,
2.204861111, 1.123231132, 0.79375, 0.320060484, 0.403601695, 0.543664384, 0.269372172,
2.588315217, 2.289663462, 1.322916667, 1.280241935, 0.259395425, 1.951844262, 0.620117188,
0.252251059, 1.653645833, 0.519923581, 0.616904145, 4.252232143, 2.164772727, 1.920362903,
1.725543478, 2.052801724, 1.700892857, 0.413411458, 1.400735294, 1.035326087, 1.526442308,
0.881944444, 2.126116071, 2.246462264, 1.984375, 1.984375, 1.889880952, 1.860351563])
weight = torch.Tensor(weight)
weight = weight.cuda()
####################
criterion = nn.CrossEntropyLoss(weight=weight)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('DFL-CNN <==> Part2 : Load Network <==> Continue from {} epoch {}'.format(args.resume,
checkpoint['epoch']))
else:
print('DFL-CNN <==> Part2 : Load Network <==> Failed')
print('DFL-CNN <==> Part2 : Load Network <==> Done')
print('DFL-CNN <==> Part3 : Load Dataset <==> Begin')
dataroot = os.path.abspath(args.dataroot)
traindir = os.path.join(dataroot, 'train')
testdir = os.path.join(dataroot, 'test')
# ImageFolder to process img
transform_train = get_transform_for_train()
transform_test = get_transform_for_test()
transform_test_simple = get_transform_for_test_simple()
train_dataset = ImageFolderWithPaths(traindir, transform=transform_train)
test_dataset = ImageFolderWithPaths(testdir, transform=transform_test)
test_dataset_simple = ImageFolderWithPaths(testdir, transform=transform_test_simple)
# A list for target to classname
index2classlist = train_dataset.index2classlist()
# data loader
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=1, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last=True)
test_loader_simple = torch.utils.data.DataLoader(
test_dataset_simple, batch_size=1, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last=True)
print('DFL-CNN <==> Part3 : Load Dataset <==> Done')
print('DFL-CNN <==> Part4 : Train and Test <==> Begin')
for epoch in range(args.start_epoch, args.epochs):
|
adjust_learning_rate(args, optimizer, epoch, gamma=0.1)
# train for one epoch
train(args, train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
if epoch % args.eval_epoch == 0:
prec1 = validate_simple(args, test_loader_simple, model, criterion, epoch)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'prec1': prec1,
}, is_best)
|
conditional_block
|
|
176_main_448.py
|
')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=4, type=int,
help='GPU nums to use.')
parser.add_argument('--log_train_dir', default='log_train', type=str,
help='log for train')
parser.add_argument('--log_test_dir', default='log_test', type=str,
help='log for test')
parser.add_argument('--nclass', default=583, type=int,
help='num of classes')
parser.add_argument('--eval_epoch', default=2, type=int,
help='every eval_epoch we will evaluate')
parser.add_argument('--vis_epoch', default=2, type=int,
help='every vis_epoch we will evaluate')
parser.add_argument('--save_epoch', default=2, type=int,
help='every save_epoch we will evaluate')
parser.add_argument('--w', default=448, type=int,
help='transform, seen as align')
parser.add_argument('--h', default=448, type=int,
help='transform, seen as align')
best_prec1 = 0
def
|
():
print('DFL-CNN <==> Part1 : prepare for parameters <==> Begin')
global args, best_prec1
args = parser.parse_args()
print('DFL-CNN <==> Part1 : prepare for parameters <==> Done')
print('DFL-CNN <==> Part2 : Load Network <==> Begin')
model = DFL_VGG16(k=10, nclass=176)
if args.gpu is not None:
model = nn.DataParallel(model, device_ids=range(args.gpu))
model = model.cuda()
cudnn.benchmark = True
if args.init_type is not None:
try:
init_weights(model, init_type=args.init_type)
except:
sys.exit('DFL-CNN <==> Part2 : Load Network <==> Init_weights error!')
####################
weight = np.array([0.844414894, 2.834821429, 1.725543478, 1.322916667, 1.725543478, 1.700892857,
0.739518634, 0.629960317, 0.773133117, 1.190625, 1.700892857, 3.501838235,
3.052884615, 1.368534483, 1.451981707, 1.831730769, 2.204861111, 1.133928571,
0.592350746, 0.262252203, 1.123231132, 0.308452073, 1.280241935, 1.009004237,
1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521,
1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783,
0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447,
0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151,
1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769,
1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581,
3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824,
1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875,
0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151,
2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581,
0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333,
1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393,
1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273,
2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412,
1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.16477272
|
main
|
identifier_name
|
176_main_448.py
|
')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=4, type=int,
help='GPU nums to use.')
parser.add_argument('--log_train_dir', default='log_train', type=str,
help='log for train')
parser.add_argument('--log_test_dir', default='log_test', type=str,
help='log for test')
parser.add_argument('--nclass', default=583, type=int,
help='num of classes')
parser.add_argument('--eval_epoch', default=2, type=int,
help='every eval_epoch we will evaluate')
parser.add_argument('--vis_epoch', default=2, type=int,
help='every vis_epoch we will evaluate')
parser.add_argument('--save_epoch', default=2, type=int,
help='every save_epoch we will evaluate')
parser.add_argument('--w', default=448, type=int,
help='transform, seen as align')
parser.add_argument('--h', default=448, type=int,
help='transform, seen as align')
best_prec1 = 0
def main():
|
0.592350746, 0.262252203, 1.123231132, 0.308452073, 1.280241935, 1.009004237,
1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521,
1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783,
0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447,
0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151,
1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769,
1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581,
3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824,
1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875,
0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151,
2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581,
0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333,
1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393,
1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273,
2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412,
1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727
|
print('DFL-CNN <==> Part1 : prepare for parameters <==> Begin')
global args, best_prec1
args = parser.parse_args()
print('DFL-CNN <==> Part1 : prepare for parameters <==> Done')
print('DFL-CNN <==> Part2 : Load Network <==> Begin')
model = DFL_VGG16(k=10, nclass=176)
if args.gpu is not None:
model = nn.DataParallel(model, device_ids=range(args.gpu))
model = model.cuda()
cudnn.benchmark = True
if args.init_type is not None:
try:
init_weights(model, init_type=args.init_type)
except:
sys.exit('DFL-CNN <==> Part2 : Load Network <==> Init_weights error!')
####################
weight = np.array([0.844414894, 2.834821429, 1.725543478, 1.322916667, 1.725543478, 1.700892857,
0.739518634, 0.629960317, 0.773133117, 1.190625, 1.700892857, 3.501838235,
3.052884615, 1.368534483, 1.451981707, 1.831730769, 2.204861111, 1.133928571,
|
identifier_body
|
176_main_448.py
|
09004237,
1.725543478, 1.308379121, 0.265172606, 1.777052239, 1.469907407, 1.00052521,
1.803977273, 0.470602767, 0.960181452, 1.67693662, 1.608952703, 0.280807783,
0.9921875, 0.466911765, 1.112733645, 2.903963415, 2.768895349, 0.295440447,
0.265764509, 2.289663462, 2.38125, 1.434487952, 1.984375, 0.580792683, 1.630993151,
1.831730769, 1.860351563, 1.803977273, 2.768895349, 1.951844262, 2.126116071, 1.831730769,
1.920362903, 2.768895349, 1.777052239, 3.217905405, 2.334558824, 2.088815789, 0.519923581,
3.133223684, 1.951844262, 2.289663462, 1.133928571, 1.507120253, 1.984375, 2.334558824,
1.48828125, 1.984375, 1.5875, 1.831730769, 1.092316514, 1.469907407, 1.26662234, 1.5875,
0.661458333, 2.088815789, 1.725543478, 1.725543478, 0.376780063, 1.384447674, 1.630993151,
2.9765625, 0.607461735, 0.888526119, 2.645833333, 2.334558824, 1.777052239, 0.519923581,
0.875459559, 0.268158784, 0.278183411, 1.017628205, 1.009004237, 0.548675115, 0.264583333,
1.860351563, 1.777052239, 0.65418956, 0.712949102, 1.984375, 2.088815789, 0.365222393,
1.803977273, 0.564277251, 1.630993151, 1.889880952, 1.507120253, 1.507120253, 1.803977273,
2.429846939, 3.96875, 1.67693662, 1.630993151, 2.164772727, 2.052801724, 1.167279412,
1.803977273, 0.9525, 1.831730769, 1.700892857, 1.352982955, 1.253289474, 2.164772727,
2.164772727, 1.352982955, 2.645833333, 1.831730769, 1.630993151, 1.5875, 3.217905405,
0.804476351, 2.705965909, 1.725543478, 1.630993151, 2.429846939, 1.777052239, 0.922965116,
2.204861111, 1.123231132, 0.79375, 0.320060484, 0.403601695, 0.543664384, 0.269372172,
2.588315217, 2.289663462, 1.322916667, 1.280241935, 0.259395425, 1.951844262, 0.620117188,
0.252251059, 1.653645833, 0.519923581, 0.616904145, 4.252232143, 2.164772727, 1.920362903,
1.725543478, 2.052801724, 1.700892857, 0.413411458, 1.400735294, 1.035326087, 1.526442308,
0.881944444, 2.126116071, 2.246462264, 1.984375, 1.984375, 1.889880952, 1.860351563])
weight = torch.Tensor(weight)
weight = weight.cuda()
####################
criterion = nn.CrossEntropyLoss(weight=weight)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
|
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
|
random_line_split
|
|
ansi_up.ts
|
this.palette_256.push(gry);
}
}
private old_escape_for_html(txt:string):string
{
return txt.replace(/[&<>]/gm, (str) => {
if (str === "&") return "&";
if (str === "<") return "<";
if (str === ">") return ">";
});
}
private old_linkify(txt:string):string
{
return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => {
return `<a href="${str}">${str}</a>`;
});
}
private detect_incomplete_ansi(txt:string)
{
// Scan forwards for a potential command character
// If one exists, we must assume we are good
// [\x40-\x7e]) # the command
return !(/.*?[\x40-\x7e]/.test(txt));
}
private detect_incomplete_link(txt:string)
{
// It would be nice if Javascript RegExp supported
// a hitEnd() method
// Scan backwards for first whitespace
var found = false;
for (var i = txt.length - 1; i > 0; i--) {
if (/\s|\x1B/.test(txt[i])) {
found = true;
break;
}
}
if (!found) {
// Handle one other case
// Maybe the whole string is a URL?
if (/(https?:\/\/[^\s]+)/.test(txt))
return 0;
else
return -1;
}
// Test if possible prefix
var prefix = txt.substr(i + 1, 4);
if (prefix.length === 0) return -1;
if ("http".indexOf(prefix) === 0)
return (i + 1);
}
ansi_to(txt:string, formatter:Formatter):any {
var pkt = this._buffer + txt;
this._buffer = '';
var raw_text_pkts = pkt.split(/\x1B\[/);
if (raw_text_pkts.length === 1)
raw_text_pkts.push('');
this.handle_incomplete_sequences(raw_text_pkts);
let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split
let blocks = new Array(raw_text_pkts.length);
for (let i = 0, len = raw_text_pkts.length; i < len; ++i) {
blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this));
}
if (first_chunk.text.length > 0)
blocks.unshift(formatter.transform(first_chunk, this));
return formatter.compose(blocks, this);
}
ansi_to_html(txt:string):string
{
return this.ansi_to(txt, this.htmlFormatter);
}
ansi_to_text(txt:string):string
{
return this.ansi_to(txt, this.textFormatter);
}
private with_state(text:string):TextWithAttr {
return { bold: this.bold, fg: this.fg, bg: this.bg, text: text };
}
private handle_incomplete_sequences(chunks:string[]):void {
// COMPLEX - BEGIN
// Validate the last chunks for:
// - incomplete ANSI sequence
// - incomplete ESC
// If any of these occur, we may have to buffer
var last_chunk = chunks[chunks.length - 1];
// - incomplete ANSI sequence
if ((last_chunk.length > 0) && this.detect_incomplete_ansi(last_chunk)) {
this._buffer = "\x1B[" + last_chunk;
chunks.pop();
chunks.push('');
} else {
// - incomplete ESC
if (last_chunk.slice(-1) === "\x1B") {
this._buffer = "\x1B";
console.log("raw", chunks);
chunks.pop();
chunks.push(last_chunk.substr(0, last_chunk.length - 1));
console.log(chunks);
console.log(last_chunk);
}
// - Incomplete ESC, only one packet
if (chunks.length === 2 &&
chunks[1] === "" &&
chunks[0].slice(-1) === "\x1B") {
this._buffer = "\x1B";
last_chunk = chunks.shift();
chunks.unshift(last_chunk.substr(0, last_chunk.length - 1));
}
}
// COMPLEX - END
}
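// Rust sketch (not part of ansi_up.ts; shown for comparison only): the buffering rule
// above keeps a trailing chunk whenever it still lacks a CSI command byte (0x40-0x7e),
// mirroring detect_incomplete_ansi(). Hypothetical helper, kept in comments:
// fn is_incomplete_csi(tail: &str) -> bool {
//     !tail.bytes().any(|b| (0x40u8..=0x7e).contains(&b))
// }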
private process_ansi(block:string):TextWithAttr
{
// This must only be called with a string that started with a CSI (the string split above)
// The CSI must not be in the string. We consider this string to be a 'block'.
// It has an ANSI command at the front that affects the text that follows it.
//
// All ansi codes are typically in the following format. We parse it and focus
// specifically on the graphics commands (SGR)
//
// CONTROL-SEQUENCE-INTRODUCER CSI (ESC, '[')
// PRIVATE-MODE-CHAR (!, <, >, ?)
// Numeric parameters separated by semicolons ('0' - '9', ';')
// Intermediate-modifiers (0x20 - 0x2f)
// COMMAND-CHAR (0x40 - 0x7e)
//
// We use a regex to parse into capture groups the PRIVATE-MODE-CHAR to the COMMAND
// and the following text
if (!this._sgr_regex) {
// This regex is designed to parse an ANSI terminal CSI command. To be more specific,
// we follow the XTERM conventions vs. the various other "standards".
// http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
//
this._sgr_regex = rgx`
^ # beginning of line
([!\x3c-\x3f]?) # a private-mode char (!, <, =, >, ?)
([\d;]*) # any digits or semicolons
([\x20-\x2f]? # an intermediate modifier
[\x40-\x7e]) # the command
([\s\S]*) # any text following this CSI sequence
`;
}
let matches = block.match(this._sgr_regex);
// The regex should have handled all cases!
if (!matches) {
return this.with_state(block);
}
let orig_txt = matches[4];
if (matches[1] !== '' || matches[3] !== 'm') {
return this.with_state(orig_txt);
}
// Ok - we have a valid "SGR" (Select Graphic Rendition)
let sgr_cmds = matches[2].split(';');
// Each of these params affects the SGR state
// Why do we shift through the array instead of a forEach??
// ... because some commands consume the params that follow !
while (sgr_cmds.length > 0) {
let sgr_cmd_str = sgr_cmds.shift();
let num = parseInt(sgr_cmd_str, 10);
if (isNaN(num) || num === 0) {
this.fg = this.bg = null;
this.bold = false;
} else if (num === 1) {
this.bold = true;
} else if (num === 22) {
this.bold = false;
} else if (num === 39) {
this.fg = null;
} else if (num === 49) {
this.bg = null;
} else if ((num >= 30) && (num < 38)) {
this.fg = this.ansi_colors[0][(num - 30)];
} else if ((num >= 40) && (num < 48)) {
this.bg = this.ansi_colors[0][(num - 40)];
} else if ((num >= 90) && (num < 98)) {
this.fg = this.ansi_colors[1][(num - 90)];
} else if ((num >= 100) && (num < 108)) {
this.bg = this.ansi_colors[1][(num - 100)];
} else if (num === 38 || num === 48) {
// extended set foreground/background color
// validate that param exists
if (sgr_cmds.length > 0)
|
{
// extend color (38=fg, 48=bg)
let is_foreground = (num === 38);
let mode_cmd = sgr_cmds.shift();
// MODE '5' - 256 color palette
if (mode_cmd === '5' && sgr_cmds.length > 0) {
let palette_index = parseInt(sgr_cmds.shift(), 10);
if (palette_index >= 0 && palette_index <= 255) {
if (is_foreground)
this.fg = this.palette_256[palette_index];
else
this.bg = this.palette_256[palette_index];
}
}
// MODE '2' - True Color
if (mode_cmd === '2' && sgr_cmds.length > 2) {
let r = parseInt(sgr_cmds.shift(), 10);
|
conditional_block
|
|
ansi_up.ts
|
string {
return segments.join("");
}
};
textFormatter:Formatter = {
transform(fragment:TextWithAttr, instance:AnsiUp):string {
return fragment.text;
},
compose(segments:string[], instance:AnsiUp):string {
return segments.join("");
}
};
// 256 Colors Palette
// CSS RGB strings - ex. "255, 255, 255"
private palette_256:AU_Color[];
private fg:AU_Color;
private bg:AU_Color;
private bold:boolean;
private _use_classes:boolean;
private _escape_for_html;
private _sgr_regex:RegExp;
private _buffer:string;
constructor()
{
this.setup_256_palette();
this._use_classes = false;
this._escape_for_html = true;
this.bold = false;
this.fg = this.bg = null;
this._buffer = '';
}
set use_classes(arg:boolean)
{
this._use_classes = arg;
}
get use_classes():boolean
{
return this._use_classes;
}
set escape_for_html(arg:boolean)
{
this._escape_for_html = arg;
}
get escape_for_html():boolean
{
return this._escape_for_html;
}
private setup_256_palette():void
{
this.palette_256 = [];
// Index 0..15 : Ansi-Colors
this.ansi_colors.forEach( palette => {
palette.forEach( rec => {
this.palette_256.push(rec);
});
});
// Index 16..231 : RGB 6x6x6
// https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml
let levels = [0, 95, 135, 175, 215, 255];
for (let r = 0; r < 6; ++r) {
for (let g = 0; g < 6; ++g) {
for (let b = 0; b < 6; ++b) {
let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'};
this.palette_256.push(col);
}
}
}
// Index 232..255 : Grayscale
let grey_level = 8;
for (let i = 0; i < 24; ++i, grey_level += 10) {
let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'};
this.palette_256.push(gry);
}
}
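// Rust sketch (not part of ansi_up.ts; for comparison only): the palette built above
// is 16 named ANSI colors, a 6x6x6 cube over the levels [0, 95, 135, 175, 215, 255]
// at indices 16..231, and 24 grays (8 + 10*n) at indices 232..255. A hypothetical
// index-to-RGB helper, kept in comments:
// fn xterm256_to_rgb(idx: u8) -> (u8, u8, u8) {
//     const LEVELS: [u8; 6] = [0, 95, 135, 175, 215, 255];
//     match idx {
//         16..=231 => {
//             let i = (idx - 16) as usize;
//             (LEVELS[i / 36], LEVELS[(i / 6) % 6], LEVELS[i % 6])
//         }
//         232..=255 => {
//             let g = 8 + (idx - 232) * 10;
//             (g, g, g)
//         }
//         _ => (0, 0, 0), // 0..15 map to the named ANSI color tables above
//     }
// }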
private old_escape_for_html(txt:string):string
{
return txt.replace(/[&<>]/gm, (str) => {
if (str === "&") return "&";
if (str === "<") return "<";
if (str === ">") return ">";
});
}
private old_linkify(txt:string):string
{
return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => {
return `<a href="${str}">${str}</a>`;
});
}
private detect_incomplete_ansi(txt:string)
{
// Scan forwards for a potential command character
// If one exists, we must assume we are good
// [\x40-\x7e]) # the command
return !(/.*?[\x40-\x7e]/.test(txt));
}
private detect_incomplete_link(txt:string)
{
// It would be nice if Javascript RegExp supported
// a hitEnd() method
// Scan backwards for first whitespace
var found = false;
for (var i = txt.length - 1; i > 0; i--) {
if (/\s|\x1B/.test(txt[i])) {
found = true;
break;
}
}
if (!found) {
// Handle one other case
// Maybe the whole string is a URL?
if (/(https?:\/\/[^\s]+)/.test(txt))
return 0;
else
return -1;
}
// Test if possible prefix
var prefix = txt.substr(i + 1, 4);
if (prefix.length === 0) return -1;
if ("http".indexOf(prefix) === 0)
return (i + 1);
}
ansi_to(txt:string, formatter:Formatter):any {
var pkt = this._buffer + txt;
this._buffer = '';
var raw_text_pkts = pkt.split(/\x1B\[/);
if (raw_text_pkts.length === 1)
raw_text_pkts.push('');
this.handle_incomplete_sequences(raw_text_pkts);
let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split
let blocks = new Array(raw_text_pkts.length);
for (let i = 0, len = raw_text_pkts.length; i < len; ++i) {
blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this));
}
if (first_chunk.text.length > 0)
blocks.unshift(formatter.transform(first_chunk, this));
return formatter.compose(blocks, this);
}
ansi_to_html(txt:string):string
{
return this.ansi_to(txt, this.htmlFormatter);
}
ansi_to_text(txt:string):string
{
return this.ansi_to(txt, this.textFormatter);
}
private with_state(text:string):TextWithAttr {
return { bold: this.bold, fg: this.fg, bg: this.bg, text: text };
}
private handle_incomplete_sequences(chunks:string[]):void {
// COMPLEX - BEGIN
// Validate the last chunks for:
// - incomplete ANSI sequence
// - incomplete ESC
// If any of these occur, we may have to buffer
var last_chunk = chunks[chunks.length - 1];
// - incomplete ANSI sequence
if ((last_chunk.length > 0) && this.detect_incomplete_ansi(last_chunk)) {
this._buffer = "\x1B[" + last_chunk;
chunks.pop();
chunks.push('');
} else {
// - incomplete ESC
if (last_chunk.slice(-1) === "\x1B") {
this._buffer = "\x1B";
console.log("raw", chunks);
chunks.pop();
chunks.push(last_chunk.substr(0, last_chunk.length - 1));
console.log(chunks);
console.log(last_chunk);
}
// - Incomplete ESC, only one packet
if (chunks.length === 2 &&
chunks[1] === "" &&
chunks[0].slice(-1) === "\x1B") {
this._buffer = "\x1B";
last_chunk = chunks.shift();
chunks.unshift(last_chunk.substr(0, last_chunk.length - 1));
}
}
// COMPLEX - END
}
private process_ansi(block:string):TextWithAttr
{
// This must only be called with a string that started with a CSI (the string split above)
// The CSI must not be in the string. We consider this string to be a 'block'.
// It has an ANSI command at the front that affects the text that follows it.
//
// All ansi codes are typically in the following format. We parse it and focus
// specifically on the graphics commands (SGR)
//
// CONTROL-SEQUENCE-INTRODUCER CSI (ESC, '[')
// PRIVATE-MODE-CHAR (!, <, >, ?)
// Numeric parameters separated by semicolons ('0' - '9', ';')
// Intermediate-modifiers (0x20 - 0x2f)
// COMMAND-CHAR (0x40 - 0x7e)
//
// We use a regex to parse into capture groups the PRIVATE-MODE-CHAR to the COMMAND
// and the following text
if (!this._sgr_regex) {
// This regex is designed to parse an ANSI terminal CSI command. To be more specific,
// we follow the XTERM conventions vs. the various other "standards".
// http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
//
this._sgr_regex = rgx`
^ # beginning of line
([!\x3c-\x3f]?) # a private-mode char (!, <, =, >, ?)
([\d;]*) # any digits or semicolons
([\x20-\x2f]? # an intermediate modifier
[\x40-\x7e]) # the command
([\s\S]*) # any text following this CSI sequence
`;
}
let matches = block.match(this._sgr_regex);
// The regex should have handled all cases!
if (!matches) {
return this.with_state(block);
}
let orig_txt = matches[4];
if (matches[1] !== '' || matches[3] !== 'm') {
return this.with_state(orig_txt);
}
|
random_line_split
|
||
ansi_up.ts
|
},
{ rgb: [255, 255, 255], class_name: "ansi-white" }
],
// Bright colors
[
{ rgb: [ 85, 85, 85], class_name: "ansi-bright-black" },
{ rgb: [255, 85, 85], class_name: "ansi-bright-red" },
{ rgb: [ 0, 255, 0], class_name: "ansi-bright-green" },
{ rgb: [255, 255, 85], class_name: "ansi-bright-yellow" },
{ rgb: [ 85, 85, 255], class_name: "ansi-bright-blue" },
{ rgb: [255, 85, 255], class_name: "ansi-bright-magenta" },
{ rgb: [ 85, 255, 255], class_name: "ansi-bright-cyan" },
{ rgb: [255, 255, 255], class_name: "ansi-bright-white" }
]
];
htmlFormatter:Formatter = {
transform(fragment:TextWithAttr, instance:AnsiUp):string {
let txt = fragment.text;
if (txt.length === 0)
return txt;
if (instance._escape_for_html)
txt = instance.old_escape_for_html(txt);
// If colors not set, default style is used
if (!fragment.bold && fragment.fg === null && fragment.bg === null)
return txt;
let styles:string[] = [];
let classes:string[] = [];
let fg = fragment.fg;
let bg = fragment.bg;
// Note on bold: https://stackoverflow.com/questions/6737005/what-are-some-advantages-to-using-span-style-font-weightbold-rather-than-b?rq=1
if (fragment.bold)
styles.push('font-weight:bold')
if (!instance._use_classes) {
// USE INLINE STYLES
if (fg)
styles.push(`color:rgb(${fg.rgb.join(',')})`);
if (bg)
styles.push(`background-color:rgb(${bg.rgb})`);
} else {
// USE CLASSES
if (fg) {
if (fg.class_name !== 'truecolor') {
classes.push(`${fg.class_name}-fg`);
} else {
styles.push(`color:rgb(${fg.rgb.join(',')})`);
}
}
if (bg) {
if (bg.class_name !== 'truecolor') {
classes.push(`${bg.class_name}-bg`);
} else {
styles.push(`background-color:rgb(${bg.rgb.join(',')})`);
}
}
}
let class_string = '';
let style_string = '';
if (classes.length)
class_string = ` class="${classes.join(' ')}"`;
if (styles.length)
style_string = ` style="${styles.join(';')}"`;
return `<span${style_string}${class_string}>${txt}</span>`;
},
compose(segments:string[], instance:AnsiUp):string {
return segments.join("");
}
};
textFormatter:Formatter = {
transform(fragment:TextWithAttr, instance:AnsiUp):string {
return fragment.text;
},
compose(segments:string[], instance:AnsiUp):string {
return segments.join("");
}
};
// 256 Colors Palette
// CSS RGB strings - ex. "255, 255, 255"
private palette_256:AU_Color[];
private fg:AU_Color;
private bg:AU_Color;
private bold:boolean;
private _use_classes:boolean;
private _escape_for_html;
private _sgr_regex:RegExp;
private _buffer:string;
constructor()
{
this.setup_256_palette();
this._use_classes = false;
this._escape_for_html = true;
this.bold = false;
this.fg = this.bg = null;
this._buffer = '';
}
set use_classes(arg:boolean)
{
this._use_classes = arg;
}
get use_classes():boolean
{
return this._use_classes;
}
set escape_for_html(arg:boolean)
{
this._escape_for_html = arg;
}
get escape_for_html():boolean
|
private setup_256_palette():void
{
this.palette_256 = [];
// Index 0..15 : Ansi-Colors
this.ansi_colors.forEach( palette => {
palette.forEach( rec => {
this.palette_256.push(rec);
});
});
// Index 16..231 : RGB 6x6x6
// https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml
let levels = [0, 95, 135, 175, 215, 255];
for (let r = 0; r < 6; ++r) {
for (let g = 0; g < 6; ++g) {
for (let b = 0; b < 6; ++b) {
let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'};
this.palette_256.push(col);
}
}
}
// Index 232..255 : Grayscale
let grey_level = 8;
for (let i = 0; i < 24; ++i, grey_level += 10) {
let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'};
this.palette_256.push(gry);
}
}
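    // Illustrative helper (an assumption, not part of the original AnsiUp source). Given the
    // fill order above (16 named ANSI entries, then the 6x6x6 cube, then 24 grays), the
    // palette index of a cube coordinate (r, g, b), each in 0..5, is 16 + 36*r + 6*g + b.
    private example_cube_index(r:number, g:number, b:number):AU_Color
    {
        // indices 0..15 hold the named ANSI colors, so the RGB cube starts at index 16
        return this.palette_256[16 + (36 * r) + (6 * g) + b];
    }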
private old_escape_for_html(txt:string):string
{
return txt.replace(/[&<>]/gm, (str) => {
if (str === "&") return "&";
if (str === "<") return "<";
if (str === ">") return ">";
});
}
private old_linkify(txt:string):string
{
return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => {
return `<a href="${str}">${str}</a>`;
});
}
private detect_incomplete_ansi(txt:string)
{
// Scan forwards for a potential command character
// If one exists, we must assume we are good
// [\x40-\x7e]) # the command
return !(/.*?[\x40-\x7e]/.test(txt));
}
private detect_incomplete_link(txt:string)
{
// It would be nice if Javascript RegExp supported
// a hitEnd() method
// Scan backwards for first whitespace
var found = false;
for (var i = txt.length - 1; i > 0; i--) {
if (/\s|\x1B/.test(txt[i])) {
found = true;
break;
}
}
if (!found) {
// Handle one other case
// Maybe the whole string is a URL?
if (/(https?:\/\/[^\s]+)/.test(txt))
return 0;
else
return -1;
}
// Test if possible prefix
var prefix = txt.substr(i + 1, 4);
if (prefix.length === 0) return -1;
if ("http".indexOf(prefix) === 0)
return (i + 1);
}
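    // Worked example (illustrative sketch, not part of the original source): for a chunk that
    // ends in a possibly truncated URL, the scan above returns the index where that URL starts.
    private example_incomplete_link():number
    {
        // the last whitespace is at index 5 and the four characters after it are "http",
        // so the chunk is held back starting at index 6 until more text arrives
        return this.detect_incomplete_link("visit http://examp");
    }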
ansi_to(txt:string, formatter:Formatter):any {
var pkt = this._buffer + txt;
this._buffer = '';
var raw_text_pkts = pkt.split(/\x1B\[/);
if (raw_text_pkts.length === 1)
raw_text_pkts.push('');
this.handle_incomplete_sequences(raw_text_pkts);
let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split
let blocks = new Array(raw_text_pkts.length);
for (let i = 0, len = raw_text_pkts.length; i < len; ++i) {
blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this));
}
if (first_chunk.text.length > 0)
blocks.unshift(formatter.transform(first_chunk, this));
return formatter.compose(blocks, this);
}
ansi_to_html(txt:string):string
{
return this.ansi_to(txt, this.htmlFormatter);
}
ansi_to_text(txt:string):string
{
return this.ansi_to(txt, this.textFormatter);
}
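    // Usage sketch (illustrative only, not part of the original source): how a caller might
    // drive the two conversions above. The sample input string is an assumption.
    private example_usage():void
    {
        const au = new AnsiUp();
        au.use_classes = true;  // emit CSS classes instead of inline styles
        const html = au.ansi_to_html("\x1B[1;31mError\x1B[0m: done");
        const text = au.ansi_to_text("\x1B[1;31mError\x1B[0m: done");
        console.log(html, text);
    }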
private with_state(text:string):TextWithAttr {
return { bold: this.bold, fg: this.fg, bg: this.bg, text: text };
}
private handle_incomplete_sequences(chunks:string[]):void {
// COMPLEX - BEGIN
// Validate the last chunks for:
// - incomplete ANSI sequence
// - incomplete ESC
// If any of these
|
{
return this._escape_for_html;
}
|
identifier_body
|
ansi_up.ts
|
},
{ rgb: [255, 255, 255], class_name: "ansi-white" }
],
// Bright colors
[
{ rgb: [ 85, 85, 85], class_name: "ansi-bright-black" },
{ rgb: [255, 85, 85], class_name: "ansi-bright-red" },
{ rgb: [ 0, 255, 0], class_name: "ansi-bright-green" },
{ rgb: [255, 255, 85], class_name: "ansi-bright-yellow" },
{ rgb: [ 85, 85, 255], class_name: "ansi-bright-blue" },
{ rgb: [255, 85, 255], class_name: "ansi-bright-magenta" },
{ rgb: [ 85, 255, 255], class_name: "ansi-bright-cyan" },
{ rgb: [255, 255, 255], class_name: "ansi-bright-white" }
]
];
htmlFormatter:Formatter = {
transform(fragment:TextWithAttr, instance:AnsiUp):string {
let txt = fragment.text;
if (txt.length === 0)
return txt;
if (instance._escape_for_html)
txt = instance.old_escape_for_html(txt);
// If colors not set, default style is used
if (!fragment.bold && fragment.fg === null && fragment.bg === null)
return txt;
let styles:string[] = [];
let classes:string[] = [];
let fg = fragment.fg;
let bg = fragment.bg;
// Note on bold: https://stackoverflow.com/questions/6737005/what-are-some-advantages-to-using-span-style-font-weightbold-rather-than-b?rq=1
if (fragment.bold)
styles.push('font-weight:bold')
if (!instance._use_classes) {
// USE INLINE STYLES
if (fg)
styles.push(`color:rgb(${fg.rgb.join(',')})`);
if (bg)
styles.push(`background-color:rgb(${bg.rgb.join(',')})`);
} else {
// USE CLASSES
if (fg) {
if (fg.class_name !== 'truecolor') {
classes.push(`${fg.class_name}-fg`);
} else {
styles.push(`color:rgb(${fg.rgb.join(',')})`);
}
}
if (bg) {
if (bg.class_name !== 'truecolor') {
classes.push(`${bg.class_name}-bg`);
} else {
styles.push(`background-color:rgb(${bg.rgb.join(',')})`);
}
}
}
let class_string = '';
let style_string = '';
if (classes.length)
class_string = ` class="${classes.join(' ')}"`;
if (styles.length)
style_string = ` style="${styles.join(';')}"`;
return `<span${style_string}${class_string}>${txt}</span>`;
},
compose(segments:string[], instance:AnsiUp):string {
return segments.join("");
}
};
textFormatter:Formatter = {
transform(fragment:TextWithAttr, instance:AnsiUp):string {
return fragment.text;
},
compose(segments:string[], instance:AnsiUp):string {
return segments.join("");
}
};
// 256 Colors Palette
// CSS RGB strings - ex. "255, 255, 255"
private palette_256:AU_Color[];
private fg:AU_Color;
private bg:AU_Color;
private bold:boolean;
private _use_classes:boolean;
private _escape_for_html;
private _sgr_regex:RegExp;
private _buffer:string;
constructor()
{
this.setup_256_palette();
this._use_classes = false;
this._escape_for_html = true;
this.bold = false;
this.fg = this.bg = null;
this._buffer = '';
}
set use_classes(arg:boolean)
{
this._use_classes = arg;
}
get use_classes():boolean
{
return this._use_classes;
}
set escape_for_html(arg:boolean)
{
this._escape_for_html = arg;
}
get escape_for_html():boolean
{
return this._escape_for_html;
}
private
|
():void
{
this.palette_256 = [];
// Index 0..15 : Ansi-Colors
this.ansi_colors.forEach( palette => {
palette.forEach( rec => {
this.palette_256.push(rec);
});
});
// Index 16..231 : RGB 6x6x6
// https://gist.github.com/jasonm23/2868981#file-xterm-256color-yaml
let levels = [0, 95, 135, 175, 215, 255];
for (let r = 0; r < 6; ++r) {
for (let g = 0; g < 6; ++g) {
for (let b = 0; b < 6; ++b) {
let col = {rgb:[levels[r], levels[g], levels[b]], class_name:'truecolor'};
this.palette_256.push(col);
}
}
}
// Index 232..255 : Grayscale
let grey_level = 8;
for (let i = 0; i < 24; ++i, grey_level += 10) {
let gry = {rgb:[grey_level, grey_level, grey_level], class_name:'truecolor'};
this.palette_256.push(gry);
}
}
private old_escape_for_html(txt:string):string
{
return txt.replace(/[&<>]/gm, (str) => {
if (str === "&") return "&";
if (str === "<") return "<";
if (str === ">") return ">";
});
}
private old_linkify(txt:string):string
{
return txt.replace(/(https?:\/\/[^\s]+)/gm, (str) => {
return `<a href="${str}">${str}</a>`;
});
}
private detect_incomplete_ansi(txt:string)
{
// Scan forwards for a potential command character
// If one exists, we must assume we are good
// [\x40-\x7e]) # the command
return !(/.*?[\x40-\x7e]/.test(txt));
}
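    // Worked example (illustrative sketch, not part of the original source): the test above
    // treats a chunk as incomplete until a final byte in \x40-\x7e arrives.
    private example_incomplete_ansi():boolean[]
    {
        // "0;31" has no terminating command byte yet, so it is still incomplete (true);
        // "0;31m red" contains 'm', which terminates the CSI sequence (false).
        return [ this.detect_incomplete_ansi("0;31"), this.detect_incomplete_ansi("0;31m red") ];
    }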
private detect_incomplete_link(txt:string)
{
// It would be nice if Javascript RegExp supported
// a hitEnd() method
// Scan backwards for first whitespace
var found = false;
for (var i = txt.length - 1; i > 0; i--) {
if (/\s|\x1B/.test(txt[i])) {
found = true;
break;
}
}
if (!found) {
// Handle one other case
// Maybe the whole string is a URL?
if (/(https?:\/\/[^\s]+)/.test(txt))
return 0;
else
return -1;
}
// Test if possible prefix
var prefix = txt.substr(i + 1, 4);
if (prefix.length === 0) return -1;
if ("http".indexOf(prefix) === 0)
return (i + 1);
}
ansi_to(txt:string, formatter:Formatter):any {
var pkt = this._buffer + txt;
this._buffer = '';
var raw_text_pkts = pkt.split(/\x1B\[/);
if (raw_text_pkts.length === 1)
raw_text_pkts.push('');
this.handle_incomplete_sequences(raw_text_pkts);
let first_chunk = this.with_state(raw_text_pkts.shift()); // the first pkt is not the result of the split
let blocks = new Array(raw_text_pkts.length);
for (let i = 0, len = raw_text_pkts.length; i < len; ++i) {
blocks[i] = (formatter.transform(this.process_ansi(raw_text_pkts[i]), this));
}
if (first_chunk.text.length > 0)
blocks.unshift(formatter.transform(first_chunk, this));
return formatter.compose(blocks, this);
}
ansi_to_html(txt:string):string
{
return this.ansi_to(txt, this.htmlFormatter);
}
ansi_to_text(txt:string):string
{
return this.ansi_to(txt, this.textFormatter);
}
private with_state(text:string):TextWithAttr {
return { bold: this.bold, fg: this.fg, bg: this.bg, text: text };
}
private handle_incomplete_sequences(chunks:string[]):void {
// COMPLEX - BEGIN
// Validate the last chunks for:
// - incomplete ANSI sequence
// - incomplete ESC
// If any of these
|
setup_256_palette
|
identifier_name
|
docker_client.go
|
Details(sys, registry, username, password, "", nil, "")
if err != nil {
return errors.Wrapf(err, "error creating new docker client")
}
resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
if err != nil {
return err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusUnauthorized:
return ErrUnauthorizedForCredentials
default:
return errors.Errorf("error occured with status code %q", resp.StatusCode)
}
}
// SearchResult holds the information of each matching image
// It matches the output returned by the v1 endpoint
type SearchResult struct {
Name string `json:"name"`
Description string `json:"description"`
// StarCount states the number of stars the image has
StarCount int `json:"star_count"`
IsTrusted bool `json:"is_trusted"`
// IsAutomated states whether the image is an automated build
IsAutomated bool `json:"is_automated"`
// IsOfficial states whether the image is an official build
IsOfficial bool `json:"is_official"`
}
// SearchRegistry queries a registry for images that contain "image" in their name
// The limit is the max number of results desired
// Note: The limit value doesn't work with all registries
// for example registry.access.redhat.com returns all the results without limiting it to the limit value
func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
type V2Results struct {
// Repositories holds the results returned by the /v2/_catalog endpoint
Repositories []string `json:"repositories"`
}
type V1Results struct {
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
username, password, err := config.GetAuthentication(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
if registry == dockerHostname {
registry = dockerV1Hostname
}
client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
// Only try the v1 search endpoint if the search query is not empty. If it is
// empty skip to the v2 endpoint.
if image != "" {
// set up the query values for the v1 endpoint
u := url.URL{
Path: "/v1/search",
}
q := u.Query()
q.Set("q", image)
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
}
logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
}
return searchRes, nil
}
}
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
if err := c.detectProperties(ctx); err != nil {
return nil, err
}
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
}
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
req, err := http.NewRequest(method, url, stream)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
for n, h := range headers {
for _, hh := range h {
req.Header.Add(n, hh)
}
}
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
if auth == v2Auth {
if err := c.setupRequestAuth(req); err != nil {
return nil, err
}
}
logrus.Debugf("%s %s", method, url)
res, err := c.client.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// we're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//
// 1) docker does that as well
// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
//
// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
func (c *dockerClient) setupRequestAuth(req *http.Request) error {
if len(c.challenges) == 0 {
return nil
}
schemeNames := make([]string, 0, len(c.challenges))
for _, challenge := range c.challenges {
schemeNames = append(schemeNames, challenge.Scheme)
switch challenge.Scheme {
case "basic":
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
var scope string
if c.scope.remoteName != "" && c.scope.actions != "" {
scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
}
token, err := c.getBearerToken(req.Context(), realm, service, scope)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
}
}
logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
return nil
}
func (c *dockerClient) getB
|
earerToken(ctx
|
identifier_name
|
|
docker_client.go
|
Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
}
logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
}
return searchRes, nil
}
}
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
if err := c.detectProperties(ctx); err != nil {
return nil, err
}
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
}
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
req, err := http.NewRequest(method, url, stream)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
for n, h := range headers {
for _, hh := range h {
req.Header.Add(n, hh)
}
}
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
if auth == v2Auth {
if err := c.setupRequestAuth(req); err != nil {
return nil, err
}
}
logrus.Debugf("%s %s", method, url)
res, err := c.client.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// we're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//
// 1) docker does that as well
// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
//
// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
func (c *dockerClient) setupRequestAuth(req *http.Request) error {
if len(c.challenges) == 0 {
return nil
}
schemeNames := make([]string, 0, len(c.challenges))
for _, challenge := range c.challenges {
schemeNames = append(schemeNames, challenge.Scheme)
switch challenge.Scheme {
case "basic":
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
var scope string
if c.scope.remoteName != "" && c.scope.actions != "" {
scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
}
token, err := c.getBearerToken(req.Context(), realm, service, scope)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
}
}
logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
return nil
}
func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
}
authReq = authReq.WithContext(ctx)
getParams := authReq.URL.Query()
if c.username != "" {
getParams.Add("account", c.username)
}
if service != "" {
getParams.Add("service", service)
}
if scope != "" {
getParams.Add("scope", scope)
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
authReq.SetBasicAuth(c.username, c.password)
}
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
tr := tlsclientconfig.NewTransport()
// TODO(runcom): insecure for now to contact the external token service
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
client := &http.Client{Transport: tr}
res, err := client.Do(authReq)
if err != nil {
return nil, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusUnauthorized:
return nil, ErrUnauthorizedForCredentials
case http.StatusOK:
break
default:
return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
}
tokenBlob, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
return newBearerTokenFromJSONBlob(tokenBlob)
}
// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties(ctx context.Context) error {
if c.scheme != "" {
return nil
}
ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return err
}
defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode)
}
c.challenges = parseAuthHeader(resp.Header)
c.scheme = scheme
c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
return nil
}
err := ping("https")
if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
err = ping("http")
}
if err != nil {
err = errors.Wrap(err, "pinging docker registry returned")
if c.sys != nil && c.sys.DockerDisableV1Ping {
return err
}
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
if err != nil {
return false
}
defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
return false
}
|
conditional_block
|
|
docker_client.go
|
}
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
return token, nil
}
// this is cloned from docker/go-connections because upstream docker has changed
// it and make deps here fails otherwise.
// We'll drop this once we upgrade to docker 1.13.x deps.
func serverDefault() *tls.Config {
return &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
}
}
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on sys and hostPort.
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
if sys != nil && sys.DockerCertPath != "" {
return sys.DockerCertPath, nil
}
if sys != nil && sys.DockerPerHostCertDirPath != "" {
return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
}
var (
hostCertDir string
fullCertDirPath string
)
for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
} else {
hostCertDir = systemPerHostCertDirPath
}
fullCertDirPath = filepath.Join(hostCertDir, hostPort)
_, err := os.Stat(fullCertDirPath)
if err == nil {
break
}
if os.IsNotExist(err) {
continue
}
if os.IsPermission(err) {
logrus.Debugf("error accessing certs directory due to permissions: %v", err)
continue
}
if err != nil {
return "", err
}
}
return fullCertDirPath, nil
}
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
registry := reference.Domain(ref.ref)
username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref))
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}
sigBase, err := configuredSignatureStorageBase(sys, ref, write)
if err != nil {
return nil, err
}
remoteName := reference.Path(ref.ref)
return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
}
// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
}
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = serverDefault()
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
// undocumented and may change if docker/docker changes.
certDir, err := dockerCertDir(sys, hostName)
if err != nil {
return nil, err
}
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
return nil, err
}
if sys != nil && sys.DockerInsecureSkipTLSVerify {
tr.TLSClientConfig.InsecureSkipVerify = true
}
return &dockerClient{
sys: sys,
registry: registry,
username: username,
password: password,
client: &http.Client{Transport: tr},
signatureBase: sigBase,
scope: authScope{
actions: actions,
remoteName: remoteName,
},
}, nil
}
// CheckAuth validates the credentials by attempting to log into the registry
// returns an error if an error occurred while making the HTTP request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
if err != nil {
return errors.Wrapf(err, "error creating new docker client")
}
resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
if err != nil {
return err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusUnauthorized:
return ErrUnauthorizedForCredentials
default:
return errors.Errorf("error occured with status code %q", resp.StatusCode)
}
}
// SearchResult holds the information of each matching image
// It matches the output returned by the v1 endpoint
type SearchResult struct {
Name string `json:"name"`
Description string `json:"description"`
// StarCount states the number of stars the image has
StarCount int `json:"star_count"`
IsTrusted bool `json:"is_trusted"`
// IsAutomated states whether the image is an automated build
|
// IsOfficial states whether the image is an official build
IsOfficial bool `json:"is_official"`
}
// SearchRegistry queries a registry for images that contain "image" in their name
// The limit is the max number of results desired
// Note: The limit value doesn't work with all registries
// for example registry.access.redhat.com returns all the results without limiting it to the limit value
func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
type V2Results struct {
// Repositories holds the results returned by the /v2/_catalog endpoint
Repositories []string `json:"repositories"`
}
type V1Results struct {
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
username, password, err := config.GetAuthentication(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
if registry == dockerHostname {
registry = dockerV1Hostname
}
client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
// Only try the v1 search endpoint if the search query is not empty. If it is
// empty skip to the v2 endpoint.
if image != "" {
// set up the query values for the v1 endpoint
u := url.URL{
Path: "/v1/search",
}
q := u.Query()
q.Set("q", image)
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
}
logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp
|
IsAutomated bool `json:"is_automated"`
|
random_line_split
|
docker_client.go
|
username and password")
}
sigBase, err := configuredSignatureStorageBase(sys, ref, write)
if err != nil {
return nil, err
}
remoteName := reference.Path(ref.ref)
return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
}
// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
}
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = serverDefault()
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
// undocumented and may change if docker/docker changes.
certDir, err := dockerCertDir(sys, hostName)
if err != nil {
return nil, err
}
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
return nil, err
}
if sys != nil && sys.DockerInsecureSkipTLSVerify {
tr.TLSClientConfig.InsecureSkipVerify = true
}
return &dockerClient{
sys: sys,
registry: registry,
username: username,
password: password,
client: &http.Client{Transport: tr},
signatureBase: sigBase,
scope: authScope{
actions: actions,
remoteName: remoteName,
},
}, nil
}
// CheckAuth validates the credentials by attempting to log into the registry
// returns an error if an error occurred while making the HTTP request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
if err != nil {
return errors.Wrapf(err, "error creating new docker client")
}
resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
if err != nil {
return err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusUnauthorized:
return ErrUnauthorizedForCredentials
default:
return errors.Errorf("error occured with status code %q", resp.StatusCode)
}
}
// SearchResult holds the information of each matching image
// It matches the output returned by the v1 endpoint
type SearchResult struct {
Name string `json:"name"`
Description string `json:"description"`
// StarCount states the number of stars the image has
StarCount int `json:"star_count"`
IsTrusted bool `json:"is_trusted"`
// IsAutomated states whether the image is an automated build
IsAutomated bool `json:"is_automated"`
// IsOfficial states whether the image is an official build
IsOfficial bool `json:"is_official"`
}
// SearchRegistry queries a registry for images that contain "image" in their name
// The limit is the max number of results desired
// Note: The limit value doesn't work with all registries
// for example registry.access.redhat.com returns all the results without limiting it to the limit value
func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
type V2Results struct {
// Repositories holds the results returned by the /v2/_catalog endpoint
Repositories []string `json:"repositories"`
}
type V1Results struct {
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
username, password, err := config.GetAuthentication(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
if registry == dockerHostname {
registry = dockerV1Hostname
}
client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
// Only try the v1 search endpoint if the search query is not empty. If it is
// empty skip to the v2 endpoint.
if image != "" {
// set up the query values for the v1 endpoint
u := url.URL{
Path: "/v1/search",
}
q := u.Query()
q.Set("q", image)
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v1 endpoint %q, status code %d", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
}
logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Errorf("error getting search results from v2 endpoint %q, status code %d", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
}
return searchRes, nil
}
}
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
if err := c.detectProperties(ctx); err != nil {
return nil, err
}
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
}
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
r
|
eq, err := http.NewRequest(method, url, stream)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
for n, h := range headers {
for _, hh := range h {
req.Header.Add(n, hh)
}
}
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
if auth == v2Auth {
if err := c.setupRequestAuth(req); err != nil {
return nil, err
|
identifier_body
|
|
customs.js
|
label:""
});
var $slicknav_label;
$('#responsive-menu').slicknav({
duration: 500,
easingOpen: 'easeInExpo',
easingClose: 'easeOutExpo',
closedSymbol: '<i class="fa fa-plus"></i>',
openedSymbol: '<i class="fa fa-minus"></i>',
prependTo: '#slicknav-mobile',
allowParentLinks: true,
label:""
});
/**
* Smooth scroll to anchor
*/
$('a.anchor[href*="#"]:not([href="#"])').on("click",function() {
if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) {
var target = $(this.hash);
target = target.length ? target : $('[name=' + this.hash.slice(1) +']');
if (target.length) {
$('html,body').animate({
scrollTop: (target.offset().top - 70) // 70px offset for navbar menu
}, 1000);
return false;
}
}
});
/**
* Another Bootstrap Toggle
*/
$('.another-toggle').on("click",function() {
if( $('h4',this).hasClass('active') ){
$(this).find('.another-toggle-content').show();
}
});
$('.another-toggle h4').on("click",function() {
if( $(this).hasClass('active') ){
$(this).removeClass('active');
$(this).next('.another-toggle-content').slideUp();
} else {
$(this).addClass('active');
$(this).next('.another-toggle-content').slideDown();
}
});
/**
* Arrow for Menu has sub-menu
*/
if ($(window).width() > 992) {
$(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>");
}
/**
* Payment Option
*/
var selected2 = $("div.payment-option-form");
selected2.hide();
$("input[name$='payments']").on("click",function() {
var test = $(this).val();
selected2.hide();
$("#" + test).show();
});
/**
* Icon Change on Collapse
*/
$('.collapse.in').prev('.panel-heading').addClass('active');
$('.bootstrap-accordion, .bootstrap-toggle')
.on('show.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').addClass('active');
})
.on('hide.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').removeClass('active');
});
/**
* Back To Top
*/
var selected3 = $("#back-to-top");
$(window).scroll(function(){
if($(window).scrollTop() > 500){
selected3.fadeIn(200);
} else{
selected3.fadeOut(200);
}
});
selected3.on("click",function() {
$('html, body').animate({ scrollTop:0 }, '800');
return false;
});
/**
* Placeholder
*/
$("input, textarea").placeholder();
/**
* Bootstrap rating
*/
var selected4 = $('.rating-label');
selected4.rating();
selected4.each(function () {
$('<span class="label label-default"></span>')
.text($(this).val() || ' ')
.insertAfter(this);
|
$(this).next('.label').text($(this).val());
});
/**
* Sign-in Modal
*/
var $formLogin = $('#login-form');
var $formLost = $('#lost-form');
var $formRegister = $('#register-form');
var $divForms = $('#modal-login-form-wrapper');
var $modalAnimateTime = 300;
$('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) });
$('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); });
$('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); });
$('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); });
$('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); });
function modalAnimate ($oldForm, $newForm) {
var $oldH = $oldForm.height();
var $newH = $newForm.height();
$divForms.css("height",$oldH);
$oldForm.fadeToggle($modalAnimateTime, function(){
$divForms.animate({height: $newH}, $modalAnimateTime, function(){
$newForm.fadeToggle($modalAnimateTime);
});
});
}
/**
* Read more-less paragraph
*/
var showTotalChar = 130, showChar = "read more +", hideChar = "read less -";
$('.read-more-less').each(function() {
var content = $(this).text();
if (content.length > showTotalChar) {
var con = content.substr(0, showTotalChar);
var hcon = content.substr(showTotalChar, content.length - showTotalChar);
var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span> <a href="" class="showmoretxt">' + showChar + '</a></span>';
$(this).html(txt);
}
});
$(".showmoretxt").on("click",function() {
if ($(this).hasClass("sample")) {
$(this).removeClass("sample");
$(this).text(showChar);
} else {
$(this).addClass("sample");
$(this).text(hideChar);
}
$(this).parent().prev().toggle();
$(this).prev().toggle();
return false;
});
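// Illustrative sketch (not part of the original script): the substring split performed by the
// read-more handler above, extracted as a standalone function. The name splitReadMore is an assumption.
function splitReadMore(content, limit) {
    // the first `limit` characters stay visible; the remainder is wrapped in the hidden
    // ".morectnt" span until the "read more +" link is clicked
    return {
        visible: content.substr(0, limit),
        hidden: content.substr(limit, content.length - limit)
    };
}
// e.g. splitReadMore(longDescription, 130) -> { visible: first 130 chars, hidden: the rest }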
// SLICK SLIDER
$('.responsive').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 4,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
slidesToScroll: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial1').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 2,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 2,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial2').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 1,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
$('.tour-cats').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 6,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow
|
});
selected4.on('change', function () {
|
random_line_split
|
customs.js
|
:""
});
var $slicknav_label;
$('#responsive-menu').slicknav({
duration: 500,
easingOpen: 'easeInExpo',
easingClose: 'easeOutExpo',
closedSymbol: '<i class="fa fa-plus"></i>',
openedSymbol: '<i class="fa fa-minus"></i>',
prependTo: '#slicknav-mobile',
allowParentLinks: true,
label:""
});
/**
* Smooth scroll to anchor
*/
$('a.anchor[href*="#"]:not([href="#"])').on("click",function() {
if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) {
var target = $(this.hash);
target = target.length ? target : $('[name=' + this.hash.slice(1) +']');
if (target.length) {
$('html,body').animate({
scrollTop: (target.offset().top - 70) // 70px offset for navbar menu
}, 1000);
return false;
}
}
});
/**
* Another Bootstrap Toggle
*/
$('.another-toggle').on("click",function() {
if( $('h4',this).hasClass('active') ){
$(this).find('.another-toggle-content').show();
}
});
$('.another-toggle h4').on("click",function() {
if( $(this).hasClass('active') ){
$(this).removeClass('active');
$(this).next('.another-toggle-content').slideUp();
} else {
$(this).addClass('active');
$(this).next('.another-toggle-content').slideDown();
}
});
/**
* Arrow for Menu has sub-menu
*/
if ($(window).width() > 992) {
$(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>");
}
/**
* Payment Option
*/
var selected2 = $("div.payment-option-form");
selected2.hide();
$("input[name$='payments']").on("click",function() {
var test = $(this).val();
selected2.hide();
$("#" + test).show();
});
/**
* Icon Change on Collapse
*/
$('.collapse.in').prev('.panel-heading').addClass('active');
$('.bootstrap-accordion, .bootstrap-toggle')
.on('show.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').addClass('active');
})
.on('hide.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').removeClass('active');
});
/**
* Back To Top
*/
var selected3 = $("#back-to-top");
$(window).scroll(function(){
if($(window).scrollTop() > 500){
selected3.fadeIn(200);
} else{
selected3.fadeOut(200);
}
});
selected3.on("click",function() {
$('html, body').animate({ scrollTop:0 }, '800');
return false;
});
/**
* Placeholder
*/
$("input, textarea").placeholder();
/**
* Bootstrap rating
*/
var selected4 = $('.rating-label');
selected4.rating();
selected4.each(function () {
$('<span class="label label-default"></span>')
.text($(this).val() || ' ')
.insertAfter(this);
});
selected4.on('change', function () {
$(this).next('.label').text($(this).val());
});
/**
* Sign-in Modal
*/
var $formLogin = $('#login-form');
var $formLost = $('#lost-form');
var $formRegister = $('#register-form');
var $divForms = $('#modal-login-form-wrapper');
var $modalAnimateTime = 300;
$('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) });
$('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); });
$('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); });
$('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); });
$('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); });
function modalAnimate ($oldForm, $newForm)
|
/**
* Read more-less paragraph
*/
var showTotalChar = 130, showChar = "read more +", hideChar = "read less -";
$('.read-more-less').each(function() {
var content = $(this).text();
if (content.length > showTotalChar) {
var con = content.substr(0, showTotalChar);
var hcon = content.substr(showTotalChar, content.length - showTotalChar);
var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span> <a href="" class="showmoretxt">' + showChar + '</a></span>';
$(this).html(txt);
}
});
$(".showmoretxt").on("click",function() {
if ($(this).hasClass("sample")) {
$(this).removeClass("sample");
$(this).text(showChar);
} else {
$(this).addClass("sample");
$(this).text(hideChar);
}
$(this).parent().prev().toggle();
$(this).prev().toggle();
return false;
});
// SLICK SLIDER
$('.responsive').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 4,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
slidesToScroll: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial1').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 2,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 2,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial2').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 1,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
$('.tour-cats').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 6,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slides
|
{
var $oldH = $oldForm.height();
var $newH = $newForm.height();
$divForms.css("height",$oldH);
$oldForm.fadeToggle($modalAnimateTime, function(){
$divForms.animate({height: $newH}, $modalAnimateTime, function(){
$newForm.fadeToggle($modalAnimateTime);
});
});
}
|
identifier_body
|
customs.js
|
:""
});
var $slicknav_label;
$('#responsive-menu').slicknav({
duration: 500,
easingOpen: 'easeInExpo',
easingClose: 'easeOutExpo',
closedSymbol: '<i class="fa fa-plus"></i>',
openedSymbol: '<i class="fa fa-minus"></i>',
prependTo: '#slicknav-mobile',
allowParentLinks: true,
label:""
});
/**
* Smooth scroll to anchor
*/
$('a.anchor[href*="#"]:not([href="#"])').on("click",function() {
if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) {
var target = $(this.hash);
target = target.length ? target : $('[name=' + this.hash.slice(1) +']');
if (target.length) {
$('html,body').animate({
scrollTop: (target.offset().top - 70) // 70px offset for navbar menu
}, 1000);
return false;
}
}
});
/**
* Another Bootstrap Toggle
*/
$('.another-toggle').on("click",function() {
if( $('h4',this).hasClass('active') ){
$(this).find('.another-toggle-content').show();
}
});
$('.another-toggle h4').on("click",function() {
if( $(this).hasClass('active') ){
$(this).removeClass('active');
$(this).next('.another-toggle-content').slideUp();
} else {
$(this).addClass('active');
$(this).next('.another-toggle-content').slideDown();
}
});
/**
* Arrow for Menu has sub-menu
*/
if ($(window).width() > 992) {
$(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>");
}
/**
* Payment Option
*/
var selected2 = $("div.payment-option-form");
selected2.hide();
$("input[name$='payments']").on("click",function() {
var test = $(this).val();
selected2.hide();
$("#" + test).show();
});
/**
* Icon Change on Collapse
*/
$('.collapse.in').prev('.panel-heading').addClass('active');
$('.bootstrap-accordion, .bootstrap-toggle')
.on('show.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').addClass('active');
})
.on('hide.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').removeClass('active');
});
/**
* Back To Top
*/
var selected3 = $("#back-to-top");
$(window).scroll(function(){
if($(window).scrollTop() > 500){
selected3.fadeIn(200);
} else{
selected3.fadeOut(200);
}
});
selected3.on("click",function() {
$('html, body').animate({ scrollTop:0 }, '800');
return false;
});
/**
* Placeholder
*/
$("input, textarea").placeholder();
/**
* Bootstrap rating
*/
var selected4 = $('.rating-label');
selected4.rating();
selected4.each(function () {
$('<span class="label label-default"></span>')
.text($(this).val() || ' ')
.insertAfter(this);
});
selected4.on('change', function () {
$(this).next('.label').text($(this).val());
});
/**
* Sign-in Modal
*/
var $formLogin = $('#login-form');
var $formLost = $('#lost-form');
var $formRegister = $('#register-form');
var $divForms = $('#modal-login-form-wrapper');
var $modalAnimateTime = 300;
$('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) });
$('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); });
$('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); });
$('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); });
$('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); });
function modalAnimate ($oldForm, $newForm) {
var $oldH = $oldForm.height();
var $newH = $newForm.height();
$divForms.css("height",$oldH);
$oldForm.fadeToggle($modalAnimateTime, function(){
$divForms.animate({height: $newH}, $modalAnimateTime, function(){
$newForm.fadeToggle($modalAnimateTime);
});
});
}
/**
* Read more-less paragraph
*/
var showTotalChar = 130, showChar = "read more +", hideChar = "read less -";
$('.read-more-less').each(function() {
var content = $(this).text();
if (content.length > showTotalChar)
|
});
$(".showmoretxt").on("click",function() {
if ($(this).hasClass("sample")) {
$(this).removeClass("sample");
$(this).text(showChar);
} else {
$(this).addClass("sample");
$(this).text(hideChar);
}
$(this).parent().prev().toggle();
$(this).prev().toggle();
return false;
});
// SLICK SLIDER
$('.responsive').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 4,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
slidesToScroll: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial1').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 2,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 2,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial2').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 1,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
$('.tour-cats').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 6,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slides
|
{
var con = content.substr(0, showTotalChar);
var hcon = content.substr(showTotalChar, content.length - showTotalChar);
var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span> <a href="" class="showmoretxt">' + showChar + '</a></span>';
$(this).html(txt);
}
|
conditional_block
|
customs.js
|
label:""
});
var $slicknav_label;
$('#responsive-menu').slicknav({
duration: 500,
easingOpen: 'easeInExpo',
easingClose: 'easeOutExpo',
closedSymbol: '<i class="fa fa-plus"></i>',
openedSymbol: '<i class="fa fa-minus"></i>',
prependTo: '#slicknav-mobile',
allowParentLinks: true,
label:""
});
/**
* Smooth scroll to anchor
*/
$('a.anchor[href*="#"]:not([href="#"])').on("click",function() {
if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) {
var target = $(this.hash);
target = target.length ? target : $('[name=' + this.hash.slice(1) +']');
if (target.length) {
$('html,body').animate({
scrollTop: (target.offset().top - 70) // 70px offset for navbar menu
}, 1000);
return false;
}
}
});
/**
* Another Bootstrap Toggle
*/
$('.another-toggle').on("click",function() {
if( $('h4',this).hasClass('active') ){
$(this).find('.another-toggle-content').show();
}
});
$('.another-toggle h4').on("click",function() {
if( $(this).hasClass('active') ){
$(this).removeClass('active');
$(this).next('.another-toggle-content').slideUp();
} else {
$(this).addClass('active');
$(this).next('.another-toggle-content').slideDown();
}
});
/**
* Arrow for Menu has sub-menu
*/
if ($(window).width() > 992) {
$(".navbar-arrow ul ul > li").has("ul").children("a").append("<i class='arrow-indicator fa fa-angle-right'></i>");
}
/**
* Payment Option
*/
var selected2 = $("div.payment-option-form");
selected2.hide();
$("input[name$='payments']").on("click",function() {
var test = $(this).val();
selected2.hide();
$("#" + test).show();
});
/**
* Icon Change on Collapse
*/
$('.collapse.in').prev('.panel-heading').addClass('active');
$('.bootstrap-accordion, .bootstrap-toggle')
.on('show.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').addClass('active');
})
.on('hide.bs.collapse', function(a) {
$(a.target).prev('.panel-heading').removeClass('active');
});
/**
* Back To Top
*/
var selected3 = $("#back-to-top");
$(window).scroll(function(){
if($(window).scrollTop() > 500){
selected3.fadeIn(200);
} else{
selected3.fadeOut(200);
}
});
selected3.on("click",function() {
$('html, body').animate({ scrollTop:0 }, '800');
return false;
});
/**
* Placeholder
*/
$("input, textarea").placeholder();
/**
* Bootstrap rating
*/
var selected4 = $('.rating-label');
selected4.rating();
selected4.each(function () {
$('<span class="label label-default"></span>')
.text($(this).val() || ' ')
.insertAfter(this);
});
selected4.on('change', function () {
$(this).next('.label').text($(this).val());
});
/**
* Sign-in Modal
*/
var $formLogin = $('#login-form');
var $formLost = $('#lost-form');
var $formRegister = $('#register-form');
var $divForms = $('#modal-login-form-wrapper');
var $modalAnimateTime = 300;
$('#login_register_btn').on("click", function () { modalAnimate($formLogin, $formRegister) });
$('#register_login_btn').on("click", function () { modalAnimate($formRegister, $formLogin); });
$('#login_lost_btn').on("click", function () { modalAnimate($formLogin, $formLost); });
$('#lost_login_btn').on("click", function () { modalAnimate($formLost, $formLogin); });
$('#lost_register_btn').on("click", function () { modalAnimate($formLost, $formRegister); });
function
|
($oldForm, $newForm) {
var $oldH = $oldForm.height();
var $newH = $newForm.height();
$divForms.css("height",$oldH);
$oldForm.fadeToggle($modalAnimateTime, function(){
$divForms.animate({height: $newH}, $modalAnimateTime, function(){
$newForm.fadeToggle($modalAnimateTime);
});
});
}
/**
* Read more-less paragraph
*/
var showTotalChar = 130, showChar = "read more +", hideChar = "read less -";
$('.read-more-less').each(function() {
var content = $(this).text();
if (content.length > showTotalChar) {
var con = content.substr(0, showTotalChar);
var hcon = content.substr(showTotalChar, content.length - showTotalChar);
var txt= con + '<span class="dots">...</span><span class="morectnt"><span>' + hcon + '</span> <a href="" class="showmoretxt">' + showChar + '</a></span>';
$(this).html(txt);
}
});
$(".showmoretxt").on("click",function() {
if ($(this).hasClass("sample")) {
$(this).removeClass("sample");
$(this).text(showChar);
} else {
$(this).addClass("sample");
$(this).text(hideChar);
}
$(this).parent().prev().toggle();
$(this).prev().toggle();
return false;
});
// SLICK SLIDER
$('.responsive').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 4,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
slidesToScroll: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial1').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 2,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 2,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
// SLICK SLIDER STYLE TESTIMONIAL
$('.testimonial2').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 1,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slidesToShow: 3,
infinite: true,
dots: false
}
},
{
breakpoint: 600,
settings: {
slidesToShow: 2,
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
// You can unslick at a given breakpoint now by adding:
// settings: "unslick"
// instead of a settings object
]
});
$('.tour-cats').slick({
dots: false,
infinite: true,
speed: 300,
slidesToShow: 6,
slidesToScroll: 1,
responsive: [
{
breakpoint: 1024,
settings: {
slides
|
modalAnimate
|
identifier_name
|
recombine.go
|
func NewConfigWithID(operatorID string) *Config {
return &Config{
TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType),
MaxBatchSize: 1000,
MaxSources: 1000,
CombineWith: defaultCombineWith,
OverwriteWith: "oldest",
ForceFlushTimeout: 5 * time.Second,
SourceIdentifier: entry.NewAttributeField("file.path"),
}
}
// Config is the configuration of a recombine operator
type Config struct {
helper.TransformerConfig `mapstructure:",squash"`
IsFirstEntry string `mapstructure:"is_first_entry"`
IsLastEntry string `mapstructure:"is_last_entry"`
MaxBatchSize int `mapstructure:"max_batch_size"`
CombineField entry.Field `mapstructure:"combine_field"`
CombineWith string `mapstructure:"combine_with"`
SourceIdentifier entry.Field `mapstructure:"source_identifier"`
OverwriteWith string `mapstructure:"overwrite_with"`
ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"`
MaxSources int `mapstructure:"max_sources"`
MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"`
}
// Build creates a new Transformer from a config
func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
transformer, err := c.TransformerConfig.Build(logger)
if err != nil {
return nil, fmt.Errorf("failed to build transformer config: %w", err)
}
if c.IsLastEntry != "" && c.IsFirstEntry != "" {
return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set")
}
if c.IsLastEntry == "" && c.IsFirstEntry == "" {
return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set")
}
var matchesFirst bool
var prog *vm.Program
if c.IsFirstEntry != "" {
matchesFirst = true
prog, err = helper.ExprCompileBool(c.IsFirstEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_first_entry: %w", err)
}
} else {
matchesFirst = false
prog, err = helper.ExprCompileBool(c.IsLastEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_last_entry: %w", err)
}
}
if c.CombineField.FieldInterface == nil {
return nil, fmt.Errorf("missing required argument 'combine_field'")
}
var overwriteWithOldest bool
switch c.OverwriteWith {
case "newest":
overwriteWithOldest = false
case "oldest", "":
overwriteWithOldest = true
default:
return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith)
}
return &Transformer{
TransformerOperator: transformer,
matchFirstLine: matchesFirst,
prog: prog,
maxBatchSize: c.MaxBatchSize,
maxSources: c.MaxSources,
overwriteWithOldest: overwriteWithOldest,
batchMap: make(map[string]*sourceBatch),
batchPool: sync.Pool{
New: func() interface{} {
return &sourceBatch{
entries: []*entry.Entry{},
recombined: &bytes.Buffer{},
}
},
},
combineField: c.CombineField,
combineWith: c.CombineWith,
forceFlushTimeout: c.ForceFlushTimeout,
ticker: time.NewTicker(c.ForceFlushTimeout),
chClose: make(chan struct{}),
sourceIdentifier: c.SourceIdentifier,
maxLogSize: int64(c.MaxLogSize),
}, nil
}
// Transformer is an operator that combines a field from consecutive log entries
// into a single entry.
type Transformer struct {
helper.TransformerOperator
matchFirstLine bool
prog *vm.Program
maxBatchSize int
maxSources int
overwriteWithOldest bool
combineField entry.Field
combineWith string
ticker *time.Ticker
forceFlushTimeout time.Duration
chClose chan struct{}
sourceIdentifier entry.Field
sync.Mutex
batchPool sync.Pool
batchMap map[string]*sourceBatch
maxLogSize int64
}
// sourceBatch contains the status info of a batch
type sourceBatch struct {
entries []*entry.Entry
recombined *bytes.Buffer
firstEntryObservedTime time.Time
}
func (r *Transformer) Start(_ operator.Persister) error {
go r.flushLoop()
return nil
}
func (r *Transformer)
|
() {
for {
select {
case <-r.ticker.C:
r.Lock()
timeNow := time.Now()
for source, batch := range r.batchMap {
timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime)
if timeSinceFirstEntry < r.forceFlushTimeout {
continue
}
if err := r.flushSource(source, true); err != nil {
r.Errorf("there was error flushing combined logs %s", err)
}
}
// check every 1/5 forceFlushTimeout
r.ticker.Reset(r.forceFlushTimeout / 5)
r.Unlock()
case <-r.chClose:
r.ticker.Stop()
return
}
}
}
func (r *Transformer) Stop() error {
r.Lock()
defer r.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
r.flushUncombined(ctx)
close(r.chClose)
return nil
}
const DefaultSourceIdentifier = "DefaultSourceIdentifier"
func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error {
// Lock the recombine operator because process can't run concurrently
r.Lock()
defer r.Unlock()
// Get the environment for executing the expression.
// In the future, we may want to provide access to the currently
// batched entries so users can do comparisons to other entries
// rather than just use absolute rules.
env := helper.GetExprEnv(e)
defer helper.PutExprEnv(env)
m, err := expr.Run(r.prog, env)
if err != nil {
return r.HandleEntryError(ctx, e, err)
}
// this is guaranteed to be a boolean because of expr.AsBool
matches := m.(bool)
var s string
err = e.Read(r.sourceIdentifier, &s)
if err != nil {
r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources")
s = DefaultSourceIdentifier
}
if s == "" {
s = DefaultSourceIdentifier
}
switch {
// This is the first entry in the next batch
case matches && r.matchIndicatesFirst():
// Flush the existing batch
err := r.flushSource(s, true)
if err != nil {
return err
}
// Add the current log to the new batch
r.addToBatch(ctx, e, s)
return nil
// This is the last entry in a complete batch
case matches && r.matchIndicatesLast():
fallthrough
// When matching on first entry, never batch partial first. Just emit immediately
case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil:
r.addToBatch(ctx, e, s)
return r.flushSource(s, true)
}
// This is neither the first entry of a new log,
// nor the last entry of a log, so just add it to the batch
r.addToBatch(ctx, e, s)
return nil
}
func (r *Transformer) matchIndicatesFirst() bool {
return r.matchFirstLine
}
func (r *Transformer) matchIndicatesLast() bool {
return !r.matchFirstLine
}
// addToBatch adds the current entry to the current batch of entries that will be combined
func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) {
batch, ok := r.batchMap[source]
if !ok {
batch = r.addNewBatch(source, e)
if len(r.batchMap) >= r.maxSources {
r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter")
r.flushUncombined(context.Background())
return
}
} else {
// If the length of the batch is 0, this batch was flushed previously due to triggering size limit.
// In this case, the firstEntryObservedTime should be updated to reset the timeout
if len(batch.entries) == 0 {
batch.firstEntryObservedTime = e.ObservedTimestamp
}
batch.entries = append(batch.entries, e)
}
// Combine the combineField of each entry in the batch,
// separated by newlines
var s string
err := e.Read(r.combineField, &s)
if err != nil {
r.Errorf("entry does not contain the combine_field")
return
}
if batch.recombined.Len() > 0 {
batch.recombined.WriteString(r.combineWith)
}
batch.recombined.WriteString(s)
if (r.maxLogSize > 0 && int64(batch.recombined.Len())
|
flushLoop
|
identifier_name
|
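The recombine.go rows above all share the same force-flush loop: a ticker fires, every per-source batch whose first entry is older than force_flush_period is flushed, and the ticker is then re-armed at a fifth of that period. Below is a minimal, self-contained sketch of that pattern; the flusher and batch types, the map key, and the printed output are illustrative stand-ins, not the operator's real API.

package main

import (
	"fmt"
	"sync"
	"time"
)

// batch is a stand-in for the operator's per-source state.
type batch struct {
	firstEntryObservedTime time.Time
	lines                  []string
}

type flusher struct {
	sync.Mutex
	timeout time.Duration
	ticker  *time.Ticker
	done    chan struct{}
	batches map[string]*batch
}

// flushLoop mirrors the loop above: on every tick, flush any batch whose first
// entry is older than the timeout, then re-arm the ticker at 1/5 of the
// timeout so stale batches are noticed promptly.
func (f *flusher) flushLoop() {
	for {
		select {
		case <-f.ticker.C:
			f.Lock()
			now := time.Now()
			for source, b := range f.batches {
				if now.Sub(b.firstEntryObservedTime) < f.timeout {
					continue
				}
				fmt.Printf("flushing %q: %v\n", source, b.lines)
				delete(f.batches, source)
			}
			f.ticker.Reset(f.timeout / 5)
			f.Unlock()
		case <-f.done:
			f.ticker.Stop()
			return
		}
	}
}

func main() {
	f := &flusher{
		timeout: 200 * time.Millisecond,
		ticker:  time.NewTicker(200 * time.Millisecond),
		done:    make(chan struct{}),
		batches: map[string]*batch{
			"file.log": {firstEntryObservedTime: time.Now(), lines: []string{"line 1", "line 2"}},
		},
	}
	go f.flushLoop()
	time.Sleep(600 * time.Millisecond)
	close(f.done)
}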
recombine.go
|
}
// NewConfigWithID creates a new recombine config with default values
func NewConfigWithID(operatorID string) *Config {
return &Config{
TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType),
MaxBatchSize: 1000,
MaxSources: 1000,
CombineWith: defaultCombineWith,
OverwriteWith: "oldest",
ForceFlushTimeout: 5 * time.Second,
SourceIdentifier: entry.NewAttributeField("file.path"),
}
}
// Config is the configuration of a recombine operator
type Config struct {
helper.TransformerConfig `mapstructure:",squash"`
IsFirstEntry string `mapstructure:"is_first_entry"`
IsLastEntry string `mapstructure:"is_last_entry"`
MaxBatchSize int `mapstructure:"max_batch_size"`
CombineField entry.Field `mapstructure:"combine_field"`
CombineWith string `mapstructure:"combine_with"`
SourceIdentifier entry.Field `mapstructure:"source_identifier"`
OverwriteWith string `mapstructure:"overwrite_with"`
ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"`
MaxSources int `mapstructure:"max_sources"`
MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"`
}
// Build creates a new Transformer from a config
func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
transformer, err := c.TransformerConfig.Build(logger)
if err != nil {
return nil, fmt.Errorf("failed to build transformer config: %w", err)
}
if c.IsLastEntry != "" && c.IsFirstEntry != "" {
return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set")
}
if c.IsLastEntry == "" && c.IsFirstEntry == "" {
return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set")
}
var matchesFirst bool
var prog *vm.Program
if c.IsFirstEntry != "" {
matchesFirst = true
prog, err = helper.ExprCompileBool(c.IsFirstEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_first_entry: %w", err)
}
} else {
matchesFirst = false
prog, err = helper.ExprCompileBool(c.IsLastEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_last_entry: %w", err)
}
}
if c.CombineField.FieldInterface == nil {
return nil, fmt.Errorf("missing required argument 'combine_field'")
}
var overwriteWithOldest bool
switch c.OverwriteWith {
case "newest":
overwriteWithOldest = false
case "oldest", "":
overwriteWithOldest = true
default:
return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith)
}
return &Transformer{
TransformerOperator: transformer,
matchFirstLine: matchesFirst,
prog: prog,
maxBatchSize: c.MaxBatchSize,
maxSources: c.MaxSources,
overwriteWithOldest: overwriteWithOldest,
batchMap: make(map[string]*sourceBatch),
batchPool: sync.Pool{
New: func() interface{} {
return &sourceBatch{
entries: []*entry.Entry{},
recombined: &bytes.Buffer{},
}
},
},
combineField: c.CombineField,
combineWith: c.CombineWith,
forceFlushTimeout: c.ForceFlushTimeout,
ticker: time.NewTicker(c.ForceFlushTimeout),
chClose: make(chan struct{}),
sourceIdentifier: c.SourceIdentifier,
maxLogSize: int64(c.MaxLogSize),
}, nil
}
// Transformer is an operator that combines a field from consecutive log entries
// into a single entry.
type Transformer struct {
helper.TransformerOperator
matchFirstLine bool
prog *vm.Program
maxBatchSize int
maxSources int
overwriteWithOldest bool
combineField entry.Field
combineWith string
ticker *time.Ticker
forceFlushTimeout time.Duration
chClose chan struct{}
sourceIdentifier entry.Field
sync.Mutex
batchPool sync.Pool
batchMap map[string]*sourceBatch
maxLogSize int64
}
// sourceBatch contains the status info of a batch
type sourceBatch struct {
entries []*entry.Entry
recombined *bytes.Buffer
firstEntryObservedTime time.Time
}
func (r *Transformer) Start(_ operator.Persister) error {
go r.flushLoop()
return nil
}
func (r *Transformer) flushLoop() {
for {
select {
case <-r.ticker.C:
r.Lock()
timeNow := time.Now()
for source, batch := range r.batchMap {
timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime)
if timeSinceFirstEntry < r.forceFlushTimeout {
continue
}
if err := r.flushSource(source, true); err != nil {
r.Errorf("there was error flushing combined logs %s", err)
}
}
// check every 1/5 forceFlushTimeout
r.ticker.Reset(r.forceFlushTimeout / 5)
r.Unlock()
case <-r.chClose:
r.ticker.Stop()
return
}
}
}
func (r *Transformer) Stop() error {
r.Lock()
defer r.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
r.flushUncombined(ctx)
close(r.chClose)
return nil
}
const DefaultSourceIdentifier = "DefaultSourceIdentifier"
func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error {
// Lock the recombine operator because process can't run concurrently
r.Lock()
defer r.Unlock()
// Get the environment for executing the expression.
// In the future, we may want to provide access to the currently
// batched entries so users can do comparisons to other entries
// rather than just use absolute rules.
env := helper.GetExprEnv(e)
defer helper.PutExprEnv(env)
m, err := expr.Run(r.prog, env)
if err != nil {
return r.HandleEntryError(ctx, e, err)
}
// this is guaranteed to be a boolean because of expr.AsBool
matches := m.(bool)
var s string
err = e.Read(r.sourceIdentifier, &s)
if err != nil {
r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources")
s = DefaultSourceIdentifier
}
if s == "" {
s = DefaultSourceIdentifier
}
switch {
// This is the first entry in the next batch
case matches && r.matchIndicatesFirst():
// Flush the existing batch
err := r.flushSource(s, true)
if err != nil {
return err
}
// Add the current log to the new batch
r.addToBatch(ctx, e, s)
return nil
// This is the last entry in a complete batch
case matches && r.matchIndicatesLast():
fallthrough
// When matching on first entry, never batch partial first. Just emit immediately
case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil:
r.addToBatch(ctx, e, s)
return r.flushSource(s, true)
}
// This is neither the first entry of a new log,
// nor the last entry of a log, so just add it to the batch
r.addToBatch(ctx, e, s)
return nil
}
func (r *Transformer) matchIndicatesFirst() bool {
return r.matchFirstLine
}
func (r *Transformer) matchIndicatesLast() bool {
return !r.matchFirstLine
}
// addToBatch adds the current entry to the current batch of entries that will be combined
func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) {
batch, ok := r.batchMap[source]
if !ok {
batch = r.addNewBatch(source, e)
if len(r.batchMap) >= r.maxSources {
r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter")
r.flushUncombined(context.Background())
return
}
} else {
// If the length of the batch is 0, this batch was flushed previously due to triggering size limit.
// In this case, the firstEntryObservedTime should be updated to reset the timeout
if len(batch.entries) == 0 {
batch.firstEntryObservedTime = e.ObservedTimestamp
}
batch.entries = append(batch.entries, e)
}
// Combine the combineField of each entry in the batch,
// separated by newlines
var s string
err := e.Read(r.combineField, &s)
if err != nil {
r.Errorf("entry does not contain the combine_field")
return
}
if batch.recombined.Len() > 0 {
batch.recombined.WriteString(r.combineWith)
}
batch.re
|
return NewConfigWithID(operatorType)
|
random_line_split
|
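addToBatch in the recombine.go listings grows a per-source buffer, writing combine_with only between entries, and the surrounding rows show a max_log_size guard whose condition is cut off in this dump. The sketch below reproduces that buffer-building step under the assumption that reaching the cap triggers an early flush; appendWithCap and its flush callback are hypothetical names, not the operator's API.

package main

import (
	"bytes"
	"fmt"
)

// appendWithCap joins entries with a separator and, once an optional size cap
// is reached, emits the buffer early. Flushing exactly at the cap is an
// assumption; the original condition is truncated in the listing above.
func appendWithCap(buf *bytes.Buffer, s, sep string, maxSize int64, flush func(string)) {
	if buf.Len() > 0 {
		buf.WriteString(sep)
	}
	buf.WriteString(s)
	if maxSize > 0 && int64(buf.Len()) >= maxSize {
		flush(buf.String())
		buf.Reset()
	}
}

func main() {
	var buf bytes.Buffer
	flush := func(s string) { fmt.Printf("flushed %d bytes: %q\n", len(s), s) }
	for _, line := range []string{"panic: oops", "goroutine 1 [running]:", "main.main()"} {
		appendWithCap(&buf, line, "\n", 40, flush)
	}
	if buf.Len() > 0 {
		flush(buf.String()) // emit whatever is left
	}
}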
|
recombine.go
|
func NewConfigWithID(operatorID string) *Config {
return &Config{
TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType),
MaxBatchSize: 1000,
MaxSources: 1000,
CombineWith: defaultCombineWith,
OverwriteWith: "oldest",
ForceFlushTimeout: 5 * time.Second,
SourceIdentifier: entry.NewAttributeField("file.path"),
}
}
// Config is the configuration of a recombine operator
type Config struct {
helper.TransformerConfig `mapstructure:",squash"`
IsFirstEntry string `mapstructure:"is_first_entry"`
IsLastEntry string `mapstructure:"is_last_entry"`
MaxBatchSize int `mapstructure:"max_batch_size"`
CombineField entry.Field `mapstructure:"combine_field"`
CombineWith string `mapstructure:"combine_with"`
SourceIdentifier entry.Field `mapstructure:"source_identifier"`
OverwriteWith string `mapstructure:"overwrite_with"`
ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"`
MaxSources int `mapstructure:"max_sources"`
MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"`
}
// Build creates a new Transformer from a config
func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
transformer, err := c.TransformerConfig.Build(logger)
if err != nil {
return nil, fmt.Errorf("failed to build transformer config: %w", err)
}
if c.IsLastEntry != "" && c.IsFirstEntry != "" {
return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set")
}
if c.IsLastEntry == "" && c.IsFirstEntry == "" {
return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set")
}
var matchesFirst bool
var prog *vm.Program
if c.IsFirstEntry != "" {
matchesFirst = true
prog, err = helper.ExprCompileBool(c.IsFirstEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_first_entry: %w", err)
}
} else {
matchesFirst = false
prog, err = helper.ExprCompileBool(c.IsLastEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_last_entry: %w", err)
}
}
if c.CombineField.FieldInterface == nil {
return nil, fmt.Errorf("missing required argument 'combine_field'")
}
var overwriteWithOldest bool
switch c.OverwriteWith {
case "newest":
overwriteWithOldest = false
case "oldest", "":
overwriteWithOldest = true
default:
return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith)
}
return &Transformer{
TransformerOperator: transformer,
matchFirstLine: matchesFirst,
prog: prog,
maxBatchSize: c.MaxBatchSize,
maxSources: c.MaxSources,
overwriteWithOldest: overwriteWithOldest,
batchMap: make(map[string]*sourceBatch),
batchPool: sync.Pool{
New: func() interface{} {
return &sourceBatch{
entries: []*entry.Entry{},
recombined: &bytes.Buffer{},
}
},
},
combineField: c.CombineField,
combineWith: c.CombineWith,
forceFlushTimeout: c.ForceFlushTimeout,
ticker: time.NewTicker(c.ForceFlushTimeout),
chClose: make(chan struct{}),
sourceIdentifier: c.SourceIdentifier,
maxLogSize: int64(c.MaxLogSize),
}, nil
}
// Transformer is an operator that combines a field from consecutive log entries
// into a single entry.
type Transformer struct {
helper.TransformerOperator
matchFirstLine bool
prog *vm.Program
maxBatchSize int
maxSources int
overwriteWithOldest bool
combineField entry.Field
combineWith string
ticker *time.Ticker
forceFlushTimeout time.Duration
chClose chan struct{}
sourceIdentifier entry.Field
sync.Mutex
batchPool sync.Pool
batchMap map[string]*sourceBatch
maxLogSize int64
}
// sourceBatch contains the status info of a batch
type sourceBatch struct {
entries []*entry.Entry
recombined *bytes.Buffer
firstEntryObservedTime time.Time
}
func (r *Transformer) Start(_ operator.Persister) error {
go r.flushLoop()
return nil
}
func (r *Transformer) flushLoop() {
for {
select {
case <-r.ticker.C:
r.Lock()
timeNow := time.Now()
for source, batch := range r.batchMap {
timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime)
if timeSinceFirstEntry < r.forceFlushTimeout {
continue
}
if err := r.flushSource(source, true); err != nil {
r.Errorf("there was error flushing combined logs %s", err)
}
}
// check every 1/5 forceFlushTimeout
r.ticker.Reset(r.forceFlushTimeout / 5)
r.Unlock()
case <-r.chClose:
r.ticker.Stop()
return
}
}
}
func (r *Transformer) Stop() error {
r.Lock()
defer r.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
r.flushUncombined(ctx)
close(r.chClose)
return nil
}
const DefaultSourceIdentifier = "DefaultSourceIdentifier"
func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error
|
err = e.Read(r.sourceIdentifier, &s)
if err != nil {
r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources")
s = DefaultSourceIdentifier
}
if s == "" {
s = DefaultSourceIdentifier
}
switch {
// This is the first entry in the next batch
case matches && r.matchIndicatesFirst():
// Flush the existing batch
err := r.flushSource(s, true)
if err != nil {
return err
}
// Add the current log to the new batch
r.addToBatch(ctx, e, s)
return nil
// This is the last entry in a complete batch
case matches && r.matchIndicatesLast():
fallthrough
// When matching on first entry, never batch partial first. Just emit immediately
case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil:
r.addToBatch(ctx, e, s)
return r.flushSource(s, true)
}
// This is neither the first entry of a new log,
// nor the last entry of a log, so just add it to the batch
r.addToBatch(ctx, e, s)
return nil
}
func (r *Transformer) matchIndicatesFirst() bool {
return r.matchFirstLine
}
func (r *Transformer) matchIndicatesLast() bool {
return !r.matchFirstLine
}
// addToBatch adds the current entry to the current batch of entries that will be combined
func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) {
batch, ok := r.batchMap[source]
if !ok {
batch = r.addNewBatch(source, e)
if len(r.batchMap) >= r.maxSources {
r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter")
r.flushUncombined(context.Background())
return
}
} else {
// If the length of the batch is 0, this batch was flushed previously due to triggering size limit.
// In this case, the firstEntryObservedTime should be updated to reset the timeout
if len(batch.entries) == 0 {
batch.firstEntryObservedTime = e.ObservedTimestamp
}
batch.entries = append(batch.entries, e)
}
// Combine the combineField of each entry in the batch,
// separated by newlines
var s string
err := e.Read(r.combineField, &s)
if err != nil {
r.Errorf("entry does not contain the combine_field")
return
}
if batch.recombined.Len() > 0 {
batch.recombined.WriteString(r.combineWith)
}
batch.recombined.WriteString(s)
if (r.maxLogSize > 0 && int64(batch.recombined.Len()) >
|
{
// Lock the recombine operator because process can't run concurrently
r.Lock()
defer r.Unlock()
// Get the environment for executing the expression.
// In the future, we may want to provide access to the currently
// batched entries so users can do comparisons to other entries
// rather than just use absolute rules.
env := helper.GetExprEnv(e)
defer helper.PutExprEnv(env)
m, err := expr.Run(r.prog, env)
if err != nil {
return r.HandleEntryError(ctx, e, err)
}
// this is guaranteed to be a boolean because of expr.AsBool
matches := m.(bool)
var s string
|
identifier_body
|
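The switch in Process encodes the difference between matching on the first and on the last line of a multi-line log. The helper below restates those branches as plain decisions so the control flow is easier to follow; it is a paraphrase of the logic above with hypothetical names, not code from the operator.

package main

import "fmt"

// action spells out the branches of Process: when matching on the first line,
// a match closes the previous batch and starts a new one; when matching on the
// last line, a match completes the current batch; a non-matching line with no
// open batch (first-line mode) is emitted immediately; anything else is
// appended to the open batch.
func action(matches, matchFirstLine, batchExists bool) string {
	switch {
	case matches && matchFirstLine:
		return "flush existing batch, then start a new batch with this entry"
	case matches && !matchFirstLine:
		return "append this entry, then flush the completed batch"
	case !matches && matchFirstLine && !batchExists:
		return "no open batch to attach to: emit this entry immediately"
	default:
		return "append this entry to the open batch"
	}
}

func main() {
	fmt.Println(action(true, true, true))   // first-line match
	fmt.Println(action(true, false, true))  // last-line match
	fmt.Println(action(false, true, false)) // stray line before any first line
	fmt.Println(action(false, true, true))  // continuation line
}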
recombine.go
|
func NewConfigWithID(operatorID string) *Config {
return &Config{
TransformerConfig: helper.NewTransformerConfig(operatorID, operatorType),
MaxBatchSize: 1000,
MaxSources: 1000,
CombineWith: defaultCombineWith,
OverwriteWith: "oldest",
ForceFlushTimeout: 5 * time.Second,
SourceIdentifier: entry.NewAttributeField("file.path"),
}
}
// Config is the configuration of a recombine operator
type Config struct {
helper.TransformerConfig `mapstructure:",squash"`
IsFirstEntry string `mapstructure:"is_first_entry"`
IsLastEntry string `mapstructure:"is_last_entry"`
MaxBatchSize int `mapstructure:"max_batch_size"`
CombineField entry.Field `mapstructure:"combine_field"`
CombineWith string `mapstructure:"combine_with"`
SourceIdentifier entry.Field `mapstructure:"source_identifier"`
OverwriteWith string `mapstructure:"overwrite_with"`
ForceFlushTimeout time.Duration `mapstructure:"force_flush_period"`
MaxSources int `mapstructure:"max_sources"`
MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"`
}
// Build creates a new Transformer from a config
func (c *Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
transformer, err := c.TransformerConfig.Build(logger)
if err != nil {
return nil, fmt.Errorf("failed to build transformer config: %w", err)
}
if c.IsLastEntry != "" && c.IsFirstEntry != ""
|
if c.IsLastEntry == "" && c.IsFirstEntry == "" {
return nil, fmt.Errorf("one of is_first_entry and is_last_entry must be set")
}
var matchesFirst bool
var prog *vm.Program
if c.IsFirstEntry != "" {
matchesFirst = true
prog, err = helper.ExprCompileBool(c.IsFirstEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_first_entry: %w", err)
}
} else {
matchesFirst = false
prog, err = helper.ExprCompileBool(c.IsLastEntry)
if err != nil {
return nil, fmt.Errorf("failed to compile is_last_entry: %w", err)
}
}
if c.CombineField.FieldInterface == nil {
return nil, fmt.Errorf("missing required argument 'combine_field'")
}
var overwriteWithOldest bool
switch c.OverwriteWith {
case "newest":
overwriteWithOldest = false
case "oldest", "":
overwriteWithOldest = true
default:
return nil, fmt.Errorf("invalid value '%s' for parameter 'overwrite_with'", c.OverwriteWith)
}
return &Transformer{
TransformerOperator: transformer,
matchFirstLine: matchesFirst,
prog: prog,
maxBatchSize: c.MaxBatchSize,
maxSources: c.MaxSources,
overwriteWithOldest: overwriteWithOldest,
batchMap: make(map[string]*sourceBatch),
batchPool: sync.Pool{
New: func() interface{} {
return &sourceBatch{
entries: []*entry.Entry{},
recombined: &bytes.Buffer{},
}
},
},
combineField: c.CombineField,
combineWith: c.CombineWith,
forceFlushTimeout: c.ForceFlushTimeout,
ticker: time.NewTicker(c.ForceFlushTimeout),
chClose: make(chan struct{}),
sourceIdentifier: c.SourceIdentifier,
maxLogSize: int64(c.MaxLogSize),
}, nil
}
// Transformer is an operator that combines a field from consecutive log entries
// into a single entry.
type Transformer struct {
helper.TransformerOperator
matchFirstLine bool
prog *vm.Program
maxBatchSize int
maxSources int
overwriteWithOldest bool
combineField entry.Field
combineWith string
ticker *time.Ticker
forceFlushTimeout time.Duration
chClose chan struct{}
sourceIdentifier entry.Field
sync.Mutex
batchPool sync.Pool
batchMap map[string]*sourceBatch
maxLogSize int64
}
// sourceBatch contains the status info of a batch
type sourceBatch struct {
entries []*entry.Entry
recombined *bytes.Buffer
firstEntryObservedTime time.Time
}
func (r *Transformer) Start(_ operator.Persister) error {
go r.flushLoop()
return nil
}
func (r *Transformer) flushLoop() {
for {
select {
case <-r.ticker.C:
r.Lock()
timeNow := time.Now()
for source, batch := range r.batchMap {
timeSinceFirstEntry := timeNow.Sub(batch.firstEntryObservedTime)
if timeSinceFirstEntry < r.forceFlushTimeout {
continue
}
if err := r.flushSource(source, true); err != nil {
r.Errorf("there was error flushing combined logs %s", err)
}
}
// check every 1/5 forceFlushTimeout
r.ticker.Reset(r.forceFlushTimeout / 5)
r.Unlock()
case <-r.chClose:
r.ticker.Stop()
return
}
}
}
func (r *Transformer) Stop() error {
r.Lock()
defer r.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
r.flushUncombined(ctx)
close(r.chClose)
return nil
}
const DefaultSourceIdentifier = "DefaultSourceIdentifier"
func (r *Transformer) Process(ctx context.Context, e *entry.Entry) error {
// Lock the recombine operator because process can't run concurrently
r.Lock()
defer r.Unlock()
// Get the environment for executing the expression.
// In the future, we may want to provide access to the currently
// batched entries so users can do comparisons to other entries
// rather than just use absolute rules.
env := helper.GetExprEnv(e)
defer helper.PutExprEnv(env)
m, err := expr.Run(r.prog, env)
if err != nil {
return r.HandleEntryError(ctx, e, err)
}
// this is guaranteed to be a boolean because of expr.AsBool
matches := m.(bool)
var s string
err = e.Read(r.sourceIdentifier, &s)
if err != nil {
r.Warn("entry does not contain the source_identifier, so it may be pooled with other sources")
s = DefaultSourceIdentifier
}
if s == "" {
s = DefaultSourceIdentifier
}
switch {
// This is the first entry in the next batch
case matches && r.matchIndicatesFirst():
// Flush the existing batch
err := r.flushSource(s, true)
if err != nil {
return err
}
// Add the current log to the new batch
r.addToBatch(ctx, e, s)
return nil
// This is the last entry in a complete batch
case matches && r.matchIndicatesLast():
fallthrough
// When matching on first entry, never batch partial first. Just emit immediately
case !matches && r.matchIndicatesFirst() && r.batchMap[s] == nil:
r.addToBatch(ctx, e, s)
return r.flushSource(s, true)
}
// This is neither the first entry of a new log,
// nor the last entry of a log, so just add it to the batch
r.addToBatch(ctx, e, s)
return nil
}
func (r *Transformer) matchIndicatesFirst() bool {
return r.matchFirstLine
}
func (r *Transformer) matchIndicatesLast() bool {
return !r.matchFirstLine
}
// addToBatch adds the current entry to the current batch of entries that will be combined
func (r *Transformer) addToBatch(_ context.Context, e *entry.Entry, source string) {
batch, ok := r.batchMap[source]
if !ok {
batch = r.addNewBatch(source, e)
if len(r.batchMap) >= r.maxSources {
r.Error("Batched source exceeds max source size. Flushing all batched logs. Consider increasing max_sources parameter")
r.flushUncombined(context.Background())
return
}
} else {
// If the length of the batch is 0, this batch was flushed previously due to triggering size limit.
// In this case, the firstEntryObservedTime should be updated to reset the timeout
if len(batch.entries) == 0 {
batch.firstEntryObservedTime = e.ObservedTimestamp
}
batch.entries = append(batch.entries, e)
}
// Combine the combineField of each entry in the batch,
// separated by newlines
var s string
err := e.Read(r.combineField, &s)
if err != nil {
r.Errorf("entry does not contain the combine_field")
return
}
if batch.recombined.Len() > 0 {
batch.recombined.WriteString(r.combineWith)
}
batch.recombined.WriteString(s)
if (r.maxLogSize > 0 && int64(batch.recombined.Len
|
{
return nil, fmt.Errorf("only one of is_first_entry and is_last_entry can be set")
}
|
conditional_block
|
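Build's validation above boils down to two checks: exactly one of is_first_entry / is_last_entry must be set, and overwrite_with accepts only "newest", "oldest", or the empty string (treated as oldest). A condensed, stand-alone version of those checks, with hypothetical names rather than the operator's exported API, looks like this:

package main

import (
	"errors"
	"fmt"
)

// validate mirrors the checks Build performs above and reports which matching
// mode and overwrite policy the configuration implies.
func validate(isFirst, isLast, overwriteWith string) (matchFirst, overwriteOldest bool, err error) {
	switch {
	case isFirst != "" && isLast != "":
		return false, false, errors.New("only one of is_first_entry and is_last_entry can be set")
	case isFirst == "" && isLast == "":
		return false, false, errors.New("one of is_first_entry and is_last_entry must be set")
	}
	matchFirst = isFirst != ""

	switch overwriteWith {
	case "newest":
		overwriteOldest = false
	case "oldest", "":
		overwriteOldest = true
	default:
		return false, false, fmt.Errorf("invalid value %q for parameter 'overwrite_with'", overwriteWith)
	}
	return matchFirst, overwriteOldest, nil
}

func main() {
	first, oldest, err := validate(`body matches "^[^\s]"`, "", "")
	fmt.Println(first, oldest, err) // true true <nil>
}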
package.go
|
if m.Version != "" {
s += " " + m.Version
if m.Update != nil {
s += " [" + m.Update.Version + "]"
}
}
if m.Replace != nil {
s += " => " + m.Replace.Path
if m.Replace.Version != "" {
s += " " + m.Replace.Version
if m.Replace.Update != nil {
s += " [" + m.Replace.Update.Version + "]"
}
}
}
return s
}
// A PackagePublic describes a single package found in a directory.
// go/libexec/src/cmd/go/internal/load/pkg.go
type PackagePublic struct {
Dir string `json:",omitempty"` // directory containing package sources
ImportPath string `json:",omitempty"` // import path of package in dir
ImportComment string `json:",omitempty"` // path in import comment on package statement
Name string `json:",omitempty"` // package name
Doc string `json:",omitempty"` // package document string
Target string `json:",omitempty"` // installed target for this package (may be executable)
Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared)
Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package
ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
ForTest string `json:",omitempty"` // package is only for use in named test
Export string `json:",omitempty"` // file containing export data (set by go list -export)
Module *Module `json:",omitempty"` // info about package's module, if any
Match []string `json:",omitempty"` // command-line patterns matching this package
Goroot bool `json:",omitempty"` // is this package found in the Go root?
Standard bool `json:",omitempty"` // is this package part of the standard Go library?
DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed
BinaryOnly bool `json:",omitempty"` // package cannot be recompiled
Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies?
// Stale and StaleReason remain here *only* for the list command.
// They are only initialized in preparation for list execution.
// The regular build determines staleness on the fly during action execution.
Stale bool `json:",omitempty"` // would 'go install' do anything for this package?
StaleReason string `json:",omitempty"` // why is Stale true?
// Dependency information
Imports []string `json:",omitempty"` // import paths used by this package
ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted)
Deps []string `json:",omitempty"` // all (recursively) imported dependencies
// Test information
// If you add to this list you MUST add to p.AllFiles (below) too.
// Otherwise file name security lists will not apply to any new additions.
TestGoFiles []string `json:",omitempty"` // _test.go files in package
TestImports []string `json:",omitempty"` // imports from TestGoFiles
XTestGoFiles []string `json:",omitempty"` // _test.go files outside package
XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}
// Package type
type Package struct {
Dir string // !important: directory containing package sources
ImportPath string // !important: import path of package in dir
ImportComment string // path in import comment on package statement
Name string // package name
Doc string // package document string
Module *Module // info about package's module, if any
Stale bool // would 'go install' do anything for this package?
StaleReason string // why is Stale true?
// declarations
Imports []string // import paths used by this package
Filenames []string // all files
Notes map[string][]*doc.Note // nil if no package Notes; otherwise marker notes such as BUGs, TODOs, etc.
Consts []*doc.Value
Types []*Type
Vars []*doc.Value
Funcs []*Func
// Examples is a sorted list of examples associated with
// the package. Examples are extracted from _test.go files provided to NewFromFiles.
Examples []*doc.Example // nil if no example code
ParentImportPath string // parent package ImportPath
Parent *Package `json:"-"` // parent package; important: JSON must ignore this field to prevent cycles when marshaling
SubPackages []*Package // subpackages
// ------------------------------------------------------------------
Dirname string // directory containing the package
Err error // error or nil
// package info
FSet *token.FileSet // nil if no package document
DocPackage *doc.Package // nil if no package document
PAst map[string]*ast.File // nil if no AST with package exports
IsMain bool // true for package main
}
// IsEmpty reports whether the package is empty
func (p *Package) IsEmpty() bool {
return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0
}
// --------------------------------------------------------------------
// Packages is a list of packages
type Packages []*Package
// TODO Packages impl sorting func
// Analyze the package
func (p *Package) Analyze() (err error) {
p.FSet = token.NewFileSet() // positions are relative to fset
pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments)
if err != nil {
return
}
var astPackage *ast.Package
for name, apkg := range pkgs {
if strings.HasSuffix(name, "_test") { // skip test package
continue
}
astPackage = apkg
}
d := doc.New(astPackage, p.ImportPath, doc.AllDecls)
p.DocPackage = d
p.Doc = d.Doc
p.Name = d.Name
p.ImportPath = d.ImportPath
p.Imports = d.Imports
p.Filenames = d.Filenames
p.Notes = d.Notes
p.Consts = d.Consts
p.Vars = d.Vars
p.Examples = d.Examples
// set package types
for _, t := range d.Types {
p.Types = append(p.Types, NewTypeWithDoc(t))
}
// set package funcs
for _, fn := range d.Funcs {
p.Funcs = append(p.Funcs, NewFuncWithDoc(fn))
}
return
}
// --------------------------------------------------------------------
// TypeFields returns the fields of the given type
func
|
(t *Type) (fields []*Field) {
if t == nil {
return
}
for _, spec := range t.Decl.Specs {
typeSpec := spec.(*ast.TypeSpec)
// struct type
if str, ok := typeSpec.Type.(*ast.StructType); ok {
for _, f := range str.Fields.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
// interface type methods
if str, ok := typeSpec.Type.(*ast.InterfaceType); ok {
for _, field := range str.Methods.List {
if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil {
field.Names = []*ast.Ident{ident}
}
}
for _, f := range str.Methods.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
}
return
}
// TypeSpec type spec
type TypeSpec string
const (
// StructType struct type spec
StructType TypeSpec = "struct"
// InterfaceType interface type spec
InterfaceType TypeSpec = "interface"
)
// Type type
type Type struct {
// doc.Type
Doc string
Name string
Decl *ast.GenDecl
Documentation Documentation
// associated declarations
Consts []*doc.Value // sorted list of constants of (mostly) this type
Vars []*doc.Value // sorted list of variables of (mostly) this type
Funcs []*Func // sorted list of functions returning this type
Methods []*Func // sorted list of methods (including embedded ones) of this type
// Examples is a sorted list of examples associated with
// this type. Examples are extracted from _test.go files
// provided to NewFromFiles.
Examples []*doc.Example
// Fields *ast.FieldList
Fields []*Field
TypeSpec TypeSpec // type spec
}
// NewTypeWithDoc returns a Type built from a doc.Type
func NewTypeWithDoc(t *doc.Type) *Type {
var _t = &Type{
Doc: t.Doc,
Name: t.Name,
Decl: t.Decl,
Consts: t.Consts,
Vars: t.Vars,
Examples: t.Examples,
}
_t.Documentation = NewDocumentation(t.Doc)
_t.Fields = TypeFields(_t)
for _, spec := range t
|
TypeFields
|
identifier_name
|
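Package.Analyze above is a thin wrapper over the standard library: parse the directory with comments, skip the *_test package, and hand the remaining *ast.Package to go/doc. The example below walks the same steps end to end using only go/parser and go/doc; the directory "." and the import path are placeholders to substitute with a real package.

package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
	"log"
	"strings"
)

func main() {
	fset := token.NewFileSet() // positions are relative to fset
	pkgs, err := parser.ParseDir(fset, ".", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	// Pick the non-test package, as Analyze does.
	var astPkg *ast.Package
	for name, p := range pkgs {
		if strings.HasSuffix(name, "_test") {
			continue
		}
		astPkg = p
	}
	if astPkg == nil {
		log.Fatal("no non-test package found")
	}
	d := doc.New(astPkg, "example.com/placeholder", doc.AllDecls)
	fmt.Println("package:", d.Name)
	for _, t := range d.Types {
		fmt.Println("type:", t.Name)
	}
	for _, f := range d.Funcs {
		fmt.Println("func:", f.Name)
	}
}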
package.go
|
if m.Version != "" {
s += " " + m.Version
if m.Update != nil {
s += " [" + m.Update.Version + "]"
}
}
if m.Replace != nil {
s += " => " + m.Replace.Path
if m.Replace.Version != "" {
s += " " + m.Replace.Version
if m.Replace.Update != nil {
s += " [" + m.Replace.Update.Version + "]"
}
}
}
return s
}
// A PackagePublic describes a single package found in a directory.
// go/libexec/src/cmd/go/internal/load/pkg.go
type PackagePublic struct {
Dir string `json:",omitempty"` // directory containing package sources
ImportPath string `json:",omitempty"` // import path of package in dir
ImportComment string `json:",omitempty"` // path in import comment on package statement
Name string `json:",omitempty"` // package name
Doc string `json:",omitempty"` // package document string
Target string `json:",omitempty"` // installed target for this package (may be executable)
Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared)
Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package
ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
ForTest string `json:",omitempty"` // package is only for use in named test
Export string `json:",omitempty"` // file containing export data (set by go list -export)
Module *Module `json:",omitempty"` // info about package's module, if any
Match []string `json:",omitempty"` // command-line patterns matching this package
Goroot bool `json:",omitempty"` // is this package found in the Go root?
Standard bool `json:",omitempty"` // is this package part of the standard Go library?
DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed
BinaryOnly bool `json:",omitempty"` // package cannot be recompiled
Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies?
// Stale and StaleReason remain here *only* for the list command.
// They are only initialized in preparation for list execution.
// The regular build determines staleness on the fly during action execution.
Stale bool `json:",omitempty"` // would 'go install' do anything for this package?
StaleReason string `json:",omitempty"` // why is Stale true?
// Dependency information
Imports []string `json:",omitempty"` // import paths used by this package
ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted)
Deps []string `json:",omitempty"` // all (recursively) imported dependencies
// Test information
// If you add to this list you MUST add to p.AllFiles (below) too.
// Otherwise file name security lists will not apply to any new additions.
TestGoFiles []string `json:",omitempty"` // _test.go files in package
TestImports []string `json:",omitempty"` // imports from TestGoFiles
XTestGoFiles []string `json:",omitempty"` // _test.go files outside package
XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}
// Package type
type Package struct {
Dir string // !important: directory containing package sources
ImportPath string // !important: import path of package in dir
ImportComment string // path in import comment on package statement
Name string // package name
Doc string // package document string
Module *Module // info about package's module, if any
Stale bool // would 'go install' do anything for this package?
StaleReason string // why is Stale true?
// declarations
Imports []string // import paths used by this package
Filenames []string // all files
Notes map[string][]*doc.Note // nil if no package Notes; otherwise marker notes such as BUGs, TODOs, etc.
Consts []*doc.Value
Types []*Type
Vars []*doc.Value
Funcs []*Func
// Examples is a sorted list of examples associated with
// the package. Examples are extracted from _test.go files provided to NewFromFiles.
Examples []*doc.Example // nil if no example code
ParentImportPath string // parent package ImportPath
Parent *Package `json:"-"` // parent package; important: JSON must ignore this field to prevent cycles when marshaling
SubPackages []*Package // subpackages
// ------------------------------------------------------------------
Dirname string // directory containing the package
Err error // error or nil
// package info
FSet *token.FileSet // nil if no package document
DocPackage *doc.Package // nil if no package document
PAst map[string]*ast.File // nil if no AST with package exports
IsMain bool // true for package main
}
// IsEmpty reports whether the package is empty
func (p *Package) IsEmpty() bool {
return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0
}
// --------------------------------------------------------------------
// Packages is a list of packages
type Packages []*Package
// TODO Packages impl sorting func
// Analyze the package
func (p *Package) Analyze() (err error)
|
p.Doc = d.Doc
p.Name = d.Name
p.ImportPath = d.ImportPath
p.Imports = d.Imports
p.Filenames = d.Filenames
p.Notes = d.Notes
p.Consts = d.Consts
p.Vars = d.Vars
p.Examples = d.Examples
// set package types
for _, t := range d.Types {
p.Types = append(p.Types, NewTypeWithDoc(t))
}
// set package funcs
for _, fn := range d.Funcs {
p.Funcs = append(p.Funcs, NewFuncWithDoc(fn))
}
return
}
// --------------------------------------------------------------------
// TypeFields returns the fields of the given type
func TypeFields(t *Type) (fields []*Field) {
if t == nil {
return
}
for _, spec := range t.Decl.Specs {
typeSpec := spec.(*ast.TypeSpec)
// struct type
if str, ok := typeSpec.Type.(*ast.StructType); ok {
for _, f := range str.Fields.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
// interface type methods
if str, ok := typeSpec.Type.(*ast.InterfaceType); ok {
for _, field := range str.Methods.List {
if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil {
field.Names = []*ast.Ident{ident}
}
}
for _, f := range str.Methods.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
}
return
}
// TypeSpec type spec
type TypeSpec string
const (
// StructType struct type spec
StructType TypeSpec = "struct"
// InterfaceType interface type spec
InterfaceType TypeSpec = "interface"
)
// Type type
type Type struct {
// doc.Type
Doc string
Name string
Decl *ast.GenDecl
Documentation Documentation
// associated declarations
Consts []*doc.Value // sorted list of constants of (mostly) this type
Vars []*doc.Value // sorted list of variables of (mostly) this type
Funcs []*Func // sorted list of functions returning this type
Methods []*Func // sorted list of methods (including embedded ones) of this type
// Examples is a sorted list of examples associated with
// this type. Examples are extracted from _test.go files
// provided to NewFromFiles.
Examples []*doc.Example
// Fields *ast.FieldList
Fields []*Field
TypeSpec TypeSpec // type spec
}
// NewTypeWithDoc returns a Type built from a doc.Type
func NewTypeWithDoc(t *doc.Type) *Type {
var _t = &Type{
Doc: t.Doc,
Name: t.Name,
Decl: t.Decl,
Consts: t.Consts,
Vars: t.Vars,
Examples: t.Examples,
}
_t.Documentation = NewDocumentation(t.Doc)
_t.Fields = TypeFields(_t)
for _, spec := range t
|
{
p.FSet = token.NewFileSet() // positions are relative to fset
pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments)
if err != nil {
return
}
var astPackage *ast.Package
for name, apkg := range pkgs {
if strings.HasSuffix(name, "_test") { // skip test package
continue
}
astPackage = apkg
}
d := doc.New(astPackage, p.ImportPath, doc.AllDecls)
p.DocPackage = d
|
identifier_body
|
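TypeFields above distinguishes two shapes of type declaration: struct types contribute their field list, interface types contribute their method list. The self-contained example below applies the same walk to a small parsed source string; it illustrates the technique rather than reusing the package's own Type and Field wrappers.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

const src = `package demo

// T is a sample struct.
type T struct {
	Name string
	Age  int
}

// I is a sample interface.
type I interface {
	Close() error
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	for _, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.TYPE {
			continue
		}
		for _, spec := range gen.Specs {
			ts := spec.(*ast.TypeSpec)
			switch t := ts.Type.(type) {
			case *ast.StructType: // struct type: list its fields
				for _, field := range t.Fields.List {
					fmt.Printf("%s: struct field %v\n", ts.Name.Name, fieldNames(field))
				}
			case *ast.InterfaceType: // interface type: list its methods
				for _, m := range t.Methods.List {
					fmt.Printf("%s: interface method %v\n", ts.Name.Name, fieldNames(m))
				}
			}
		}
	}
}

// fieldNames returns the declared names of a field, or "<embedded>" when none.
func fieldNames(f *ast.Field) []string {
	if len(f.Names) == 0 {
		return []string{"<embedded>"}
	}
	var names []string
	for _, n := range f.Names {
		names = append(names, n.Name)
	}
	return names
}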
package.go
|
if m.Version != "" {
s += " " + m.Version
if m.Update != nil {
s += " [" + m.Update.Version + "]"
}
}
if m.Replace != nil {
s += " => " + m.Replace.Path
if m.Replace.Version != "" {
s += " " + m.Replace.Version
if m.Replace.Update != nil {
s += " [" + m.Replace.Update.Version + "]"
}
}
}
return s
}
// A PackagePublic describes a single package found in a directory.
// go/libexec/src/cmd/go/internal/load/pkg.go
type PackagePublic struct {
Dir string `json:",omitempty"` // directory containing package sources
ImportPath string `json:",omitempty"` // import path of package in dir
ImportComment string `json:",omitempty"` // path in import comment on package statement
Name string `json:",omitempty"` // package name
Doc string `json:",omitempty"` // package document string
Target string `json:",omitempty"` // installed target for this package (may be executable)
Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared)
Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package
ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
ForTest string `json:",omitempty"` // package is only for use in named test
Export string `json:",omitempty"` // file containing export data (set by go list -export)
Module *Module `json:",omitempty"` // info about package's module, if any
Match []string `json:",omitempty"` // command-line patterns matching this package
Goroot bool `json:",omitempty"` // is this package found in the Go root?
Standard bool `json:",omitempty"` // is this package part of the standard Go library?
DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed
BinaryOnly bool `json:",omitempty"` // package cannot be recompiled
Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies?
// Stale and StaleReason remain here *only* for the list command.
// They are only initialized in preparation for list execution.
// The regular build determines staleness on the fly during action execution.
Stale bool `json:",omitempty"` // would 'go install' do anything for this package?
StaleReason string `json:",omitempty"` // why is Stale true?
// Dependency information
Imports []string `json:",omitempty"` // import paths used by this package
ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted)
Deps []string `json:",omitempty"` // all (recursively) imported dependencies
// Test information
// If you add to this list you MUST add to p.AllFiles (below) too.
// Otherwise file name security lists will not apply to any new additions.
TestGoFiles []string `json:",omitempty"` // _test.go files in package
TestImports []string `json:",omitempty"` // imports from TestGoFiles
XTestGoFiles []string `json:",omitempty"` // _test.go files outside package
XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}
// Package type
type Package struct {
Dir string // !important: directory containing package sources
ImportPath string // !important: import path of package in dir
ImportComment string // path in import comment on package statement
Name string // package name
Doc string // package document string
Module *Module // info about package's module, if any
Stale bool // would 'go install' do anything for this package?
StaleReason string // why is Stale true?
// declarations
Imports []string // import paths used by this package
Filenames []string // all files
Notes map[string][]*doc.Note // nil if no package Notes; otherwise marker notes such as BUGs, TODOs, etc.
Consts []*doc.Value
Types []*Type
Vars []*doc.Value
Funcs []*Func
// Examples is a sorted list of examples associated with
// the package. Examples are extracted from _test.go files provided to NewFromFiles.
Examples []*doc.Example // nil if no example code
ParentImportPath string // parent package ImportPath
Parent *Package `json:"-"` // parent package; important: JSON must ignore this field to prevent cycles when marshaling
SubPackages []*Package // subpackages
// ------------------------------------------------------------------
Dirname string // directory containing the package
Err error // error or nil
// package info
FSet *token.FileSet // nil if no package document
DocPackage *doc.Package // nil if no package document
PAst map[string]*ast.File // nil if no AST with package exports
IsMain bool // true for package main
}
// IsEmpty reports whether the package is empty
func (p *Package) IsEmpty() bool {
return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0
}
// --------------------------------------------------------------------
// Packages is a list of packages
type Packages []*Package
// TODO Packages impl sorting func
// Analyze the package
func (p *Package) Analyze() (err error) {
p.FSet = token.NewFileSet() // positions are relative to fset
pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments)
if err != nil {
return
}
var astPackage *ast.Package
for name, apkg := range pkgs {
if strings.HasSuffix(name, "_test") { // skip test package
continue
}
astPackage = apkg
}
d := doc.New(astPackage, p.ImportPath, doc.AllDecls)
p.DocPackage = d
p.Doc = d.Doc
p.Name = d.Name
p.ImportPath = d.ImportPath
p.Imports = d.Imports
p.Filenames = d.Filenames
p.Notes = d.Notes
p.Consts = d.Consts
p.Vars = d.Vars
p.Examples = d.Examples
// set package types
for _, t := range d.Types {
p.Types = append(p.Types, NewTypeWithDoc(t))
}
// set package funcs
for _, fn := range d.Funcs {
p.Funcs = append(p.Funcs, NewFuncWithDoc(fn))
}
return
}
// --------------------------------------------------------------------
// TypeFields returns the fields of the given type
func TypeFields(t *Type) (fields []*Field) {
if t == nil {
return
}
for _, spec := range t.Decl.Specs {
typeSpec := spec.(*ast.TypeSpec)
// struct type
if str, ok := typeSpec.Type.(*ast.StructType); ok {
for _, f := range str.Fields.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
// interface type methods
if str, ok := typeSpec.Type.(*ast.InterfaceType); ok {
for _, field := range str.Methods.List {
if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil {
field.Names = []*ast.Ident{ident}
}
}
for _, f := range str.Methods.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
|
// TypeSpec identifies the kind of a type declaration (struct or interface)
type TypeSpec string
const (
// StructType struct type spec
StructType TypeSpec = "struct"
// InterfaceType interface type spec
InterfaceType TypeSpec = "interface"
)
// Type describes a parsed type declaration and its documentation
type Type struct {
// doc.Type
Doc string
Name string
Decl *ast.GenDecl
Documentation Documentation
// associated declarations
Consts []*doc.Value // sorted list of constants of (mostly) this type
Vars []*doc.Value // sorted list of variables of (mostly) this type
Funcs []*Func // sorted list of functions returning this type
Methods []*Func // sorted list of methods (including embedded ones) of this type
// Examples is a sorted list of examples associated with
// this type. Examples are extracted from _test.go files
// provided to NewFromFiles.
Examples []*doc.Example
// Fields *ast.FieldList
Fields []*Field
TypeSpec TypeSpec // type spec
}
// NewTypeWithDoc builds a Type from a doc.Type
func NewTypeWithDoc(t *doc.Type) *Type {
var _t = &Type{
Doc: t.Doc,
Name: t.Name,
Decl: t.Decl,
Consts: t.Consts,
Vars: t.Vars,
Examples: t.Examples,
}
_t.Documentation = NewDocumentation(t.Doc)
_t.Fields = TypeFields(_t)
for _, spec := range t
|
}
return
}
|
random_line_split
|
package.go
|
if m.Version != "" {
s += " " + m.Version
if m.Update != nil {
s += " [" + m.Update.Version + "]"
}
}
if m.Replace != nil {
s += " => " + m.Replace.Path
if m.Replace.Version != "" {
s += " " + m.Replace.Version
if m.Replace.Update != nil {
s += " [" + m.Replace.Update.Version + "]"
}
}
}
return s
}
// A PackagePublic describes a single package found in a directory.
// go/libexec/src/cmd/go/internal/load/pkg.go
type PackagePublic struct {
Dir string `json:",omitempty"` // directory containing package sources
ImportPath string `json:",omitempty"` // import path of package in dir
ImportComment string `json:",omitempty"` // path in import comment on package statement
Name string `json:",omitempty"` // package name
Doc string `json:",omitempty"` // package documentation string
Target string `json:",omitempty"` // installed target for this package (may be executable)
Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared)
Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package
ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
ForTest string `json:",omitempty"` // package is only for use in named test
Export string `json:",omitempty"` // file containing export data (set by go list -export)
Module *Module `json:",omitempty"` // info about package's module, if any
Match []string `json:",omitempty"` // command-line patterns matching this package
Goroot bool `json:",omitempty"` // is this package found in the Go root?
Standard bool `json:",omitempty"` // is this package part of the standard Go library?
DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed
BinaryOnly bool `json:",omitempty"` // package cannot be recompiled
Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies?
// Stale and StaleReason remain here *only* for the list command.
// They are only initialized in preparation for list execution.
// The regular build determines staleness on the fly during action execution.
Stale bool `json:",omitempty"` // would 'go install' do anything for this package?
StaleReason string `json:",omitempty"` // why is Stale true?
// Dependency information
Imports []string `json:",omitempty"` // import paths used by this package
ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted)
Deps []string `json:",omitempty"` // all (recursively) imported dependencies
// Test information
// If you add to this list you MUST add to p.AllFiles (below) too.
// Otherwise file name security lists will not apply to any new additions.
TestGoFiles []string `json:",omitempty"` // _test.go files in package
TestImports []string `json:",omitempty"` // imports from TestGoFiles
XTestGoFiles []string `json:",omitempty"` // _test.go files outside package
XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}
// Package type
type Package struct {
Dir string // !important: directory containing package sources
ImportPath string // !important: import path of package in dir
ImportComment string // path in import comment on package statement
Name string // package name
Doc string // package documentation string
Module *Module // info about package's module, if any
Stale bool // would 'go install' do anything for this package?
StaleReason string // why is Stale true?
// declarations
Imports []string // import paths used by this package
Filenames []string // all files
Notes map[string][]*doc.Note // nil if the package has no notes; keyed by marker (e.g. BUG)
Consts []*doc.Value
Types []*Type
Vars []*doc.Value
Funcs []*Func
// Examples is a sorted list of examples associated with
// the package. Examples are extracted from _test.go files provided to NewFromFiles.
Examples []*doc.Example // nil if no example code
ParentImportPath string // parent package ImportPath
Parent *Package `json:"-"` // parent package, important: json must ignore, prevent cycle parsing
SubPackages []*Package // subpackages
// ------------------------------------------------------------------
Dirname string // directory containing the package
Err error // error or nil
// package info
FSet *token.FileSet // nil if no package document
DocPackage *doc.Package // nil if no package document
PAst map[string]*ast.File // nil if no AST with package exports
IsMain bool // true for package main
}
// IsEmpty reports whether the package is empty
func (p *Package) IsEmpty() bool {
return p.Err != nil || p.PAst == nil && p.DocPackage == nil && len(p.SubPackages) == 0
}
// --------------------------------------------------------------------
// Packages is a list of Package values
type Packages []*Package
// TODO: implement sorting for Packages
// Analyze the package
func (p *Package) Analyze() (err error) {
p.FSet = token.NewFileSet() // positions are relative to fset
pkgs, err := parser.ParseDir(p.FSet, p.Dir, nil, parser.ParseComments)
if err != nil {
return
}
var astPackage *ast.Package
for name, apkg := range pkgs {
if strings.HasSuffix(name, "_test")
|
astPackage = apkg
}
d := doc.New(astPackage, p.ImportPath, doc.AllDecls)
p.DocPackage = d
p.Doc = d.Doc
p.Name = d.Name
p.ImportPath = d.ImportPath
p.Imports = d.Imports
p.Filenames = d.Filenames
p.Notes = d.Notes
p.Consts = d.Consts
p.Vars = d.Vars
p.Examples = d.Examples
// set package types
for _, t := range d.Types {
p.Types = append(p.Types, NewTypeWithDoc(t))
}
// set package funcs
for _, fn := range d.Funcs {
p.Funcs = append(p.Funcs, NewFuncWithDoc(fn))
}
return
}
// --------------------------------------------------------------------
// TypeFields returns the struct fields or interface methods of the given type
func TypeFields(t *Type) (fields []*Field) {
if t == nil {
return
}
for _, spec := range t.Decl.Specs {
typeSpec := spec.(*ast.TypeSpec)
// struct type
if str, ok := typeSpec.Type.(*ast.StructType); ok {
for _, f := range str.Fields.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
// interface type methods
if str, ok := typeSpec.Type.(*ast.InterfaceType); ok {
for _, field := range str.Methods.List {
if ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil {
field.Names = []*ast.Ident{ident}
}
}
for _, f := range str.Methods.List {
fields = append(fields, &Field{
Field: f,
Type: t,
})
}
return
}
}
return
}
// TypeSpec identifies the kind of a type declaration (struct or interface)
type TypeSpec string
const (
// StructType struct type spec
StructType TypeSpec = "struct"
// InterfaceType interface type spec
InterfaceType TypeSpec = "interface"
)
// Type describes a parsed type declaration and its documentation
type Type struct {
// doc.Type
Doc string
Name string
Decl *ast.GenDecl
Documentation Documentation
// associated declarations
Consts []*doc.Value // sorted list of constants of (mostly) this type
Vars []*doc.Value // sorted list of variables of (mostly) this type
Funcs []*Func // sorted list of functions returning this type
Methods []*Func // sorted list of methods (including embedded ones) of this type
// Examples is a sorted list of examples associated with
// this type. Examples are extracted from _test.go files
// provided to NewFromFiles.
Examples []*doc.Example
// Fields *ast.FieldList
Fields []*Field
TypeSpec TypeSpec // type spec
}
// NewTypeWithDoc builds a Type from a doc.Type
func NewTypeWithDoc(t *doc.Type) *Type {
var _t = &Type{
Doc: t.Doc,
Name: t.Name,
Decl: t.Decl,
Consts: t.Consts,
Vars: t.Vars,
Examples: t.Examples,
}
_t.Documentation = NewDocumentation(t.Doc)
_t.Fields = TypeFields(_t)
for _, spec := range
|
{ // skip test package
continue
}
|
conditional_block
|
recreate.go
|
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} (
{{ tableColumnToCQL .Tm }}
) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }};
`))
func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error {
if err := tableCQLTemplate.Execute(w, map[string]interface{}{
"Tm": tm,
"KeyspaceName": kn,
}); err != nil {
return err
}
return nil
}
var functionTemplate = template.Must(template.New("functions").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"zip": cqlHelpers.zip,
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} (
{{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{- escape (index $args 0) }}
{{ stripFrozen (index $args 1) }}
{{- end -}})
{{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT
RETURNS {{ .fm.ReturnType }}
LANGUAGE {{ .fm.Language }}
AS $${{ .fm.Body }}$$;
`))
func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error {
if err := functionTemplate.Execute(w, map[string]interface{}{
"fm": fm,
"keyspaceName": keyspaceName,
}); err != nil {
return err
}
return nil
}
var viewTemplate = template.Must(template.New("views").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
"partitionKeyString": cqlHelpers.partitionKeyString,
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS
SELECT {{ if .vm.IncludeAllColumns }}*{{ else }}
{{- range $i, $col := .vm.OrderedColumns }}
{{- if ne $i 0 }}, {{ end }}
{{ $col }}
{{- end }}
{{- end }}
FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }}
WHERE {{ .vm.WhereClause }}
PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }})
WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }};
`))
func (km *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error {
if err := viewTemplate.Execute(w, map[string]interface{}{
"vm": vm,
"flags": []string{},
}); err != nil {
return err
}
return nil
}
var aggregatesTemplate = template.Must(template.New("aggregate").
Funcs(map[string]interface{}{
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}(
{{- range $i, $arg := .ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{ stripFrozen $arg }}
{{- end -}})
SFUNC {{ .StateFunc.Name }}
STYPE {{ stripFrozen .State }}
{{- if ne .FinalFunc.Name "" }}
FINALFUNC {{ .FinalFunc.Name }}
{{- end -}}
{{- if ne .InitCond "" }}
INITCOND {{ .InitCond }}
{{- end -}}
);
`))
func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error {
if err := aggregatesTemplate.Execute(w, am); err != nil {
return err
}
return nil
}
var typeCQLTemplate = template.Must(template.New("types").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
}).
Parse(`
CREATE TYPE {{ .Keyspace }}.{{ .Name }} (
{{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }}
{{ index $fields 0 }} {{ index $fields 1 }}
{{- end }}
);
`))
func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error {
if err := typeCQLTemplate.Execute(w, tm); err != nil {
return err
}
return nil
}
func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error {
// Scylla doesn't support any custom indexes
if im.Kind == IndexKindCustom {
return nil
}
options := im.Options
indexTarget := options["target"]
// secondary index
si := struct {
ClusteringKeys []string `json:"ck"`
PartitionKeys []string `json:"pk"`
}{}
if err := json.Unmarshal([]byte(indexTarget), &si); err == nil {
indexTarget = fmt.Sprintf("(%s), %s",
strings.Join(si.PartitionKeys, ","),
strings.Join(si.ClusteringKeys, ","),
)
}
_, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n",
im.Name,
im.KeyspaceName,
im.TableName,
indexTarget,
)
if err != nil {
return err
}
return nil
}
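A self-contained sketch (sample values only, not driver code) of the secondary-index target expansion performed in indexToCQL above: a JSON target of the form {"pk": [...], "ck": [...]} is rewritten as "(pk1,pk2), ck1".

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Assumed sample target value as stored in the index options.
	indexTarget := `{"pk":["region","bucket"],"ck":["id"]}`
	si := struct {
		ClusteringKeys []string `json:"ck"`
		PartitionKeys  []string `json:"pk"`
	}{}
	// On a successful parse, rewrite the target into CQL column-list form.
	if err := json.Unmarshal([]byte(indexTarget), &si); err == nil {
		indexTarget = fmt.Sprintf("(%s), %s",
			strings.Join(si.PartitionKeys, ","),
			strings.Join(si.ClusteringKeys, ","),
		)
	}
	fmt.Println(indexTarget) // (region,bucket), id
}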
var keyspaceCQLTemplate = template.Must(template.New("keyspace").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"fixStrategy": cqlHelpers.fixStrategy,
}).
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = {
'class': {{ escape ( fixStrategy .StrategyClass) }}
{{- range $key, $value := .StrategyOptions }},
{{ escape $key }}: {{ escape $value }}
{{- end }}
}{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }};
`))
func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error {
if err := keyspaceCQLTemplate.Execute(w, km); err != nil {
return err
}
return nil
}
func contains(in []string, v string) bool {
for _, e := range in {
if e == v {
return true
}
}
return false
}
type toCQLHelpers struct{}
var cqlHelpers = toCQLHelpers{}
func (h toCQLHelpers) zip(a []string, b []string) [][]string {
m := make([][]string, len(a))
for i := range a
|
return m
}
func (h toCQLHelpers) escape(e interface{}) string {
switch v := e.(type) {
case int, float64:
return fmt.Sprint(v)
case bool:
if v {
return "true"
}
return "false"
case string:
return "'" + strings.ReplaceAll(v, "'", "''") + "'"
case []byte:
return string(v)
}
return ""
}
func (h toCQLHelpers) stripFrozen(v string) string {
return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">")
}
func (h toCQLHelpers) fixStrategy(v string) string {
return strings.TrimPrefix(v, "org.apache.cassandra.locator.")
}
func (h toCQLHelpers) fixQuote(v string) string {
return strings.ReplaceAll(v, `"`, `'`)
}
func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) {
opts := map[string]interface{}{
"bloom_filter_fp_chance": ops.BloomFilterFpChance,
"comment": ops.Comment,
"crc_check_chance": ops.CrcCheckChance,
"dclocal_read_repair_chance": ops.DcLocalReadRepairChance,
"default_time_to_live": ops.DefaultTimeToLive,
"gc_grace_seconds": ops.GcGraceSeconds,
"max_index_interval": ops.MaxIndexInterval,
"memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs,
"min_index_interval": ops.MinIndexInterval,
"read_repair_chance": ops.ReadRepairChance,
"speculative_retry": ops.SpeculativeRetry,
}
var err error
opts["caching"], err = json.Marshal(ops.Caching)
if err != nil {
return nil, err
}
opts["compaction"], err = json.Marshal(ops.Compaction)
if err != nil {
return nil, err
}
opts["compression"], err = json.Marshal(ops.Compression)
if err != nil {
return nil, err
}
cdc, err := json.Marshal(ops.CDC)
if err != nil {
return nil, err
|
{
m[i] = []string{a[i], b[i]}
}
|
conditional_block
|
recreate.go
|
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} (
{{ tableColumnToCQL .Tm }}
) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }};
`))
func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error {
if err := tableCQLTemplate.Execute(w, map[string]interface{}{
"Tm": tm,
"KeyspaceName": kn,
}); err != nil {
return err
}
return nil
}
var functionTemplate = template.Must(template.New("functions").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"zip": cqlHelpers.zip,
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} (
{{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{- escape (index $args 0) }}
{{ stripFrozen (index $args 1) }}
{{- end -}})
{{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT
RETURNS {{ .fm.ReturnType }}
LANGUAGE {{ .fm.Language }}
AS $${{ .fm.Body }}$$;
`))
func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error {
if err := functionTemplate.Execute(w, map[string]interface{}{
"fm": fm,
"keyspaceName": keyspaceName,
}); err != nil {
return err
}
return nil
}
var viewTemplate = template.Must(template.New("views").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
"partitionKeyString": cqlHelpers.partitionKeyString,
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS
SELECT {{ if .vm.IncludeAllColumns }}*{{ else }}
{{- range $i, $col := .vm.OrderedColumns }}
{{- if ne $i 0 }}, {{ end }}
{{ $col }}
{{- end }}
{{- end }}
FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }}
WHERE {{ .vm.WhereClause }}
PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }})
WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }};
`))
func (km *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error {
if err := viewTemplate.Execute(w, map[string]interface{}{
"vm": vm,
"flags": []string{},
}); err != nil {
return err
}
return nil
}
var aggregatesTemplate = template.Must(template.New("aggregate").
Funcs(map[string]interface{}{
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}(
{{- range $i, $arg := .ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{ stripFrozen $arg }}
{{- end -}})
SFUNC {{ .StateFunc.Name }}
STYPE {{ stripFrozen .State }}
{{- if ne .FinalFunc.Name "" }}
FINALFUNC {{ .FinalFunc.Name }}
{{- end -}}
{{- if ne .InitCond "" }}
INITCOND {{ .InitCond }}
{{- end -}}
);
`))
func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error {
if err := aggregatesTemplate.Execute(w, am); err != nil {
return err
}
return nil
}
var typeCQLTemplate = template.Must(template.New("types").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
}).
Parse(`
CREATE TYPE {{ .Keyspace }}.{{ .Name }} (
{{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }}
{{ index $fields 0 }} {{ index $fields 1 }}
{{- end }}
);
`))
func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error {
if err := typeCQLTemplate.Execute(w, tm); err != nil {
return err
}
return nil
}
func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error {
// Scylla doesn't support any custom indexes
if im.Kind == IndexKindCustom {
return nil
}
options := im.Options
indexTarget := options["target"]
// secondary index
si := struct {
ClusteringKeys []string `json:"ck"`
PartitionKeys []string `json:"pk"`
}{}
if err := json.Unmarshal([]byte(indexTarget), &si); err == nil {
indexTarget = fmt.Sprintf("(%s), %s",
strings.Join(si.PartitionKeys, ","),
strings.Join(si.ClusteringKeys, ","),
)
}
_, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n",
im.Name,
im.KeyspaceName,
im.TableName,
indexTarget,
)
if err != nil {
return err
}
return nil
}
var keyspaceCQLTemplate = template.Must(template.New("keyspace").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"fixStrategy": cqlHelpers.fixStrategy,
}).
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = {
'class': {{ escape ( fixStrategy .StrategyClass) }}
{{- range $key, $value := .StrategyOptions }},
{{ escape $key }}: {{ escape $value }}
{{- end }}
}{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }};
`))
func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error {
if err := keyspaceCQLTemplate.Execute(w, km); err != nil {
return err
}
return nil
}
func contains(in []string, v string) bool {
for _, e := range in {
if e == v {
return true
}
}
return false
}
type toCQLHelpers struct{}
var cqlHelpers = toCQLHelpers{}
func (h toCQLHelpers) zip(a []string, b []string) [][]string {
m := make([][]string, len(a))
for i := range a {
m[i] = []string{a[i], b[i]}
}
return m
}
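A tiny standalone demo of the zip helper above: it pairs positionally matching entries of two equal-length slices; the field names and types are made-up sample data.

package main

import "fmt"

// zip mirrors the helper above: element i of the result is {a[i], b[i]}.
func zip(a, b []string) [][]string {
	m := make([][]string, len(a))
	for i := range a {
		m[i] = []string{a[i], b[i]}
	}
	return m
}

func main() {
	names := []string{"id", "name"}
	types := []string{"uuid", "text"}
	for _, pair := range zip(names, types) {
		fmt.Printf("%s %s\n", pair[0], pair[1]) // "id uuid" then "name text"
	}
}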
func (h toCQLHelpers) escape(e interface{}) string {
switch v := e.(type) {
case int, float64:
return fmt.Sprint(v)
case bool:
if v {
return "true"
}
return "false"
case string:
return "'" + strings.ReplaceAll(v, "'", "''") + "'"
case []byte:
return string(v)
}
return ""
}
func (h toCQLHelpers) stripFrozen(v string) string {
return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">")
}
func (h toCQLHelpers) fixStrategy(v string) string {
return strings.TrimPrefix(v, "org.apache.cassandra.locator.")
}
func (h toCQLHelpers) fixQuote(v string) string {
return strings.ReplaceAll(v, `"`, `'`)
}
func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error)
|
opts["compaction"], err = json.Marshal(ops.Compaction)
if err != nil {
return nil, err
}
opts["compression"], err = json.Marshal(ops.Compression)
if err != nil {
return nil, err
}
cdc, err := json.Marshal(ops.CDC)
if err != nil {
return nil, err
|
{
opts := map[string]interface{}{
"bloom_filter_fp_chance": ops.BloomFilterFpChance,
"comment": ops.Comment,
"crc_check_chance": ops.CrcCheckChance,
"dclocal_read_repair_chance": ops.DcLocalReadRepairChance,
"default_time_to_live": ops.DefaultTimeToLive,
"gc_grace_seconds": ops.GcGraceSeconds,
"max_index_interval": ops.MaxIndexInterval,
"memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs,
"min_index_interval": ops.MinIndexInterval,
"read_repair_chance": ops.ReadRepairChance,
"speculative_retry": ops.SpeculativeRetry,
}
var err error
opts["caching"], err = json.Marshal(ops.Caching)
if err != nil {
return nil, err
}
|
identifier_body
|
recreate.go
|
,
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} (
{{ tableColumnToCQL .Tm }}
) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }};
`))
func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error {
if err := tableCQLTemplate.Execute(w, map[string]interface{}{
"Tm": tm,
"KeyspaceName": kn,
}); err != nil {
return err
}
return nil
}
var functionTemplate = template.Must(template.New("functions").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"zip": cqlHelpers.zip,
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} (
{{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{- escape (index $args 0) }}
{{ stripFrozen (index $args 1) }}
{{- end -}})
{{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT
RETURNS {{ .fm.ReturnType }}
LANGUAGE {{ .fm.Language }}
AS $${{ .fm.Body }}$$;
`))
func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error {
if err := functionTemplate.Execute(w, map[string]interface{}{
"fm": fm,
|
"keyspaceName": keyspaceName,
}); err != nil {
return err
}
return nil
}
var viewTemplate = template.Must(template.New("views").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
"partitionKeyString": cqlHelpers.partitionKeyString,
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS
SELECT {{ if .vm.IncludeAllColumns }}*{{ else }}
{{- range $i, $col := .vm.OrderedColumns }}
{{- if ne $i 0 }}, {{ end }}
{{ $col }}
{{- end }}
{{- end }}
FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }}
WHERE {{ .vm.WhereClause }}
PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }})
WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }};
`))
func (km *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error {
if err := viewTemplate.Execute(w, map[string]interface{}{
"vm": vm,
"flags": []string{},
}); err != nil {
return err
}
return nil
}
var aggregatesTemplate = template.Must(template.New("aggregate").
Funcs(map[string]interface{}{
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}(
{{- range $i, $arg := .ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{ stripFrozen $arg }}
{{- end -}})
SFUNC {{ .StateFunc.Name }}
STYPE {{ stripFrozen .State }}
{{- if ne .FinalFunc.Name "" }}
FINALFUNC {{ .FinalFunc.Name }}
{{- end -}}
{{- if ne .InitCond "" }}
INITCOND {{ .InitCond }}
{{- end -}}
);
`))
func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error {
if err := aggregatesTemplate.Execute(w, am); err != nil {
return err
}
return nil
}
var typeCQLTemplate = template.Must(template.New("types").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
}).
Parse(`
CREATE TYPE {{ .Keyspace }}.{{ .Name }} (
{{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }}
{{ index $fields 0 }} {{ index $fields 1 }}
{{- end }}
);
`))
func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error {
if err := typeCQLTemplate.Execute(w, tm); err != nil {
return err
}
return nil
}
func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error {
// Scylla doesn't support any custom indexes
if im.Kind == IndexKindCustom {
return nil
}
options := im.Options
indexTarget := options["target"]
// secondary index
si := struct {
ClusteringKeys []string `json:"ck"`
PartitionKeys []string `json:"pk"`
}{}
if err := json.Unmarshal([]byte(indexTarget), &si); err == nil {
indexTarget = fmt.Sprintf("(%s), %s",
strings.Join(si.PartitionKeys, ","),
strings.Join(si.ClusteringKeys, ","),
)
}
_, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n",
im.Name,
im.KeyspaceName,
im.TableName,
indexTarget,
)
if err != nil {
return err
}
return nil
}
var keyspaceCQLTemplate = template.Must(template.New("keyspace").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"fixStrategy": cqlHelpers.fixStrategy,
}).
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = {
'class': {{ escape ( fixStrategy .StrategyClass) }}
{{- range $key, $value := .StrategyOptions }},
{{ escape $key }}: {{ escape $value }}
{{- end }}
}{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }};
`))
func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error {
if err := keyspaceCQLTemplate.Execute(w, km); err != nil {
return err
}
return nil
}
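A self-contained sketch of how a keyspace template like keyspaceCQLTemplate above renders; the sampleKeyspace struct, its values, and the simplified helpers are assumptions for illustration, not the driver's KeyspaceMetadata or cqlHelpers.

package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

// sampleKeyspace stands in for the metadata the real template receives.
type sampleKeyspace struct {
	Name            string
	StrategyClass   string
	StrategyOptions map[string]interface{}
	DurableWrites   bool
}

func main() {
	// Minimal re-implementations of the escape/fixStrategy helpers used above.
	funcs := template.FuncMap{
		"fixStrategy": func(v string) string { return strings.TrimPrefix(v, "org.apache.cassandra.locator.") },
		"escape": func(v interface{}) string {
			if s, ok := v.(string); ok {
				return "'" + strings.ReplaceAll(s, "'", "''") + "'"
			}
			return fmt.Sprint(v)
		},
	}
	t := template.Must(template.New("keyspace").Funcs(funcs).Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = {
	'class': {{ escape ( fixStrategy .StrategyClass) }}
{{- range $key, $value := .StrategyOptions }},
	{{ escape $key }}: {{ escape $value }}
{{- end }}
}{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }};
`))
	// Prints a CREATE KEYSPACE statement for the sample metadata.
	_ = t.Execute(os.Stdout, sampleKeyspace{
		Name:            "ks",
		StrategyClass:   "org.apache.cassandra.locator.SimpleStrategy",
		StrategyOptions: map[string]interface{}{"replication_factor": "1"},
		DurableWrites:   true,
	})
}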
func contains(in []string, v string) bool {
for _, e := range in {
if e == v {
return true
}
}
return false
}
type toCQLHelpers struct{}
var cqlHelpers = toCQLHelpers{}
func (h toCQLHelpers) zip(a []string, b []string) [][]string {
m := make([][]string, len(a))
for i := range a {
m[i] = []string{a[i], b[i]}
}
return m
}
func (h toCQLHelpers) escape(e interface{}) string {
switch v := e.(type) {
case int, float64:
return fmt.Sprint(v)
case bool:
if v {
return "true"
}
return "false"
case string:
return "'" + strings.ReplaceAll(v, "'", "''") + "'"
case []byte:
return string(v)
}
return ""
}
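A short standalone demo of the escape helper above: strings are single-quoted with embedded quotes doubled, while numbers and booleans are printed bare; the inputs are sample values.

package main

import (
	"fmt"
	"strings"
)

// escape mirrors the helper above.
func escape(e interface{}) string {
	switch v := e.(type) {
	case int, float64:
		return fmt.Sprint(v)
	case bool:
		if v {
			return "true"
		}
		return "false"
	case string:
		return "'" + strings.ReplaceAll(v, "'", "''") + "'"
	case []byte:
		return string(v)
	}
	return ""
}

func main() {
	fmt.Println(escape("O'Brien")) // 'O''Brien'
	fmt.Println(escape(0.01))      // 0.01
	fmt.Println(escape(true))      // true
}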
func (h toCQLHelpers) stripFrozen(v string) string {
return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">")
}
func (h toCQLHelpers) fixStrategy(v string) string {
return strings.TrimPrefix(v, "org.apache.cassandra.locator.")
}
func (h toCQLHelpers) fixQuote(v string) string {
return strings.ReplaceAll(v, `"`, `'`)
}
func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) {
opts := map[string]interface{}{
"bloom_filter_fp_chance": ops.BloomFilterFpChance,
"comment": ops.Comment,
"crc_check_chance": ops.CrcCheckChance,
"dclocal_read_repair_chance": ops.DcLocalReadRepairChance,
"default_time_to_live": ops.DefaultTimeToLive,
"gc_grace_seconds": ops.GcGraceSeconds,
"max_index_interval": ops.MaxIndexInterval,
"memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs,
"min_index_interval": ops.MinIndexInterval,
"read_repair_chance": ops.ReadRepairChance,
"speculative_retry": ops.SpeculativeRetry,
}
var err error
opts["caching"], err = json.Marshal(ops.Caching)
if err != nil {
return nil, err
}
opts["compaction"], err = json.Marshal(ops.Compaction)
if err != nil {
return nil, err
}
opts["compression"], err = json.Marshal(ops.Compression)
if err != nil {
return nil, err
}
cdc, err := json.Marshal(ops.CDC)
if err != nil {
return nil, err
|
random_line_split
|
|
recreate.go
|
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} (
{{ tableColumnToCQL .Tm }}
) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Flags .Tm.Extensions }};
`))
func (km *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error {
if err := tableCQLTemplate.Execute(w, map[string]interface{}{
"Tm": tm,
"KeyspaceName": kn,
}); err != nil {
return err
}
return nil
}
var functionTemplate = template.Must(template.New("functions").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"zip": cqlHelpers.zip,
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE FUNCTION {{ escape .keyspaceName }}.{{ escape .fm.Name }} (
{{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{- escape (index $args 0) }}
{{ stripFrozen (index $args 1) }}
{{- end -}})
{{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT
RETURNS {{ .fm.ReturnType }}
LANGUAGE {{ .fm.Language }}
AS $${{ .fm.Body }}$$;
`))
func (km *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error {
if err := functionTemplate.Execute(w, map[string]interface{}{
"fm": fm,
"keyspaceName": keyspaceName,
}); err != nil {
return err
}
return nil
}
var viewTemplate = template.Must(template.New("views").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
"partitionKeyString": cqlHelpers.partitionKeyString,
"tablePropertiesToCQL": cqlHelpers.tablePropertiesToCQL,
}).
Parse(`
CREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS
SELECT {{ if .vm.IncludeAllColumns }}*{{ else }}
{{- range $i, $col := .vm.OrderedColumns }}
{{- if ne $i 0 }}, {{ end }}
{{ $col }}
{{- end }}
{{- end }}
FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }}
WHERE {{ .vm.WhereClause }}
PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }})
WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .flags .vm.Extensions }};
`))
func (km *KeyspaceMetadata)
|
(w io.Writer, vm *ViewMetadata) error {
if err := viewTemplate.Execute(w, map[string]interface{}{
"vm": vm,
"flags": []string{},
}); err != nil {
return err
}
return nil
}
var aggregatesTemplate = template.Must(template.New("aggregate").
Funcs(map[string]interface{}{
"stripFrozen": cqlHelpers.stripFrozen,
}).
Parse(`
CREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}(
{{- range $i, $arg := .ArgumentTypes }}
{{- if ne $i 0 }}, {{ end }}
{{ stripFrozen $arg }}
{{- end -}})
SFUNC {{ .StateFunc.Name }}
STYPE {{ stripFrozen .State }}
{{- if ne .FinalFunc.Name "" }}
FINALFUNC {{ .FinalFunc.Name }}
{{- end -}}
{{- if ne .InitCond "" }}
INITCOND {{ .InitCond }}
{{- end -}}
);
`))
func (km *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error {
if err := aggregatesTemplate.Execute(w, am); err != nil {
return err
}
return nil
}
var typeCQLTemplate = template.Must(template.New("types").
Funcs(map[string]interface{}{
"zip": cqlHelpers.zip,
}).
Parse(`
CREATE TYPE {{ .Keyspace }}.{{ .Name }} (
{{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }}
{{ index $fields 0 }} {{ index $fields 1 }}
{{- end }}
);
`))
func (km *KeyspaceMetadata) userTypeToCQL(w io.Writer, tm *TypeMetadata) error {
if err := typeCQLTemplate.Execute(w, tm); err != nil {
return err
}
return nil
}
func (km *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error {
// Scylla doesn't support any custom indexes
if im.Kind == IndexKindCustom {
return nil
}
options := im.Options
indexTarget := options["target"]
// secondary index
si := struct {
ClusteringKeys []string `json:"ck"`
PartitionKeys []string `json:"pk"`
}{}
if err := json.Unmarshal([]byte(indexTarget), &si); err == nil {
indexTarget = fmt.Sprintf("(%s), %s",
strings.Join(si.PartitionKeys, ","),
strings.Join(si.ClusteringKeys, ","),
)
}
_, err := fmt.Fprintf(w, "\nCREATE INDEX %s ON %s.%s (%s);\n",
im.Name,
im.KeyspaceName,
im.TableName,
indexTarget,
)
if err != nil {
return err
}
return nil
}
var keyspaceCQLTemplate = template.Must(template.New("keyspace").
Funcs(map[string]interface{}{
"escape": cqlHelpers.escape,
"fixStrategy": cqlHelpers.fixStrategy,
}).
Parse(`CREATE KEYSPACE {{ .Name }} WITH replication = {
'class': {{ escape ( fixStrategy .StrategyClass) }}
{{- range $key, $value := .StrategyOptions }},
{{ escape $key }}: {{ escape $value }}
{{- end }}
}{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }};
`))
func (km *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error {
if err := keyspaceCQLTemplate.Execute(w, km); err != nil {
return err
}
return nil
}
func contains(in []string, v string) bool {
for _, e := range in {
if e == v {
return true
}
}
return false
}
type toCQLHelpers struct{}
var cqlHelpers = toCQLHelpers{}
func (h toCQLHelpers) zip(a []string, b []string) [][]string {
m := make([][]string, len(a))
for i := range a {
m[i] = []string{a[i], b[i]}
}
return m
}
func (h toCQLHelpers) escape(e interface{}) string {
switch v := e.(type) {
case int, float64:
return fmt.Sprint(v)
case bool:
if v {
return "true"
}
return "false"
case string:
return "'" + strings.ReplaceAll(v, "'", "''") + "'"
case []byte:
return string(v)
}
return ""
}
func (h toCQLHelpers) stripFrozen(v string) string {
return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">")
}
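A short standalone demo of the stripFrozen helper above: a single outer frozen<...> wrapper is removed (TrimPrefix and TrimSuffix each act once); the inputs are sample values.

package main

import (
	"fmt"
	"strings"
)

// stripFrozen mirrors the helper above.
func stripFrozen(v string) string {
	return strings.TrimSuffix(strings.TrimPrefix(v, "frozen<"), ">")
}

func main() {
	fmt.Println(stripFrozen("frozen<map<text, int>>")) // map<text, int>
	fmt.Println(stripFrozen("int"))                    // int
}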
func (h toCQLHelpers) fixStrategy(v string) string {
return strings.TrimPrefix(v, "org.apache.cassandra.locator.")
}
func (h toCQLHelpers) fixQuote(v string) string {
return strings.ReplaceAll(v, `"`, `'`)
}
func (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) {
opts := map[string]interface{}{
"bloom_filter_fp_chance": ops.BloomFilterFpChance,
"comment": ops.Comment,
"crc_check_chance": ops.CrcCheckChance,
"dclocal_read_repair_chance": ops.DcLocalReadRepairChance,
"default_time_to_live": ops.DefaultTimeToLive,
"gc_grace_seconds": ops.GcGraceSeconds,
"max_index_interval": ops.MaxIndexInterval,
"memtable_flush_period_in_ms": ops.MemtableFlushPeriodInMs,
"min_index_interval": ops.MinIndexInterval,
"read_repair_chance": ops.ReadRepairChance,
"speculative_retry": ops.SpeculativeRetry,
}
var err error
opts["caching"], err = json.Marshal(ops.Caching)
if err != nil {
return nil, err
}
opts["compaction"], err = json.Marshal(ops.Compaction)
if err != nil {
return nil, err
}
opts["compression"], err = json.Marshal(ops.Compression)
if err != nil {
return nil, err
}
cdc, err := json.Marshal(ops.CDC)
if err != nil {
return nil, err
|
viewToCQL
|
identifier_name
|
index.ts
|
IDEMPOTENT_HTTP_METHODS = SAFE_HTTP_METHODS.concat(['put', 'delete'])
export interface AxiosExtendObject {
promiseKey: string
url: string
promise: Promise<any>
source: CancelTokenSource
}
export interface AxiosExtendCurrentStateType {
lastRequestTime: number
retryCount: number
}
export interface AxiosExtendRequestOptions extends AxiosRequestConfig {
[namespace]?: any
unique?: boolean
orderly?: boolean
requestOptions?: AxiosExtendRequestOptions
cancelToken?: CancelToken
type?: string
error?: string
}
export interface AxiosExtendConfig {
maxConnections?: number
unique?: boolean
retries?: number
orderly?: boolean
shouldResetTimeout?: boolean
retryCondition?(error: any): boolean
retryDelay?(retryNumber: number, error: any): number
setHeaders?(instance: AxiosInstance): void
onRequest?(config: AxiosRequestConfig, requestOptions: AxiosExtendRequestOptions): AxiosRequestConfig | Promise<AxiosRequestConfig>
onRequestError?(error: any): void
onResponse?(res: AxiosResponse<any>, requestOptions: AxiosExtendRequestOptions): AxiosResponse<any> | Promise<AxiosResponse<any>>
onResponseError?(error: any): void
onError?(error: any): void
onCancel?(error: any): void
}
/**
 * Get the default delay time, in milliseconds
* @returns number - delay in milliseconds, always 0
*/
function noDelay() {
return 0
}
/**
* Initializes and returns the retry state for the given request/config
* @param config - AxiosExtendRequestOptions
* @return currentState
*/
function getCurrentState(config: AxiosExtendRequestOptions): AxiosExtendCurrentStateType {
const currentState = config[namespace] || {}
currentState.retryCount = currentState.retryCount || 0
config[namespace] = currentState
return currentState
}
/**
* Returns the axios-retry options for the current request
* @param config - AxiosExtendRequestOptions
* @param defaultOptions - AxiosExtendConfig
* @return options
*/
function getRequestOptions(config: AxiosExtendRequestOptions, defaultOptions: AxiosExtendConfig): AxiosExtendConfig {
return Object.assign({}, defaultOptions, config[namespace])
}
/**
* @param axios - any
* @param config - any
*/
function fixConfig(axios: any, config: any): void {
if (axios.defaults.agent === config.agent) {
delete config.agent
}
if (axios.defaults.httpAgent === config.httpAgent) {
delete config.httpAgent
}
if (axios.defaults.httpsAgent === config.httpsAgent) {
delete config.httpsAgent
}
}
/**
 * @param error - the error object
* @return boolean
*/
export function isNetworkError(error: AxiosError): boolean {
return (
!error.response &&
Boolean(error.code) && // Prevents retrying cancelled requests
error.code !== 'ECONNABORTED' && // Prevents retrying timed out requests
isRetryAllowed(error)
) // Prevents retrying unsafe errors
}
/**
 * @param error - the error object
* @return boolean
*/
export function isSafeRequestError(error: any): boolean {
// Cannot determine if the request can be retried
if (!error.config) return false
return isRetryableError(error) && SAFE_HTTP_METHODS.indexOf(error.config.method) !== -1
}
/**
 * @param error - the error object
* @return boolean
*/
export function isIdempotentRequestError(error: any): boolean {
// Cannot determine if the request can be retried
if (!error.config) return false
return isRetryableError(error) && IDEMPOTENT_HTTP_METHODS.indexOf(error.config.method) !== -1
}
/**
 * @param error - the error object
* @return boolean
*/
export function isNetworkOrIdempotentRequestError(error: AxiosError): boolean {
return isNetworkError(error) || isIdempotentRequestError(error)
}
/**
 * @param retryNumber - defaults to 0
 * @return delay in milliseconds
*/
export function exponentialDelay(retryNumber = 0) {
const delay = Math.pow(2, retryNumber) * 1000
const
|
tion isRetryableError(error: AxiosError): boolean {
return error.code !== 'ECONNABORTED' && (!error.response || (error.response.status >= 500 && error.response.status <= 599))
}
/**
 * axios wrapper
*
* @return Promise
*/
class AxiosExtend {
waiting: Array<AxiosExtendObject> = [] // request queue
maxConnections: number // max concurrent connections, default: 0 = unlimited
orderly: boolean // whether responses must be returned in order, default: true
unique: boolean // whether to cancel earlier requests to the same url, default: false
retries: number // number of retries, default: 0 = no retry
onCancel // callback invoked when a request is cancelled
constructor({ maxConnections, orderly, unique, retries, onCancel, ...defaultOptions }: AxiosExtendConfig) {
this.maxConnections = maxConnections ?? 0
this.orderly = orderly ?? true
this.unique = unique ?? false
this.retries = retries ?? 0
this.onCancel = onCancel ?? null
// run initialization
this.init(defaultOptions)
}
/**
 * Initialize default options and interceptors
*/
public init(defaultOptions: AxiosExtendConfig): void {
const { setHeaders, onRequest, onRequestError, onResponse, onResponseError, onError } = defaultOptions
// set request headers
setHeaders && setHeaders(axios)
// add a request interceptor
onRequest &&
axios.interceptors.request.use(
config => {
const currentState = getCurrentState(config)
currentState.lastRequestTime = Date.now()
if (currentState.retryCount > 0) return config // a retried request does not need to run onRequest again
return onRequest(config, (config as any).requestOptions)
},
(err: any) => {
onRequestError && onRequestError(err)
onError && onError(err)
return Promise.reject(err)
}
)
// add a response interceptor
onResponse &&
axios.interceptors.response.use(
res => {
return onResponse(res, (res.config as any).requestOptions)
},
(err: any): Promise<any> => {
const config: any = err.config
// If we have no information to retry the request
if (!config) {
onResponseError && onResponseError(err)
onError && onError(err)
return Promise.reject(err)
}
const { retries = this.retries, retryCondition = isNetworkOrIdempotentRequestError, retryDelay = noDelay, shouldResetTimeout = false } = getRequestOptions(config, defaultOptions)
const currentState = getCurrentState(config)
const shouldRetry = retryCondition(err) && currentState.retryCount < retries
if (shouldRetry) {
currentState.retryCount += 1
const delay = retryDelay(currentState.retryCount, err)
// Axios fails merging this configuration to the default configuration because it has an issue
// with circular structures: https://github.com/mzabriskie/axios/issues/370
fixConfig(axios, config)
if (!shouldResetTimeout && config.timeout && currentState.lastRequestTime) {
const lastRequestDuration = Date.now() - currentState.lastRequestTime
// Minimum 1ms timeout (passing 0 or less to XHR means no timeout)
config.timeout = Math.max(config.timeout - lastRequestDuration - delay, 1)
}
config.transformRequest = [(data: any) => data]
return new Promise(resolve => setTimeout(() => resolve(axios(config)), delay))
}
onResponseError && onResponseError(err)
onError && onError(err)
return Promise.reject(err)
}
)
}
/**
 * Create a request
*/
public create(options: AxiosExtendRequestOptions): Promise<any> {
const { unique = this.unique, orderly = this.orderly, url = '' } = options
const promiseKey = getRandomStr(6) + '_' + Date.now()
const source: CancelTokenSource = axios.CancelToken.source()
options.requestOptions = extend(true, {}, options) as AxiosExtendRequestOptions
options.cancelToken = source.token
const promise = new Promise(async (resolve, reject) => {
// responses must be returned in order, or earlier requests with the same url must be cancelled
if (unique || orderly) {
let len = this.waiting.length
while (len > 0) {
len--
if (this.waiting[len].url === url) {
if (unique) this.waiting.splice(len, 1)[0].source.cancel('request canceled')
else {
try {
await this.waiting[len]
// await this.waiting.splice(len, 1)[0].promise
} catch {
this.waiting.splice(len, 1)
console.info('the task has been dropped')
}
}
}
}
}
// the max-connections limit has been reached, so wait for at least one pending task to finish
if (this.maxConnections > 0 && this.waiting.length >= this.maxConnections) {
try {
await
|
randomSum = delay * 0.5 * Math.random() // 0-50% of the delay
return delay + randomSum
}
/**
 * @param error - the error object
* @return boolean
*/
export func
|
identifier_body
|
index.ts
|
IDEMPOTENT_HTTP_METHODS = SAFE_HTTP_METHODS.concat(['put', 'delete'])
export interface AxiosExtendObject {
promiseKey: string
url: string
promise: Promise<any>
source: CancelTokenSource
}
export interface AxiosExtendCurrentStateType {
lastRequestTime: number
retryCount: number
}
export interface AxiosExtendRequestOptions extends AxiosRequestConfig {
[namespace]?: any
unique?: boolean
orderly?: boolean
requestOptions?: AxiosExtendRequestOptions
cancelToken?: CancelToken
type?: string
error?: string
}
export interface AxiosExtendConfig {
maxConnections?: number
unique?: boolean
retries?: number
orderly?: boolean
shouldResetTimeout?: boolean
retryCondition?(error: any): boolean
retryDelay?(retryNumber: number, error: any): number
setHeaders?(instance: AxiosInstance): void
onRequest?(config: AxiosRequestConfig, requestOptions: AxiosExtendRequestOptions): AxiosRequestConfig | Promise<AxiosRequestConfig>
onRequestError?(error: any): void
onResponse?(res: AxiosResponse<any>, requestOptions: AxiosExtendRequestOptions): AxiosResponse<any> | Promise<AxiosResponse<any>>
onResponseError?(error: any): void
onError?(error: any): void
onCancel?(error: any): void
}
/**
 * Get the default delay time, in milliseconds
* @returns number - delay in milliseconds, always 0
*/
function noDelay() {
return 0
}
/**
* Initializes and returns the retry state for the given request/config
* @param config - AxiosExtendRequestOptions
* @return currentState
*/
function getCurrentState(config: AxiosExtendRequestOptions): AxiosExtendCurrentStateType {
const currentState = config[namespace] || {}
currentState.retryCount = currentState.retryCount || 0
config[namespace] = currentState
return currentState
}
/**
* Returns the axios-retry options for the current request
* @param config - AxiosExtendRequestOptions
* @param defaultOptions - AxiosExtendConfig
* @return options
*/
function getRequestOptions(config: AxiosExtendRequestOptions, defaultOptions: AxiosExtendConfig): AxiosExtendConfig {
return Object.assign({}, defaultOptions, config[namespace])
}
/**
* @param axios - any
* @param config - any
*/
function fixConfig(axios: any, config: any): void {
if (axios.defaults.agent === config.agent) {
delete config.agent
}
if (axios.defaults.httpAgent === config.httpAgent) {
delete config.httpAgent
}
if (axios.defaults.httpsAgent === config.httpsAgent) {
delete config.httpsAgent
}
}
/**
 * @param error - the error object
* @return boolean
*/
export function isNetworkError(error: AxiosError): boolean {
return (
!error.response &&
Boolean(error.code) && // Prevents retrying cancelled requests
error.code !== 'ECONNABORTED' && // Prevents retrying timed out requests
isRetryAllowed(error)
) // Prevents retrying unsafe errors
}
/**
 * @param error - the error object
* @return boolean
*/
export function isSafeRequestError(error: any): boolean {
// Cannot determine if the request can be retried
if (!error.config) return false
return isRetryableError(error) && SAFE_HTTP_METHODS.indexOf(error.config.method) !== -1
}
/**
 * @param error - the error object
* @return boolean
*/
export function isIdempotentRequestError(error: any): boolean {
// Cannot determine if the request can be retried
if (!error.config) return false
return isRetryableError(error) && IDEMPOTENT_HTTP_METHODS.indexOf(error.config.method) !== -1
}
/**
 * @param error - the error object
* @return boolean
*/
export function isNetworkOrIdempotentRequestError(error: AxiosError): boolean {
return isNetworkError(error) || isIdempotentRequestError(error)
}
/**
 * @param retryNumber - defaults to 0
 * @return delay in milliseconds
*/
export function exponentialDelay(retryNumber = 0) {
const delay = Math.pow(2, retryNumber) * 1000
const randomSum = delay * 0.5 * Math.random() // 0-50% of the delay
return delay + randomSum
}
/**
 * @param error - the error object
* @return boolean
*/
export function isRetryableError(error: AxiosError): boolean {
return error.code !== 'ECONNABORTED' && (!error.response || (error.response.status >= 500 && error.response.status <= 599))
}
/**
 * axios wrapper
*
* @return Promise
*/
class AxiosExtend {
waiting: Array<AxiosExtendObject> = [] // request queue
maxConnections: number // max concurrent connections, default: 0 = unlimited
orderly: boolean // whether responses must be returned in order, default: true
unique: boolean // whether to cancel earlier requests to the same url, default: false
retries: number // number of retries, default: 0 = no retry
onCancel // callback invoked when a request is cancelled
constructor({ maxConnections, orderly, unique, retries, onCancel, ...defaultOptions }: AxiosExtendConfig) {
this.maxConnections = maxConnections ?? 0
this.orderly = orderly ?? tr
|
this.unique = unique ?? false
this.retries = retries ?? 0
this.onCancel = onCancel ?? null
// run initialization
this.init(defaultOptions)
}
/**
 * Initialize default options and interceptors
*/
public init(defaultOptions: AxiosExtendConfig): void {
const { setHeaders, onRequest, onRequestError, onResponse, onResponseError, onError } = defaultOptions
// set request headers
setHeaders && setHeaders(axios)
// add a request interceptor
onRequest &&
axios.interceptors.request.use(
config => {
const currentState = getCurrentState(config)
currentState.lastRequestTime = Date.now()
if (currentState.retryCount > 0) return config // a retried request does not need to run onRequest again
return onRequest(config, (config as any).requestOptions)
},
(err: any) => {
onRequestError && onRequestError(err)
onError && onError(err)
return Promise.reject(err)
}
)
// add a response interceptor
onResponse &&
axios.interceptors.response.use(
res => {
return onResponse(res, (res.config as any).requestOptions)
},
(err: any): Promise<any> => {
const config: any = err.config
// If we have no information to retry the request
if (!config) {
onResponseError && onResponseError(err)
onError && onError(err)
return Promise.reject(err)
}
const { retries = this.retries, retryCondition = isNetworkOrIdempotentRequestError, retryDelay = noDelay, shouldResetTimeout = false } = getRequestOptions(config, defaultOptions)
const currentState = getCurrentState(config)
const shouldRetry = retryCondition(err) && currentState.retryCount < retries
if (shouldRetry) {
currentState.retryCount += 1
const delay = retryDelay(currentState.retryCount, err)
// Axios fails merging this configuration to the default configuration because it has an issue
// with circular structures: https://github.com/mzabriskie/axios/issues/370
fixConfig(axios, config)
if (!shouldResetTimeout && config.timeout && currentState.lastRequestTime) {
const lastRequestDuration = Date.now() - currentState.lastRequestTime
// Minimum 1ms timeout (passing 0 or less to XHR means no timeout)
config.timeout = Math.max(config.timeout - lastRequestDuration - delay, 1)
}
config.transformRequest = [(data: any) => data]
return new Promise(resolve => setTimeout(() => resolve(axios(config)), delay))
}
onResponseError && onResponseError(err)
onError && onError(err)
return Promise.reject(err)
}
)
}
/**
 * Create a request
*/
public create(options: AxiosExtendRequestOptions): Promise<any> {
const { unique = this.unique, orderly = this.orderly, url = '' } = options
const promiseKey = getRandomStr(6) + '_' + Date.now()
const source: CancelTokenSource = axios.CancelToken.source()
options.requestOptions = extend(true, {}, options) as AxiosExtendRequestOptions
options.cancelToken = source.token
const promise = new Promise(async (resolve, reject) => {
// responses must be returned in order, or earlier requests with the same url must be cancelled
if (unique || orderly) {
let len = this.waiting.length
while (len > 0) {
len--
if (this.waiting[len].url === url) {
if (unique) this.waiting.splice(len, 1)[0].source.cancel('request canceled')
else {
try {
await this.waiting[len]
// await this.waiting.splice(len, 1)[0].promise
} catch {
this.waiting.splice(len, 1)
console.info('the task has been dropped')
}
}
}
}
}
// the max-connections limit has been reached, so wait for at least one pending task to finish
if (this.maxConnections > 0 && this.waiting.length >= this.maxConnections) {
try {
await (
|
ue
|
identifier_name
|
index.ts
|
const IDEMPOTENT_HTTP_METHODS = SAFE_HTTP_METHODS.concat(['put', 'delete'])
export interface AxiosExtendObject {
promiseKey: string
url: string
promise: Promise<any>
source: CancelTokenSource
}
export interface AxiosExtendCurrentStateType {
lastRequestTime: number
retryCount: number
}
export interface AxiosExtendRequestOptions extends AxiosRequestConfig {
[namespace]?: any
unique?: boolean
orderly?: boolean
requestOptions?: AxiosExtendRequestOptions
cancelToken?: CancelToken
type?: string
error?: string
}
export interface AxiosExtendConfig {
maxConnections?: number
unique?: boolean
retries?: number
orderly?: boolean
shouldResetTimeout?: boolean
retryCondition?(error: any): boolean
retryDelay?(retryNumber: number, error: any): number
setHeaders?(instance: AxiosInstance): void
onRequest?(config: AxiosRequestConfig, requestOptions: AxiosExtendRequestOptions): AxiosRequestConfig | Promise<AxiosRequestConfig>
onRequestError?(error: any): void
onResponse?(res: AxiosResponse<any>, requestOptions: AxiosExtendRequestOptions): AxiosResponse<any> | Promise<AxiosResponse<any>>
onResponseError?(error: any): void
onError?(error: any): void
onCancel?(error: any): void
}
/**
 * Get the default delay time, in milliseconds
* @returns number - delay in milliseconds, always 0
*/
function noDelay() {
return 0
}
/**
* Initializes and returns the retry state for the given request/config
* @param config - AxiosExtendRequestOptions
* @return currentState
*/
function getCurrentState(config: AxiosExtendRequestOptions): AxiosExtendCurrentStateType {
const currentState = config[namespace] || {}
currentState.retryCount = currentState.retryCount || 0
config[namespace] = currentState
return currentState
}
/**
* Returns the axios-retry options for the current request
* @param config - AxiosExtendRequestOptions
* @param defaultOptions - AxiosExtendConfig
* @return options
*/
function getRequestOptions(config: AxiosExtendRequestOptions, defaultOptions: AxiosExtendConfig): AxiosExtendConfig {
return Object.assign({}, defaultOptions, config[namespace])
}
/**
* @param axios - any
* @param config - any
*/
function fixConfig(axios: any, config: any): void {
if (axios.defaults.agent === config.agent) {
delete config.agent
}
if (axios.defaults.httpAgent === config.httpAgent) {
delete config.httpAgent
}
if (axios.defaults.httpsAgent === config.httpsAgent) {
delete config.httpsAgent
}
}
/**
 * @param error - the error object
* @return boolean
*/
export function isNetworkError(error: AxiosError): boolean {
return (
!error.response &&
Boolean(error.code) && // Prevents retrying cancelled requests
error.code !== 'ECONNABORTED' && // Prevents retrying timed out requests
isRetryAllowed(error)
) // Prevents retrying unsafe errors
}
|
export function isSafeRequestError(error: any): boolean {
// Cannot determine if the request can be retried
if (!error.config) return false
return isRetryableError(error) && SAFE_HTTP_METHODS.indexOf(error.config.method) !== -1
}
/**
 * @param error - the error object
* @return boolean
*/
export function isIdempotentRequestError(error: any): boolean {
// Cannot determine if the request can be retried
if (!error.config) return false
return isRetryableError(error) && IDEMPOTENT_HTTP_METHODS.indexOf(error.config.method) !== -1
}
/**
 * @param error - the error object
* @return boolean
*/
export function isNetworkOrIdempotentRequestError(error: AxiosError): boolean {
return isNetworkError(error) || isIdempotentRequestError(error)
}
/**
 * @param retryNumber - defaults to 0
 * @return delay in milliseconds
*/
export function exponentialDelay(retryNumber = 0) {
const delay = Math.pow(2, retryNumber) * 1000
const randomSum = delay * 0.5 * Math.random() // 0-50% of the delay
return delay + randomSum
}
/**
 * @param error - the error object
* @return boolean
*/
export function isRetryableError(error: AxiosError): boolean {
return error.code !== 'ECONNABORTED' && (!error.response || (error.response.status >= 500 && error.response.status <= 599))
}
/**
 * axios wrapper
*
* @return Promise
*/
class AxiosExtend {
waiting: Array<AxiosExtendObject> = [] // request queue
maxConnections: number // max concurrent connections, default: 0 = unlimited
orderly: boolean // whether responses must be returned in order, default: true
unique: boolean // whether to cancel earlier requests to the same url, default: false
retries: number // number of retries, default: 0 = no retry
onCancel // callback invoked when a request is cancelled
constructor({ maxConnections, orderly, unique, retries, onCancel, ...defaultOptions }: AxiosExtendConfig) {
this.maxConnections = maxConnections ?? 0
this.orderly = orderly ?? true
this.unique = unique ?? false
this.retries = retries ?? 0
this.onCancel = onCancel ?? null
// run initialization
this.init(defaultOptions)
}
/**
 * Initialize default options and interceptors
*/
public init(defaultOptions: AxiosExtendConfig): void {
const { setHeaders, onRequest, onRequestError, onResponse, onResponseError, onError } = defaultOptions
// set request headers
setHeaders && setHeaders(axios)
// add a request interceptor
onRequest &&
axios.interceptors.request.use(
config => {
const currentState = getCurrentState(config)
currentState.lastRequestTime = Date.now()
if (currentState.retryCount > 0) return config // a retried request does not need to run onRequest again
return onRequest(config, (config as any).requestOptions)
},
(err: any) => {
onRequestError && onRequestError(err)
onError && onError(err)
return Promise.reject(err)
}
)
// add a response interceptor
onResponse &&
axios.interceptors.response.use(
res => {
return onResponse(res, (res.config as any).requestOptions)
},
(err: any): Promise<any> => {
const config: any = err.config
// If we have no information to retry the request
if (!config) {
onResponseError && onResponseError(err)
onError && onError(err)
return Promise.reject(err)
}
const { retries = this.retries, retryCondition = isNetworkOrIdempotentRequestError, retryDelay = noDelay, shouldResetTimeout = false } = getRequestOptions(config, defaultOptions)
const currentState = getCurrentState(config)
const shouldRetry = retryCondition(err) && currentState.retryCount < retries
if (shouldRetry) {
currentState.retryCount += 1
const delay = retryDelay(currentState.retryCount, err)
// Axios fails merging this configuration to the default configuration because it has an issue
// with circular structures: https://github.com/mzabriskie/axios/issues/370
fixConfig(axios, config)
if (!shouldResetTimeout && config.timeout && currentState.lastRequestTime) {
const lastRequestDuration = Date.now() - currentState.lastRequestTime
// Minimum 1ms timeout (passing 0 or less to XHR means no timeout)
config.timeout = Math.max(config.timeout - lastRequestDuration - delay, 1)
}
config.transformRequest = [(data: any) => data]
return new Promise(resolve => setTimeout(() => resolve(axios(config)), delay))
}
onResponseError && onResponseError(err)
onError && onError(err)
return Promise.reject(err)
}
)
}
/**
* Create a request
*/
public create(options: AxiosExtendRequestOptions): Promise<any> {
const { unique = this.unique, orderly = this.orderly, url = '' } = options
const promiseKey = getRandomStr(6) + '_' + Date.now()
const source: CancelTokenSource = axios.CancelToken.source()
options.requestOptions = extend(true, {}, options) as AxiosExtendRequestOptions
options.cancelToken = source.token
const promise = new Promise(async (resolve, reject) => {
// responses must resolve in order, or earlier requests to the same URL must be cancelled
if (unique || orderly) {
let len = this.waiting.length
while (len > 0) {
len--
if (this.waiting[len].url === url) {
if (unique) this.waiting.splice(len, 1)[0].source.cancel('request canceled')
else {
try {
await this.waiting[len]
// await this.waiting.splice(len, 1)[0].promise
} catch {
this.waiting.splice(len, 1)
console.info('the task has been dropped')
}
}
}
}
}
// the concurrent-connection limit has been reached: wait for at least one pending task to finish
if (this.maxConnections > 0 && this.waiting.length >= this.maxConnections) {
try {
await (
|
/**
* @param error - the error to inspect
* @return boolean
*/
|
random_line_split
|
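A minimal Python sketch (illustrative only, not taken from the source row above) of the backoff schedule that exponentialDelay implements: the base delay doubles with each retry and up to 50% random jitter is added on top.

import random

def exponential_delay(retry_number: int = 0) -> float:
    """Return the retry delay in milliseconds: 2**n seconds plus 0-50% jitter."""
    delay = (2 ** retry_number) * 1000          # 1000 ms, 2000 ms, 4000 ms, ...
    random_sum = delay * 0.5 * random.random()  # 0-50% of the base delay
    return delay + random_sum

# rough expected ranges: attempt 0 -> 1000-1500 ms, attempt 1 -> 2000-3000 ms
for attempt in range(3):
    print(attempt, round(exponential_delay(attempt)), "ms")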
products.ts
|
27-32-Inch-1080p-Smart/dp/B07F981R8M/ref=sr_1_1?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-1',
likes: 55
},
{
id: 2.2,
productId: 'tvs',
name: 'SAMSUNG 85-inch Class Crystal UHD TU-8000',
price: 837.5,
description: 'CRYSTAL PROCESSOR 4K: This ultra-fast processor transforms everything you watch into stunning 4K. CRYSTAL DISPLAY: Experience crystal clear colors that are fine-tuned to deliver a naturally crisp and vivid picture.',
imageUrl: 'https://m.media-amazon.com/images/I/91FcuuZwcrL._AC_UL320_.jpg',
rating: 4.4,
shareLink: 'https://www.amazon.com/Samsung-85-inch-Crystal-TU-8000-Built/dp/B084JCFNHF/ref=sr_1_2?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-2',
likes: 50
},
{
id: 2.3,
productId: 'tvs',
name: 'TCL 65" 5-Series 4K UHD Dolby Vision HDR QLED',
price: 569.9,
description: 'Superior 4K Ultra HD: Picture clarity combined with the contrast, color, and detail of Dolby Vision HDR (High Dynamic Range) for the most lifelike picture. QLED: Quantum dot technology delivers better brightness and wider color volume.',
|
rating: 4.5,
shareLink: 'https://www.amazon.com/TCL-Dolby-Vision-QLED-Smart/dp/B08857ZHY3/ref=sr_1_3?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-3',
likes: 80
},
{
id: 2.4,
productId: 'tvs',
name: 'SAMSUNG QN32Q50RAFXZA Flat 32" QLED 4K',
price: 447.9,
description: '4K UHD Processor: a powerful processor optimizes your tv’ s performance with 4K picture quality. 4K UHD: see what you’ve been missing on a crisp, clear picture that’s 4x the resolution of Full HD. Inputs & Outputs: 3 HDMI ports, 1 Ethernet port, 2 USB Ports (v 2.0), 1 Digital Audio Output (Optical), 1 Composite Input (AV).',
imageUrl: 'https://m.media-amazon.com/images/I/51NKhnjhpGL._AC_UL320_.jpg',
rating: 4.6,
shareLink: 'https://www.amazon.com/SAMSUNG-QN32Q50RAFXZA-32Q50-Smart-TV2019/dp/B07W5QYD2K/ref=sr_1_5?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-5',
likes: 98
},
{
id: 2.5,
productId: 'tvs',
name: 'SAMSUNG 65-inch Class QLED Q80T Series',
price: 749.9,
description: 'DIRECT FULL ARRAY 12X (85", 75", 65" & 55"): Controlled backlights offer deeper contrast for richer blacks and brighter whites. QUANTUM HDR 12X (85", 75", 65" & 55"): Fine-tuned shades of cinematic color make details leap off the screen. QUANTUM PROCESSOR 4K: This powerful processor uses deep learning AI to transform everything you watch into stunning 4K.',
imageUrl: 'https://m.media-amazon.com/images/I/61DIUfDxBtL._AC_UL320_.jpg',
rating: 4.6,
shareLink: 'https://www.amazon.com/SAMSUNG-65-inch-Class-QLED-Built/dp/B0845ZSMWS/ref=sr_1_8?dchild=1&field-shipping_option-bin=3242350011&pf_rd_i=16225009011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=85a9188d-dbd5-424e-9512-339a1227d37c&pf_rd_r=SREFZAEZSX9R1FG29V2H&pf_rd_s=merchandised-search-5&pf_rd_t=101&qid=1614698224&rnid=1266092011&s=electronics&sr=1-8',
likes: 80
},
{
id: 3.1,
productId: 'computers',
name: 'Kingston 240GB A400 SATA 3 2.5" Internal SSD',
price: 34.9,
description: 'Fast start up, loading and file transfers. More reliable and durable than a hard drive. Multiple capacities with space for applications or a hard drive replacement.',
imageUrl: 'https://m.media-amazon.com/images/I/91RL+MhTWbL._AC_UL320_.jpg',
rating: 4.9,
shareLink: 'https://www.amazon.com/Kingston-240GB-Solid-SA400S37-240G/dp/B01N5IB20Q/ref=sr_1_11?dchild=1&fst=as%3Aoff&pf_rd_i=16225007011&pf_rd_m=ATVPDKIKX0DER&pf_rd_p=74069509-93ef-4a3c-8dca-a9e3fa773a64&pf_rd_r=17Y4X0Q1VVDP4JH1KN1V&pf_rd_s=merchandised-search-4&pf_rd_t=1
|
imageUrl: 'https://m.media-amazon.com/images/I/91tMNAWWsPL._AC_UL320_.jpg',
|
random_line_split
|
udacity_project_script.py
|
= ['Mon', 'Tue', 'Wed', 'Thu', 'Fry', 'Sat', 'Sun']
month, day = 'all_months', 'all_days'
if answer == 'both':
month = input("\nWhich month do you want to analyze? Jan, Feb ,.., Jun\n").capitalize()
while month not in legit_months:
print('There is no such month! Try again.')
month = input("\nWhich month do you want to analyze?\n").capitalize()
day = input("\nChoose a day of interest - Mon, Tue, ...\n").capitalize()
while day not in legit_days:
print("There is no such day! Try again.")
day = input("\nWhich day do you want to analyze? Mon, Tue, Wed...\n").capitalize()
elif answer == "month":
month = input("\nWhich month do you want to analyze? Jan, Feb, ..., Jun\n").capitalize()
while month not in legit_months:
print('There is no such month! Try again.')
month = input("\nWhich month do you want to analyze?\n").capitalize()
elif answer == 'day':
day = input("\nChoose a day of interest - Mon, Tue, Wed...\n").capitalize()
while day not in legit_days:
print("There is no such day! Try again.")
day = input("\nWhich day do you want to analyze?\n").capitalize()
return city, month, day
print('-'*40)
##############################################
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all_months" to apply no month filter
(str) day - name of the day of week to filter by, or "all_days" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
data = pd.read_csv("{}.csv".format(city))
data.drop(data.columns[0], axis = 1, inplace = True) # drop the unnamed index column that comes with the CSV
data['Start Time'] = pd.to_datetime(data['Start Time'], format='%Y-%m-%d %H:%M:%S')
data['End Time'] = pd.to_datetime(data['End Time'], format='%Y-%m-%d %H:%M:%S')
data['weekday'] = data['Start Time'].dt.dayofweek #0 - monday
data['month'] = data['Start Time'].dt.month #1 - january
data['hour'] = data['Start Time'].dt.hour # 1 - hour 1
day_dict = {"Mon":0, "Tue":1, "Wed":2, "Thu":3, "Fry":4, "Sat":5, "Sun":6}
month_dict = {"Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6}
if month == 'all_months' and day != 'all_days': # filter just by day
day = day_dict.get(day)
df = data[data['weekday'] == day]
elif day == 'all_days' and month != 'all_months': # filter just by month
month = month_dict.get(month)
df = data[data['month'] == month]
elif day == 'all_days' and month == 'all_months': # no filters
df = data
else: # filter both by day and month
day = day_dict.get(day)
month = month_dict.get(month)
df = data[(data['weekday']== day) & (data['month']==month)]
return df
###########################
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print("="*40)
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
most_freq_hour = str(df.groupby(['hour'])['Start Time'].count().idxmax())
high_hour_qty = str(df.groupby(['hour'])['Start Time'].count().max())
if len(df['weekday'].unique()) != 1 and len(df['month'].unique()) != 1: # no filter applied: report month, day and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) == 1 and len(df['weekday'].unique()) != 1: # filtered by month only: report day and hour
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) != 1 and len(df['weekday'].unique()) == 1: # filtered by day only: report month and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
else: # filtered down to a single month and day: report only the hour
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
start_station_high_freq = df.groupby(['Start Station'])['Start Time'].count().idxmax()
start_station_high_qty = df.groupby(['Start Station'])['Start Time'].count().max()
print("The hottest start station was {}".format(start_station_high_freq))
print("Bikes were rented there around {}".format(start_station_high_qty), "times")
print()
# TO DO: display most commonly used end station
end_station_high_freq = df.groupby(['End Station'])['Start Time'].count().idxmax()
end_station_high_qty = df.groupby(['End Station'])['Start Time'].count().max()
print("The hottest end station was {}".format(end_station_high_freq))
print("Bikes were rented there around {}".format(end_station_high_qty), "times")
print()
# TO DO: display most frequent combination of start station and end station trip
df_grouped = df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'}).sort_values(by = "count", ascending = False)
print("Most frequent stations combination was:\n{} and {}".format(str(df_grouped.iloc[0,0]), str(df_grouped.iloc[0,1])))
print("This route was accomplished {} times".format(int(df_grouped.iloc[0,2])))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def
|
(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
df['time'] = df['End Time'] - df['Start Time']
# TO DO: display total travel time
print("Total travel time in that period of time: {}".format(df['time'].sum()))
print("Average time of journey: {}".format(df['time'].mean()))
# TO DO: display mean travel time
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalcul
|
trip_duration_stats
|
identifier_name
|
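A minimal Python sketch (toy data, not the real city CSV files) of the month/day filtering that load_data above performs once the weekday and month columns have been derived from Start Time.

import pandas as pd

# three toy trips: a January Monday, a February Tuesday and a June Monday
data = pd.DataFrame({
    "Start Time": pd.to_datetime([
        "2017-01-02 08:00:00",
        "2017-02-07 09:30:00",
        "2017-06-05 18:15:00",
    ])
})
data["weekday"] = data["Start Time"].dt.dayofweek  # 0 = Monday
data["month"] = data["Start Time"].dt.month        # 1 = January

# the "filter by both day and month" branch of load_data: June (6) Mondays (0)
df = data[(data["weekday"] == 0) & (data["month"] == 6)]
print(len(df))  # -> 1, only the June Monday trip survives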
udacity_project_script.py
|
city = city.replace(" ", "_")
possible_answers = ['month', 'day', 'both', 'none'] # answers im gonna accept - 4 possibilities
answer = input("\nFilter by 'month','day' or 'both'? If you don't want to filter type 'none'\n").lower()
while answer not in possible_answers:
print("WAAT?!")
answer = input("\nFilter by 'month','day' or 'both'? If you don't want to filter type 'none'\n").lower()
legit_months = ['Jan', "Feb", "Mar", "Apr", "May", "Jun"]
legit_days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fry', 'Sat', 'Sun']
month, day = 'all_months', 'all_days'
if answer == 'both':
month = input("\nWhich month do you want to analyze? Jan, Feb ,.., Jun\n").capitalize()
while month not in legit_months:
print('There is no such month! Try again.')
month = input("\nWhich month do you want to analyze?\n").capitalize()
day = input("\nChoose a day of interest - Mon, Tue, ...\n").capitalize()
while day not in legit_days:
print("There is no such day! Try again.")
day = input("\nWhich day do you want to analyze? Mon, Tue, Wed...\n").capitalize()
elif answer == "month":
month = input("\nWhich month do you want to analyze? Jan, Feb, ..., Jun\n").capitalize()
while month not in legit_months:
print('There is no such month! Try again.')
month = input("\nWhich month do you want to analyze?\n").capitalize()
elif answer == 'day':
day = input("\nChoose a day of interest - Mon, Tue, Wed...\n").capitalize()
while day not in legit_days:
print("There is no such day! Try again.")
day = input("\nWhich day do you want to analyze?\n").capitalize()
return city, month, day
print('-'*40)
##############################################
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all_months" to apply no month filter
(str) day - name of the day of week to filter by, or "all_days" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
data = pd.read_csv("{}.csv".format(city))
data.drop(data.columns[0], axis = 1, inplace = True) # drop the unnamed index column that comes with the CSV
data['Start Time'] = pd.to_datetime(data['Start Time'], format='%Y-%m-%d %H:%M:%S')
data['End Time'] = pd.to_datetime(data['End Time'], format='%Y-%m-%d %H:%M:%S')
data['weekday'] = data['Start Time'].dt.dayofweek #0 - monday
data['month'] = data['Start Time'].dt.month #1 - january
data['hour'] = data['Start Time'].dt.hour # 1 - hour 1
day_dict = {"Mon":0, "Tue":1, "Wed":2, "Thu":3, "Fry":4, "Sat":5, "Sun":6}
month_dict = {"Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6}
if month == 'all_months' and day != 'all_days': # filter just by day
day = day_dict.get(day)
df = data[data['weekday'] == day]
elif day == 'all_days' and month != 'all_months': # filter just by month
month = month_dict.get(month)
df = data[data['month'] == month]
elif day == 'all_days' and month == 'all_months': # no filters
df = data
else: # filter both by day and month
day = day_dict.get(day)
month = month_dict.get(month)
df = data[(data['weekday']== day) & (data['month']==month)]
return df
###########################
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print("="*40)
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
most_freq_hour = str(df.groupby(['hour'])['Start Time'].count().idxmax())
high_hour_qty = str(df.groupby(['hour'])['Start Time'].count().max())
if len(df['weekday'].unique()) != 1 and len(df['month'].unique()) != 1: # no filter applied: report month, day and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) == 1 and len(df['weekday'].unique()) != 1: # filtered by month only: report day and hour
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) != 1 and len(df['weekday'].unique()) == 1: # filtered by day only: report month and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
else: # filtered down to a single month and day: report only the hour
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
start_station_high_freq = df.groupby(['Start Station'])['Start Time'].count().idxmax()
start_station_high_qty = df.groupby(['Start Station'])['Start Time'].count().max()
print("The hottest start station was {}".format(start_station_high_freq))
print("Bikes were rented there around {}".format(start_station_high_qty), "times")
print()
# TO DO: display most commonly used end station
end_station_high_freq = df.groupby(['End Station'])['Start Time'].count().idxmax()
end_station_high_qty = df.groupby(['End Station'])['Start Time'].count().max()
print("The hottest end station was {}".format(end_station_high_freq))
print("Bikes were rented there around {}".format(end_station_high_qty), "times")
print()
# TO DO: display most frequent combination of start station and end station trip
df_grouped = df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'}).sort_values(by = "count", ascending = False)
print("Most frequent stations combination was:\n{} and {}".format(str(df_grouped.iloc[0,0]), str(df_grouped.iloc[0,1])))
print("This route was accomplished {} times".format(int(df_grouped.iloc[0,2])))
print("\nThis took %s seconds." % (time.time
|
print("There is no such city in database!")
city = input("\nWhich city do you want to analyze?\n").lower()
|
conditional_block
|
|
udacity_project_script.py
|
month_dict = {"Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6}
if month == 'all_months' and day != 'all_days': # filter just by day
day = day_dict.get(day)
df = data[data['weekday'] == day]
elif day == 'all_days' and month != 'all_months': # filter just by month
month = month_dict.get(month)
df = data[data['month'] == month]
elif day == 'all_days' and month == 'all_months': # no filters
df = data
else: # filter both by day and month
day = day_dict.get(day)
month = month_dict.get(month)
df = data[(data['weekday']== day) & (data['month']==month)]
return df
###########################
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print("="*40)
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
most_freq_hour = str(df.groupby(['hour'])['Start Time'].count().idxmax())
high_hour_qty = str(df.groupby(['hour'])['Start Time'].count().max())
if len(df['weekday'].unique()) != 1 and len(df['month'].unique()) != 1: # no filter applied: report month, day and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) == 1 and len(df['weekday'].unique()) != 1: # filtered by month only: report day and hour
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) != 1 and len(df['weekday'].unique()) == 1: # filtered by day only: report month and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
else: # filtered down to a single month and day: report only the hour
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
start_station_high_freq = df.groupby(['Start Station'])['Start Time'].count().idxmax()
start_station_high_qty = df.groupby(['Start Station'])['Start Time'].count().max()
print("The hottest start station was {}".format(start_station_high_freq))
print("Bikes were rented there around {}".format(start_station_high_qty), "times")
print()
# TO DO: display most commonly used end station
end_station_high_freq = df.groupby(['End Station'])['Start Time'].count().idxmax()
end_station_high_qty = df.groupby(['End Station'])['Start Time'].count().max()
print("The hottest end station was {}".format(end_station_high_freq))
print("Bikes were rented there around {}".format(end_station_high_qty), "times")
print()
# TO DO: display most frequent combination of start station and end station trip
df_grouped = df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'}).sort_values(by = "count", ascending = False)
print("Most frequent stations combination was:\n{} and {}".format(str(df_grouped.iloc[0,0]), str(df_grouped.iloc[0,1])))
print("This route was accomplished {} times".format(int(df_grouped.iloc[0,2])))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
df['time'] = df['End Time'] - df['Start Time']
# TO DO: display total travel time
print("Total travel time in that period of time: {}".format(df['time'].sum()))
print("Average time of journey: {}".format(df['time'].mean()))
# TO DO: display mean travel time
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# TO DO: Display counts of user types
user_types = df.groupby(['User Type'])['Start Time'].count()
print(user_types.to_string())
print()
try:
# TO DO: Display counts of gender
gender = df.groupby(['Gender'])['Start Time'].count()
print(gender.to_string())
print()
# TO DO: Display earliest, most recent, and most common year of birth
earliest = int(df['Birth Year'].min())
common = int(df['Birth Year'].mode()[0]) # index with [0]: .mode() returns a Series, and without it the program crashes, e.g. for the June/Monday filter in NY
recent = int(df['Birth Year'].max())
print("The oldest person that rented a bicycle in that time was born in: {}".format(earliest))
print("The most common birth year: {}".format(common))
print("The youngest person: {}".format(recent))
except KeyError:
print("="*40)
print("For Washington there is no data on Gender and Birth Year")
print("="*40)
print("\nThis took %s seconds." % (time.time() - start_time))
print("="*40)
def show_entries(df): # shows the raw data five rows at a time
i = -1 # start at -1 so the first iteration shows rows 0-4; starting at 0 would skip the first five rows
while True:
i+=1
curious = input("Do you want to see five entries of raw data? Type 'yes' or 'no' \n")
if curious.lower() != 'yes':
break
else:
print(str(df.iloc[0+5*i:5+5*i, :8].to_json(orient = 'records',date_format = 'iso')).replace('},{', "\n\n").replace(",", "\n").replace("[{", "").replace("}]", "").replace('"', '').replace(":", ": "), "\n")
# the chained .replace() calls above are not particularly pythonic, but they work
# the ISO date format is not ideal, but no better alternative was found
def show_entries_washington(df): # no info on gender and age for washington
|
i = -1
while True:
i+=1
curious = input("Do you want to see five entries of raw data? Type 'yes' or 'no' \n")
if curious.lower() != 'yes':
break
else:
print(str(df.iloc[0+5*i:5+5*i, :6].to_json(orient = 'records',date_format = 'iso')).replace('},{', "\n\n").replace(",", "\n").replace("[{", "").replace("}]", "").replace('"', '').replace(":",": "), "\n")
|
identifier_body
|
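A minimal Python sketch (toy data) of the groupby/count/idxmax pattern that time_stats and station_stats above use to find the most frequent hour or station together with its count.

import pandas as pd

df = pd.DataFrame({
    "hour": [8, 8, 9, 17, 17, 17],
    "Start Time": pd.date_range("2017-06-01", periods=6, freq="h"),
})

counts = df.groupby(["hour"])["Start Time"].count()
most_freq_hour = counts.idxmax()  # hour with the most rentals
high_hour_qty = counts.max()      # how many rentals happened in that hour
print(most_freq_hour, high_hour_qty)  # -> 17 3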
|
udacity_project_script.py
|
format='%Y-%m-%d %H:%M:%S')
data['weekday'] = data['Start Time'].dt.dayofweek #0 - monday
data['month'] = data['Start Time'].dt.month #1 - january
data['hour'] = data['Start Time'].dt.hour # 1 - hour 1
day_dict = {"Mon":0, "Tue":1, "Wed":2, "Thu":3, "Fry":4, "Sat":5, "Sun":6}
month_dict = {"Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6}
if month == 'all_months' and day != 'all_days': # filter just by day
day = day_dict.get(day)
df = data[data['weekday'] == day]
elif day == 'all_days' and month != 'all_months': # filter just by month
month = month_dict.get(month)
df = data[data['month'] == month]
elif day == 'all_days' and month == 'all_months': # no filters
df = data
else: # filter both by day and month
day = day_dict.get(day)
month = month_dict.get(month)
df = data[(data['weekday']== day) & (data['month']==month)]
return df
###########################
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print("="*40)
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
most_freq_hour = str(df.groupby(['hour'])['Start Time'].count().idxmax())
high_hour_qty = str(df.groupby(['hour'])['Start Time'].count().max())
if len(df['weekday'].unique()) != 1 and len(df['month'].unique()) != 1: # no filter applied: report month, day and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) == 1 and len(df['weekday'].unique()) != 1: # filtered by month only: report day and hour
most_freq_day = int(df.groupby(['weekday'])['Start Time'].count().idxmax())
high_day_qty = str(df.groupby(['weekday'])['Start Time'].count().max())
print("Hottest day was {}".format(calendar.day_name[most_freq_day]))
print("Bikes were rented then for about {} times".format(high_day_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
elif len(df['month'].unique()) != 1 and len(df['weekday'].unique()) == 1: # filtered by day only: report month and hour
most_freq_month = int(df.groupby(['month'])['Start Time'].count().idxmax())
high_month_qty = str(df.groupby(['month'])['Start Time'].count().max())
print("Hottest month was {}".format(calendar.month_name[most_freq_month]))
print("Bikes were rented then for about {} times".format(high_month_qty))
print()
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
else: # filtered down to a single month and day: report only the hour
print("Hottest hour was {} o'clock".format(most_freq_hour))
print("Bikes were rented then for about {} times".format(high_hour_qty))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
start_station_high_freq = df.groupby(['Start Station'])['Start Time'].count().idxmax()
start_station_high_qty = df.groupby(['Start Station'])['Start Time'].count().max()
print("The hottest start station was {}".format(start_station_high_freq))
print("Bikes were rented there around {}".format(start_station_high_qty), "times")
print()
# TO DO: display most commonly used end station
end_station_high_freq = df.groupby(['End Station'])['Start Time'].count().idxmax()
end_station_high_qty = df.groupby(['End Station'])['Start Time'].count().max()
print("The hottest end station was {}".format(end_station_high_freq))
print("Bikes were rented there around {}".format(end_station_high_qty), "times")
print()
# TO DO: display most frequent combination of start station and end station trip
df_grouped = df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'}).sort_values(by = "count", ascending = False)
print("Most frequent stations combination was:\n{} and {}".format(str(df_grouped.iloc[0,0]), str(df_grouped.iloc[0,1])))
print("This route was accomplished {} times".format(int(df_grouped.iloc[0,2])))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
df['time'] = df['End Time'] - df['Start Time']
# TO DO: display total travel time
print("Total travel time in that period of time: {}".format(df['time'].sum()))
print("Average time of journey: {}".format(df['time'].mean()))
# TO DO: display mean travel time
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# TO DO: Display counts of user types
user_types = df.groupby(['User Type'])['Start Time'].count()
print(user_types.to_string())
print()
try:
# TO DO: Display counts of gender
gender = df.groupby(['Gender'])['Start Time'].count()
print(gender.to_string())
print()
# TO DO: Display earliest, most recent, and most common year of birth
earliest = int(df['Birth Year'].min())
common = int(df['Birth Year'].mode()[0]) # index with [0]: .mode() returns a Series, and without it the program crashes, e.g. for the June/Monday filter in NY
recent = int(df['Birth Year'].max())
print("The oldest person that rented a bicycle in that time was born in: {}".format(earliest))
print("The most common birth year: {}".format(common))
print("The youngest person: {}".format(recent))
except KeyError:
print("="*40)
print("For Washington there is no data on Gender and Birth Year")
print("="*40)
print("\nThis took %s seconds." % (time.time() - start_time))
print("="*40)
def show_entries(df): # shows the raw data five rows at a time
i = -1 # start at -1 so the first iteration shows rows 0-4; starting at 0 would skip the first five rows
while True:
i+=1
curious = input("Do you want to see five entries of raw data? Type 'yes' or 'no' \n")
if curious.lower() != 'yes':
break
else:
print(str(df.iloc[0+5*i:5+5*i, :8].to_json(orient = 'records',date_format = 'iso')).replace('},{', "\n\n").replace(",", "\n").replace("[{", "").replace("}]", "").replace('"', '').replace(":", ": "), "\n")
# the chained .replace() calls above are not particularly pythonic, but they work
# the ISO date format is not ideal, but no better alternative was found
|
def show_entries_washington(df): # no info on gender and age for washington
i = -1
while True:
|
random_line_split
|
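A minimal Python sketch (toy data, with the interactive prompt replaced by an empty-page check) of the five-rows-at-a-time paging used by show_entries above, where the counter starts at -1 so that the first page covers rows 0-4.

import pandas as pd

df = pd.DataFrame({"Trip Duration": range(12)})

i = -1
while True:
    i += 1
    page = df.iloc[0 + 5 * i : 5 + 5 * i]  # rows 0-4, then 5-9, then 10-11
    if page.empty:
        break
    print(page.to_dict(orient="records"))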
|
deps.rs
|
(
&self,
target_id: Identifier,
) -> Result<Vec<Target>> {
let graph = &self.graph;
let target_ix = *self
.id_to_ix_map
.get(&target_id)
.ok_or_else(|| UserError::NoSuchTarget(target_id))?;
let depth_map = util::generate_depth_map(graph, target_ix);
let obsolete_leaf_nodes =
util::find_obsolete_leaf_nodes(graph.graph())?;
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
}
}
mod util {
use std::collections::{HashMap, VecDeque};
use super::*;
use daggy::petgraph;
use petgraph::prelude::{Direction, Graph};
pub(super) fn get_target_sequence(
graph: &Graph<Node, ()>,
depth_map: &HashMap<Nx, usize>,
obsolete_targets: &HashSet<Nx>,
) -> Result<Vec<Target>> {
// filter out targets which are not in the
// dependency graph of the chosen target
// and sort the targets left by depth in **decreasing** order
let mut obsolete_targets = obsolete_targets
.iter()
.filter(|ix| depth_map.contains_key(ix))
.copied()
.collect::<Vec<_>>();
obsolete_targets.sort_by_key(|ix| depth_map[ix]);
obsolete_targets.reverse();
obsolete_targets
.into_iter()
.map(|target_ix| match &graph[target_ix] {
Node::Target(target) => Ok(target.clone()),
Node::NoRule(_) => Err(Error::internal(line!(), file!())),
})
.collect::<Result<Vec<_>>>()
}
/// This function finds all nodes that have no dependencies -
/// both actual files (`NoRule` variant) and targets
/// (`Target` variant with no dependencies is assumed to depend
/// on other factors - time, environment variables,
/// current directory etc.).
pub(super) fn find_obsolete_leaf_nodes(
graph: &Graph<Node, ()>,
) -> Result<HashSet<Nx>> {
graph
.externals(Direction::Outgoing) // get nodes with no outgoing edges
.filter_map(|node_ix| match &graph[node_ix] {
// TODO filter `requires_rebuilding`
Node::Target(_target) => Some(Ok(node_ix)),
Node::NoRule(identifier) => {
// TODO clean up this part
let previous_file_state = ();
let result = has_file_been_modified(
&identifier,
previous_file_state,
);
match result {
Ok(has_been_modified) =>
if has_been_modified {
Some(Ok(node_ix))
} else {
None
},
Err(err) => Some(Err(err)),
}
}
})
.collect::<Result<HashSet<_>>>()
}
pub(super) fn find_obsolete_targets(
graph: &Graph<Node, ()>,
obsolete_leaf_nodes: &HashSet<Nx>,
) -> HashSet<Nx> {
// reverse short circuiting bfs:
// skip the dependants of the targets
// that have already been marked as obsolete
let mut queue = VecDeque::<Nx>::new();
let mut obsolete_ixs = HashSet::<Nx>::new();
for leaf_ix in obsolete_leaf_nodes {
// no need to clear the queue since it gets drained
// in the while loop each time
match &graph[*leaf_ix] {
Node::Target(_) => queue.push_back(*leaf_ix),
Node::NoRule(_) => {
let direct_dependants =
graph.neighbors_directed(*leaf_ix, Direction::Incoming);
queue.extend(direct_dependants);
}
}
while let Some(target_ix) = queue.pop_front() {
let has_just_been_found = obsolete_ixs.insert(target_ix);
if has_just_been_found {
let dependants = graph
.neighbors_directed(target_ix, Direction::Incoming);
queue.extend(dependants);
}
}
}
obsolete_ixs
}
pub(super) fn
|
(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
dependency_identifier: Identifier,
) {
id_to_ix_map
.entry(dependency_identifier.clone())
.or_insert_with(|| {
// `.add_node()` returns node's index
graph.add_node(Node::NoRule(dependency_identifier))
});
}
pub(super) fn add_target_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
target: Target,
) -> Result<()> {
let identifier = target.identifier.clone();
let node_ix = graph.add_node(Node::Target(target));
let slot = id_to_ix_map.insert(identifier, node_ix);
match slot {
Some(_colliding_target_ix) =>
Err(UserError::DuplicateTarget.into()),
None => Ok(()),
}
}
pub(super) fn add_edges_to_deps(
graph: &mut DependencyDag,
id_to_ix_map: &HashMap<Identifier, Nx>,
target_ix: Nx,
) -> Result<()> {
let deps = match &graph[target_ix] {
Node::Target(target) => target.deps.clone(),
Node::NoRule(_) => return Ok(()), // no deps
};
deps.iter()
.map(|dep_id| {
id_to_ix_map
.get(dep_id)
.ok_or_else(|| Error::internal(line!(), file!()))
})
.map_item(|dep_ix| {
graph
.add_edge(target_ix, *dep_ix, ())
.map(|_| ())
.map_err(|_| UserError::DependencyCycle.into())
})
.map(|result| result.flatten())
.collect::<Result<_>>()
}
pub(super) fn has_file_been_modified(
_identifier: &Identifier,
_previous_state: FileState,
) -> Result<bool> {
Ok(true) // TODO for now it just rebuilds everything
}
pub(super) fn generate_depth_map<N, E>(
graph: &daggy::Dag<N, E>,
target_id: Nx,
) -> HashMap<Nx, usize> {
let mut depth_map: HashMap<Nx, usize> = HashMap::new();
let mut current_depth = 0;
let mut queue: VecDeque<Vec<_>> = VecDeque::new();
queue.push_front(vec![target_id]);
while let Some(level) = queue.pop_front() {
if level.is_empty() {
break;
}
let mut level_queue = vec![];
for current_node in level {
// update current node's depth
let _ = depth_map
.entry(current_node)
.and_modify(|depth| *depth = (*depth).max(current_depth))
.or_insert(current_depth);
let children =
graph.neighbors_directed(current_node, Direction::Outgoing);
level_queue.extend(children);
}
queue.push_back(level_queue);
current_depth += 1;
}
depth_map
}
}
#[cfg(test)]
mod test {
use super::*;
use daggy::petgraph::graph::node_index as n;
use pretty_assertions::assert_eq;
#[test]
fn test_get_task_sequence() {
// helper functions
let task = |cmd: &str| Task {
command: cmd.into(),
working_dir: None,
};
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![task(id)],
working_dir: None,
};
let ix = |id: &str, map: &HashMap<_, _>| {
let p: &std::path::Path = id.as_ref();
map[p]
};
// the dependency graph:
//
// a1 a2'
// / / \
// / / \
// b1 b2 b3
// / /
// / /
// l1* l2
//
// a2 is the target (')
// l1 is marked as obsolete (*)
// b2's and a2's tasks must be executed (in that order)
// targets and their dependencies
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1"]),
target("a2", &["b2", "b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
// depth map
#[rustfmt::skip]
let depth_map = vec![
(ix("a2", &map), 0),
(ix("b2", &map), 1),
(ix("b3", &map), 1),
(ix("l1", &map), 2),
(ix("l2", &map), 2),
].into_iter().collect();
// nodes that have been marked as obsolete
// (in real code it is automated)
let obsolete_leaf_nodes = vec![ix
|
add_leaf_node
|
identifier_name
|
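A minimal Python sketch (plain adjacency lists standing in for the daggy graph) of the level-order traversal behind generate_depth_map above: each node keeps the greatest depth at which it is reached from the chosen target.

def generate_depth_map(children: dict, target) -> dict:
    """children maps a node to the nodes it depends on (outgoing edges)."""
    depth_map = {}
    depth = 0
    level = [target]
    while level:
        next_level = []
        for node in level:
            # a node reachable along several paths keeps its maximum depth
            depth_map[node] = max(depth_map.get(node, 0), depth)
            next_level.extend(children.get(node, []))
        level = next_level
        depth += 1
    return depth_map

# the a2 -> {b2, b3} -> {l1, l2} part of the test graph above
print(generate_depth_map({"a2": ["b2", "b3"], "b2": ["l1"], "b3": ["l2"]}, "a2"))
# -> {'a2': 0, 'b2': 1, 'b3': 1, 'l1': 2, 'l2': 2}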
deps.rs
|
_nodes(graph.graph())?;
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
}
}
mod util {
use std::collections::{HashMap, VecDeque};
use super::*;
use daggy::petgraph;
use petgraph::prelude::{Direction, Graph};
pub(super) fn get_target_sequence(
graph: &Graph<Node, ()>,
depth_map: &HashMap<Nx, usize>,
obsolete_targets: &HashSet<Nx>,
) -> Result<Vec<Target>> {
// filter out targets which are not in the
// dependency graph of the chosen target
// and sort the targets left by depth in **decreasing** order
let mut obsolete_targets = obsolete_targets
.iter()
.filter(|ix| depth_map.contains_key(ix))
.copied()
.collect::<Vec<_>>();
obsolete_targets.sort_by_key(|ix| depth_map[ix]);
obsolete_targets.reverse();
obsolete_targets
.into_iter()
.map(|target_ix| match &graph[target_ix] {
Node::Target(target) => Ok(target.clone()),
Node::NoRule(_) => Err(Error::internal(line!(), file!())),
})
.collect::<Result<Vec<_>>>()
}
/// This function finds all nodes that have no dependencies -
/// both actual files (`NoRule` variant) and targets
/// (`Target` variant with no dependencies is assumed to depend
/// on other factors - time, environment variables,
/// current directory etc.).
pub(super) fn find_obsolete_leaf_nodes(
graph: &Graph<Node, ()>,
) -> Result<HashSet<Nx>> {
graph
.externals(Direction::Outgoing) // get nodes with no outgoing edges
.filter_map(|node_ix| match &graph[node_ix] {
// TODO filter `requires_rebuilding`
Node::Target(_target) => Some(Ok(node_ix)),
Node::NoRule(identifier) => {
// TODO clean up this part
let previous_file_state = ();
let result = has_file_been_modified(
&identifier,
previous_file_state,
);
match result {
Ok(has_been_modified) =>
if has_been_modified {
Some(Ok(node_ix))
} else {
None
},
Err(err) => Some(Err(err)),
}
}
})
.collect::<Result<HashSet<_>>>()
}
pub(super) fn find_obsolete_targets(
graph: &Graph<Node, ()>,
obsolete_leaf_nodes: &HashSet<Nx>,
) -> HashSet<Nx> {
// reverse short circuiting bfs:
// skip the dependants of the targets
// that have already been marked as obsolete
let mut queue = VecDeque::<Nx>::new();
let mut obsolete_ixs = HashSet::<Nx>::new();
for leaf_ix in obsolete_leaf_nodes {
// no need to clear the queue since it gets drained
// in the while loop each time
match &graph[*leaf_ix] {
Node::Target(_) => queue.push_back(*leaf_ix),
Node::NoRule(_) => {
let direct_dependants =
graph.neighbors_directed(*leaf_ix, Direction::Incoming);
queue.extend(direct_dependants);
}
}
while let Some(target_ix) = queue.pop_front() {
let has_just_been_found = obsolete_ixs.insert(target_ix);
if has_just_been_found {
let dependants = graph
.neighbors_directed(target_ix, Direction::Incoming);
queue.extend(dependants);
}
}
}
obsolete_ixs
}
pub(super) fn add_leaf_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
dependency_identifier: Identifier,
) {
id_to_ix_map
.entry(dependency_identifier.clone())
.or_insert_with(|| {
// `.add_node()` returns node's index
graph.add_node(Node::NoRule(dependency_identifier))
});
}
pub(super) fn add_target_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
target: Target,
) -> Result<()> {
let identifier = target.identifier.clone();
let node_ix = graph.add_node(Node::Target(target));
let slot = id_to_ix_map.insert(identifier, node_ix);
match slot {
Some(_colliding_target_ix) =>
Err(UserError::DuplicateTarget.into()),
None => Ok(()),
}
}
pub(super) fn add_edges_to_deps(
graph: &mut DependencyDag,
id_to_ix_map: &HashMap<Identifier, Nx>,
target_ix: Nx,
) -> Result<()> {
let deps = match &graph[target_ix] {
Node::Target(target) => target.deps.clone(),
Node::NoRule(_) => return Ok(()), // no deps
};
deps.iter()
.map(|dep_id| {
id_to_ix_map
.get(dep_id)
.ok_or_else(|| Error::internal(line!(), file!()))
})
.map_item(|dep_ix| {
graph
.add_edge(target_ix, *dep_ix, ())
.map(|_| ())
.map_err(|_| UserError::DependencyCycle.into())
})
.map(|result| result.flatten())
.collect::<Result<_>>()
}
pub(super) fn has_file_been_modified(
_identifier: &Identifier,
_previous_state: FileState,
) -> Result<bool> {
Ok(true) // TODO for now it just rebuilds everything
}
pub(super) fn generate_depth_map<N, E>(
graph: &daggy::Dag<N, E>,
target_id: Nx,
) -> HashMap<Nx, usize> {
let mut depth_map: HashMap<Nx, usize> = HashMap::new();
let mut current_depth = 0;
let mut queue: VecDeque<Vec<_>> = VecDeque::new();
queue.push_front(vec![target_id]);
while let Some(level) = queue.pop_front() {
if level.is_empty() {
break;
}
let mut level_queue = vec![];
for current_node in level {
// update current node's depth
let _ = depth_map
.entry(current_node)
.and_modify(|depth| *depth = (*depth).max(current_depth))
.or_insert(current_depth);
let children =
graph.neighbors_directed(current_node, Direction::Outgoing);
level_queue.extend(children);
}
queue.push_back(level_queue);
current_depth += 1;
}
depth_map
}
}
#[cfg(test)]
mod test {
use super::*;
use daggy::petgraph::graph::node_index as n;
use pretty_assertions::assert_eq;
#[test]
fn test_get_task_sequence() {
// helper functions
let task = |cmd: &str| Task {
command: cmd.into(),
working_dir: None,
};
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![task(id)],
working_dir: None,
};
let ix = |id: &str, map: &HashMap<_, _>| {
let p: &std::path::Path = id.as_ref();
map[p]
};
// the dependency graph:
//
// a1 a2'
// / / \
// / / \
// b1 b2 b3
// / /
// / /
// l1* l2
//
// a2 is the target (')
// l1 is marked as obsolete (*)
// b2's and a2's tasks must be executed (in that order)
// targets and their dependencies
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1"]),
target("a2", &["b2", "b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
// depth map
#[rustfmt::skip]
let depth_map = vec![
(ix("a2", &map), 0),
(ix("b2", &map), 1),
(ix("b3", &map), 1),
(ix("l1", &map), 2),
(ix("l2", &map), 2),
].into_iter().collect();
// nodes that have been marked as obsolete
// (in real code it is automated)
let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect();
// get the sequence of tasks that must be executed
// in specific order
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
let target_sequence = util::get_target_sequence(
graph.graph(),
&depth_map,
&obsolete_targets,
)
.unwrap();
let target_sequence = target_sequence
.into_iter()
|
random_line_split
|
||
deps.rs
|
(
&self,
target_id: Identifier,
) -> Result<Vec<Target>> {
let graph = &self.graph;
let target_ix = *self
.id_to_ix_map
.get(&target_id)
.ok_or_else(|| UserError::NoSuchTarget(target_id))?;
let depth_map = util::generate_depth_map(graph, target_ix);
let obsolete_leaf_nodes =
util::find_obsolete_leaf_nodes(graph.graph())?;
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
}
}
mod util {
use std::collections::{HashMap, VecDeque};
use super::*;
use daggy::petgraph;
use petgraph::prelude::{Direction, Graph};
pub(super) fn get_target_sequence(
graph: &Graph<Node, ()>,
depth_map: &HashMap<Nx, usize>,
obsolete_targets: &HashSet<Nx>,
) -> Result<Vec<Target>> {
// filter out targets which are not in the
// dependency graph of the chosen target
// and sort the targets left by depth in **decreasing** order
let mut obsolete_targets = obsolete_targets
.iter()
.filter(|ix| depth_map.contains_key(ix))
.copied()
.collect::<Vec<_>>();
obsolete_targets.sort_by_key(|ix| depth_map[ix]);
obsolete_targets.reverse();
obsolete_targets
.into_iter()
.map(|target_ix| match &graph[target_ix] {
Node::Target(target) => Ok(target.clone()),
Node::NoRule(_) => Err(Error::internal(line!(), file!())),
})
.collect::<Result<Vec<_>>>()
}
/// This function finds all nodes that have no dependencies -
/// both actual files (`NoRule` variant) and targets
/// (`Target` variant with no dependencies is assumed to depend
/// on other factors - time, environment variables,
/// current directory etc.).
pub(super) fn find_obsolete_leaf_nodes(
graph: &Graph<Node, ()>,
) -> Result<HashSet<Nx>> {
graph
.externals(Direction::Outgoing) // get nodes with no outgoing edges
.filter_map(|node_ix| match &graph[node_ix] {
// TODO filter `requires_rebuilding`
Node::Target(_target) => Some(Ok(node_ix)),
Node::NoRule(identifier) => {
// TODO clean up this part
let previous_file_state = ();
let result = has_file_been_modified(
&identifier,
previous_file_state,
);
match result {
Ok(has_been_modified) =>
if has_been_modified {
Some(Ok(node_ix))
} else {
None
},
Err(err) => Some(Err(err)),
}
}
})
.collect::<Result<HashSet<_>>>()
}
pub(super) fn find_obsolete_targets(
graph: &Graph<Node, ()>,
obsolete_leaf_nodes: &HashSet<Nx>,
) -> HashSet<Nx>
|
while let Some(target_ix) = queue.pop_front() {
let has_just_been_found = obsolete_ixs.insert(target_ix);
if has_just_been_found {
let dependants = graph
.neighbors_directed(target_ix, Direction::Incoming);
queue.extend(dependants);
}
}
}
obsolete_ixs
}
pub(super) fn add_leaf_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
dependency_identifier: Identifier,
) {
id_to_ix_map
.entry(dependency_identifier.clone())
.or_insert_with(|| {
// `.add_node()` returns node's index
graph.add_node(Node::NoRule(dependency_identifier))
});
}
pub(super) fn add_target_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
target: Target,
) -> Result<()> {
let identifier = target.identifier.clone();
let node_ix = graph.add_node(Node::Target(target));
let slot = id_to_ix_map.insert(identifier, node_ix);
match slot {
Some(_colliding_target_ix) =>
Err(UserError::DuplicateTarget.into()),
None => Ok(()),
}
}
pub(super) fn add_edges_to_deps(
graph: &mut DependencyDag,
id_to_ix_map: &HashMap<Identifier, Nx>,
target_ix: Nx,
) -> Result<()> {
let deps = match &graph[target_ix] {
Node::Target(target) => target.deps.clone(),
Node::NoRule(_) => return Ok(()), // no deps
};
deps.iter()
.map(|dep_id| {
id_to_ix_map
.get(dep_id)
.ok_or_else(|| Error::internal(line!(), file!()))
})
.map_item(|dep_ix| {
graph
.add_edge(target_ix, *dep_ix, ())
.map(|_| ())
.map_err(|_| UserError::DependencyCycle.into())
})
.map(|result| result.flatten())
.collect::<Result<_>>()
}
pub(super) fn has_file_been_modified(
_identifier: &Identifier,
_previous_state: FileState,
) -> Result<bool> {
Ok(true) // TODO for now it just rebuilds everything
}
pub(super) fn generate_depth_map<N, E>(
graph: &daggy::Dag<N, E>,
target_id: Nx,
) -> HashMap<Nx, usize> {
let mut depth_map: HashMap<Nx, usize> = HashMap::new();
let mut current_depth = 0;
let mut queue: VecDeque<Vec<_>> = VecDeque::new();
queue.push_front(vec![target_id]);
while let Some(level) = queue.pop_front() {
if level.is_empty() {
break;
}
let mut level_queue = vec![];
for current_node in level {
// update current node's depth
let _ = depth_map
.entry(current_node)
.and_modify(|depth| *depth = (*depth).max(current_depth))
.or_insert(current_depth);
let children =
graph.neighbors_directed(current_node, Direction::Outgoing);
level_queue.extend(children);
}
queue.push_back(level_queue);
current_depth += 1;
}
depth_map
}
}
#[cfg(test)]
mod test {
use super::*;
use daggy::petgraph::graph::node_index as n;
use pretty_assertions::assert_eq;
#[test]
fn test_get_task_sequence() {
// helper functions
let task = |cmd: &str| Task {
command: cmd.into(),
working_dir: None,
};
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![task(id)],
working_dir: None,
};
let ix = |id: &str, map: &HashMap<_, _>| {
let p: &std::path::Path = id.as_ref();
map[p]
};
// the dependency graph:
//
// a1 a2'
// / / \
// / / \
// b1 b2 b3
// / /
// / /
// l1* l2
//
// a2 is the target (')
// l1 is marked as obsolete (*)
// b2's and a2's tasks must be executed (in that order)
// targets and their dependencies
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1"]),
target("a2", &["b2", "b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
// depth map
#[rustfmt::skip]
let depth_map = vec![
(ix("a2", &map), 0),
(ix("b2", &map), 1),
(ix("b3", &map), 1),
(ix("l1", &map), 2),
(ix("l2", &map), 2),
].into_iter().collect();
// nodes that have been marked as obsolete
// (in real code it is automated)
let obsolete_leaf_nodes = vec![ix
|
{
// reverse short circuiting bfs:
// skip the dependants of the targets
// that have already been marked as obsolete
let mut queue = VecDeque::<Nx>::new();
let mut obsolete_ixs = HashSet::<Nx>::new();
for leaf_ix in obsolete_leaf_nodes {
// no need to clear the queue since it gets drained
// in the while loop each time
match &graph[*leaf_ix] {
Node::Target(_) => queue.push_back(*leaf_ix),
Node::NoRule(_) => {
let direct_dependants =
graph.neighbors_directed(*leaf_ix, Direction::Incoming);
queue.extend(direct_dependants);
}
}
|
identifier_body
|
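A minimal Python sketch (hypothetical reverse-adjacency data) of the reverse BFS in find_obsolete_targets above: the queue is seeded with the dependants of an obsolete file leaf, every transitive dependant is marked once, and already-marked targets are not expanded again.

from collections import deque

def find_obsolete_targets(dependants: dict, seeds) -> set:
    """dependants maps a node to the nodes that depend on it (incoming edges)."""
    queue = deque(seeds)
    obsolete = set()
    while queue:
        node = queue.popleft()
        if node not in obsolete:  # mirrors HashSet::insert returning true
            obsolete.add(node)
            queue.extend(dependants.get(node, []))
    return obsolete

# l1 is the obsolete file leaf, so the queue starts with its dependant b2
dependants = {"l1": ["b2"], "l2": ["b3"], "b2": ["a2"], "b3": ["a2"], "b1": ["a1"]}
print(find_obsolete_targets(dependants, dependants["l1"]))
# -> {'b2', 'a2'} (set order may vary), matching "b2's and a2's tasks must run"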
list.go
|
ly.Progress) (*PackageList, error) {
// empty reflist
if reflist == nil {
return NewPackageList(), nil
}
result := NewPackageListWithDuplicates(false, reflist.Len())
if progress != nil {
progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList)
}
err := reflist.ForEach(func(key []byte) error {
p, err2 := collection.ByKey(key)
|
if err2 != nil {
return fmt.Errorf("unable to load package with key %s: %s", key, err2)
}
if progress != nil {
progress.AddBar(1)
}
return result.Add(p)
})
if progress != nil {
progress.ShutdownBar()
}
if err != nil {
return nil, err
}
return result, nil
}
// Has checks whether package is already in the list
func (l *PackageList) Has(p *Package) bool {
key := l.keyFunc(p)
_, ok := l.packages[key]
return ok
}
// Add appends package to package list, additionally checking for uniqueness
func (l *PackageList) Add(p *Package) error {
key := l.keyFunc(p)
existing, ok := l.packages[key]
if ok {
if !existing.Equals(p) {
return &PackageConflictError{fmt.Errorf("conflict in package %s", p)}
}
return nil
}
l.packages[key] = p
if l.indexed {
for _, provides := range p.Provides {
l.providesIndex[provides] = append(l.providesIndex[provides], p)
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) })
// insert p into l.packagesIndex in position i
l.packagesIndex = append(l.packagesIndex, nil)
copy(l.packagesIndex[i+1:], l.packagesIndex[i:])
l.packagesIndex[i] = p
}
return nil
}
// ForEach calls handler for each package in list
func (l *PackageList) ForEach(handler func(*Package) error) error {
var err error
for _, p := range l.packages {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// ForEachIndexed calls handler for each package in list in indexed order
func (l *PackageList) ForEachIndexed(handler func(*Package) error) error {
if !l.indexed {
panic("list not indexed, can't iterate")
}
var err error
for _, p := range l.packagesIndex {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// Len returns number of packages in the list
func (l *PackageList) Len() int {
return len(l.packages)
}
// Append adds content from one package list to another
func (l *PackageList) Append(pl *PackageList) error {
if l.indexed {
panic("Append not supported when indexed")
}
for k, p := range pl.packages {
existing, ok := l.packages[k]
if ok {
if !existing.Equals(p) {
return fmt.Errorf("conflict in package %s", p)
}
} else {
l.packages[k] = p
}
}
return nil
}
// Remove removes package from the list, and updates index when required
func (l *PackageList) Remove(p *Package) {
delete(l.packages, l.keyFunc(p))
if l.indexed {
for _, provides := range p.Provides {
for i, pkg := range l.providesIndex[provides] {
if pkg.Equals(p) {
// remove l.ProvidesIndex[provides][i] w/o preserving order
l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] =
nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1]
break
}
}
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name })
for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name {
if l.packagesIndex[i].Equals(p) {
// remove l.packagesIndex[i] preserving order
copy(l.packagesIndex[i:], l.packagesIndex[i+1:])
l.packagesIndex[len(l.packagesIndex)-1] = nil
l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1]
break
}
i++
}
}
}
// Architectures returns list of architectures present in packages.
//
// If includeSource is true, meta-architecture "source" would be present in the list
func (l *PackageList) Architectures(includeSource bool) (result []string) {
result = make([]string, 0, 10)
for _, pkg := range l.packages {
if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) {
result = append(result, pkg.Architecture)
}
}
return
}
// Strings builds list of strings with package keys
func (l *PackageList) Strings() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = string(p.Key(""))
i++
}
return result
}
// FullNames builds a list of package {name}_{version}_{arch}
func (l *PackageList) FullNames() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = p.GetFullName()
i++
}
return result
}
// depSliceDeduplicate removes dups in slice of Dependencies
func depSliceDeduplicate(s []Dependency) []Dependency {
l := len(s)
if l < 2 {
return s
}
if l == 2 {
if s[0] == s[1] {
return s[0:1]
}
return s
}
found := make(map[string]bool, l)
j := 0
for i, x := range s {
h := x.Hash()
if !found[h] {
found[h] = true
s[j] = s[i]
j++
}
}
return s[:j]
}
// VerifyDependencies looks for missing dependencies in package list.
//
// Analysis would be performed for each architecture, in specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
l.PrepareIndex()
missing := make([]Dependency, 0, 128)
if progress != nil {
progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies)
}
for _, arch := range architectures {
cache := make(map[string]bool, 2048)
for _, p := range l.packagesIndex {
if progress != nil {
progress.AddBar(1)
}
if !p.MatchesArchitecture(arch) {
continue
}
for _, dep := range p.GetDependencies(options) {
variants, err := ParseDependencyVariants(dep)
if err != nil {
return nil, fmt.Errorf("unable to process package %s: %s", p, err)
}
variants = depSliceDeduplicate(variants)
variantsMissing := make([]Dependency, 0, len(variants))
for _, dep := range variants {
if dep.Architecture == "" {
dep.Architecture = arch
}
hash := dep.Hash()
satisfied, ok := cache[hash]
if !ok {
satisfied = sources.Search(dep, false) != nil
cache[hash] = satisfied
}
if !satisfied && !ok {
variantsMissing = append(variantsMissing, dep)
}
if satisfied && options&DepFollowAllVariants == 0 {
variantsMissing = nil
break
}
}
missing = append(missing, variantsMissing...)
}
}
}
if progress != nil {
progress.ShutdownBar()
}
if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
missingStr := make([]string, len(missing))
for i := range missing {
missingStr[i] = missing[i].String()
}
progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
}
return missing, nil
}
// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}
func (l *PackageList) less
|
random_line_split
|
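Add and Remove above lean on common Go slice idioms: a sort.Search-based insert that keeps packagesIndex ordered, an order-preserving shift-delete for packagesIndex, and a swap-with-last delete for providesIndex where order does not matter. A small self-contained sketch of those idioms on an []int (values are illustrative):

package main

import (
	"fmt"
	"sort"
)

func main() {
	s := []int{10, 20, 40, 50}

	// Insert 30 at its sorted position (what Add does for packagesIndex).
	x := 30
	i := sort.Search(len(s), func(j int) bool { return s[j] >= x })
	s = append(s, 0)     // grow by one
	copy(s[i+1:], s[i:]) // shift the tail right
	s[i] = x
	fmt.Println(s) // [10 20 30 40 50]

	// Order-preserving removal of index 1 (packagesIndex style).
	// (With pointer elements, aptly also nils the vacated tail slot
	// so the garbage collector can reclaim it.)
	i = 1
	copy(s[i:], s[i+1:])
	s = s[:len(s)-1]
	fmt.Println(s) // [10 30 40 50]

	// Order-agnostic removal of index 0 (providesIndex style):
	// overwrite with the last element and truncate.
	s[0] = s[len(s)-1]
	s = s[:len(s)-1]
	fmt.Println(s) // [50 30 40]
}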
|
list.go
|
.Progress) (*PackageList, error) {
// empty reflist
if reflist == nil {
return NewPackageList(), nil
}
result := NewPackageListWithDuplicates(false, reflist.Len())
if progress != nil {
progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList)
}
err := reflist.ForEach(func(key []byte) error {
p, err2 := collection.ByKey(key)
if err2 != nil {
return fmt.Errorf("unable to load package with key %s: %s", key, err2)
}
if progress != nil {
progress.AddBar(1)
}
return result.Add(p)
})
if progress != nil {
progress.ShutdownBar()
}
if err != nil {
return nil, err
}
return result, nil
}
// Has checks whether package is already in the list
func (l *PackageList) Has(p *Package) bool {
key := l.keyFunc(p)
_, ok := l.packages[key]
return ok
}
// Add appends package to package list, additionally checking for uniqueness
func (l *PackageList) Add(p *Package) error
|
copy(l.packagesIndex[i+1:], l.packagesIndex[i:])
l.packagesIndex[i] = p
}
return nil
}
// ForEach calls handler for each package in list
func (l *PackageList) ForEach(handler func(*Package) error) error {
var err error
for _, p := range l.packages {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// ForEachIndexed calls handler for each package in list in indexed order
func (l *PackageList) ForEachIndexed(handler func(*Package) error) error {
if !l.indexed {
panic("list not indexed, can't iterate")
}
var err error
for _, p := range l.packagesIndex {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// Len returns number of packages in the list
func (l *PackageList) Len() int {
return len(l.packages)
}
// Append adds content from one package list to another
func (l *PackageList) Append(pl *PackageList) error {
if l.indexed {
panic("Append not supported when indexed")
}
for k, p := range pl.packages {
existing, ok := l.packages[k]
if ok {
if !existing.Equals(p) {
return fmt.Errorf("conflict in package %s", p)
}
} else {
l.packages[k] = p
}
}
return nil
}
// Remove removes package from the list, and updates index when required
func (l *PackageList) Remove(p *Package) {
delete(l.packages, l.keyFunc(p))
if l.indexed {
for _, provides := range p.Provides {
for i, pkg := range l.providesIndex[provides] {
if pkg.Equals(p) {
// remove l.ProvidesIndex[provides][i] w/o preserving order
l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] =
nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1]
break
}
}
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name })
for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name {
if l.packagesIndex[i].Equals(p) {
// remove l.packagesIndex[i] preserving order
copy(l.packagesIndex[i:], l.packagesIndex[i+1:])
l.packagesIndex[len(l.packagesIndex)-1] = nil
l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1]
break
}
i++
}
}
}
// Architectures returns list of architectures present in packages.
//
// If includeSource is true, meta-architecture "source" would be present in the list
func (l *PackageList) Architectures(includeSource bool) (result []string) {
result = make([]string, 0, 10)
for _, pkg := range l.packages {
if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) {
result = append(result, pkg.Architecture)
}
}
return
}
// Strings builds list of strings with package keys
func (l *PackageList) Strings() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = string(p.Key(""))
i++
}
return result
}
// FullNames builds a list of package {name}_{version}_{arch}
func (l *PackageList) FullNames() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = p.GetFullName()
i++
}
return result
}
// depSliceDeduplicate removes dups in slice of Dependencies
func depSliceDeduplicate(s []Dependency) []Dependency {
l := len(s)
if l < 2 {
return s
}
if l == 2 {
if s[0] == s[1] {
return s[0:1]
}
return s
}
found := make(map[string]bool, l)
j := 0
for i, x := range s {
h := x.Hash()
if !found[h] {
found[h] = true
s[j] = s[i]
j++
}
}
return s[:j]
}
// VerifyDependencies looks for missing dependencies in package list.
//
// Analysis would be performed for each architecture, in specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
l.PrepareIndex()
missing := make([]Dependency, 0, 128)
if progress != nil {
progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies)
}
for _, arch := range architectures {
cache := make(map[string]bool, 2048)
for _, p := range l.packagesIndex {
if progress != nil {
progress.AddBar(1)
}
if !p.MatchesArchitecture(arch) {
continue
}
for _, dep := range p.GetDependencies(options) {
variants, err := ParseDependencyVariants(dep)
if err != nil {
return nil, fmt.Errorf("unable to process package %s: %s", p, err)
}
variants = depSliceDeduplicate(variants)
variantsMissing := make([]Dependency, 0, len(variants))
for _, dep := range variants {
if dep.Architecture == "" {
dep.Architecture = arch
}
hash := dep.Hash()
satisfied, ok := cache[hash]
if !ok {
satisfied = sources.Search(dep, false) != nil
cache[hash] = satisfied
}
if !satisfied && !ok {
variantsMissing = append(variantsMissing, dep)
}
if satisfied && options&DepFollowAllVariants == 0 {
variantsMissing = nil
break
}
}
missing = append(missing, variantsMissing...)
}
}
}
if progress != nil {
progress.ShutdownBar()
}
if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
missingStr := make([]string, len(missing))
for i := range missing {
missingStr[i] = missing[i].String()
}
progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
}
return missing, nil
}
// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}
func (l *PackageList) less
|
{
key := l.keyFunc(p)
existing, ok := l.packages[key]
if ok {
if !existing.Equals(p) {
return &PackageConflictError{fmt.Errorf("conflict in package %s", p)}
}
return nil
}
l.packages[key] = p
if l.indexed {
for _, provides := range p.Provides {
l.providesIndex[provides] = append(l.providesIndex[provides], p)
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) })
// insert p into l.packagesIndex in position i
l.packagesIndex = append(l.packagesIndex, nil)
|
identifier_body
|
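VerifyDependencies above memoises, per architecture, whether a dependency hash can be satisfied, and only records a miss on the first uncached check so the same missing dependency is not reported repeatedly. A hedged Go sketch of that cache-around-a-lookup pattern; search and the package names are stand-ins, not aptly's API:

package main

import "fmt"

func main() {
	// Stand-in for the expensive lookup (sources.Search in the code above).
	searchCalls := 0
	search := func(dep string) bool {
		searchCalls++
		return dep == "libc6" // toy rule: only libc6 is available
	}

	cache := make(map[string]bool)
	deps := []string{"libc6", "libfoo", "libc6", "libfoo"}

	var missing []string
	for _, d := range deps {
		v, ok := cache[d]
		if !ok {
			v = search(d)
			cache[d] = v
		}
		// Mirror VerifyDependencies: record a miss only on the first,
		// uncached check.
		if !v && !ok {
			missing = append(missing, d)
		}
	}
	fmt.Println(missing, "searches:", searchCalls) // [libfoo] searches: 2
}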
list.go
|
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) })
// insert p into l.packagesIndex in position i
l.packagesIndex = append(l.packagesIndex, nil)
copy(l.packagesIndex[i+1:], l.packagesIndex[i:])
l.packagesIndex[i] = p
}
return nil
}
// ForEach calls handler for each package in list
func (l *PackageList) ForEach(handler func(*Package) error) error {
var err error
for _, p := range l.packages {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// ForEachIndexed calls handler for each package in list in indexed order
func (l *PackageList) ForEachIndexed(handler func(*Package) error) error {
if !l.indexed {
panic("list not indexed, can't iterate")
}
var err error
for _, p := range l.packagesIndex {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// Len returns number of packages in the list
func (l *PackageList) Len() int {
return len(l.packages)
}
// Append adds content from one package list to another
func (l *PackageList) Append(pl *PackageList) error {
if l.indexed {
panic("Append not supported when indexed")
}
for k, p := range pl.packages {
existing, ok := l.packages[k]
if ok {
if !existing.Equals(p) {
return fmt.Errorf("conflict in package %s", p)
}
} else {
l.packages[k] = p
}
}
return nil
}
// Remove removes package from the list, and updates index when required
func (l *PackageList) Remove(p *Package) {
delete(l.packages, l.keyFunc(p))
if l.indexed {
for _, provides := range p.Provides {
for i, pkg := range l.providesIndex[provides] {
if pkg.Equals(p) {
// remove l.ProvidesIndex[provides][i] w/o preserving order
l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] =
nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1]
break
}
}
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name })
for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name {
if l.packagesIndex[i].Equals(p) {
// remove l.packagesIndex[i] preserving order
copy(l.packagesIndex[i:], l.packagesIndex[i+1:])
l.packagesIndex[len(l.packagesIndex)-1] = nil
l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1]
break
}
i++
}
}
}
// Architectures returns list of architectures present in packages.
//
// If includeSource is true, meta-architecture "source" would be present in the list
func (l *PackageList) Architectures(includeSource bool) (result []string) {
result = make([]string, 0, 10)
for _, pkg := range l.packages {
if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) {
result = append(result, pkg.Architecture)
}
}
return
}
// Strings builds list of strings with package keys
func (l *PackageList) Strings() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = string(p.Key(""))
i++
}
return result
}
// FullNames builds a list of package {name}_{version}_{arch}
func (l *PackageList) FullNames() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = p.GetFullName()
i++
}
return result
}
// depSliceDeduplicate removes dups in slice of Dependencies
func depSliceDeduplicate(s []Dependency) []Dependency {
l := len(s)
if l < 2 {
return s
}
if l == 2 {
if s[0] == s[1] {
return s[0:1]
}
return s
}
found := make(map[string]bool, l)
j := 0
for i, x := range s {
h := x.Hash()
if !found[h] {
found[h] = true
s[j] = s[i]
j++
}
}
return s[:j]
}
// VerifyDependencies looks for missing dependencies in package list.
//
// Analysis would be performed for each architecture, in specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
l.PrepareIndex()
missing := make([]Dependency, 0, 128)
if progress != nil {
progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies)
}
for _, arch := range architectures {
cache := make(map[string]bool, 2048)
for _, p := range l.packagesIndex {
if progress != nil {
progress.AddBar(1)
}
if !p.MatchesArchitecture(arch) {
continue
}
for _, dep := range p.GetDependencies(options) {
variants, err := ParseDependencyVariants(dep)
if err != nil {
return nil, fmt.Errorf("unable to process package %s: %s", p, err)
}
variants = depSliceDeduplicate(variants)
variantsMissing := make([]Dependency, 0, len(variants))
for _, dep := range variants {
if dep.Architecture == "" {
dep.Architecture = arch
}
hash := dep.Hash()
satisfied, ok := cache[hash]
if !ok {
satisfied = sources.Search(dep, false) != nil
cache[hash] = satisfied
}
if !satisfied && !ok {
variantsMissing = append(variantsMissing, dep)
}
if satisfied && options&DepFollowAllVariants == 0 {
variantsMissing = nil
break
}
}
missing = append(missing, variantsMissing...)
}
}
}
if progress != nil {
progress.ShutdownBar()
}
if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
missingStr := make([]string, len(missing))
for i := range missing {
missingStr[i] = missing[i].String()
}
progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
}
return missing, nil
}
// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}
func (l *PackageList) lessPackages(iPkg, jPkg *Package) bool {
if iPkg.Name == jPkg.Name {
cmp := CompareVersions(iPkg.Version, jPkg.Version)
if cmp == 0 {
return iPkg.Architecture < jPkg.Architecture
}
return cmp == 1
}
return iPkg.Name < jPkg.Name
}
// Less compares two packages by name (lexicographical) and version (latest to oldest)
func (l *PackageList) Less(i, j int) bool {
return l.lessPackages(l.packagesIndex[i], l.packagesIndex[j])
}
// PrepareIndex prepares list for indexing
func (l *PackageList) PrepareIndex() {
if l.indexed {
return
}
l.packagesIndex = make([]*Package, l.Len())
l.providesIndex = make(map[string][]*Package, 128)
i := 0
for _, p := range l.packages {
l.packagesIndex[i] = p
i++
for _, provides := range p.Provides {
l.providesIndex[provides] = append(l.providesIndex[provides], p)
}
}
sort.Sort(l)
l.indexed = true
}
// Scan searches package index using full scan
func (l *PackageList) Scan(q PackageQuery) (result *PackageList) {
result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)
for _, pkg := range l.packages {
if q.Matches(pkg) {
result.Add(pkg)
}
}
return
}
// SearchSupported returns true for PackageList
func (l *PackageList)
|
SearchSupported
|
identifier_name
|
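lessPackages above orders the index by name ascending, then version newest-first, then architecture ascending. A hedged Go sketch of the same three-level comparator with sort.Slice; plain string comparison stands in for aptly's Debian-aware CompareVersions, and the sample packages are invented:

package main

import (
	"fmt"
	"sort"
)

type pkg struct {
	Name, Version, Arch string
}

func main() {
	pkgs := []pkg{
		{"nginx", "1.18", "amd64"},
		{"bash", "5.0", "amd64"},
		{"nginx", "1.20", "amd64"},
		{"nginx", "1.20", "arm64"},
	}

	// Same shape as lessPackages: name ascending, version latest-first,
	// then architecture ascending.
	sort.Slice(pkgs, func(i, j int) bool {
		a, b := pkgs[i], pkgs[j]
		if a.Name != b.Name {
			return a.Name < b.Name
		}
		if a.Version != b.Version {
			return a.Version > b.Version // newest first
		}
		return a.Arch < b.Arch
	})
	fmt.Println(pkgs)
	// [{bash 5.0 amd64} {nginx 1.20 amd64} {nginx 1.20 arm64} {nginx 1.18 amd64}]
}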
|
list.go
|
.Progress) (*PackageList, error) {
// empty reflist
if reflist == nil {
return NewPackageList(), nil
}
result := NewPackageListWithDuplicates(false, reflist.Len())
if progress != nil {
progress.InitBar(int64(reflist.Len()), false, aptly.BarGeneralBuildPackageList)
}
err := reflist.ForEach(func(key []byte) error {
p, err2 := collection.ByKey(key)
if err2 != nil
|
if progress != nil {
progress.AddBar(1)
}
return result.Add(p)
})
if progress != nil {
progress.ShutdownBar()
}
if err != nil {
return nil, err
}
return result, nil
}
// Has checks whether package is already in the list
func (l *PackageList) Has(p *Package) bool {
key := l.keyFunc(p)
_, ok := l.packages[key]
return ok
}
// Add appends package to package list, additionally checking for uniqueness
func (l *PackageList) Add(p *Package) error {
key := l.keyFunc(p)
existing, ok := l.packages[key]
if ok {
if !existing.Equals(p) {
return &PackageConflictError{fmt.Errorf("conflict in package %s", p)}
}
return nil
}
l.packages[key] = p
if l.indexed {
for _, provides := range p.Provides {
l.providesIndex[provides] = append(l.providesIndex[provides], p)
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.lessPackages(p, l.packagesIndex[j]) })
// insert p into l.packagesIndex in position i
l.packagesIndex = append(l.packagesIndex, nil)
copy(l.packagesIndex[i+1:], l.packagesIndex[i:])
l.packagesIndex[i] = p
}
return nil
}
// ForEach calls handler for each package in list
func (l *PackageList) ForEach(handler func(*Package) error) error {
var err error
for _, p := range l.packages {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// ForEachIndexed calls handler for each package in list in indexed order
func (l *PackageList) ForEachIndexed(handler func(*Package) error) error {
if !l.indexed {
panic("list not indexed, can't iterate")
}
var err error
for _, p := range l.packagesIndex {
err = handler(p)
if err != nil {
return err
}
}
return err
}
// Len returns number of packages in the list
func (l *PackageList) Len() int {
return len(l.packages)
}
// Append adds content from one package list to another
func (l *PackageList) Append(pl *PackageList) error {
if l.indexed {
panic("Append not supported when indexed")
}
for k, p := range pl.packages {
existing, ok := l.packages[k]
if ok {
if !existing.Equals(p) {
return fmt.Errorf("conflict in package %s", p)
}
} else {
l.packages[k] = p
}
}
return nil
}
// Remove removes package from the list, and updates index when required
func (l *PackageList) Remove(p *Package) {
delete(l.packages, l.keyFunc(p))
if l.indexed {
for _, provides := range p.Provides {
for i, pkg := range l.providesIndex[provides] {
if pkg.Equals(p) {
// remove l.ProvidesIndex[provides][i] w/o preserving order
l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][i], l.providesIndex[provides] =
nil, l.providesIndex[provides][len(l.providesIndex[provides])-1], l.providesIndex[provides][:len(l.providesIndex[provides])-1]
break
}
}
}
i := sort.Search(len(l.packagesIndex), func(j int) bool { return l.packagesIndex[j].Name >= p.Name })
for i < len(l.packagesIndex) && l.packagesIndex[i].Name == p.Name {
if l.packagesIndex[i].Equals(p) {
// remove l.packagesIndex[i] preserving order
copy(l.packagesIndex[i:], l.packagesIndex[i+1:])
l.packagesIndex[len(l.packagesIndex)-1] = nil
l.packagesIndex = l.packagesIndex[:len(l.packagesIndex)-1]
break
}
i++
}
}
}
// Architectures returns list of architectures present in packages.
//
// If includeSource is true, meta-architecture "source" would be present in the list
func (l *PackageList) Architectures(includeSource bool) (result []string) {
result = make([]string, 0, 10)
for _, pkg := range l.packages {
if pkg.Architecture != ArchitectureAll && (pkg.Architecture != ArchitectureSource || includeSource) && !utils.StrSliceHasItem(result, pkg.Architecture) {
result = append(result, pkg.Architecture)
}
}
return
}
// Strings builds list of strings with package keys
func (l *PackageList) Strings() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = string(p.Key(""))
i++
}
return result
}
// FullNames builds a list of package {name}_{version}_{arch}
func (l *PackageList) FullNames() []string {
result := make([]string, l.Len())
i := 0
for _, p := range l.packages {
result[i] = p.GetFullName()
i++
}
return result
}
// depSliceDeduplicate removes dups in slice of Dependencies
func depSliceDeduplicate(s []Dependency) []Dependency {
l := len(s)
if l < 2 {
return s
}
if l == 2 {
if s[0] == s[1] {
return s[0:1]
}
return s
}
found := make(map[string]bool, l)
j := 0
for i, x := range s {
h := x.Hash()
if !found[h] {
found[h] = true
s[j] = s[i]
j++
}
}
return s[:j]
}
// VerifyDependencies looks for missing dependencies in package list.
//
// Analysis would be performed for each architecture, in specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
l.PrepareIndex()
missing := make([]Dependency, 0, 128)
if progress != nil {
progress.InitBar(int64(l.Len())*int64(len(architectures)), false, aptly.BarGeneralVerifyDependencies)
}
for _, arch := range architectures {
cache := make(map[string]bool, 2048)
for _, p := range l.packagesIndex {
if progress != nil {
progress.AddBar(1)
}
if !p.MatchesArchitecture(arch) {
continue
}
for _, dep := range p.GetDependencies(options) {
variants, err := ParseDependencyVariants(dep)
if err != nil {
return nil, fmt.Errorf("unable to process package %s: %s", p, err)
}
variants = depSliceDeduplicate(variants)
variantsMissing := make([]Dependency, 0, len(variants))
for _, dep := range variants {
if dep.Architecture == "" {
dep.Architecture = arch
}
hash := dep.Hash()
satisfied, ok := cache[hash]
if !ok {
satisfied = sources.Search(dep, false) != nil
cache[hash] = satisfied
}
if !satisfied && !ok {
variantsMissing = append(variantsMissing, dep)
}
if satisfied && options&DepFollowAllVariants == 0 {
variantsMissing = nil
break
}
}
missing = append(missing, variantsMissing...)
}
}
}
if progress != nil {
progress.ShutdownBar()
}
if options&DepVerboseResolve == DepVerboseResolve && progress != nil {
missingStr := make([]string, len(missing))
for i := range missing {
missingStr[i] = missing[i].String()
}
progress.ColoredPrintf("@{y}Missing dependencies:@| %s", strings.Join(missingStr, ", "))
}
return missing, nil
}
// Swap swaps two packages in index
func (l *PackageList) Swap(i, j int) {
l.packagesIndex[i], l.packagesIndex[j] = l.packagesIndex[j], l.packagesIndex[i]
}
func (l *PackageList)
|
{
return fmt.Errorf("unable to load package with key %s: %s", key, err2)
}
|
conditional_block
|
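Both reflist.ForEach and PackageList.ForEach follow the same callback contract: call the handler for each item and stop at the first error. A minimal Go sketch of that contract with invented item names:

package main

import (
	"errors"
	"fmt"
)

// forEach calls handler for every item and stops at the first error,
// the same contract as the ForEach methods above.
func forEach(items []string, handler func(string) error) error {
	for _, it := range items {
		if err := handler(it); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	items := []string{"pkg-a", "pkg-b", "broken", "pkg-c"}
	var loaded []string
	err := forEach(items, func(it string) error {
		if it == "broken" {
			return errors.New("unable to load " + it)
		}
		loaded = append(loaded, it)
		return nil
	})
	fmt.Println(loaded, err) // [pkg-a pkg-b] unable to load broken
}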
elasticsearch.py
|
) -> CapacityRequirement:
"""Estimate the capacity required for one zone given a regional desire
The input desires should be the **regional** desire, and this function will
return the zonal capacity requirement
"""
# Keep half of the cores free for background work (merging mostly)
needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)
# Keep half of the bandwidth available for backup
needed_network_mbps = simple_network_mbps(desires) * 2
needed_disk = math.ceil(
(1.0 / desires.data_shape.estimated_compression_ratio.mid)
* desires.data_shape.estimated_state_size_gib.mid
* copies_per_region,
)
    # Rough estimate of how many instances we would need just for the CPU
# Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil(
max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))
)
|
# Generally speaking we want fewer than some number of reads per second
# hitting disk per instance. If we don't have many reads we don't need to
# hold much data in memory.
instance_rps = max(1, reads_per_second // rough_count)
disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))
rps_working_set = min(1.0, disk_rps / max_rps_to_disk)
# If disk RPS will be smaller than our target because there are no
# reads, we don't need to hold as much data in memory
needed_memory = min(working_set, rps_working_set) * needed_disk
# Now convert to per zone
needed_cores = needed_cores // zones_per_region
needed_disk = needed_disk // zones_per_region
needed_memory = int(needed_memory // zones_per_region)
logger.debug(
"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)",
needed_cores,
needed_memory,
needed_disk,
working_set,
)
return CapacityRequirement(
requirement_type="elasticsearch-data-zonal",
core_reference_ghz=desires.core_reference_ghz,
cpu_cores=certain_int(needed_cores),
mem_gib=certain_float(needed_memory),
disk_gib=certain_float(needed_disk),
network_mbps=certain_float(needed_network_mbps),
context={
"working_set": min(working_set, rps_working_set),
"rps_working_set": rps_working_set,
"disk_slo_working_set": working_set,
"replication_factor": copies_per_region,
"compression_ratio": round(
1.0 / desires.data_shape.estimated_compression_ratio.mid, 2
),
"read_per_second": reads_per_second,
},
)
def _upsert_params(cluster, params):
if cluster.cluster_params:
cluster.cluster_params.update(params)
else:
cluster.cluster_params = params
# pylint: disable=too-many-locals
def _estimate_elasticsearch_cluster_zonal(
instance: Instance,
drive: Drive,
desires: CapacityDesires,
zones_per_region: int = 3,
copies_per_region: int = 3,
max_local_disk_gib: int = 4096,
max_regional_size: int = 240,
max_rps_to_disk: int = 500,
) -> Optional[CapacityPlan]:
# Netflix Elasticsearch doesn't like to deploy on really small instances
if instance.cpu < 2 or instance.ram_gib < 14:
return None
# (FIXME): Need elasticsearch input
# Right now Elasticsearch doesn't deploy to cloud drives, just adding this
# here and leaving the capability to handle cloud drives for the future
if instance.drive is None:
return None
rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region
# Based on the disk latency and the read latency SLOs we adjust our
# working set to keep more or less data in RAM. Faster drives need
# less fronting RAM.
ws_drive = instance.drive or drive
working_set = working_set_from_drive_and_slo(
drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms),
read_slo_latency_dist=dist_for_interval(
desires.query_pattern.read_latency_slo_ms
),
estimated_working_set=desires.data_shape.estimated_working_set_percent,
# Elasticsearch has looser latency SLOs, target the 90th percentile of disk
# latency to keep in RAM.
target_percentile=0.90,
).mid
requirement = _estimate_elasticsearch_requirement(
instance=instance,
desires=desires,
working_set=working_set,
reads_per_second=rps,
zones_per_region=zones_per_region,
copies_per_region=copies_per_region,
max_rps_to_disk=max_rps_to_disk,
)
base_mem = (
desires.data_shape.reserved_instance_app_mem_gib
+ desires.data_shape.reserved_instance_system_mem_gib
)
cluster = compute_stateful_zone(
instance=instance,
drive=drive,
needed_cores=int(requirement.cpu_cores.mid),
needed_disk_gib=int(requirement.disk_gib.mid),
needed_memory_gib=int(requirement.mem_gib.mid),
needed_network_mbps=requirement.network_mbps.mid,
# Assume that by provisioning enough memory we'll get
# a 90% hit rate, but take into account the reads per read
# from the per node dataset using leveled compaction
# FIXME: I feel like this can be improved
required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps),
# Elasticsearch requires ephemeral disks to be % full because tiered
# merging can make progress as long as there is some headroom
required_disk_space=lambda x: x * 1.4,
max_local_disk_gib=max_local_disk_gib,
# elasticsearch clusters can autobalance via shard placement
cluster_size=lambda x: x,
min_count=1,
# Sidecars/System takes away memory from elasticsearch
# Elasticsearch uses half of available system max of 32 for compressed
# oops
reserve_memory=lambda x: base_mem + max(32, x / 2),
core_reference_ghz=requirement.core_reference_ghz,
)
    # Communicate to the actual provisioner if we want a reduced RF
params = {"elasticsearch.copies": copies_per_region}
_upsert_params(cluster, params)
# elasticsearch clusters generally should try to stay under some total number
# of nodes. Orgs do this for all kinds of reasons such as
# * Security group limits. Since you must have < 500 rules if you're
    #   ingressing public IPs
# * Maintenance. If your restart script does one node at a time you want
# smaller clusters so your restarts don't take months.
# * NxN network issues. Sometimes smaller clusters of bigger nodes
# are better for network propagation
if cluster.count > (max_regional_size // zones_per_region):
return None
ec2_cost = zones_per_region * cluster.annual_cost
cluster.cluster_type = "elasticsearch-data"
clusters = Clusters(
total_annual_cost=round(Decimal(ec2_cost), 2),
zonal=[cluster] * zones_per_region,
regional=list(),
)
return CapacityPlan(
requirements=Requirements(zonal=[requirement] * zones_per_region),
candidate_clusters=clusters,
)
class NflxElasticsearchCapacityModel(CapacityModel):
@staticmethod
def capacity_plan(
instance: Instance,
drive: Drive,
context: RegionContext,
desires: CapacityDesires,
extra_model_arguments: Dict[str, Any],
) -> Optional[CapacityPlan]:
# (FIXME): Need elasticsearch input
# TODO: Use durability requirements to compute RF.
copies_per_region: int = _target_rf(
desires, extra_model_arguments.get("copies_per_region", None)
)
max_regional_size: int = extra_model_arguments.get("max_regional_size", 240)
max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000)
# Very large nodes are hard to recover
max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000)
return _estimate_elasticsearch_cluster_zonal(
instance=instance,
drive=drive,
desires=desires,
zones_per_region=context.zones_in_region,
copies_per_region=copies_per_region,
max_regional_size=max_regional_size,
max_local_disk_gib=max_local_disk_gib,
max_rps_to_disk=max_rps_to_disk,
)
@staticmethod
def description():
return "Netflix Streaming Elasticsearch Model"
@staticmethod
def extra_model_arguments() -> Sequence[Tuple[str, str, str]]:
return (
(
"copies_per_region",
"int = 3",
"How many copies
|
rough_count = math.ceil(needed_cores / instance.cpu)
|
random_line_split
|
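To make the working-set arithmetic above concrete, here is a hedged worked example in Go. All inputs are invented (96 adjusted cores, 3 TiB of replicated data, 100k regional reads/s, 16-vCPU instances, max_rps_to_disk of 1000, and a flat 3-IOs-per-read stand-in for _es_io_per_read); it only mirrors the shape of the calculation, not the real model's functions.

package main

import (
	"fmt"
	"math"
)

// Toy stand-in for _es_io_per_read: assume ~3 disk IOs per search
// regardless of per-node data size (the real model derives this value).
func esIOPerRead(nodeSizeGiB float64) float64 { return 3 }

func main() {
	neededCores := 96.0     // after the *1.5 headroom adjustment
	neededDiskGiB := 3072.0 // state * copies / compression, regional
	readsPerSecond := 100000.0
	instanceCPUs := 16.0
	maxRPSToDisk := 1000.0
	diskSLOWorkingSet := 0.10 // fraction the latency-SLO math wants in RAM

	roughCount := math.Ceil(neededCores / instanceCPUs)            // 6 instances
	instanceRPS := math.Max(1, readsPerSecond/roughCount)          // ~16667 reads/s per node
	diskRPS := instanceRPS * esIOPerRead(neededDiskGiB/roughCount) // ~50000 IOs/s per node
	rpsWorkingSet := math.Min(1.0, diskRPS/maxRPSToDisk)           // capped at 1.0

	// With heavy read traffic the SLO-driven fraction wins the min().
	neededMemory := math.Min(diskSLOWorkingSet, rpsWorkingSet) * neededDiskGiB

	fmt.Printf("instances=%v rpsWS=%v memGiB=%.1f\n", roughCount, rpsWorkingSet, neededMemory)
	// instances=6 rpsWS=1 memGiB=307.2
}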
elasticsearch.py
|
) -> CapacityRequirement:
"""Estimate the capacity required for one zone given a regional desire
The input desires should be the **regional** desire, and this function will
return the zonal capacity requirement
"""
# Keep half of the cores free for background work (merging mostly)
needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)
# Keep half of the bandwidth available for backup
needed_network_mbps = simple_network_mbps(desires) * 2
needed_disk = math.ceil(
(1.0 / desires.data_shape.estimated_compression_ratio.mid)
* desires.data_shape.estimated_state_size_gib.mid
* copies_per_region,
)
    # Rough estimate of how many instances we would need just for the CPU
# Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil(
max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))
)
rough_count = math.ceil(needed_cores / instance.cpu)
# Generally speaking we want fewer than some number of reads per second
# hitting disk per instance. If we don't have many reads we don't need to
# hold much data in memory.
instance_rps = max(1, reads_per_second // rough_count)
disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))
rps_working_set = min(1.0, disk_rps / max_rps_to_disk)
# If disk RPS will be smaller than our target because there are no
# reads, we don't need to hold as much data in memory
needed_memory = min(working_set, rps_working_set) * needed_disk
# Now convert to per zone
needed_cores = needed_cores // zones_per_region
needed_disk = needed_disk // zones_per_region
needed_memory = int(needed_memory // zones_per_region)
logger.debug(
"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)",
needed_cores,
needed_memory,
needed_disk,
working_set,
)
return CapacityRequirement(
requirement_type="elasticsearch-data-zonal",
core_reference_ghz=desires.core_reference_ghz,
cpu_cores=certain_int(needed_cores),
mem_gib=certain_float(needed_memory),
disk_gib=certain_float(needed_disk),
network_mbps=certain_float(needed_network_mbps),
context={
"working_set": min(working_set, rps_working_set),
"rps_working_set": rps_working_set,
"disk_slo_working_set": working_set,
"replication_factor": copies_per_region,
"compression_ratio": round(
1.0 / desires.data_shape.estimated_compression_ratio.mid, 2
),
"read_per_second": reads_per_second,
},
)
def _upsert_params(cluster, params):
if cluster.cluster_params:
cluster.cluster_params.update(params)
else:
cluster.cluster_params = params
# pylint: disable=too-many-locals
def _estimate_elasticsearch_cluster_zonal(
instance: Instance,
drive: Drive,
desires: CapacityDesires,
zones_per_region: int = 3,
copies_per_region: int = 3,
max_local_disk_gib: int = 4096,
max_regional_size: int = 240,
max_rps_to_disk: int = 500,
) -> Optional[CapacityPlan]:
# Netflix Elasticsearch doesn't like to deploy on really small instances
|
estimated_working_set=desires.data_shape.estimated_working_set_percent,
# Elasticsearch has looser latency SLOs, target the 90th percentile of disk
# latency to keep in RAM.
target_percentile=0.90,
).mid
requirement = _estimate_elasticsearch_requirement(
instance=instance,
desires=desires,
working_set=working_set,
reads_per_second=rps,
zones_per_region=zones_per_region,
copies_per_region=copies_per_region,
max_rps_to_disk=max_rps_to_disk,
)
base_mem = (
desires.data_shape.reserved_instance_app_mem_gib
+ desires.data_shape.reserved_instance_system_mem_gib
)
cluster = compute_stateful_zone(
instance=instance,
drive=drive,
needed_cores=int(requirement.cpu_cores.mid),
needed_disk_gib=int(requirement.disk_gib.mid),
needed_memory_gib=int(requirement.mem_gib.mid),
needed_network_mbps=requirement.network_mbps.mid,
# Assume that by provisioning enough memory we'll get
# a 90% hit rate, but take into account the reads per read
# from the per node dataset using leveled compaction
# FIXME: I feel like this can be improved
required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps),
# Elasticsearch requires ephemeral disks to be % full because tiered
# merging can make progress as long as there is some headroom
required_disk_space=lambda x: x * 1.4,
max_local_disk_gib=max_local_disk_gib,
# elasticsearch clusters can autobalance via shard placement
cluster_size=lambda x: x,
min_count=1,
# Sidecars/System takes away memory from elasticsearch
# Elasticsearch uses half of available system max of 32 for compressed
# oops
reserve_memory=lambda x: base_mem + max(32, x / 2),
core_reference_ghz=requirement.core_reference_ghz,
)
    # Communicate to the actual provisioner if we want a reduced RF
params = {"elasticsearch.copies": copies_per_region}
_upsert_params(cluster, params)
# elasticsearch clusters generally should try to stay under some total number
# of nodes. Orgs do this for all kinds of reasons such as
# * Security group limits. Since you must have < 500 rules if you're
    #   ingressing public IPs
# * Maintenance. If your restart script does one node at a time you want
# smaller clusters so your restarts don't take months.
# * NxN network issues. Sometimes smaller clusters of bigger nodes
# are better for network propagation
if cluster.count > (max_regional_size // zones_per_region):
return None
ec2_cost = zones_per_region * cluster.annual_cost
cluster.cluster_type = "elasticsearch-data"
clusters = Clusters(
total_annual_cost=round(Decimal(ec2_cost), 2),
zonal=[cluster] * zones_per_region,
regional=list(),
)
return CapacityPlan(
requirements=Requirements(zonal=[requirement] * zones_per_region),
candidate_clusters=clusters,
)
class NflxElasticsearchCapacityModel(CapacityModel):
@staticmethod
def capacity_plan(
instance: Instance,
drive: Drive,
context: RegionContext,
desires: CapacityDesires,
extra_model_arguments: Dict[str, Any],
) -> Optional[CapacityPlan]:
# (FIXME): Need elasticsearch input
# TODO: Use durability requirements to compute RF.
copies_per_region: int = _target_rf(
desires, extra_model_arguments.get("copies_per_region", None)
)
max_regional_size: int = extra_model_arguments.get("max_regional_size", 240)
max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000)
# Very large nodes are hard to recover
max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000)
return _estimate_elasticsearch_cluster_zonal(
instance=instance,
drive=drive,
desires=desires,
zones_per_region=context.zones_in_region,
copies_per_region=copies_per_region,
max_regional_size=max_regional_size,
max_local_disk_gib=max_local_disk_gib,
max_rps_to_disk=max_rps_to_disk,
)
@staticmethod
def description():
return "Netflix Streaming Elasticsearch Model"
@staticmethod
def extra_model_arguments() -> Sequence[Tuple[str, str, str]]:
return (
(
"copies_per_region",
"int = 3",
"How many
|
if instance.cpu < 2 or instance.ram_gib < 14:
return None
# (FIXME): Need elasticsearch input
# Right now Elasticsearch doesn't deploy to cloud drives, just adding this
# here and leaving the capability to handle cloud drives for the future
if instance.drive is None:
return None
rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region
# Based on the disk latency and the read latency SLOs we adjust our
# working set to keep more or less data in RAM. Faster drives need
# less fronting RAM.
ws_drive = instance.drive or drive
working_set = working_set_from_drive_and_slo(
drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms),
read_slo_latency_dist=dist_for_interval(
desires.query_pattern.read_latency_slo_ms
),
|
identifier_body
|
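The disk estimate near the top of the requirement function is plain arithmetic: compressed state, multiplied by the replication factor, then split across zones. A hedged worked example in Go with invented inputs (1 TiB logical state, 2.5x compression, 3 copies, 3 zones):

package main

import (
	"fmt"
	"math"
)

func main() {
	stateGiB := 1024.0 // estimated_state_size_gib.mid (invented)
	compression := 2.5 // estimated_compression_ratio.mid (invented)
	copies := 3.0      // copies_per_region (replication factor)
	zonesPerRegion := 3.0

	// Regional on-disk footprint: data shrinks by the compression ratio
	// but is stored copies_per_region times.
	neededDisk := math.Ceil((1.0 / compression) * stateGiB * copies) // 1229 GiB

	// The requirement the function returns is per zone (integer division
	// in the Python above, floor here).
	perZone := math.Floor(neededDisk / zonesPerRegion) // 409 GiB

	fmt.Println(neededDisk, perZone) // 1229 409
}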
elasticsearch.py
|
) -> CapacityRequirement:
"""Estimate the capacity required for one zone given a regional desire
The input desires should be the **regional** desire, and this function will
return the zonal capacity requirement
"""
# Keep half of the cores free for background work (merging mostly)
needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)
# Keep half of the bandwidth available for backup
needed_network_mbps = simple_network_mbps(desires) * 2
needed_disk = math.ceil(
(1.0 / desires.data_shape.estimated_compression_ratio.mid)
* desires.data_shape.estimated_state_size_gib.mid
* copies_per_region,
)
    # Rough estimate of how many instances we would need just for the CPU
# Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil(
max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))
)
rough_count = math.ceil(needed_cores / instance.cpu)
# Generally speaking we want fewer than some number of reads per second
# hitting disk per instance. If we don't have many reads we don't need to
# hold much data in memory.
instance_rps = max(1, reads_per_second // rough_count)
disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))
rps_working_set = min(1.0, disk_rps / max_rps_to_disk)
# If disk RPS will be smaller than our target because there are no
# reads, we don't need to hold as much data in memory
needed_memory = min(working_set, rps_working_set) * needed_disk
# Now convert to per zone
needed_cores = needed_cores // zones_per_region
needed_disk = needed_disk // zones_per_region
needed_memory = int(needed_memory // zones_per_region)
logger.debug(
"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)",
needed_cores,
needed_memory,
needed_disk,
working_set,
)
return CapacityRequirement(
requirement_type="elasticsearch-data-zonal",
core_reference_ghz=desires.core_reference_ghz,
cpu_cores=certain_int(needed_cores),
mem_gib=certain_float(needed_memory),
disk_gib=certain_float(needed_disk),
network_mbps=certain_float(needed_network_mbps),
context={
"working_set": min(working_set, rps_working_set),
"rps_working_set": rps_working_set,
"disk_slo_working_set": working_set,
"replication_factor": copies_per_region,
"compression_ratio": round(
1.0 / desires.data_shape.estimated_compression_ratio.mid, 2
),
"read_per_second": reads_per_second,
},
)
def _upsert_params(cluster, params):
if cluster.cluster_params:
|
else:
cluster.cluster_params = params
# pylint: disable=too-many-locals
def _estimate_elasticsearch_cluster_zonal(
instance: Instance,
drive: Drive,
desires: CapacityDesires,
zones_per_region: int = 3,
copies_per_region: int = 3,
max_local_disk_gib: int = 4096,
max_regional_size: int = 240,
max_rps_to_disk: int = 500,
) -> Optional[CapacityPlan]:
# Netflix Elasticsearch doesn't like to deploy on really small instances
if instance.cpu < 2 or instance.ram_gib < 14:
return None
# (FIXME): Need elasticsearch input
# Right now Elasticsearch doesn't deploy to cloud drives, just adding this
# here and leaving the capability to handle cloud drives for the future
if instance.drive is None:
return None
rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region
# Based on the disk latency and the read latency SLOs we adjust our
# working set to keep more or less data in RAM. Faster drives need
# less fronting RAM.
ws_drive = instance.drive or drive
working_set = working_set_from_drive_and_slo(
drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms),
read_slo_latency_dist=dist_for_interval(
desires.query_pattern.read_latency_slo_ms
),
estimated_working_set=desires.data_shape.estimated_working_set_percent,
# Elasticsearch has looser latency SLOs, target the 90th percentile of disk
# latency to keep in RAM.
target_percentile=0.90,
).mid
requirement = _estimate_elasticsearch_requirement(
instance=instance,
desires=desires,
working_set=working_set,
reads_per_second=rps,
zones_per_region=zones_per_region,
copies_per_region=copies_per_region,
max_rps_to_disk=max_rps_to_disk,
)
base_mem = (
desires.data_shape.reserved_instance_app_mem_gib
+ desires.data_shape.reserved_instance_system_mem_gib
)
cluster = compute_stateful_zone(
instance=instance,
drive=drive,
needed_cores=int(requirement.cpu_cores.mid),
needed_disk_gib=int(requirement.disk_gib.mid),
needed_memory_gib=int(requirement.mem_gib.mid),
needed_network_mbps=requirement.network_mbps.mid,
# Assume that by provisioning enough memory we'll get
# a 90% hit rate, but take into account the reads per read
# from the per node dataset using leveled compaction
# FIXME: I feel like this can be improved
required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps),
# Elasticsearch requires ephemeral disks to be % full because tiered
# merging can make progress as long as there is some headroom
required_disk_space=lambda x: x * 1.4,
max_local_disk_gib=max_local_disk_gib,
# elasticsearch clusters can autobalance via shard placement
cluster_size=lambda x: x,
min_count=1,
# Sidecars/System takes away memory from elasticsearch
# Elasticsearch uses half of available system max of 32 for compressed
# oops
reserve_memory=lambda x: base_mem + max(32, x / 2),
core_reference_ghz=requirement.core_reference_ghz,
)
    # Communicate to the actual provisioner if we want a reduced RF
params = {"elasticsearch.copies": copies_per_region}
_upsert_params(cluster, params)
# elasticsearch clusters generally should try to stay under some total number
# of nodes. Orgs do this for all kinds of reasons such as
# * Security group limits. Since you must have < 500 rules if you're
    #   ingressing public IPs
# * Maintenance. If your restart script does one node at a time you want
# smaller clusters so your restarts don't take months.
# * NxN network issues. Sometimes smaller clusters of bigger nodes
# are better for network propagation
if cluster.count > (max_regional_size // zones_per_region):
return None
ec2_cost = zones_per_region * cluster.annual_cost
cluster.cluster_type = "elasticsearch-data"
clusters = Clusters(
total_annual_cost=round(Decimal(ec2_cost), 2),
zonal=[cluster] * zones_per_region,
regional=list(),
)
return CapacityPlan(
requirements=Requirements(zonal=[requirement] * zones_per_region),
candidate_clusters=clusters,
)
class NflxElasticsearchCapacityModel(CapacityModel):
@staticmethod
def capacity_plan(
instance: Instance,
drive: Drive,
context: RegionContext,
desires: CapacityDesires,
extra_model_arguments: Dict[str, Any],
) -> Optional[CapacityPlan]:
# (FIXME): Need elasticsearch input
# TODO: Use durability requirements to compute RF.
copies_per_region: int = _target_rf(
desires, extra_model_arguments.get("copies_per_region", None)
)
max_regional_size: int = extra_model_arguments.get("max_regional_size", 240)
max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000)
# Very large nodes are hard to recover
max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000)
return _estimate_elasticsearch_cluster_zonal(
instance=instance,
drive=drive,
desires=desires,
zones_per_region=context.zones_in_region,
copies_per_region=copies_per_region,
max_regional_size=max_regional_size,
max_local_disk_gib=max_local_disk_gib,
max_rps_to_disk=max_rps_to_disk,
)
@staticmethod
def description():
return "Netflix Streaming Elasticsearch Model"
@staticmethod
def extra_model_arguments() -> Sequence[Tuple[str, str, str]]:
return (
(
"copies_per_region",
"int = 3",
"How many
|
cluster.cluster_params.update(params)
|
conditional_block
|
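The guard near the end of _estimate_elasticsearch_cluster_zonal turns the regional node cap into a per-zone cap and rejects plans that exceed it. A hedged Go illustration using the defaults above (max_regional_size of 240 across three zones) and an invented zonal count:

package main

import "fmt"

func main() {
	maxRegionalSize := 240 // default in the model above
	zonesPerRegion := 3
	clusterCount := 96 // hypothetical zonal node count from the zone computation

	perZoneCap := maxRegionalSize / zonesPerRegion // 80 nodes per zone
	if clusterCount > perZoneCap {
		fmt.Println("reject plan:", clusterCount, ">", perZoneCap)
		return
	}
	fmt.Println("accept plan")
}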
elasticsearch.py
|
) -> CapacityRequirement:
"""Estimate the capacity required for one zone given a regional desire
The input desires should be the **regional** desire, and this function will
return the zonal capacity requirement
"""
# Keep half of the cores free for background work (merging mostly)
needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)
# Keep half of the bandwidth available for backup
needed_network_mbps = simple_network_mbps(desires) * 2
needed_disk = math.ceil(
(1.0 / desires.data_shape.estimated_compression_ratio.mid)
* desires.data_shape.estimated_state_size_gib.mid
* copies_per_region,
)
    # Rough estimate of how many instances we would need just for the CPU
# Note that this is a lower bound, we might end up with more.
needed_cores = math.ceil(
max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))
)
rough_count = math.ceil(needed_cores / instance.cpu)
# Generally speaking we want fewer than some number of reads per second
# hitting disk per instance. If we don't have many reads we don't need to
# hold much data in memory.
instance_rps = max(1, reads_per_second // rough_count)
disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))
rps_working_set = min(1.0, disk_rps / max_rps_to_disk)
# If disk RPS will be smaller than our target because there are no
# reads, we don't need to hold as much data in memory
needed_memory = min(working_set, rps_working_set) * needed_disk
# Now convert to per zone
needed_cores = needed_cores // zones_per_region
needed_disk = needed_disk // zones_per_region
needed_memory = int(needed_memory // zones_per_region)
logger.debug(
"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)",
needed_cores,
needed_memory,
needed_disk,
working_set,
)
return CapacityRequirement(
requirement_type="elasticsearch-data-zonal",
core_reference_ghz=desires.core_reference_ghz,
cpu_cores=certain_int(needed_cores),
mem_gib=certain_float(needed_memory),
disk_gib=certain_float(needed_disk),
network_mbps=certain_float(needed_network_mbps),
context={
"working_set": min(working_set, rps_working_set),
"rps_working_set": rps_working_set,
"disk_slo_working_set": working_set,
"replication_factor": copies_per_region,
"compression_ratio": round(
1.0 / desires.data_shape.estimated_compression_ratio.mid, 2
),
"read_per_second": reads_per_second,
},
)
def _upsert_params(cluster, params):
if cluster.cluster_params:
cluster.cluster_params.update(params)
else:
cluster.cluster_params = params
# pylint: disable=too-many-locals
def _estimate_elasticsearch_cluster_zonal(
instance: Instance,
drive: Drive,
desires: CapacityDesires,
zones_per_region: int = 3,
copies_per_region: int = 3,
max_local_disk_gib: int = 4096,
max_regional_size: int = 240,
max_rps_to_disk: int = 500,
) -> Optional[CapacityPlan]:
# Netflix Elasticsearch doesn't like to deploy on really small instances
if instance.cpu < 2 or instance.ram_gib < 14:
return None
# (FIXME): Need elasticsearch input
# Right now Elasticsearch doesn't deploy to cloud drives, just adding this
# here and leaving the capability to handle cloud drives for the future
if instance.drive is None:
return None
rps = desires.query_pattern.estimated_read_per_second.mid // zones_per_region
# Based on the disk latency and the read latency SLOs we adjust our
# working set to keep more or less data in RAM. Faster drives need
# less fronting RAM.
ws_drive = instance.drive or drive
working_set = working_set_from_drive_and_slo(
drive_read_latency_dist=dist_for_interval(ws_drive.read_io_latency_ms),
read_slo_latency_dist=dist_for_interval(
desires.query_pattern.read_latency_slo_ms
),
estimated_working_set=desires.data_shape.estimated_working_set_percent,
# Elasticsearch has looser latency SLOs, target the 90th percentile of disk
# latency to keep in RAM.
target_percentile=0.90,
).mid
requirement = _estimate_elasticsearch_requirement(
instance=instance,
desires=desires,
working_set=working_set,
reads_per_second=rps,
zones_per_region=zones_per_region,
copies_per_region=copies_per_region,
max_rps_to_disk=max_rps_to_disk,
)
base_mem = (
desires.data_shape.reserved_instance_app_mem_gib
+ desires.data_shape.reserved_instance_system_mem_gib
)
cluster = compute_stateful_zone(
instance=instance,
drive=drive,
needed_cores=int(requirement.cpu_cores.mid),
needed_disk_gib=int(requirement.disk_gib.mid),
needed_memory_gib=int(requirement.mem_gib.mid),
needed_network_mbps=requirement.network_mbps.mid,
# Assume that by provisioning enough memory we'll get
# a 90% hit rate, but take into account the reads per read
# from the per node dataset using leveled compaction
# FIXME: I feel like this can be improved
required_disk_ios=lambda x: _es_io_per_read(x) * math.ceil(0.1 * rps),
# Elasticsearch requires ephemeral disks to be % full because tiered
# merging can make progress as long as there is some headroom
required_disk_space=lambda x: x * 1.4,
max_local_disk_gib=max_local_disk_gib,
# elasticsearch clusters can autobalance via shard placement
cluster_size=lambda x: x,
min_count=1,
# Sidecars/System takes away memory from elasticsearch
# Elasticsearch uses half of available system max of 32 for compressed
# oops
reserve_memory=lambda x: base_mem + max(32, x / 2),
core_reference_ghz=requirement.core_reference_ghz,
)
    # Communicate to the actual provisioner if we want a reduced RF
params = {"elasticsearch.copies": copies_per_region}
_upsert_params(cluster, params)
# elasticsearch clusters generally should try to stay under some total number
# of nodes. Orgs do this for all kinds of reasons such as
# * Security group limits. Since you must have < 500 rules if you're
    #   ingressing public IPs
# * Maintenance. If your restart script does one node at a time you want
# smaller clusters so your restarts don't take months.
# * NxN network issues. Sometimes smaller clusters of bigger nodes
# are better for network propagation
if cluster.count > (max_regional_size // zones_per_region):
return None
ec2_cost = zones_per_region * cluster.annual_cost
cluster.cluster_type = "elasticsearch-data"
clusters = Clusters(
total_annual_cost=round(Decimal(ec2_cost), 2),
zonal=[cluster] * zones_per_region,
regional=list(),
)
return CapacityPlan(
requirements=Requirements(zonal=[requirement] * zones_per_region),
candidate_clusters=clusters,
)
class
|
(CapacityModel):
@staticmethod
def capacity_plan(
instance: Instance,
drive: Drive,
context: RegionContext,
desires: CapacityDesires,
extra_model_arguments: Dict[str, Any],
) -> Optional[CapacityPlan]:
# (FIXME): Need elasticsearch input
# TODO: Use durability requirements to compute RF.
copies_per_region: int = _target_rf(
desires, extra_model_arguments.get("copies_per_region", None)
)
max_regional_size: int = extra_model_arguments.get("max_regional_size", 240)
max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000)
# Very large nodes are hard to recover
max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5000)
return _estimate_elasticsearch_cluster_zonal(
instance=instance,
drive=drive,
desires=desires,
zones_per_region=context.zones_in_region,
copies_per_region=copies_per_region,
max_regional_size=max_regional_size,
max_local_disk_gib=max_local_disk_gib,
max_rps_to_disk=max_rps_to_disk,
)
@staticmethod
def description():
return "Netflix Streaming Elasticsearch Model"
@staticmethod
def extra_model_arguments() -> Sequence[Tuple[str, str, str]]:
return (
(
"copies_per_region",
"int = 3",
"How many
|
NflxElasticsearchCapacityModel
|
identifier_name
|
script.js
|
update(root);
function expand(d)
{
if (d._children)
{
d.children = d._children;
d.children.forEach(expand);
d._children = null;
}
}
};
// collapse the node and all its children
chart.collapse = function(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
chart.collapseTree = function(value)
{
root.children.forEach(collapse);
update(root);
function collapse(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
};
function update(source)
{
// assigns the x and y position for the nodes
var treeData = treemap(root);
// compute the new tree layout
var nodes = treeData.descendants(),
links = treeData.descendants().slice(1);
// normalise for fixed depth
nodes.forEach(function(d)
{
// d.x = d.depth * 180;
d.y = d.depth * width_multiplier;
// d.x = d.depth * 180;
});
// ****************** Nodes section ***************************
// update the nodes ...
var nodeArray = svg.selectAll('g.node')
.data(nodes, function(d)
{
return d.id || (d.id = ++i);
});
// console.log(nodeArray);
// Enter any new nodes at the parent's previous position.
var nodeEnter = nodeArray.enter().append('g')
.attr('class', 'node')
.attr('transform', function(d)
{
return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')';
// return 'translate(' + (source.y0) + ',' + (source.x0) + ')';
})
.on('click', click);
// Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children).
nodeEnter.append('circle')
.attr('class', 'node')
.attr('r', 1e-6)
.style('fill', function(d)
{
return d._children ? 'lightsteelblue' : '#fff';
});
// Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children.
nodeEnter.append("text")
.attr("x", function(d) { return d.children || d._children ? -15 : 15; })
.attr("dy", ".35em")
.attr("style", "node")
.attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; })
.text(function(d) {
// console.log(d);
// return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;})
return d.data.name;});
// .style("fill-opacity", 1e-6);
// Add the number of children inside the node circle, whether they are unfolded or not.
nodeEnter.append('text')
.attr('x', 0)
.attr('y', 3)
.attr("text-anchor", "middle")
.attr('cursor', 'pointer')
.style('font-size', '10px')
.text(function(d) {
if (d.children) return d.children.length;
else if (d._children) return d._children.length;
});
// UPDATE
var nodeUpdate = nodeEnter.merge(nodeArray);
// Transition the resulting array to the proper position for the node.
nodeUpdate.transition().duration(duration)
.attr('transform', function(d) {
return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')';
});
// Update the node attributes and style, coloring search results red.
nodeUpdate.select('circle.node')
.attr('r', 9)
.style("fill", function(d)
{
if(d.data.class === "found")
{
return "#ff4136"; //red
|
return "lightsteelblue";
}
})
.attr('cursor', 'pointer')
.style("stroke", function(d)
{
if (d.data.class === "found")
{
return "#ff4136"; //red
}
;
})
// Remove any exiting nodes
var nodeExit = nodeArray.exit()
.transition().duration(duration)
.attr('transform', function(d)
{
return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')';
})
.remove();
// on exit reduce the node circles size to 0
nodeExit.select('circle')
.attr('r', 1e-6);
// on exit reduce the opacity of text labels
nodeExit.select('text')
.style('fill-opacity', 1e-6);
// adding zoom and panning
d3.select("svg").call(d3.zoom().on("zoom", function()
{
svg.attr("transform", d3.event.transform)
}));
// trying to invert the direction of the zoom wheel
// .on("wheel", function(d){
// var direction = d3.event.wheelDelta < 0 ? 'down' : 'up';
// zoom(direction === 'up' ? d : d.parent);
// });
// ****************** links section ***************************
// update the links
var link = svg.selectAll('path.link').data(links, function(d) { return d.id });
// enter any new links at the parent's previous position
var linkEnter = link.enter().insert('path', 'g')
.attr('class', 'link')
.attr('d', function(d)
{
var o = {x: source.x0, y: source.y0};
return diagonal(o, o);
});
// UPDATE
var linkUpdate = linkEnter.merge(link);
// transition back to the parent element position
linkUpdate.transition().duration(duration)
.attr('d', function(d) { return diagonal(d, d.parent); })
.style("stroke",function(d) {
if(d.data.class==="found")
{
return "#ff4136";
}
});
// remove any exiting links
var linkExit = link.exit()
.transition().duration(duration)
.attr('d', function(d)
{
var o = {x: source.x, y: source.y};
return diagonal(o, o);
})
.remove();
// store the old positions for transition
nodes.forEach(function(d)
{
d.x0 = d.x;
d.y0 = d.y;
});
// creates a curved (diagonal) path from parent to the child nodes
function diagonal(s, d)
{
var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) +
'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) +
', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) +
', ' + (d.y + margin.top) + ' ' + (d.x + margin.left);
return path;
}
// toggle children on click
function click(d)
{
toggleChildren(d);
printNodeInfo(d);
}
function toggleChildren(d)
{
if (d.children)
{
d._children = d.children;
d.children = null;
} else {
d.children = d._children;
d._children = null;
}
update(d);
}
}
chart.updateWidth = function(value)
{
width_multiplier = value;
update(root);
}
chart.updateHeight = function(value)
{
height_extra_space = value;
update(root);
}
String.prototype.capitalize = function()
{
return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase();
};
function zoom()
{
var scale = d3.event.scale,
translation = d3.event.translate,
tbound = -h * scale,
bbound = h * scale,
lbound = (-w + m[1]) * scale,
rbound = (w - m[3]) * scale;
// limit translation to thresholds
translation = [
Math.max(Math.min(translation[0], rbound), lbound),
Math.max(Math.min(translation[1], bbound), tbound)
];
d3.select(".drawarea")
.attr("transform", "translate(" + translation + ")" +
" scale(" + scale + ")");
}
chart.openPaths = function(paths)
{
for(var i=0; i<paths.length; i++)
{
if(paths[i].id !== "1") //i.e. not root
{
paths[i].class = 'found';
console.log("right after setting class to 'found' ");
|
}
else if(d._children)
{
|
random_line_split
|
script.js
|
.attr('width', width + margin.left + margin.right)
.attr('height', height + margin.top + margin.bottom)
.append('g')
.attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');
// declares a tree layout and assigns the size of the tree
treemap = d3.tree().size([height, width]);
// assign parent, children, height, depth
root = d3.hierarchy(data, function(d) { return d.children });
root.x0 = (height / 2); // left edge of the rectangle
root.y0 = 0; // top edge of the rectangle
// collapse after the second level
root.children.forEach(collapse);
update(root);
function collapse(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
});
}
chart.width = function(value)
{
if (!arguments.length) return width;
width = value;
return chart;
};
chart.height = function(value)
{
if (!arguments.length) return height;
height = value;
return chart;
};
chart.margin = function(value)
{
if (!arguments.length) return margin;
margin = value;
return chart;
};
chart.data = function(value)
{
if (!arguments.length) return data;
data = value;
if (typeof updateData === 'function') updateData();
return chart;
};
chart.expandTree = function(value)
{
root.children.forEach(expand);
update(root);
function expand(d)
{
if (d._children)
{
d.children = d._children;
d.children.forEach(expand);
d._children = null;
}
}
};
// collapse the node and all its children
chart.collapse = function(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
chart.collapseTree = function(value)
{
root.children.forEach(collapse);
update(root);
function collapse(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
};
function update(source)
{
// assigns the x and y position for the nodes
var treeData = treemap(root);
// compute the new tree layout
var nodes = treeData.descendants(),
links = treeData.descendants().slice(1);
// normalise for fixed depth
nodes.forEach(function(d)
{
// d.x = d.depth * 180;
d.y = d.depth * width_multiplier;
// d.x = d.depth * 180;
});
// ****************** Nodes section ***************************
// update the nodes ...
var nodeArray = svg.selectAll('g.node')
.data(nodes, function(d)
{
return d.id || (d.id = ++i);
});
// console.log(nodeArray);
// Enter any new nodes at the parent's previous position.
var nodeEnter = nodeArray.enter().append('g')
.attr('class', 'node')
.attr('transform', function(d)
{
return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')';
// return 'translate(' + (source.y0) + ',' + (source.x0) + ')';
})
.on('click', click);
// Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children).
nodeEnter.append('circle')
.attr('class', 'node')
.attr('r', 1e-6)
.style('fill', function(d)
{
return d._children ? 'lightsteelblue' : '#fff';
});
// Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children.
nodeEnter.append("text")
.attr("x", function(d) { return d.children || d._children ? -15 : 15; })
.attr("dy", ".35em")
.attr("style", "node")
.attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; })
.text(function(d) {
// console.log(d);
// return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;})
return d.data.name;});
// .style("fill-opacity", 1e-6);
// Add the number of children inside the node circle, whether they are unfolded or not.
nodeEnter.append('text')
.attr('x', 0)
.attr('y', 3)
.attr("text-anchor", "middle")
.attr('cursor', 'pointer')
.style('font-size', '10px')
.text(function(d) {
if (d.children) return d.children.length;
else if (d._children) return d._children.length;
});
// UPDATE
var nodeUpdate = nodeEnter.merge(nodeArray);
// Transition the resulting array to the proper position for the node.
nodeUpdate.transition().duration(duration)
.attr('transform', function(d) {
return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')';
});
// Update the node attributes and style, coloring search results red.
nodeUpdate.select('circle.node')
.attr('r', 9)
.style("fill", function(d)
{
if(d.data.class === "found")
{
return "#ff4136"; //red
}
else if(d._children)
{
return "lightsteelblue";
}
})
.attr('cursor', 'pointer')
.style("stroke", function(d)
{
if (d.data.class === "found")
{
return "#ff4136"; //red
}
;
})
// Remove any exiting nodes
var nodeExit = nodeArray.exit()
.transition().duration(duration)
.attr('transform', function(d)
{
return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')';
})
.remove();
// on exit reduce the node circles size to 0
nodeExit.select('circle')
.attr('r', 1e-6);
// on exit reduce the opacity of text labels
nodeExit.select('text')
.style('fill-opacity', 1e-6);
// adding zoom and panning
d3.select("svg").call(d3.zoom().on("zoom", function()
{
svg.attr("transform", d3.event.transform)
}));
// trying to invert the direction of the zoom wheel
// .on("wheel", function(d){
// var direction = d3.event.wheelDelta < 0 ? 'down' : 'up';
// zoom(direction === 'up' ? d : d.parent);
// });
// ****************** links section ***************************
// update the links
var link = svg.selectAll('path.link').data(links, function(d) { return d.id });
// enter any new links at the parent's previous position
var linkEnter = link.enter().insert('path', 'g')
.attr('class', 'link')
.attr('d', function(d)
{
var o = {x: source.x0, y: source.y0};
return diagonal(o, o);
});
// UPDATE
var linkUpdate = linkEnter.merge(link);
// transition back to the parent element position
linkUpdate.transition().duration(duration)
.attr('d', function(d) { return diagonal(d, d.parent); })
.style("stroke",function(d) {
if(d.data.class==="found")
{
return "#ff4136";
}
});
// remove any exiting links
var linkExit = link.exit()
.transition().duration(duration)
.attr('d', function(d)
{
var o = {x: source.x, y: source.y};
return diagonal(o, o);
})
.remove();
|
{
var data, root, treemap, svg,
i = 0,
duration = 650,
// margin = {top: 20, right: 10, bottom: 20, left: 50},
margin = {top: 0, right: 0, bottom: 80, left: 50},
width = 960 - 4 - margin.left - margin.right, // fitting in block frame
height = 800 - 4 - margin.top - margin.bottom, // fitting in block frame
width_multiplier = 180,
height_extra_space = 0;
// update;
function chart(selection)
{
selection.each(function()
{
height = height - margin.top - margin.bottom;
width = width - margin.left - margin.right;
// append the svg object to the selection
svg = selection.append('svg')
|
identifier_body
|
|
script.js
|
)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
chart.collapseTree = function(value)
{
root.children.forEach(collapse);
update(root);
function collapse(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
};
function update(source)
{
// assigns the x and y position for the nodes
var treeData = treemap(root);
// compute the new tree layout
var nodes = treeData.descendants(),
links = treeData.descendants().slice(1);
// normalise for fixed depth
nodes.forEach(function(d)
{
// d.x = d.depth * 180;
d.y = d.depth * width_multiplier;
// d.x = d.depth * 180;
});
// ****************** Nodes section ***************************
// update the nodes ...
var nodeArray = svg.selectAll('g.node')
.data(nodes, function(d)
{
return d.id || (d.id = ++i);
});
// console.log(nodeArray);
// Enter any new nodes at the parent's previous position.
var nodeEnter = nodeArray.enter().append('g')
.attr('class', 'node')
.attr('transform', function(d)
{
return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')';
// return 'translate(' + (source.y0) + ',' + (source.x0) + ')';
})
.on('click', click);
// Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children).
nodeEnter.append('circle')
.attr('class', 'node')
.attr('r', 1e-6)
.style('fill', function(d)
{
return d._children ? 'lightsteelblue' : '#fff';
});
// Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children.
nodeEnter.append("text")
.attr("x", function(d) { return d.children || d._children ? -15 : 15; })
.attr("dy", ".35em")
.attr("style", "node")
.attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; })
.text(function(d) {
// console.log(d);
// return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;})
return d.data.name;});
// .style("fill-opacity", 1e-6);
// Add the number of children inside the node circle, whether they are unfolded or not.
nodeEnter.append('text')
.attr('x', 0)
.attr('y', 3)
.attr("text-anchor", "middle")
.attr('cursor', 'pointer')
.style('font-size', '10px')
.text(function(d) {
if (d.children) return d.children.length;
else if (d._children) return d._children.length;
});
// UPDATE
var nodeUpdate = nodeEnter.merge(nodeArray);
// Transition the resulting array to the proper position for the node.
nodeUpdate.transition().duration(duration)
.attr('transform', function(d) {
return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')';
});
// Update the node attributes and style, coloring search results red.
nodeUpdate.select('circle.node')
.attr('r', 9)
.style("fill", function(d)
{
if(d.data.class === "found")
{
return "#ff4136"; //red
}
else if(d._children)
{
return "lightsteelblue";
}
})
.attr('cursor', 'pointer')
.style("stroke", function(d)
{
if (d.data.class === "found")
{
return "#ff4136"; //red
}
;
})
// Remove any exiting nodes
var nodeExit = nodeArray.exit()
.transition().duration(duration)
.attr('transform', function(d)
{
return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')';
})
.remove();
// on exit reduce the node circles size to 0
nodeExit.select('circle')
.attr('r', 1e-6);
// on exit reduce the opacity of text labels
nodeExit.select('text')
.style('fill-opacity', 1e-6);
// adding zoom and panning
d3.select("svg").call(d3.zoom().on("zoom", function()
{
svg.attr("transform", d3.event.transform)
}));
// trying to invert the direction of the zoom wheel
// .on("wheel", function(d){
// var direction = d3.event.wheelDelta < 0 ? 'down' : 'up';
// zoom(direction === 'up' ? d : d.parent);
// });
// ****************** links section ***************************
// update the links
var link = svg.selectAll('path.link').data(links, function(d) { return d.id });
// enter any new links at the parent's previous position
var linkEnter = link.enter().insert('path', 'g')
.attr('class', 'link')
.attr('d', function(d)
{
var o = {x: source.x0, y: source.y0};
return diagonal(o, o);
});
// UPDATE
var linkUpdate = linkEnter.merge(link);
// transition back to the parent element position
linkUpdate.transition().duration(duration)
.attr('d', function(d) { return diagonal(d, d.parent); })
.style("stroke",function(d) {
if(d.data.class==="found")
{
return "#ff4136";
}
});
// remove any exiting links
var linkExit = link.exit()
.transition().duration(duration)
.attr('d', function(d)
{
var o = {x: source.x, y: source.y};
return diagonal(o, o);
})
.remove();
// store the old positions for transition
nodes.forEach(function(d)
{
d.x0 = d.x;
d.y0 = d.y;
});
// creates a curved (diagonal) path from parent to the child nodes
function diagonal(s, d)
{
var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) +
'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) +
', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) +
', ' + (d.y + margin.top) + ' ' + (d.x + margin.left);
return path;
}
// toggle children on click
function click(d)
{
toggleChildren(d);
printNodeInfo(d);
}
function toggleChildren(d)
{
if (d.children)
{
d._children = d.children;
d.children = null;
} else {
d.children = d._children;
d._children = null;
}
update(d);
}
}
chart.updateWidth = function(value)
{
width_multiplier = value;
update(root);
}
chart.updateHeight = function(value)
{
height_extra_space = value;
update(root);
}
String.prototype.capitalize = function()
{
return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase();
};
function zoom()
{
var scale = d3.event.scale,
translation = d3.event.translate,
tbound = -h * scale,
bbound = h * scale,
lbound = (-w + m[1]) * scale,
rbound = (w - m[3]) * scale;
// limit translation to thresholds
translation = [
Math.max(Math.min(translation[0], rbound), lbound),
Math.max(Math.min(translation[1], bbound), tbound)
];
d3.select(".drawarea")
.attr("transform", "translate(" + translation + ")" +
" scale(" + scale + ")");
}
chart.openPaths = function(paths)
{
for(var i=0; i<paths.length; i++)
{
if(paths[i].id !== "1") //i.e. not root
{
paths[i].class = 'found';
console.log("right after setting class to 'found' ");
if(paths[i]._children)
{ //if children are hidden: open them, otherwise: don't do anything
paths[i].children = paths[i]._children;
paths[i]._children = null;
}
else if(paths[i].children)
|
{
console.log("There are children here, tralalalala");
}
|
conditional_block
|
|
script.js
|
update(root);
function expand(d)
{
if (d._children)
{
d.children = d._children;
d.children.forEach(expand);
d._children = null;
}
}
};
// collapse the node and all its children
chart.collapse = function(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
chart.collapseTree = function(value)
{
root.children.forEach(collapse);
update(root);
function collapse(d)
{
if (d.children)
{
d._children = d.children;
d._children.forEach(collapse);
d.children = null;
}
}
};
function update(source)
{
// assigns the x and y position for the nodes
var treeData = treemap(root);
// compute the new tree layout
var nodes = treeData.descendants(),
links = treeData.descendants().slice(1);
// normalise for fixed depth
nodes.forEach(function(d)
{
// d.x = d.depth * 180;
d.y = d.depth * width_multiplier;
// d.x = d.depth * 180;
});
// ****************** Nodes section ***************************
// update the nodes ...
var nodeArray = svg.selectAll('g.node')
.data(nodes, function(d)
{
return d.id || (d.id = ++i);
});
// console.log(nodeArray);
// Enter any new nodes at the parent's previous position.
var nodeEnter = nodeArray.enter().append('g')
.attr('class', 'node')
.attr('transform', function(d)
{
return 'translate(' + (source.y0 + margin.top) + ',' + (source.x0 + margin.left) + ')';
// return 'translate(' + (source.y0) + ',' + (source.x0) + ')';
})
.on('click', click);
// Add circle for the nodes, which is filled lightsteelblue for nodes that have hidden children (_children).
nodeEnter.append('circle')
.attr('class', 'node')
.attr('r', 1e-6)
.style('fill', function(d)
{
return d._children ? 'lightsteelblue' : '#fff';
});
// Append the node label (data.name), either to the left or right of the node circle, depending on whether the node has children.
nodeEnter.append("text")
.attr("x", function(d) { return d.children || d._children ? -15 : 15; })
.attr("dy", ".35em")
.attr("style", "node")
.attr("text-anchor", function(d) { return d.children || d._children ? "end" : "start"; })
.text(function(d) {
// console.log(d);
// return (d.children || d._children) ? d.data.name.capitalize() : d.data.name;})
return d.data.name;});
// .style("fill-opacity", 1e-6);
// Add the number of children inside the node circle, whether they are unfolded or not.
nodeEnter.append('text')
.attr('x', 0)
.attr('y', 3)
.attr("text-anchor", "middle")
.attr('cursor', 'pointer')
.style('font-size', '10px')
.text(function(d) {
if (d.children) return d.children.length;
else if (d._children) return d._children.length;
});
// UPDATE
var nodeUpdate = nodeEnter.merge(nodeArray);
// Transition the resulting array to the proper position for the node.
nodeUpdate.transition().duration(duration)
.attr('transform', function(d) {
return 'translate(' + (d.y + margin.top) + ',' + (d.x + margin.left) + ')';
});
// Update the node attributes and style, coloring search results red.
nodeUpdate.select('circle.node')
.attr('r', 9)
.style("fill", function(d)
{
if(d.data.class === "found")
{
return "#ff4136"; //red
}
else if(d._children)
{
return "lightsteelblue";
}
})
.attr('cursor', 'pointer')
.style("stroke", function(d)
{
if (d.data.class === "found")
{
return "#ff4136"; //red
}
;
})
// Remove any exiting nodes
var nodeExit = nodeArray.exit()
.transition().duration(duration)
.attr('transform', function(d)
{
return 'translate(' + (source.y + margin.top) + ',' + (source.x + margin.left) + ')';
})
.remove();
// on exit reduce the node circles size to 0
nodeExit.select('circle')
.attr('r', 1e-6);
// on exit reduce the opacity of text labels
nodeExit.select('text')
.style('fill-opacity', 1e-6);
// adding zoom and panning
d3.select("svg").call(d3.zoom().on("zoom", function()
{
svg.attr("transform", d3.event.transform)
}));
// trying to invert the direction of the zoom wheel
// .on("wheel", function(d){
// var direction = d3.event.wheelDelta < 0 ? 'down' : 'up';
// zoom(direction === 'up' ? d : d.parent);
// });
// ****************** links section ***************************
// update the links
var link = svg.selectAll('path.link').data(links, function(d) { return d.id });
// enter any new links at the parent's previous position
var linkEnter = link.enter().insert('path', 'g')
.attr('class', 'link')
.attr('d', function(d)
{
var o = {x: source.x0, y: source.y0};
return diagonal(o, o);
});
// UPDATE
var linkUpdate = linkEnter.merge(link);
// transition back to the parent element position
linkUpdate.transition().duration(duration)
.attr('d', function(d) { return diagonal(d, d.parent); })
.style("stroke",function(d) {
if(d.data.class==="found")
{
return "#ff4136";
}
});
// remove any exiting links
var linkExit = link.exit()
.transition().duration(duration)
.attr('d', function(d)
{
var o = {x: source.x, y: source.y};
return diagonal(o, o);
})
.remove();
// store the old positions for transition
nodes.forEach(function(d)
{
d.x0 = d.x;
d.y0 = d.y;
});
// creates a curved (diagonal) path from parent to the child nodes
function diagonal(s, d)
{
var path = 'M ' + (s.y + margin.top) + ' ' + (s.x + margin.left) +
'C ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (s.x + margin.left) +
', ' + ((s.y + d.y + (margin.top * 2)) / 2) + ' ' + (d.x + margin.left) +
', ' + (d.y + margin.top) + ' ' + (d.x + margin.left);
return path;
}
// toggle children on click
function
|
(d)
{
toggleChildren(d);
printNodeInfo(d);
}
function toggleChildren(d)
{
if (d.children)
{
d._children = d.children;
d.children = null;
} else {
d.children = d._children;
d._children = null;
}
update(d);
}
}
chart.updateWidth = function(value)
{
width_multiplier = value;
update(data);
}
chart.updateHeight = function(value)
{
height_extra_space = value;
update(data);
}
String.prototype.capitalize = function()
{
return this.charAt(0).toUpperCase() + this.slice(1).toLowerCase();
};
function zoom()
{
var scale = d3.event.scale,
translation = d3.event.translate,
tbound = -h * scale,
bbound = h * scale,
lbound = (-w + m[1]) * scale,
rbound = (w - m[3]) * scale;
// limit translation to thresholds
translation = [
Math.max(Math.min(translation[0], rbound), lbound),
Math.max(Math.min(translation[1], bbound), tbound)
];
d3.select(".drawarea")
.attr("transform", "translate(" + translation + ")" +
" scale(" + scale + ")");
}
chart.openPaths = function(paths)
{
for(var i=0; i<paths.length; i++)
{
if(paths[i].id !== "1") //i.e. not root
{
paths[i].class = 'found';
console.log("right after setting class to 'found
|
click
|
identifier_name
|
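The script.js rows above all rely on the same d3 idiom: a node's hidden children are parked on _children and swapped back onto children when the node is clicked (toggleChildren). A language-agnostic sketch of that toggle, written in Python to match the other examples here (the Node class and toggle helper are illustrative only):

# Illustrative toggle of the d3-style children/_children convention.
class Node:
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []
        self._children = []          # parked (hidden) children

def toggle(node: "Node") -> None:
    # Swap visible and hidden children, like toggleChildren(d) above;
    # exactly one of the two lists is non-empty at any time.
    node.children, node._children = node._children, node.children

root = Node("root", [Node("a"), Node("b")])
toggle(root)
assert root.children == [] and len(root._children) == 2
toggle(root)
assert len(root.children) == 2 and root._children == []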
data.py
|
return self._word_to_id[UNKNOWN_TOKEN]
return self._word_to_id[word]
def id2word(self, word_id):
"""Returns the word (string) corresponding to an id (integer)."""
if word_id not in self._id_to_word:
raise ValueError('Id not found in vocab: %d' % word_id)
return self._id_to_word[word_id]
def size(self):
"""Returns the total size of the vocabulary"""
return self._count
def write_metadata(self, fpath):
"""Writes metadata file for Tensorboard word embedding visualizer as described here:
https://www.tensorflow.org/get_started/embedding_viz
Args:
fpath: place to write the metadata file
"""
print "Writing word embedding metadata file to %s..." % (fpath)
with open(fpath, "w") as f:
fieldnames = ['word']
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
for i in xrange(self.size()):
writer.writerow({"word": self._id_to_word[i]})
def set_glove_embedding(self,fpath,embedding_dim):
""" Creates glove embedding_matrix from file path"""
emb = np.random.randn(self._count,embedding_dim)
# tf.logging.info(emb[0])
with open(fpath) as f: #python 3.x support
for k,line in enumerate(f):
fields = line.split()
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
#logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
# embedding_dim, len(fields) - 1, line)
raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s",
embedding_dim, len(fields) - 1, line)
continue
word = fields[0]
if word in self._word_to_id:
vector = np.asarray(fields[1:], dtype='float32')
emb[self._word_to_id[word]] = vector
# if k%1000 == 0:
# tf.logging.info('glove : %d',k)
self.glove_emb = emb
class BertVocab(object):
"""
While glove_vocab has been used as the default, the term "glove" is a misnomer; glove_vocab represents the normal vocab in this file.
This class converts individual tokens to their respective word piece tokens.
"""
def __init__(self, glove_vocab, bert_vocab_file_path):
self.bert_vocab = collections.OrderedDict()
self.glove_vocab = glove_vocab
index = 0
with tf.gfile.GFile(bert_vocab_file_path, "r") as reader: #obtain bert vocab
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
self.bert_vocab[token] = index
index += 1
not_found = 0
self.index_map_glove_to_bert = {}
for i in range(glove_vocab._count):
if glove_vocab._id_to_word[i] in self.bert_vocab:
self.index_map_glove_to_bert[i] = [self.bert_vocab[glove_vocab._id_to_word[i]]]
else: #Word Piece Tokenizer
not_found = not_found + 1
new_tokens = []
token = glove_vocab._id_to_word[i]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
new_tokens.append(self.bert_vocab['[UNK]'])
else:
sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
new_tokens = new_tokens + sub_tokens_bert
self.index_map_glove_to_bert[i] = new_tokens
tf.logging.info(not_found)
def convert_glove_to_bert_indices(self, token_ids):
"""
Converts words to their respective word-piece tokenized indices
token_ids: word ids from the (glove/normal) vocab
"""
new_tokens = [self.bert_vocab['[CLS]']] # As per the bert repo instructions
offset = 1
pos_offset = []
for token_id in token_ids:
pos_offset.append(offset) #wordpiece tokenizer can return more than one index hence we maintain an offset array. This is useful for the BERT + GCN experiments.
if token_id in self.index_map_glove_to_bert:
bert_tokens = self.index_map_glove_to_bert[token_id]
offset = offset + len(bert_tokens) - 1
#new_tokens.append(self.index_map_glove_to_bert[token_id])
new_tokens = new_tokens + bert_tokens
else:
#wordpiece might be redundant for training data. Keep for unseen instances
token = self.glove_vocab._id_to_word[token_id]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
#new_tokens.append(self.index_map_glove_to_bert['[UNK]'])
new_tokens.append(self.bert_vocab['[UNK]'])
else:
sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
new_tokens = new_tokens + sub_tokens_bert
offset = offset + len(sub_tokens_bert) - 1
new_tokens.append(self.bert_vocab['[SEP]'])
return new_tokens, pos_offset
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
if item in vocab:
output.append(vocab[item])
else:
output.append(vocab['[UNK]'])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def example_generator(data_path, single_pass, device_rank,data_as_tf_example=True):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
random.seed(device_rank+1)
if data_as_tf_example:
epoch = 0
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
#tf.logging.info(filelist)
for file_no, f in enumerate(filelist):
reader = open(f, 'rb')
all_examples = []
while True:
len_bytes = reader.read(8)
if not len_bytes:
if not single_pass:
random.shuffle(all_examples)
for k in all_examples:
yield example_pb2.Example.FromString(k), epoch
break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len
|
random_line_split
|
||
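The BertVocab code above performs greedy longest-match-first WordPiece tokenization: it repeatedly takes the longest prefix of the remaining characters that is in the vocab, prefixing continuation pieces with "##", and falls back to [UNK] when no decomposition exists. A stripped-down sketch of that loop, assuming a plain set as the vocabulary (the wordpiece function and toy vocab are illustrative, not the BERT repo's tokenizer):

# Minimal greedy WordPiece sketch; the function name and tiny vocab are illustrative.
from typing import List

def wordpiece(token: str, vocab: set, unk: str = "[UNK]") -> List[str]:
    chars = list(token)
    sub_tokens = []
    start = 0
    while start < len(chars):
        end = len(chars)
        cur = None
        # Try the longest possible substring first, shrinking until a match.
        while start < end:
            piece = "".join(chars[start:end])
            if start > 0:
                piece = "##" + piece  # continuation pieces are prefixed with ##
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no decomposition exists -> whole token is unknown
        sub_tokens.append(cur)
        start = end
    return sub_tokens

# Example with a toy vocab.
vocab = {"un", "##aff", "##able", "[UNK]"}
assert wordpiece("unaffable", vocab) == ["un", "##aff", "##able"]
assert wordpiece("xyz", vocab) == ["[UNK]"]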
data.py
|
writer.writerow({"word": self._id_to_word[i]})
def set_glove_embedding(self,fpath,embedding_dim):
""" Creates glove embedding_matrix from file path"""
emb = np.random.randn(self._count,embedding_dim)
# tf.logging.info(emb[0])
with open(fpath) as f: #python 3.x support
for k,line in enumerate(f):
fields = line.split()
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
#logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
# embedding_dim, len(fields) - 1, line)
raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s",
embedding_dim, len(fields) - 1, line)
continue
word = fields[0]
if word in self._word_to_id:
vector = np.asarray(fields[1:], dtype='float32')
emb[self._word_to_id[word]] = vector
# if k%1000 == 0:
# tf.logging.info('glove : %d',k)
self.glove_emb = emb
class BertVocab(object):
"""
While glove_vocab has been used as the default, the term "glove" is a misnomer; glove_vocab represents the normal vocab in this file.
This class converts individual tokens to their respective word piece tokens.
"""
def __init__(self, glove_vocab, bert_vocab_file_path):
self.bert_vocab = collections.OrderedDict()
self.glove_vocab = glove_vocab
index = 0
with tf.gfile.GFile(bert_vocab_file_path, "r") as reader: #obtain bert vocab
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
self.bert_vocab[token] = index
index += 1
not_found = 0
self.index_map_glove_to_bert = {}
for i in range(glove_vocab._count):
if glove_vocab._id_to_word[i] in self.bert_vocab:
self.index_map_glove_to_bert[i] = [self.bert_vocab[glove_vocab._id_to_word[i]]]
else: #Word Piece Tokenizer
not_found = not_found + 1
new_tokens = []
token = glove_vocab._id_to_word[i]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
new_tokens.append(self.bert_vocab['[UNK]'])
else:
sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
new_tokens = new_tokens + sub_tokens_bert
self.index_map_glove_to_bert[i] = new_tokens
tf.logging.info(not_found)
def convert_glove_to_bert_indices(self, token_ids):
"""
Converts words to their respective word-piece tokenized indices
token_ids: word ids from the (glove/normal) vocab
"""
new_tokens = [self.bert_vocab['[CLS]']] # As per the bert repo instructions
offset = 1
pos_offset = []
for token_id in token_ids:
pos_offset.append(offset) #wordpiece tokenizer can return more than one index hence we maintain an offset array. This is useful for the BERT + GCN experiments.
if token_id in self.index_map_glove_to_bert:
bert_tokens = self.index_map_glove_to_bert[token_id]
offset = offset + len(bert_tokens) - 1
#new_tokens.append(self.index_map_glove_to_bert[token_id])
new_tokens = new_tokens + bert_tokens
else:
#wordpiece might be redundant for training data. Keep for unseen instances
token = self.glove_vocab._id_to_word[token_id]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
|
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
#new_tokens.append(self.index_map_glove_to_bert['[UNK]'])
new_tokens.append(self.bert_vocab['[UNK]'])
else:
sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
new_tokens = new_tokens + sub_tokens_bert
offset = offset + len(sub_tokens_bert) - 1
new_tokens.append(self.bert_vocab['[SEP]'])
return new_tokens, pos_offset
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
if item in vocab:
output.append(vocab[item])
else:
output.append(vocab['[UNK]'])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def example_generator(data_path, single_pass, device_rank,data_as_tf_example=True):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
random.seed(device_rank+1)
if data_as_tf_example:
epoch = 0
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
#tf.logging.info(filelist)
for file_no, f in enumerate(filelist):
reader = open(f, 'rb')
all_examples = []
while True:
len_bytes = reader.read(8)
if not len_bytes:
if not single_pass:
random.shuffle(all_examples)
for k in all_examples:
yield example_pb2.Example.FromString(k), epoch
break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
all_examples.append(example_str)
if single_pass:
print "example_generator completed reading all datafiles. No more data."
break
else:
#pickle format
while True:
if single_pass:
for data_ in data_path:
for i in data_:
yield i
else:
random.shuffle(data_path)
for data_ in data_path:
new_data = data_
x = np.arange(len(new_data))
np.random.shuffle(x)
# random.shuffle(new_data)
for i in x:
yield new_data[i]
if single_pass:
break
def article2ids(article_words, vocab):
"""Map the article words to their ids. Also return a list of OOVs in the article.
Args:
article_words: list of words (strings)
vocab: Vocabulary object
Returns:
ids:
A list of word ids (integers); OOVs are represented by their temporary article OOV number. If
|
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
|
conditional_block
|
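example_generator above reads a length-prefixed binary format: an 8-byte native-order length (struct format 'q') followed by a blob containing a serialized tf.Example. A framework-free Python 3 sketch of a writer/reader pair for that framing (write_record and read_records are invented helper names, not part of the original codebase):

# Sketch of the <length><blob> framing used above.
import io
import struct
from typing import Iterator

def write_record(fh, blob: bytes) -> None:
    fh.write(struct.pack('q', len(blob)))              # 8-byte native-order length
    fh.write(struct.pack('%ds' % len(blob), blob))     # the serialized example bytes

def read_records(fh) -> Iterator[bytes]:
    while True:
        len_bytes = fh.read(8)
        if not len_bytes:
            return                                     # end of file
        str_len = struct.unpack('q', len_bytes)[0]
        yield struct.unpack('%ds' % str_len, fh.read(str_len))[0]

# Round-trip through an in-memory buffer.
buf = io.BytesIO()
for blob in (b"first example", b"second example"):
    write_record(buf, blob)
buf.seek(0)
assert list(read_records(buf)) == [b"first example", b"second example"]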
data.py
|
_bert
self.index_map_glove_to_bert[i] = new_tokens
tf.logging.info(not_found)
def convert_glove_to_bert_indices(self, token_ids):
"""
Converts words to their respective word-piece tokenized indices
token_ids: word ids from the (glove/normal) vocab
"""
new_tokens = [self.bert_vocab['[CLS]']] # As per the bert repo instructions
offset = 1
pos_offset = []
for token_id in token_ids:
pos_offset.append(offset) #wordpiece tokenizer can return more than one index hence we maintain an offset array. This is useful for the BERT + GCN experiments.
if token_id in self.index_map_glove_to_bert:
bert_tokens = self.index_map_glove_to_bert[token_id]
offset = offset + len(bert_tokens) - 1
#new_tokens.append(self.index_map_glove_to_bert[token_id])
new_tokens = new_tokens + bert_tokens
else:
#wordpiece might be redundant for training data. Keep for unseen instances
token = self.glove_vocab._id_to_word[token_id]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
#new_tokens.append(self.index_map_glove_to_bert['[UNK]'])
new_tokens.append(self.bert_vocab['[UNK]'])
else:
sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
new_tokens = new_tokens + sub_tokens_bert
offset = offset + len(sub_tokens_bert) - 1
new_tokens.append(self.bert_vocab['[SEP]'])
return new_tokens, pos_offset
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
if item in vocab:
output.append(vocab[item])
else:
output.append(vocab['[UNK]'])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def example_generator(data_path, single_pass, device_rank,data_as_tf_example=True):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
random.seed(device_rank+1)
if data_as_tf_example:
epoch = 0
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
#tf.logging.info(filelist)
for file_no, f in enumerate(filelist):
reader = open(f, 'rb')
all_examples = []
while True:
len_bytes = reader.read(8)
if not len_bytes:
if not single_pass:
random.shuffle(all_examples)
for k in all_examples:
yield example_pb2.Example.FromString(k), epoch
break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
all_examples.append(example_str)
if single_pass:
print "example_generator completed reading all datafiles. No more data."
break
else:
#pickle format
while True:
if single_pass:
for data_ in data_path:
for i in data_:
yield i
else:
random.shuffle(data_path)
for data_ in data_path:
new_data = data_
x = np.arange(len(new_data))
np.random.shuffle(x)
# random.shuffle(new_data)
for i in x:
yield new_data[i]
if single_pass:
break
def article2ids(article_words, vocab):
"""Map the article words to their ids. Also return a list of OOVs in the article.
Args:
article_words: list of words (strings)
vocab: Vocabulary object
Returns:
ids:
A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
oovs:
A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers."""
ids = []
oovs = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in article_words:
i = vocab.word2id(w)
if i == unk_id: # If w is OOV
if w not in oovs: # Add to list of OOVs
oovs.append(w)
oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV...
ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second...
else:
ids.append(i)
return ids, oovs
def abstract2ids(abstract_words, vocab, article_oovs):
"""Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.
Args:
abstract_words: list of words (strings)
vocab: Vocabulary object
article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers
Returns:
ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id."""
ids = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in abstract_words:
i = vocab.word2id(w)
if i == unk_id: # If w is an OOV word
if w in article_oovs: # If w is an in-article OOV
vocab_idx = vocab.size() + article_oovs.index(w) # Map to its temporary article OOV number
ids.append(vocab_idx)
else: # If w is an out-of-article OOV
ids.append(unk_id) # Map to the UNK token id
else:
ids.append(i)
return ids
def outputids2words(id_list, vocab, article_oovs):
"""Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode).
Args:
id_list: list of ids (integers)
vocab: Vocabulary object
article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode)
Returns:
words: list of words (strings)
"""
words = []
for i in id_list:
try:
w = vocab.id2word(i) # might be [UNK]
except ValueError as e: # w is OOV
assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
article_oov_idx = i - vocab.size()
try:
w = article_oovs[article_oov_idx]
except ValueError as e: # i doesn't correspond to an article oov
raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
words.append(w)
return words
def
|
abstract2sents
|
identifier_name
|
|
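article2ids above gives each in-article OOV word a temporary id starting at vocab.size(), so with a 50k vocabulary the first article OOV becomes id 50000. A toy sketch of that mapping, with a plain dict standing in for the Vocab object (article2ids_toy and the tiny vocab are illustrative):

# Toy illustration of the temporary-OOV-id scheme described above.
def article2ids_toy(article_words, word_to_id, unk_id, vocab_size):
    ids, oovs = [], []
    for w in article_words:
        i = word_to_id.get(w, unk_id)
        if i == unk_id:                             # w is OOV
            if w not in oovs:
                oovs.append(w)
            ids.append(vocab_size + oovs.index(w))  # temporary article OOV id
        else:
            ids.append(i)
    return ids, oovs

word_to_id = {"[UNK]": 0, "the": 1, "cat": 2}
ids, oovs = article2ids_toy(["the", "zyzzyva", "cat", "zyzzyva"], word_to_id, 0, 3)
assert ids == [1, 3, 2, 3] and oovs == ["zyzzyva"]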
data.py
|
for line in vocab_f:
pieces = line.split()
if len(pieces) != 2:
print ('Warning: incorrectly formatted line in vocabulary file: %s\n' % line)
continue
w = pieces[0]
if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
raise Exception(
'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w)
if w in self._word_to_id:
raise Exception('Duplicated word in vocabulary file: %s' % w)
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
if max_size != 0 and self._count >= max_size:
print ("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (
max_size, self._count))
break
print ("Finished constructing vocabulary of %i total words. Last word added: %s" % (
self._count, self._id_to_word[self._count - 1]))
def word2id(self, word):
"""Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV."""
if word not in self._word_to_id:
return self._word_to_id[UNKNOWN_TOKEN]
return self._word_to_id[word]
def id2word(self, word_id):
"""Returns the word (string) corresponding to an id (integer)."""
if word_id not in self._id_to_word:
raise ValueError('Id not found in vocab: %d' % word_id)
return self._id_to_word[word_id]
def size(self):
"""Returns the total size of the vocabulary"""
return self._count
def write_metadata(self, fpath):
"""Writes metadata file for Tensorboard word embedding visualizer as described here:
https://www.tensorflow.org/get_started/embedding_viz
Args:
fpath: place to write the metadata file
"""
print "Writing word embedding metadata file to %s..." % (fpath)
with open(fpath, "w") as f:
fieldnames = ['word']
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
for i in xrange(self.size()):
writer.writerow({"word": self._id_to_word[i]})
def set_glove_embedding(self,fpath,embedding_dim):
""" Creates glove embedding_matrix from file path"""
emb = np.random.randn(self._count,embedding_dim)
# tf.logging.info(emb[0])
with open(fpath) as f: #python 3.x support
for k,line in enumerate(f):
fields = line.split()
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
#logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
# embedding_dim, len(fields) - 1, line)
raise Exception("Found line with wrong number of dimensions (expected %d, was %d): %s",
embedding_dim, len(fields) - 1, line)
continue
word = fields[0]
if word in self._word_to_id:
vector = np.asarray(fields[1:], dtype='float32')
emb[self._word_to_id[word]] = vector
# if k%1000 == 0:
# tf.logging.info('glove : %d',k)
self.glove_emb = emb
class BertVocab(object):
"""
While glove_vocab has been used as the default, the term "glove" is a misnomer; glove_vocab represents the normal vocab in this file.
This class converts individual tokens to their respective word piece tokens.
"""
def __init__(self, glove_vocab, bert_vocab_file_path):
self.bert_vocab = collections.OrderedDict()
self.glove_vocab = glove_vocab
index = 0
with tf.gfile.GFile(bert_vocab_file_path, "r") as reader: #obtain bert vocab
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
self.bert_vocab[token] = index
index += 1
not_found = 0
self.index_map_glove_to_bert = {}
for i in range(glove_vocab._count):
if glove_vocab._id_to_word[i] in self.bert_vocab:
self.index_map_glove_to_bert[i] = [self.bert_vocab[glove_vocab._id_to_word[i]]]
else: #Word Piece Tokenizer
not_found = not_found + 1
new_tokens = []
token = glove_vocab._id_to_word[i]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
new_tokens.append(self.bert_vocab['[UNK]'])
else:
sub_tokens_bert = [self.bert_vocab[s] for s in sub_tokens]
new_tokens = new_tokens + sub_tokens_bert
self.index_map_glove_to_bert[i] = new_tokens
tf.logging.info(not_found)
def convert_glove_to_bert_indices(self, token_ids):
"""
Converts words to their respective word-piece tokenized indices
token_ids: word ids from the (glove/normal) vocab
"""
new_tokens = [self.bert_vocab['[CLS]']] # As per the bert repo instructions
offset = 1
pos_offset = []
for token_id in token_ids:
pos_offset.append(offset) #wordpiece tokenizer can return more than one index hence we maintain an offset array. This is useful for the BERT + GCN experiments.
if token_id in self.index_map_glove_to_bert:
bert_tokens = self.index_map_glove_to_bert[token_id]
offset = offset + len(bert_tokens) - 1
#new_tokens.append(self.index_map_glove_to_bert[token_id])
new_tokens = new_tokens + bert_tokens
else:
#wordpiece might be redundant for training data. Keep for unseen instances
token = self.glove_vocab._id_to_word[token_id]
chars = list(token)
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.bert_vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
#new_tokens.append(self.index_map_glove_to_bert['[UNK]'])
|
"""Vocabulary class for mapping between words and ids (integers)"""
def __init__(self, vocab_file, max_size):
"""Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.
Args:
vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.
max_size: integer. The maximum size of the resulting Vocabulary."""
self._word_to_id = {}
self._id_to_word = {}
self._count = 0 # keeps track of total number of words in the Vocab
# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.
for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
# Read the vocab file and add words up to max_size
with open(vocab_file, 'r') as vocab_f:
|
identifier_body
|
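outputids2words, shown earlier in this file, inverts that scheme: any id at or above vocab.size() indexes into the article's OOV list. A matching toy sketch (outputids2words_toy is an illustrative name):

# Toy inverse of the temporary-OOV-id scheme (complement of the sketch above).
def outputids2words_toy(id_list, id_to_word, vocab_size, article_oovs):
    words = []
    for i in id_list:
        if i < vocab_size:
            words.append(id_to_word[i])
        else:
            words.append(article_oovs[i - vocab_size])  # temporary article OOV id
    return words

id_to_word = {0: "[UNK]", 1: "the", 2: "cat"}
assert outputids2words_toy([1, 3, 2], id_to_word, 3, ["zyzzyva"]) == ["the", "zyzzyva", "cat"]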