file_name (string, 4–140 chars) | prefix (string, 0–12.1k chars) | suffix (string, 0–12k chars) | middle (string, 0–7.51k chars) | fim_type (4 classes) |
---|---|---|---|---|
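Each row below is one fill-in-the-middle (FIM) training example: a source file is split into prefix, middle, and suffix, and fim_type records the split strategy (random_line_split, conditional_block, identifier_body, identifier_name). A minimal sketch of how such a row might be joined back into a single training string follows; the build_fim_example helper and the <|fim_*|> sentinel strings are illustrative assumptions, not part of this dataset:

def build_fim_example(row, psm=True):
    """Join one dataset row into a FIM training string.

    psm=True uses prefix-suffix-middle ordering; the sentinels stand in
    for whatever special tokens the target tokenizer actually defines.
    """
    if psm:
        return ("<|fim_prefix|>" + row["prefix"] +
                "<|fim_suffix|>" + row["suffix"] +
                "<|fim_middle|>" + row["middle"])
    # suffix-prefix-middle (SPM) ordering is the common alternative
    return ("<|fim_suffix|>" + row["suffix"] +
            "<|fim_prefix|>" + row["prefix"] +
            "<|fim_middle|>" + row["middle"])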
core.py
|
in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Set of core utilities shared between quantization and svd code """
import re
import tensorflow as tf
from aimet_tensorflow.utils import constants
from aimet_tensorflow.common import op_defs
from aimet_common.utils import AimetLogger
_BIAS_TYPES = ['Add', 'BiasAdd']
# Ops to skip quantization on, eg backprop, etc
_SKIPPED_PREFIXES = ('gradients/', 'RMSProp/', 'Adagrad/', 'Const_', 'HistogramSummary', 'ScalarSummary', 'save/', 'truncated_normal', 'Adam')
# Valid activation ops for quantization end points.
_ACTIVATION_OP_SUFFIXES = ['/Relu6', '/Relu', '/Identity']
# Regular expression for recognizing nodes that are part of batch norm group.
_BATCHNORM_RE = re.compile(r'^(.*)/BatchNorm/batchnorm')
_OP_MAP = op_defs.default_op_map
class OpQuery:
"""
Class for querying a graph's operations and related data.
"""
def __init__(self, graph, op_map=None, ops_to_ignore=None, strict=True):
"""
Constructor
:param graph: The graph to search
:param op_map: The map of operations used to identify op sequences as "one op".
The default op_map used is defined in op_defs.py. Please refer to
that format for passing a custom op_map.
:param ops_to_ignore: List of ops to ignore
:param strict: If strict mode is set to True, queries will only return the last ops
at the end of well-known "op layers" as defined by the op_map. When False,
queries will return ops at the end of well-known layers and, in addition,
all ops which are not "known".
E.g. if a graph contains the op sequence Conv2D, BiasAdd, WeirdOp,
strict mode will return ["BiasAdd"] since it knows that Conv2D+BiasAdd are
one logical "layer". When strict mode is disabled it will return ["BiasAdd", "WeirdOp"].
"""
self._log = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
self._graph = graph
self._strict = strict
if op_map:
self._op_map = op_map
else:
self._op_map = _OP_MAP
if ops_to_ignore:
self._ops_to_ignore = ops_to_ignore
else:
self._ops_to_ignore = []
self._trained_vars = graph.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
@staticmethod
def _is_op_with_weights(op):
"""
Checks if a given op has weights
:param op: TF op
:return: True, if op has weights, False otherwise
"""
return (op.type in constants.OP_WEIGHT_TYPES and
not op.name.startswith(_SKIPPED_PREFIXES))
@classmethod
def get_weights_for_op(cls, op):
"""
Get the weight tensor for a given op
:param op: TF op
:return: Weight tensor for the op
"""
weights = None
if cls._is_op_with_weights(op):
weights = op.inputs[constants.OP_WEIGHT_INDICES[op.type]]
return weights
@staticmethod
def get_bias_for_op(op):
"""
Get bias tensor for the given op
:param op: TF op
:return: Bias tensor for the op
"""
bias = None
if op.type in _BIAS_TYPES:
bias = op.inputs[constants.OP_WEIGHT_INDICES[op.type]]
return bias
|
def get_weight_ops(self, ops=None, skip_bias_op=False):
"""
Get all ops that contain weights. If a list of ops is passed search only ops
from this list. Return the sequenced list of weight ops always with Conv/FC
first, followed by the bias op, if present.
:param ops: List of ops to use (optional)
:param skip_bias_op: If True, bias ops are not included in the returned list (optional)
:return: List of weight ops, Conv/FC first, each followed by its bias op if present
"""
if not ops:
ops = self._graph.get_operations()
ops_with_weights = []
for op in ops:
if self._is_op_with_weights(op):
self._log.debug('Found op w/weights: %s', op.name)
ops_with_weights.append(op)
if not skip_bias_op and self._is_op_with_weights(op):
for consumer in op.outputs[0].consumers():
# Ignore Reshape as it can be placed between MatMul and BiasAdd on Dense layer of Transformer
if consumer.type in ['Reshape'] and len(consumer.outputs[0].consumers()) == 1:
consumer = consumer.outputs[0].consumers()[0]
if consumer.type in _BIAS_TYPES:
self._log.debug('Found op w/bias: %s', consumer.name+'('+consumer.type+')')
ops_with_weights.append(consumer)
reduced_list = [x for x in ops_with_weights if not x.name.startswith(tuple(self._ops_to_ignore))]
return reduced_list
@staticmethod
def get_weight_inputs(ops):
"""
Given a list of ops, returns a corresponding list of the weight indexes for their inputs
:param ops: List of TF ops
:return:
"""
indices = list()
for op in ops:
if op.type not in constants.OP_WEIGHT_INDICES:
raise ValueError('Op type: '+op.type+' does not contain weights!')
indices.append(constants.OP_WEIGHT_INDICES[op.type])
return indices
def _match_ops(self, current_op, candidate_op_list, matched_ops, visited_ops):
"""
Recursive function that helps traverse a network and find matching ops
:param current_op: Current op to traverse downstream from
:param candidate_op_list: Current list of candidate ops that may result in a match
:param matched_ops: List of already found matched_ops
:param visited_ops: List of all ops that have been visited (to cut short duplicate traversals)
:return:
"""
if any(x in current_op.name for x in _SKIPPED_PREFIXES):
return matched_ops
self._log.debug('Processing op: %s (%s) w/current list=%s', current_op.name, current_op.type, candidate_op_list)
candidate_op_list.append(current_op)
match_len, max_len = op_defs.check_match(candidate_op_list, op_map=self._op_map)
self._log.debug('Got match_len: %s and max_len: %s', str(match_len), str(max_len))
if match_len != 0 and match_len == max_len:
# Matched the maximum sequence possible
matched_ops.append(current_op)
op_type_list = [list_op.type for list_op in candidate_op_list]
self._log.info('Found op match w/new op: %s and sequence: %s', current_op.name, str(op_type_list))
candidate_op_list = []
elif match_len == 0:
# A list length > 1 means the current op_list was a match but not the newly added op. Save the previous last
# op from the list
if len(candidate_op_list) > 1:
# Check if indeed the previous op_list is a match
if op_defs.does_sequence_match(candidate_op_list[:-1], op_map=self._op_map):
matched_op = candidate_op_list[-2]
matched_ops.append(matched_op)
op_type_list = [list_op.type for list_op in candidate_op_list[:-1]]
self._log.info('Found op match: %s and sequence: %s', matched_op.name, str(op_type_list))
# Test to see if the current op is a match by itself
candidate_op_list = []
matched_ops = self._match_ops(current_op, candidate_op_list, matched_ops, visited_ops)
return matched_ops
# No match, reset the list
candidate_op_list = []
# There was some match, but not the max match possible. Continue drilling through the
# outputs to the next ops
for tensor in current_op.outputs:
for consumer in tensor.consumers():
if consumer not in visited_ops:
visited_ops.add(consumer)
self._log.info
|
random_line_split
|
|
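The core.py row above defines the OpQuery helper; a hedged usage sketch (the toy graph is illustrative, and the import path for OpQuery is an assumption based on the fragment, not confirmed by it):

import tensorflow as tf
from aimet_tensorflow.utils.op_query import OpQuery  # assumed module path

graph = tf.compat.v1.Graph()
with graph.as_default():
    x = tf.compat.v1.placeholder(tf.float32, [1, 28, 28, 3], name='input')
    y = tf.compat.v1.layers.conv2d(x, filters=8, kernel_size=3, use_bias=True)

query = OpQuery(graph)                    # strict mode by default
weight_ops = query.get_weight_ops()       # Conv2D first, then its BiasAdd
indices = OpQuery.get_weight_inputs([weight_ops[0]])  # weight-input index per op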
config.go
|
// EnableNodePort enables k8s NodePort service implementation in BPF
EnableNodePort bool
// EnableHostPort enables k8s Pod's hostPort mapping through BPF
EnableHostPort bool
// NodePortMode indicates in which mode NodePort implementation should run
// ("snat", "dsr" or "hybrid")
NodePortMode string
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic" or "native")
NodePortAcceleration string
// NodePortHairpin indicates whether the setup is a one-legged LB
NodePortHairpin bool
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection bool
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange bool
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement string
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs bool
// NodePortMin is the minimum port address for the NodePort range
NodePortMin int
// NodePortMax is the maximum port address for the NodePort range
NodePortMax int
// EnableSessionAffinity enables support for service sessionAffinity
EnableSessionAffinity bool
// K8sEnableK8sEndpointSlice enables the k8s EndpointSlice feature that is used
// in kubernetes.
K8sEnableK8sEndpointSlice bool
////////////////////////////// CNI //////////////////////////
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes bool
Devices []string // bpf_host device
////////////////////////////// BPF //////////////////////////
BpfDir string // BPF template files directory
// SockopsEnable specifies whether to enable sockops (socket lookup).
SockopsEnable bool // socket bpf
// BPFCompilationDebug specifies whether to compile BPF programs with
// debugging enabled.
BPFCompilationDebug bool
InstallIptRules bool
}
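// Populate fills the DaemonConfig fields from the viper-backed settings
// (command-line flags, environment variables, and merged config files).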
func (c *DaemonConfig) Populate() {
c.LibDir = viper.GetString(LibDir)
c.BpfDir = filepath.Join(c.LibDir, defaults.BpfDir)
c.AgentHealthPort = viper.GetInt(AgentHealthPort)
c.AgentLabels = viper.GetStringSlice(AgentLabels)
c.AllowICMPFragNeeded = viper.GetBool(AllowICMPFragNeeded)
c.AllowLocalhost = viper.GetString(AllowLocalhost)
c.AnnotateK8sNode = viper.GetBool(AnnotateK8sNode)
c.AutoCreateCiliumNodeResource = viper.GetBool(AutoCreateCiliumNodeResource)
c.BPFCompilationDebug = viper.GetBool(BPFCompileDebugName)
c.BPFRoot = viper.GetString(BPFRoot)
c.CertDirectory = viper.GetString(CertsDirectory)
c.CGroupRoot = viper.GetString(CGroupRoot)
c.ClusterID = viper.GetInt(ClusterIDName)
c.ClusterName = viper.GetString(ClusterName)
c.ClusterMeshConfig = viper.GetString(ClusterMeshConfigName)
c.DatapathMode = viper.GetString(DatapathMode)
c.Debug = viper.GetBool(DebugArg)
c.DebugVerbose = viper.GetStringSlice(DebugVerbose)
c.DirectRoutingDevice = viper.GetString(DirectRoutingDevice)
c.DisableConntrack = viper.GetBool(DisableConntrack)
c.EnableIPv4 = getIPv4Enabled()
c.EnableIPv6 = viper.GetBool(EnableIPv6Name)
c.EnableIPSec = viper.GetBool(EnableIPSecName)
c.EnableWellKnownIdentities = viper.GetBool(EnableWellKnownIdentities)
c.EndpointInterfaceNamePrefix = viper.GetString(EndpointInterfaceNamePrefix)
c.DevicePreFilter = viper.GetString(PrefilterDevice)
c.DisableCiliumEndpointCRD = viper.GetBool(DisableCiliumEndpointCRDName)
c.DisableK8sServices = viper.GetBool(DisableK8sServices)
c.EgressMasqueradeInterfaces = viper.GetString(EgressMasqueradeInterfaces)
c.EnableHostReachableServices = viper.GetBool(EnableHostReachableServices)
c.EnableRemoteNodeIdentity = viper.GetBool(EnableRemoteNodeIdentity)
c.K8sHeartbeatTimeout = viper.GetDuration(K8sHeartbeatTimeout)
c.EnableXTSocketFallback = viper.GetBool(EnableXTSocketFallbackName)
c.EnableAutoDirectRouting = viper.GetBool(EnableAutoDirectRoutingName)
c.EnableEndpointRoutes = viper.GetBool(EnableEndpointRoutes)
c.EnableHealthChecking = viper.GetBool(EnableHealthChecking)
c.EnableEndpointHealthChecking = viper.GetBool(EnableEndpointHealthChecking)
c.EnableLocalNodeRoute = viper.GetBool(EnableLocalNodeRoute)
c.EnablePolicy = strings.ToLower(viper.GetString(EnablePolicy))
c.EnableExternalIPs = viper.GetBool(EnableExternalIPs)
c.EnableL7Proxy = viper.GetBool(EnableL7Proxy)
c.EnableTracing = viper.GetBool(EnableTracing)
c.EnableNodePort = viper.GetBool(EnableNodePort)
c.EnableHostPort = viper.GetBool(EnableHostPort)
c.NodePortMode = viper.GetString(NodePortMode)
c.NodePortAcceleration = viper.GetString(NodePortAcceleration)
c.NodePortBindProtection = viper.GetBool(NodePortBindProtection)
c.EnableAutoProtectNodePortRange = viper.GetBool(EnableAutoProtectNodePortRange)
c.KubeProxyReplacement = viper.GetString(KubeProxyReplacement)
c.EnableSessionAffinity = viper.GetBool(EnableSessionAffinity)
c.EnableHostFirewall = viper.GetBool(EnableHostFirewall)
c.EncryptInterface = viper.GetString(EncryptInterface)
c.EncryptNode = viper.GetBool(EncryptNode)
c.EnvoyLogPath = viper.GetString(EnvoyLog)
c.ForceLocalPolicyEvalAtSource = viper.GetBool(ForceLocalPolicyEvalAtSource)
c.HostDevice = getHostDevice()
c.HTTPIdleTimeout = viper.GetInt(HTTPIdleTimeout)
c.HTTPMaxGRPCTimeout = viper.GetInt(HTTPMaxGRPCTimeout)
c.HTTPRequestTimeout = viper.GetInt(HTTPRequestTimeout)
c.HTTPRetryCount = viper.GetInt(HTTPRetryCount)
c.HTTPRetryTimeout = viper.GetInt(HTTPRetryTimeout)
c.IdentityChangeGracePeriod = viper.GetDuration(IdentityChangeGracePeriod)
c.IPAM = viper.GetString(IPAM)
c.IPv4Range = viper.GetString(IPv4Range)
c.IPv4NodeAddr = viper.GetString(IPv4NodeAddr)
c.IPv4ServiceRange = viper.GetString(IPv4ServiceRange)
c.IPv6ClusterAllocCIDR = viper.GetString(IPv6ClusterAllocCIDRName)
c.IPv6NodeAddr = viper.GetString(IPv6NodeAddr)
c.IPv6Range = viper.GetString(IPv6Range)
c.IPv6ServiceRange = viper.GetString(IPv6ServiceRange)
c.K8sAPIServer = viper.GetString(K8sAPIServer)
c.K8sClientBurst = viper.GetInt(K8sClientBurst)
c.K8sClientQPSLimit = viper.GetFloat64(K8sClientQPSLimit)
c.K8sEnableK8sEndpointSlice = viper.GetBool(K8sEnableEndpointSlice)
c.k8sEnableAPIDiscovery = viper.GetBool(K8sEnableAPIDiscovery)
c.K8sKubeConfigPath = viper.GetString(K8sKubeConfigPath)
c.K8sRequireIPv4PodCIDR = viper.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = viper.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(viper.GetInt(K8sServiceCacheSize))
c.K8sForceJSONPatch = viper.GetBool(K8sForceJSONPatch)
c.K8sEventHandover = viper.GetBool(K8sEventHandover)
c.K8sWatcherQueueSize = uint(viper.GetInt(K8sWatcherQueueSize))
c.K8sWatcherEndpointSelector = viper.GetString(K8sWatcherEndpointSelector)
c.KeepConfig = viper.GetBool(KeepConfig)
c.KVStore = viper.GetString(KVStore)
c.KVstoreLeaseTTL = viper.GetDuration(KVstoreLeaseTTL)
c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor
c.KVstorePeriodicSync = viper.GetDuration(KVstorePeriodicSync)
c.KVstoreConnectivityTimeout = viper.GetDuration(KVstoreConnectivityTimeout)
c.IPAllocationTimeout = viper.GetDuration(IPAllocationTimeout)
c.LabelPrefixFile = viper.GetString(LabelPrefixFile)
c.Labels = viper.GetStringSlice(Labels)
c.LibDir = viper.GetString(LibDir)
c.LogDriver = viper.GetStringSlice(LogDriver)
c.LogSystemLoadConfig = viper.GetBool(LogSystemLoadConfigName)
c.Logstash = viper.GetBool(Logstash)
c.LoopbackIPv4 = viper.GetString(LoopbackIPv4)
c.Masquerade = viper.GetBool(Masquerade)
c.EnableBPFMasquerade = viper.GetBool(EnableBPFMasquerade)
c.EnableBPFClockProbe = viper.GetBool(EnableBPFClockProbe)
c.EnableIPMasqAgent = viper.GetBool(EnableIPMasqAgent)
c.IPMasqAgentConfigPath = viper.GetString(IPMasqAgentConfigPath)
c.InstallIptRules = viper.GetBool(InstallIptRules)
c.IPTablesLockTimeout = viper.GetDuration(IPTablesLock
|
////////////////////////////// Service //////////////////////////
|
random_line_split
|
|
config.go
|
)
c.EndpointInterfaceNamePrefix = viper.GetString(EndpointInterfaceNamePrefix)
c.DevicePreFilter = viper.GetString(PrefilterDevice)
c.DisableCiliumEndpointCRD = viper.GetBool(DisableCiliumEndpointCRDName)
c.DisableK8sServices = viper.GetBool(DisableK8sServices)
c.EgressMasqueradeInterfaces = viper.GetString(EgressMasqueradeInterfaces)
c.EnableHostReachableServices = viper.GetBool(EnableHostReachableServices)
c.EnableRemoteNodeIdentity = viper.GetBool(EnableRemoteNodeIdentity)
c.K8sHeartbeatTimeout = viper.GetDuration(K8sHeartbeatTimeout)
c.EnableXTSocketFallback = viper.GetBool(EnableXTSocketFallbackName)
c.EnableAutoDirectRouting = viper.GetBool(EnableAutoDirectRoutingName)
c.EnableEndpointRoutes = viper.GetBool(EnableEndpointRoutes)
c.EnableHealthChecking = viper.GetBool(EnableHealthChecking)
c.EnableEndpointHealthChecking = viper.GetBool(EnableEndpointHealthChecking)
c.EnableLocalNodeRoute = viper.GetBool(EnableLocalNodeRoute)
c.EnablePolicy = strings.ToLower(viper.GetString(EnablePolicy))
c.EnableExternalIPs = viper.GetBool(EnableExternalIPs)
c.EnableL7Proxy = viper.GetBool(EnableL7Proxy)
c.EnableTracing = viper.GetBool(EnableTracing)
c.EnableNodePort = viper.GetBool(EnableNodePort)
c.EnableHostPort = viper.GetBool(EnableHostPort)
c.NodePortMode = viper.GetString(NodePortMode)
c.NodePortAcceleration = viper.GetString(NodePortAcceleration)
c.NodePortBindProtection = viper.GetBool(NodePortBindProtection)
c.EnableAutoProtectNodePortRange = viper.GetBool(EnableAutoProtectNodePortRange)
c.KubeProxyReplacement = viper.GetString(KubeProxyReplacement)
c.EnableSessionAffinity = viper.GetBool(EnableSessionAffinity)
c.EnableHostFirewall = viper.GetBool(EnableHostFirewall)
c.EncryptInterface = viper.GetString(EncryptInterface)
c.EncryptNode = viper.GetBool(EncryptNode)
c.EnvoyLogPath = viper.GetString(EnvoyLog)
c.ForceLocalPolicyEvalAtSource = viper.GetBool(ForceLocalPolicyEvalAtSource)
c.HostDevice = getHostDevice()
c.HTTPIdleTimeout = viper.GetInt(HTTPIdleTimeout)
c.HTTPMaxGRPCTimeout = viper.GetInt(HTTPMaxGRPCTimeout)
c.HTTPRequestTimeout = viper.GetInt(HTTPRequestTimeout)
c.HTTPRetryCount = viper.GetInt(HTTPRetryCount)
c.HTTPRetryTimeout = viper.GetInt(HTTPRetryTimeout)
c.IdentityChangeGracePeriod = viper.GetDuration(IdentityChangeGracePeriod)
c.IPAM = viper.GetString(IPAM)
c.IPv4Range = viper.GetString(IPv4Range)
c.IPv4NodeAddr = viper.GetString(IPv4NodeAddr)
c.IPv4ServiceRange = viper.GetString(IPv4ServiceRange)
c.IPv6ClusterAllocCIDR = viper.GetString(IPv6ClusterAllocCIDRName)
c.IPv6NodeAddr = viper.GetString(IPv6NodeAddr)
c.IPv6Range = viper.GetString(IPv6Range)
c.IPv6ServiceRange = viper.GetString(IPv6ServiceRange)
c.K8sAPIServer = viper.GetString(K8sAPIServer)
c.K8sClientBurst = viper.GetInt(K8sClientBurst)
c.K8sClientQPSLimit = viper.GetFloat64(K8sClientQPSLimit)
c.K8sEnableK8sEndpointSlice = viper.GetBool(K8sEnableEndpointSlice)
c.k8sEnableAPIDiscovery = viper.GetBool(K8sEnableAPIDiscovery)
c.K8sKubeConfigPath = viper.GetString(K8sKubeConfigPath)
c.K8sRequireIPv4PodCIDR = viper.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = viper.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(viper.GetInt(K8sServiceCacheSize))
c.K8sForceJSONPatch = viper.GetBool(K8sForceJSONPatch)
c.K8sEventHandover = viper.GetBool(K8sEventHandover)
c.K8sWatcherQueueSize = uint(viper.GetInt(K8sWatcherQueueSize))
c.K8sWatcherEndpointSelector = viper.GetString(K8sWatcherEndpointSelector)
c.KeepConfig = viper.GetBool(KeepConfig)
c.KVStore = viper.GetString(KVStore)
c.KVstoreLeaseTTL = viper.GetDuration(KVstoreLeaseTTL)
c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor
c.KVstorePeriodicSync = viper.GetDuration(KVstorePeriodicSync)
c.KVstoreConnectivityTimeout = viper.GetDuration(KVstoreConnectivityTimeout)
c.IPAllocationTimeout = viper.GetDuration(IPAllocationTimeout)
c.LabelPrefixFile = viper.GetString(LabelPrefixFile)
c.Labels = viper.GetStringSlice(Labels)
c.LibDir = viper.GetString(LibDir)
c.LogDriver = viper.GetStringSlice(LogDriver)
c.LogSystemLoadConfig = viper.GetBool(LogSystemLoadConfigName)
c.Logstash = viper.GetBool(Logstash)
c.LoopbackIPv4 = viper.GetString(LoopbackIPv4)
c.Masquerade = viper.GetBool(Masquerade)
c.EnableBPFMasquerade = viper.GetBool(EnableBPFMasquerade)
c.EnableBPFClockProbe = viper.GetBool(EnableBPFClockProbe)
c.EnableIPMasqAgent = viper.GetBool(EnableIPMasqAgent)
c.IPMasqAgentConfigPath = viper.GetString(IPMasqAgentConfigPath)
c.InstallIptRules = viper.GetBool(InstallIptRules)
c.IPTablesLockTimeout = viper.GetDuration(IPTablesLockTimeout)
c.IPSecKeyFile = viper.GetString(IPSecKeyFileName)
c.ModePreFilter = viper.GetString(PrefilterMode)
c.MonitorAggregation = viper.GetString(MonitorAggregationName)
c.MonitorAggregationInterval = viper.GetDuration(MonitorAggregationInterval)
c.MonitorQueueSize = viper.GetInt(MonitorQueueSizeName)
c.MTU = viper.GetInt(MTUName)
c.NAT46Range = viper.GetString(NAT46Range)
c.FlannelMasterDevice = viper.GetString(FlannelMasterDevice)
c.FlannelUninstallOnExit = viper.GetBool(FlannelUninstallOnExit)
c.PProf = viper.GetBool(PProf)
c.PreAllocateMaps = viper.GetBool(PreAllocateMapsName)
c.PrependIptablesChains = viper.GetBool(PrependIptablesChainsName)
c.PrometheusServeAddr = getPrometheusServerAddr()
c.ProxyConnectTimeout = viper.GetInt(ProxyConnectTimeout)
c.BlacklistConflictingRoutes = viper.GetBool(BlacklistConflictingRoutes)
c.ReadCNIConfiguration = viper.GetString(ReadCNIConfiguration)
c.RestoreState = viper.GetBool(Restore)
c.RunDir = viper.GetString(StateDir)
c.SidecarIstioProxyImage = viper.GetString(SidecarIstioProxyImage)
c.UseSingleClusterRoute = viper.GetBool(SingleClusterRouteName)
c.SocketPath = viper.GetString(SocketPath)
c.SockopsEnable = viper.GetBool(SockopsEnableName)
c.TracePayloadlen = viper.GetInt(TracePayloadlen)
c.Tunnel = viper.GetString(TunnelName)
c.Version = viper.GetString(Version)
c.WriteCNIConfigurationWhenReady = viper.GetString(WriteCNIConfigurationWhenReady)
c.PolicyTriggerInterval = viper.GetDuration(PolicyTriggerInterval)
c.CTMapEntriesTimeoutTCP = viper.GetDuration(CTMapEntriesTimeoutTCPName)
c.CTMapEntriesTimeoutAny = viper.GetDuration(CTMapEntriesTimeoutAnyName)
c.CTMapEntriesTimeoutSVCTCP = viper.GetDuration(CTMapEntriesTimeoutSVCTCPName)
c.CTMapEntriesTimeoutSVCAny = viper.GetDuration(CTMapEntriesTimeoutSVCAnyName)
c.CTMapEntriesTimeoutSYN = viper.GetDuration(CTMapEntriesTimeoutSYNName)
c.CTMapEntriesTimeoutFIN = viper.GetDuration(CTMapEntriesTimeoutFINName)
c.PolicyAuditMode = viper.GetBool(PolicyAuditModeArg)
c.EnableIPv4FragmentsTracking = viper.GetBool(EnableIPv4FragmentsTrackingName)
c.FragmentsMapEntries = viper.GetInt(FragmentsMapEntriesName)
}
var (
// Config represents the daemon configuration
Config = &DaemonConfig{}
)
// InitConfig reads in config file and ENV variables if set.
func InitConfig(configName string) func() {
return func() {
Config.ConfigFile = viper.GetString(ConfigFile) // enable ability to specify config file via flag
Config.ConfigDir = viper.GetString(ConfigDir)
viper.SetEnvPrefix("cilium")
// INFO: At startup, --config-dir=/tmp/cilium/config-map is used; each file name in it becomes a key and the file's contents become the value
if Config.ConfigDir != "" {
if _, err := os.Stat(Config.ConfigDir); os.IsNotExist(err) {
log.Fatalf("Non-existent configuration directory %s", Config.ConfigDir)
}
if m, err := ReadDirConfig(Config.ConfigDir); err != nil {
log.Fatalf("Unable to read configuration directory %s: %s", Config.ConfigDir, err)
} else {
err := MergeConfig(m)
if err != nil {
log.Fatalf("Unable to merge configuration: %s", err)
}
}
}
if Config.ConfigFile != "" {
viper.SetConfigFile(Config.Config
|
File)
} else {
viper.SetConfigName(configN
|
conditional_block
|
|
config.go
|
Port enables k8s Pod's hostPort mapping through BPF
EnableHostPort bool
// NodePortMode indicates in which mode NodePort implementation should run
// ("snat", "dsr" or "hybrid")
NodePortMode string
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic" or "native")
NodePortAcceleration string
// NodePortHairpin indicates whether the setup is a one-legged LB
NodePortHairpin bool
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection bool
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange bool
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement string
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs bool
// NodePortMin is the minimum port address for the NodePort range
NodePortMin int
// NodePortMax is the maximum port address for the NodePort range
NodePortMax int
// EnableSessionAffinity enables support for service sessionAffinity
EnableSessionAffinity bool
// K8sEnableK8sEndpointSlice enables the k8s EndpointSlice feature that is used
// in kubernetes.
K8sEnableK8sEndpointSlice bool
////////////////////////////// CNI //////////////////////////
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes bool
Devices []string // bpf_host device
////////////////////////////// BPF //////////////////////////
BpfDir string // BPF template files directory
// SockopsEnable specifies whether to enable sockops (socket lookup).
SockopsEnable bool // socket bpf
// BPFCompilationDebug specifies whether to compile BPF programs with
// debugging enabled.
BPFCompilationDebug bool
InstallIptRules bool
}
func (c *DaemonConfig) Populate()
|
c.DirectRoutingDevice = viper.GetString(DirectRoutingDevice)
c.DisableConntrack = viper.GetBool(DisableConntrack)
c.EnableIPv4 = getIPv4Enabled()
c.EnableIPv6 = viper.GetBool(EnableIPv6Name)
c.EnableIPSec = viper.GetBool(EnableIPSecName)
c.EnableWellKnownIdentities = viper.GetBool(EnableWellKnownIdentities)
c.EndpointInterfaceNamePrefix = viper.GetString(EndpointInterfaceNamePrefix)
c.DevicePreFilter = viper.GetString(PrefilterDevice)
c.DisableCiliumEndpointCRD = viper.GetBool(DisableCiliumEndpointCRDName)
c.DisableK8sServices = viper.GetBool(DisableK8sServices)
c.EgressMasqueradeInterfaces = viper.GetString(EgressMasqueradeInterfaces)
c.EnableHostReachableServices = viper.GetBool(EnableHostReachableServices)
c.EnableRemoteNodeIdentity = viper.GetBool(EnableRemoteNodeIdentity)
c.K8sHeartbeatTimeout = viper.GetDuration(K8sHeartbeatTimeout)
c.EnableXTSocketFallback = viper.GetBool(EnableXTSocketFallbackName)
c.EnableAutoDirectRouting = viper.GetBool(EnableAutoDirectRoutingName)
c.EnableEndpointRoutes = viper.GetBool(EnableEndpointRoutes)
c.EnableHealthChecking = viper.GetBool(EnableHealthChecking)
c.EnableEndpointHealthChecking = viper.GetBool(EnableEndpointHealthChecking)
c.EnableLocalNodeRoute = viper.GetBool(EnableLocalNodeRoute)
c.EnablePolicy = strings.ToLower(viper.GetString(EnablePolicy))
c.EnableExternalIPs = viper.GetBool(EnableExternalIPs)
c.EnableL7Proxy = viper.GetBool(EnableL7Proxy)
c.EnableTracing = viper.GetBool(EnableTracing)
c.EnableNodePort = viper.GetBool(EnableNodePort)
c.EnableHostPort = viper.GetBool(EnableHostPort)
c.NodePortMode = viper.GetString(NodePortMode)
c.NodePortAcceleration = viper.GetString(NodePortAcceleration)
c.NodePortBindProtection = viper.GetBool(NodePortBindProtection)
c.EnableAutoProtectNodePortRange = viper.GetBool(EnableAutoProtectNodePortRange)
c.KubeProxyReplacement = viper.GetString(KubeProxyReplacement)
c.EnableSessionAffinity = viper.GetBool(EnableSessionAffinity)
c.EnableHostFirewall = viper.GetBool(EnableHostFirewall)
c.EncryptInterface = viper.GetString(EncryptInterface)
c.EncryptNode = viper.GetBool(EncryptNode)
c.EnvoyLogPath = viper.GetString(EnvoyLog)
c.ForceLocalPolicyEvalAtSource = viper.GetBool(ForceLocalPolicyEvalAtSource)
c.HostDevice = getHostDevice()
c.HTTPIdleTimeout = viper.GetInt(HTTPIdleTimeout)
c.HTTPMaxGRPCTimeout = viper.GetInt(HTTPMaxGRPCTimeout)
c.HTTPRequestTimeout = viper.GetInt(HTTPRequestTimeout)
c.HTTPRetryCount = viper.GetInt(HTTPRetryCount)
c.HTTPRetryTimeout = viper.GetInt(HTTPRetryTimeout)
c.IdentityChangeGracePeriod = viper.GetDuration(IdentityChangeGracePeriod)
c.IPAM = viper.GetString(IPAM)
c.IPv4Range = viper.GetString(IPv4Range)
c.IPv4NodeAddr = viper.GetString(IPv4NodeAddr)
c.IPv4ServiceRange = viper.GetString(IPv4ServiceRange)
c.IPv6ClusterAllocCIDR = viper.GetString(IPv6ClusterAllocCIDRName)
c.IPv6NodeAddr = viper.GetString(IPv6NodeAddr)
c.IPv6Range = viper.GetString(IPv6Range)
c.IPv6ServiceRange = viper.GetString(IPv6ServiceRange)
c.K8sAPIServer = viper.GetString(K8sAPIServer)
c.K8sClientBurst = viper.GetInt(K8sClientBurst)
c.K8sClientQPSLimit = viper.GetFloat64(K8sClientQPSLimit)
c.K8sEnableK8sEndpointSlice = viper.GetBool(K8sEnableEndpointSlice)
c.k8sEnableAPIDiscovery = viper.GetBool(K8sEnableAPIDiscovery)
c.K8sKubeConfigPath = viper.GetString(K8sKubeConfigPath)
c.K8sRequireIPv4PodCIDR = viper.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = viper.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(viper.GetInt(K8sServiceCacheSize))
c.K8sForceJSONPatch = viper.GetBool(K8sForceJSONPatch)
c.K8sEventHandover = viper.GetBool(K8sEventHandover)
c.K8sWatcherQueueSize = uint(viper.GetInt(K8sWatcherQueueSize))
c.K8sWatcherEndpointSelector = viper.GetString(K8sWatcherEndpointSelector)
c.KeepConfig = viper.GetBool(KeepConfig)
c.KVStore = viper.GetString(KVStore)
c.KVstoreLeaseTTL = viper.GetDuration(KVstoreLeaseTTL)
c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor
c.KVstorePeriodicSync = viper.GetDuration(KVstorePeriodicSync)
c.KVstoreConnectivityTimeout = viper.GetDuration(KVstoreConnectivityTimeout)
c.IPAllocationTimeout = viper.GetDuration(IPAllocationTimeout)
c.LabelPrefixFile = viper.GetString(LabelPrefixFile)
c.Labels = viper.GetStringSlice(Labels)
c.LibDir = viper.GetString(LibDir)
c.LogDriver = viper.GetStringSlice(LogDriver)
c.LogSystemLoadConfig = viper.GetBool(LogSystemLoadConfigName)
c.Logstash = viper.GetBool(Logstash)
c.LoopbackIPv4 = viper.GetString(LoopbackIPv4)
c.Masquerade = viper.GetBool(Masquerade)
c.EnableBPFMasquerade = viper.GetBool(EnableBPFMasquerade)
c.EnableBPFClockProbe = viper.GetBool(EnableBPFClockProbe)
c.EnableIPMasqAgent = viper.GetBool(EnableIPMasqAgent)
c.IPMasqAgentConfigPath = viper.GetString(IPMasqAgentConfigPath)
c.InstallIptRules = viper.GetBool(InstallIptRules)
c.IPTablesLockTimeout = viper.GetDuration(IPTablesLockTimeout)
c.IPSecKeyFile = viper.GetString(IPSecKeyFileName)
c.ModePreFilter = viper.GetString(PrefilterMode)
c.MonitorAggregation
|
{
c.LibDir = viper.GetString(LibDir)
c.BpfDir = filepath.Join(c.LibDir, defaults.BpfDir)
c.AgentHealthPort = viper.GetInt(AgentHealthPort)
c.AgentLabels = viper.GetStringSlice(AgentLabels)
c.AllowICMPFragNeeded = viper.GetBool(AllowICMPFragNeeded)
c.AllowLocalhost = viper.GetString(AllowLocalhost)
c.AnnotateK8sNode = viper.GetBool(AnnotateK8sNode)
c.AutoCreateCiliumNodeResource = viper.GetBool(AutoCreateCiliumNodeResource)
c.BPFCompilationDebug = viper.GetBool(BPFCompileDebugName)
c.BPFRoot = viper.GetString(BPFRoot)
c.CertDirectory = viper.GetString(CertsDirectory)
c.CGroupRoot = viper.GetString(CGroupRoot)
c.ClusterID = viper.GetInt(ClusterIDName)
c.ClusterName = viper.GetString(ClusterName)
c.ClusterMeshConfig = viper.GetString(ClusterMeshConfigName)
c.DatapathMode = viper.GetString(DatapathMode)
c.Debug = viper.GetBool(DebugArg)
c.DebugVerbose = viper.GetStringSlice(DebugVerbose)
|
identifier_body
|
config.go
|
= viper.GetBool(EnableExternalIPs)
c.EnableL7Proxy = viper.GetBool(EnableL7Proxy)
c.EnableTracing = viper.GetBool(EnableTracing)
c.EnableNodePort = viper.GetBool(EnableNodePort)
c.EnableHostPort = viper.GetBool(EnableHostPort)
c.NodePortMode = viper.GetString(NodePortMode)
c.NodePortAcceleration = viper.GetString(NodePortAcceleration)
c.NodePortBindProtection = viper.GetBool(NodePortBindProtection)
c.EnableAutoProtectNodePortRange = viper.GetBool(EnableAutoProtectNodePortRange)
c.KubeProxyReplacement = viper.GetString(KubeProxyReplacement)
c.EnableSessionAffinity = viper.GetBool(EnableSessionAffinity)
c.EnableHostFirewall = viper.GetBool(EnableHostFirewall)
c.EncryptInterface = viper.GetString(EncryptInterface)
c.EncryptNode = viper.GetBool(EncryptNode)
c.EnvoyLogPath = viper.GetString(EnvoyLog)
c.ForceLocalPolicyEvalAtSource = viper.GetBool(ForceLocalPolicyEvalAtSource)
c.HostDevice = getHostDevice()
c.HTTPIdleTimeout = viper.GetInt(HTTPIdleTimeout)
c.HTTPMaxGRPCTimeout = viper.GetInt(HTTPMaxGRPCTimeout)
c.HTTPRequestTimeout = viper.GetInt(HTTPRequestTimeout)
c.HTTPRetryCount = viper.GetInt(HTTPRetryCount)
c.HTTPRetryTimeout = viper.GetInt(HTTPRetryTimeout)
c.IdentityChangeGracePeriod = viper.GetDuration(IdentityChangeGracePeriod)
c.IPAM = viper.GetString(IPAM)
c.IPv4Range = viper.GetString(IPv4Range)
c.IPv4NodeAddr = viper.GetString(IPv4NodeAddr)
c.IPv4ServiceRange = viper.GetString(IPv4ServiceRange)
c.IPv6ClusterAllocCIDR = viper.GetString(IPv6ClusterAllocCIDRName)
c.IPv6NodeAddr = viper.GetString(IPv6NodeAddr)
c.IPv6Range = viper.GetString(IPv6Range)
c.IPv6ServiceRange = viper.GetString(IPv6ServiceRange)
c.K8sAPIServer = viper.GetString(K8sAPIServer)
c.K8sClientBurst = viper.GetInt(K8sClientBurst)
c.K8sClientQPSLimit = viper.GetFloat64(K8sClientQPSLimit)
c.K8sEnableK8sEndpointSlice = viper.GetBool(K8sEnableEndpointSlice)
c.k8sEnableAPIDiscovery = viper.GetBool(K8sEnableAPIDiscovery)
c.K8sKubeConfigPath = viper.GetString(K8sKubeConfigPath)
c.K8sRequireIPv4PodCIDR = viper.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = viper.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(viper.GetInt(K8sServiceCacheSize))
c.K8sForceJSONPatch = viper.GetBool(K8sForceJSONPatch)
c.K8sEventHandover = viper.GetBool(K8sEventHandover)
c.K8sWatcherQueueSize = uint(viper.GetInt(K8sWatcherQueueSize))
c.K8sWatcherEndpointSelector = viper.GetString(K8sWatcherEndpointSelector)
c.KeepConfig = viper.GetBool(KeepConfig)
c.KVStore = viper.GetString(KVStore)
c.KVstoreLeaseTTL = viper.GetDuration(KVstoreLeaseTTL)
c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor
c.KVstorePeriodicSync = viper.GetDuration(KVstorePeriodicSync)
c.KVstoreConnectivityTimeout = viper.GetDuration(KVstoreConnectivityTimeout)
c.IPAllocationTimeout = viper.GetDuration(IPAllocationTimeout)
c.LabelPrefixFile = viper.GetString(LabelPrefixFile)
c.Labels = viper.GetStringSlice(Labels)
c.LibDir = viper.GetString(LibDir)
c.LogDriver = viper.GetStringSlice(LogDriver)
c.LogSystemLoadConfig = viper.GetBool(LogSystemLoadConfigName)
c.Logstash = viper.GetBool(Logstash)
c.LoopbackIPv4 = viper.GetString(LoopbackIPv4)
c.Masquerade = viper.GetBool(Masquerade)
c.EnableBPFMasquerade = viper.GetBool(EnableBPFMasquerade)
c.EnableBPFClockProbe = viper.GetBool(EnableBPFClockProbe)
c.EnableIPMasqAgent = viper.GetBool(EnableIPMasqAgent)
c.IPMasqAgentConfigPath = viper.GetString(IPMasqAgentConfigPath)
c.InstallIptRules = viper.GetBool(InstallIptRules)
c.IPTablesLockTimeout = viper.GetDuration(IPTablesLockTimeout)
c.IPSecKeyFile = viper.GetString(IPSecKeyFileName)
c.ModePreFilter = viper.GetString(PrefilterMode)
c.MonitorAggregation = viper.GetString(MonitorAggregationName)
c.MonitorAggregationInterval = viper.GetDuration(MonitorAggregationInterval)
c.MonitorQueueSize = viper.GetInt(MonitorQueueSizeName)
c.MTU = viper.GetInt(MTUName)
c.NAT46Range = viper.GetString(NAT46Range)
c.FlannelMasterDevice = viper.GetString(FlannelMasterDevice)
c.FlannelUninstallOnExit = viper.GetBool(FlannelUninstallOnExit)
c.PProf = viper.GetBool(PProf)
c.PreAllocateMaps = viper.GetBool(PreAllocateMapsName)
c.PrependIptablesChains = viper.GetBool(PrependIptablesChainsName)
c.PrometheusServeAddr = getPrometheusServerAddr()
c.ProxyConnectTimeout = viper.GetInt(ProxyConnectTimeout)
c.BlacklistConflictingRoutes = viper.GetBool(BlacklistConflictingRoutes)
c.ReadCNIConfiguration = viper.GetString(ReadCNIConfiguration)
c.RestoreState = viper.GetBool(Restore)
c.RunDir = viper.GetString(StateDir)
c.SidecarIstioProxyImage = viper.GetString(SidecarIstioProxyImage)
c.UseSingleClusterRoute = viper.GetBool(SingleClusterRouteName)
c.SocketPath = viper.GetString(SocketPath)
c.SockopsEnable = viper.GetBool(SockopsEnableName)
c.TracePayloadlen = viper.GetInt(TracePayloadlen)
c.Tunnel = viper.GetString(TunnelName)
c.Version = viper.GetString(Version)
c.WriteCNIConfigurationWhenReady = viper.GetString(WriteCNIConfigurationWhenReady)
c.PolicyTriggerInterval = viper.GetDuration(PolicyTriggerInterval)
c.CTMapEntriesTimeoutTCP = viper.GetDuration(CTMapEntriesTimeoutTCPName)
c.CTMapEntriesTimeoutAny = viper.GetDuration(CTMapEntriesTimeoutAnyName)
c.CTMapEntriesTimeoutSVCTCP = viper.GetDuration(CTMapEntriesTimeoutSVCTCPName)
c.CTMapEntriesTimeoutSVCAny = viper.GetDuration(CTMapEntriesTimeoutSVCAnyName)
c.CTMapEntriesTimeoutSYN = viper.GetDuration(CTMapEntriesTimeoutSYNName)
c.CTMapEntriesTimeoutFIN = viper.GetDuration(CTMapEntriesTimeoutFINName)
c.PolicyAuditMode = viper.GetBool(PolicyAuditModeArg)
c.EnableIPv4FragmentsTracking = viper.GetBool(EnableIPv4FragmentsTrackingName)
c.FragmentsMapEntries = viper.GetInt(FragmentsMapEntriesName)
}
var (
// Config represents the daemon configuration
Config = &DaemonConfig{}
)
// InitConfig reads in config file and ENV variables if set.
func InitConfig(configName string) func() {
return func() {
Config.ConfigFile = viper.GetString(ConfigFile) // enable ability to specify config file via flag
Config.ConfigDir = viper.GetString(ConfigDir)
viper.SetEnvPrefix("cilium")
// INFO: At startup, --config-dir=/tmp/cilium/config-map is used; each file name in it becomes a key and the file's contents become the value
if Config.ConfigDir != "" {
if _, err := os.Stat(Config.ConfigDir); os.IsNotExist(err) {
log.Fatalf("Non-existent configuration directory %s", Config.ConfigDir)
}
if m, err := ReadDirConfig(Config.ConfigDir); err != nil {
log.Fatalf("Unable to read configuration directory %s: %s", Config.ConfigDir, err)
} else {
err := MergeConfig(m)
if err != nil {
log.Fatalf("Unable to merge configuration: %s", err)
}
}
}
if Config.ConfigFile != "" {
viper.SetConfigFile(Config.ConfigFile)
} else {
viper.SetConfigName(configName) // name of config file (without extension)
viper.AddConfigPath("$HOME") // adding home directory as first search path
}
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
log.WithField(logfields.Path, viper.ConfigFileUsed()).
Info("Using config from file")
} else if Config.ConfigFile != "" {
log.WithField(logfields.Path, Config.ConfigFile).
Fatal("Error reading config file")
} else {
log.WithField(logfields.Reason, err).Info("Skipped reading configuration file")
}
}
}
// MergeConfig merges the given configuration map with viper's configuration.
func MergeConfig(m map[string]interface{}) error {
err := viper.MergeConfigMap(m)
if err != nil {
return fmt.Errorf("unable to read merge directory configuration: %s", err)
}
return nil
}
// ReadDirConfig reads the given directory and returns a map that maps the
// filename to the contents of that file.
func ReadDirConfig(dirName string) (map[str
|
ing]interface
|
identifier_name
|
|
extract_patch.py
|
(evals, evecs) = np.linalg.eig(M2x2)
l1, l2 = evals
v1, v2 = evecs
return l1, l2, v1, v2
#-----------------------
# INPUT
#-----------------------
# We will call perdoch's invA = invV
print('--------------------------------')
print('Let V = Perdoch.A')
print('Let Z = Perdoch.E')
print('--------------------------------')
print('Input from Perdoch\'s detector: ')
# We are given the keypoint in invA format
(x, y, ia11, ia21, ia22), ia12 = kp, 0
invV = np.array([[ia11, ia12],
[ia21, ia22]])
V = np.linalg.inv(invV)
# <HACK>
#invV = V / np.linalg.det(V)
#V = np.linalg.inv(V)
# </HACK>
Z = (V.T).dot(V)
print('invV is a transform from points on a unit-circle to the ellipse')
helpers.horiz_print('invV = ', invV)
print('--------------------------------')
print('V is a transformation from points on the ellipse to a unit circle')
helpers.horiz_print('V = ', V)
print('--------------------------------')
print('Points on the ellipse satisfy (x).T.dot(Z).dot(x) = 1')
print('where Z = (V.T).dot(V)')
helpers.horiz_print('Z = ', Z)
# Define points on a unit circle
theta_list = np.linspace(0, tau, 50)
cicrle_pts = np.array([(np.cos(t), np.sin(t)) for t in theta_list])
# Transform those points to the ellipse using invV
ellipse_pts1 = invV.dot(cicrle_pts.T).T
# Transform those points to the ellipse using V
ellipse_pts2 = V.dot(cicrle_pts.T).T
#Lets check our assertion: (x_).T.dot(Z).dot(x_) = 1
checks1 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts1]
checks2 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts2]
assert all([abs(1 - check) < 1E-11 for check in checks1])
#assert all([abs(1 - check) < 1E-11 for check in checks2])
print('... all of our plotted points satisfy this')
#=======================
# THE CONIC SECTION
#=======================
# All of this was from the Perdoch paper, now lets move into conic sections
# We will use the notation from wikipedia
# http://en.wikipedia.org/wiki/Conic_section
# http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections
#-----------------------
# MATRIX REPRESENTATION
#-----------------------
# The matrix representation of a conic is:
(A, B2, B2_, C) = Z.flatten()
(D, E, F) = (0, 0, 1)
B = B2 * 2
assert B2 == B2_, 'matrix should be symmetric'
print('--------------------------------')
print('Now, using wikipedia\'s matrix representation of a conic.')
con = np.array(((' A', 'B / 2', 'D / 2'),
('B / 2', ' C', 'E / 2'),
('D / 2', 'E / 2', ' F')))
helpers.horiz_print('A matrix A_Q = ', con)
# A_Q is our conic section (aka ellipse matrix)
A_Q = np.array((( A, B / 2, D / 2),
(B / 2, C, E / 2),
(D / 2, E / 2, F)))
helpers.horiz_print('A_Q = ', A_Q)
#-----------------------
# DEGENERATE CONICS
#-----------------------
print('----------------------------------')
print('As long as det(A_Q) != 0, it is not degenerate.')
print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
assert np.linalg.det(A_Q) != 0, 'degenerate conic'
A_33 = np.array((( A, B / 2),
(B / 2, C)))
helpers.horiz_print('A_33 = ', A_33)
#-----------------------
# CONIC CLASSIFICATION
#-----------------------
print('----------------------------------')
print('The determinant of the minor classifies the type of conic it is')
print('(det == 0): parabola, (det < 0): hyperbola, (det > 0): ellipse')
print('det(A_33) = %s' % str(np.linalg.det(A_33)))
assert np.linalg.det(A_33) > 0, 'conic is not an ellipse'
print('... this is indeed an ellipse')
#-----------------------
# CONIC CENTER
#-----------------------
print('----------------------------------')
print('the center of the ellipse is obtained by: ')
print('x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)')
print('y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)')
# Centers are obtained by solving for where the gradient of the quadratic
# becomes 0. Without going through the derivation the calculation is...
# These should be 0, 0 if we are at the origin, or our original x, y
# coordinate specified by the keypoints. I'm doing the calculation just for
# shits and giggles
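# (Added derivation sketch: with Q(x, y) = A*x**2 + B*x*y + C*y**2 + D*x + E*y + F,
# setting the gradient to zero gives dQ/dx = 2*A*x + B*y + D = 0 and
# dQ/dy = B*x + 2*C*y + E = 0; solving that 2x2 linear system, whose
# determinant is 4*A*C - B**2, yields exactly the two formulas below.)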
x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)
y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)
helpers.horiz_print('x_center = ', x_center)
helpers.horiz_print('y_center = ', y_center)
#-----------------------
# MAJOR AND MINOR AXES
#-----------------------
# Now we are going to determine the major and minor axes
# of this beast. It is just the center augmented by the eigenvectors
print('----------------------------------')
# The angle between the major axis and our x axis is:
l1, l2, v1, v2 = _2x2_eig(A_33)
x_axis = np.array([1, 0])
theta = np.arccos(x_axis.dot(v1))
# The eccentricity is determined by:
nu = 1
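# nu plays the role of the eta parameter in wikipedia's eccentricity formula;
# taking nu = 1 here is an assumption matching the ellipse case verified above.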
numer = 2 * np.sqrt((A - C) ** 2 + B ** 2)
denom = nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2)
eccentricity = np.sqrt(numer / denom)
from scipy.special import ellipeinc
#-----------------------
# DRAWING
#-----------------------
# Let's start off by drawing the ellipse that we are going to work with
# Create unit circle sample
# Draw the keypoint using the tried and true df2
# Other things should subsequently align
df2.draw_kpts2(np.array([(0, 0, ia11, ia21, ia22)]), ell_linewidth=4,
ell_color=df2.DEEP_PINK, ell_alpha=1, arrow=True, rect=True)
# Plot ellipse points
_plotpts(ellipse_pts1, 0, df2.YELLOW, label='invV.dot(cicrle_pts.T).T')
# Plot ellipse axis
# !HELP! I DO NOT KNOW WHY I HAVE TO DIVIDE, SQUARE ROOT, AND NEGATE!!!
l1, l2, v1, v2 = _2x2_eig(A_33)
dx1, dy1 = (v1 / np.sqrt(l1))
dx2, dy2 = (v2 / np.sqrt(l2))
_plotarrow(0, 0, dx1, -dy1, color=df2.ORANGE, label='ellipse axis')
_plotarrow(0, 0, dx2, -dy2, color=df2.ORANGE)
# Plot ellipse orientation
orient_axis = invV.dot(np.eye(2))
dx1, dx2, dy1, dy2 = orient_axis.flatten()
_plotarrow(0, 0, dx1, dy1, color=df2.BLUE, label='ellipse rotation')
_plotarrow(0, 0, dx2, dy2, color=df2.BLUE)
df
|
arrow.set_facecolor(color)
ax.add_patch(arrow)
df2.update()
def _2x2_eig(M2x2):
|
random_line_split
|
|
extract_patch.py
|
invV = ', invV)
print('--------------------------------')
print('V is a transformation from points on the ellipse to a unit circle')
helpers.horiz_print('V = ', V)
print('--------------------------------')
print('Points on the ellipse satisfy (x).T.dot(Z).dot(x) = 1')
print('where Z = (V.T).dot(V)')
helpers.horiz_print('Z = ', Z)
# Define points on a unit circle
theta_list = np.linspace(0, tau, 50)
cicrle_pts = np.array([(np.cos(t), np.sin(t)) for t in theta_list])
# Transform those points to the ellipse using invV
ellipse_pts1 = invV.dot(cicrle_pts.T).T
# Transform those points to the ellipse using V
ellipse_pts2 = V.dot(cicrle_pts.T).T
#Lets check our assertion: (x_).T.dot(Z).dot(x_) = 1
checks1 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts1]
checks2 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts2]
assert all([abs(1 - check) < 1E-11 for check in checks1])
#assert all([abs(1 - check) < 1E-11 for check in checks2])
print('... all of our plotted points satisfy this')
#=======================
# THE CONIC SECTION
#=======================
# All of this was from the Perdoch paper, now lets move into conic sections
# We will use the notation from wikipedia
# http://en.wikipedia.org/wiki/Conic_section
# http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections
#-----------------------
# MATRIX REPRESENTATION
#-----------------------
# The matrix representation of a conic is:
(A, B2, B2_, C) = Z.flatten()
(D, E, F) = (0, 0, 1)
B = B2 * 2
assert B2 == B2_, 'matrix should be symmetric'
print('--------------------------------')
print('Now, using wikipedia\'s matrix representation of a conic.')
con = np.array(((' A', 'B / 2', 'D / 2'),
('B / 2', ' C', 'E / 2'),
('D / 2', 'E / 2', ' F')))
helpers.horiz_print('A matrix A_Q = ', con)
# A_Q is our conic section (aka ellipse matrix)
A_Q = np.array((( A, B / 2, D / 2),
(B / 2, C, E / 2),
(D / 2, E / 2, F)))
helpers.horiz_print('A_Q = ', A_Q)
#-----------------------
# DEGENERATE CONICS
#-----------------------
print('----------------------------------')
print('As long as det(A_Q) != 0, it is not degenerate.')
print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
assert np.linalg.det(A_Q) != 0, 'degenerate conic'
A_33 = np.array((( A, B / 2),
(B / 2, C)))
helpers.horiz_print('A_33 = ', A_33)
#-----------------------
# CONIC CLASSIFICATION
#-----------------------
print('----------------------------------')
print('The determinant of the minor classifies the type of conic it is')
print('(det == 0): parabola, (det < 0): hyperbola, (det > 0): ellipse')
print('det(A_33) = %s' % str(np.linalg.det(A_33)))
assert np.linalg.det(A_33) > 0, 'conic is not an ellipse'
print('... this is indeed an ellipse')
#-----------------------
# CONIC CENTER
#-----------------------
print('----------------------------------')
print('the center of the ellipse is obtained by: ')
print('x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)')
print('y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)')
# Centers are obtained by solving for where the gradient of the quadratic
# becomes 0. Without going through the derivation the calculation is...
# These should be 0, 0 if we are at the origin, or our original x, y
# coordinate specified by the keypoints. I'm doing the calculation just for
# shits and giggles
x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)
y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)
helpers.horiz_print('x_center = ', x_center)
helpers.horiz_print('y_center = ', y_center)
#-----------------------
# MAJOR AND MINOR AXES
#-----------------------
# Now we are going to determine the major and minor axes
# of this beast. It is just the center augmented by the eigenvectors
print('----------------------------------')
# The angle between the major axis and our x axis is:
l1, l2, v1, v2 = _2x2_eig(A_33)
x_axis = np.array([1, 0])
theta = np.arccos(x_axis.dot(v1))
# The eccentricity is determined by:
nu = 1
numer = 2 * np.sqrt((A - C) ** 2 + B ** 2)
denom = nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2)
eccentricity = np.sqrt(numer / denom)
from scipy.special import ellipeinc
#-----------------------
# DRAWING
#-----------------------
# Let's start off by drawing the ellipse that we are going to work with
# Create unit circle sample
# Draw the keypoint using the tried and true df2
# Other things should subsequently align
df2.draw_kpts2(np.array([(0, 0, ia11, ia21, ia22)]), ell_linewidth=4,
ell_color=df2.DEEP_PINK, ell_alpha=1, arrow=True, rect=True)
# Plot ellipse points
_plotpts(ellipse_pts1, 0, df2.YELLOW, label='invV.dot(cicrle_pts.T).T')
# Plot ellipse axis
# !HELP! I DO NOT KNOW WHY I HAVE TO DIVIDE, SQUARE ROOT, AND NEGATE!!!
l1, l2, v1, v2 = _2x2_eig(A_33)
dx1, dy1 = (v1 / np.sqrt(l1))
dx2, dy2 = (v2 / np.sqrt(l2))
_plotarrow(0, 0, dx1, -dy1, color=df2.ORANGE, label='ellipse axis')
_plotarrow(0, 0, dx2, -dy2, color=df2.ORANGE)
# Plot ellipse orientation
orient_axis = invV.dot(np.eye(2))
dx1, dx2, dy1, dy2 = orient_axis.flatten()
_plotarrow(0, 0, dx1, dy1, color=df2.BLUE, label='ellipse rotation')
_plotarrow(0, 0, dx2, dy2, color=df2.BLUE)
df2.legend()
df2.dark_background()
df2.gca().invert_yaxis()
return locals()
# Algebraic form of connic
#assert (a * (x ** 2)) + (b * (x * y)) + (c * (y ** 2)) + (d * x) + (e * y) + (f) == 0
def get_kp_border(rchip, kp):
np.set_printoptions(precision=8)
df2.reset()
df2.figure(9003, docla=True, doclf=True)
def _plotpts(data, px, color=df2.BLUE, label=''):
#df2.figure(9003, docla=True, pnum=(1, 1, px))
df2.plot2(data.T[0], data.T[1], '-', '', color=color, label=label)
df2.update()
def _plotarrow(x, y, dx, dy, color=df2.BLUE, label=''):
ax = df2.gca()
arrowargs = dict(head_width=.5, length_includes_head=True, label='')
arrow = df2.FancyArrow(x, y, dx, dy, **arrowargs)
arrow.set_edgecolor(color)
arrow.set_facecolor(color)
ax.add_patch(arrow)
df2.update()
def
|
_2x2_eig
|
identifier_name
|
|
extract_patch.py
|
else:
patch, subkp = get_patch(rchip, kp)
#print('[extract] kp = '+str(kp))
#print('[extract] subkp = '+str(subkp))
#print('[extract] patch.shape = %r' % (patch.shape,))
color = (0, 0, 1)
fig, ax = df2.imshow(patch, **kwargs)
df2.draw_kpts2([subkp], ell_color=color, pts=True)
if sift is not None:
df2.draw_sift(sift, [subkp])
return ax
#df2.draw_border(df2.gca(), color, 1)
def get_aff_to_unit_circle(a, c, d):
invA = np.array([[a, 0, 0],
[c, d, 0],
[0, 0, 1]])
# kp is given in invA format. Convert to A
A = np.linalg.inv(invA)
return A
def get_translation(x, y):
T = np.array([[1, 0, x],
[0, 1, y],
[0, 0, 1]])
return T
def get_scale(ss):
S = np.array([[ss, 0, 0],
[0, ss, 0],
[0, 0, 1]])
return S
def get_warped_patch(rchip, kp):
'Returns warped patch around a keypoint'
(x, y, a, c, d) = kp
sfx, sfy = kp2_sf(kp)
s = 41 # sf
ss = sqrt(s) * 3
(h, w) = rchip.shape[0:2]
# Translate to origin(0,0) = (x,y)
T = get_translation(-x, -y)
A = get_aff_to_unit_circle(a, c, d)
S = get_scale(ss)
X = get_translation(s / 2, s / 2)
rchip_h, rchip_w = rchip.shape[0:2]
dsize = np.array(np.ceil(np.array([s, s])), dtype=int)
M = X.dot(S).dot(A).dot(T)
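# Net effect of M, applied right to left: T moves the keypoint to the origin,
# A maps its ellipse onto a unit circle, S scales that circle up, and
# X re-centers the result inside the s x s output patch.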
cv2_flags = cv2.INTER_LANCZOS4
cv2_borderMode = cv2.BORDER_CONSTANT
cv2_warp_kwargs = {'flags': cv2_flags, 'borderMode': cv2_borderMode}
warped_patch = cv2.warpAffine(rchip, M[0:2], tuple(dsize), **cv2_warp_kwargs)
#warped_patch = cv2.warpPerspective(rchip, M, dsize, **__cv2_warp_kwargs())
wkp = np.array([(s / 2, s / 2, ss, 0., ss)])
return warped_patch, wkp
def in_depth_ellipse2x2(rchip, kp):
#-----------------------
# SETUP
#-----------------------
from hotspotter import draw_func2 as df2
np.set_printoptions(precision=8)
tau = 2 * np.pi
df2.reset()
df2.figure(9003, docla=True, doclf=True)
ax = df2.gca()
ax.invert_yaxis()
def _plotpts(data, px, color=df2.BLUE, label=''):
#df2.figure(9003, docla=True, pnum=(1, 1, px))
df2.plot2(data.T[0], data.T[1], '.', '', color=color, label=label)
df2.update()
def _plotarrow(x, y, dx, dy, color=df2.BLUE, label=''):
ax = df2.gca()
arrowargs = dict(head_width=.5, length_includes_head=True, label=label)
arrow = df2.FancyArrow(x, y, dx, dy, **arrowargs)
arrow.set_edgecolor(color)
arrow.set_facecolor(color)
ax.add_patch(arrow)
df2.update()
def _2x2_eig(M2x2):
(evals, evecs) = np.linalg.eig(M2x2)
l1, l2 = evals
v1, v2 = evecs
return l1, l2, v1, v2
#-----------------------
# INPUT
#-----------------------
# We will call perdoch's invA = invV
print('--------------------------------')
print('Let V = Perdoch.A')
print('Let Z = Perdoch.E')
print('--------------------------------')
print('Input from Perdoch\'s detector: ')
# We are given the keypoint in invA format
(x, y, ia11, ia21, ia22), ia12 = kp, 0
invV = np.array([[ia11, ia12],
[ia21, ia22]])
V = np.linalg.inv(invV)
# <HACK>
#invV = V / np.linalg.det(V)
#V = np.linalg.inv(V)
# </HACK>
Z = (V.T).dot(V)
print('invV is a transform from points on a unit-circle to the ellipse')
helpers.horiz_print('invV = ', invV)
print('--------------------------------')
print('V is a transformation from points on the ellipse to a unit circle')
helpers.horiz_print('V = ', V)
print('--------------------------------')
print('Points on the ellipse satisfy (x).T.dot(Z).dot(x) = 1')
print('where Z = (V.T).dot(V)')
helpers.horiz_print('Z = ', Z)
# Define points on a unit circle
theta_list = np.linspace(0, tau, 50)
cicrle_pts = np.array([(np.cos(t), np.sin(t)) for t in theta_list])
# Transform those points to the ellipse using invV
ellipse_pts1 = invV.dot(cicrle_pts.T).T
# Transform those points to the ellipse using V
ellipse_pts2 = V.dot(cicrle_pts.T).T
#Lets check our assertion: (x_).T.dot(Z).dot(x_) = 1
checks1 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts1]
checks2 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts2]
assert all([abs(1 - check) < 1E-11 for check in checks1])
#assert all([abs(1 - check) < 1E-11 for check in checks2])
print('... all of our plotted points satisfy this')
#=======================
# THE CONIC SECTION
#=======================
# All of this was from the Perdoch paper, now lets move into conic sections
# We will use the notation from wikipedia
# http://en.wikipedia.org/wiki/Conic_section
# http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections
#-----------------------
# MATRIX REPRESENTATION
#-----------------------
# The matrix representation of a conic is:
(A, B2, B2_, C) = Z.flatten()
(D, E, F) = (0, 0, 1)
B = B2 * 2
assert B2 == B2_, 'matrix should be symmetric'
print('--------------------------------')
print('Now, using wikipedia\'s matrix representation of a conic.')
con = np.array(((' A', 'B / 2', 'D / 2'),
('B / 2', ' C', 'E / 2'),
('D / 2', 'E / 2', ' F')))
helpers.horiz_print('A matrix A_Q = ', con)
# A_Q is our conic section (aka ellipse matrix)
A_Q = np.array((( A, B / 2, D / 2),
(B / 2, C, E / 2),
(D / 2, E / 2, F)))
helpers.horiz_print('A_Q = ', A_Q)
#-----------------------
# DEGENERATE CONICS
#-----------------------
print('----------------------------------')
print('As long as det(A_Q) != 0, it is not degenerate.')
print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
assert np.linalg.det(A_Q) != 0, 'degenerate conic'
A_33 = np.array((( A, B / 2),
(B / 2, C)))
helpers.horiz_print('A_33 = ', A_33)
#-----------------------
# CONIC CLASSIFICATION
#-----------------------
print('----------------------------------')
print('The determinant of the minor classifies the type of conic it is')
print('(det == 0): parabola, (det < 0): hyperb
|
wpatch, wkp = get_warped_patch(rchip, kp)
patch = wpatch
subkp = wkp
|
conditional_block
|
|
extract_patch.py
|
df2.update()
def _2x2_eig(M2x2):
(evals, evecs) = np.linalg.eig(M2x2)
l1, l2 = evals
v1, v2 = evecs
return l1, l2, v1, v2
#-----------------------
# INPUT
#-----------------------
# We will call perdoch's invA = invV
print('--------------------------------')
print('Let V = Perdoch.A')
print('Let Z = Perdoch.E')
print('--------------------------------')
print('Input from Perdoch\'s detector: ')
# We are given the keypoint in invA format
(x, y, ia11, ia21, ia22), ia12 = kp, 0
invV = np.array([[ia11, ia12],
[ia21, ia22]])
V = np.linalg.inv(invV)
# <HACK>
#invV = V / np.linalg.det(V)
#V = np.linalg.inv(V)
# </HACK>
Z = (V.T).dot(V)
print('invV is a transform from points on a unit-circle to the ellipse')
helpers.horiz_print('invV = ', invV)
print('--------------------------------')
print('V is a transformation from points on the ellipse to a unit circle')
helpers.horiz_print('V = ', V)
print('--------------------------------')
print('Points on the ellipse satisfy (x).T.dot(Z).dot(x) = 1')
print('where Z = (V.T).dot(V)')
helpers.horiz_print('Z = ', Z)
# Define points on a unit circle
theta_list = np.linspace(0, tau, 50)
circle_pts = np.array([(np.cos(t), np.sin(t)) for t in theta_list])
# Transform those points onto the ellipse using invV
ellipse_pts1 = invV.dot(circle_pts.T).T
# Transform those points using V (for comparison)
ellipse_pts2 = V.dot(circle_pts.T).T
# Let's check our assertion: (x_).T.dot(Z).dot(x_) = 1
checks1 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts1]
checks2 = [x_.T.dot(Z).dot(x_) for x_ in ellipse_pts2]
assert all([abs(1 - check) < 1E-11 for check in checks1])
#assert all([abs(1 - check) < 1E-11 for check in checks2])
print('... all of our plotted points satisfy this')
#=======================
# THE CONIC SECTION
#=======================
# All of this was from the Perdoch paper, now lets move into conic sections
# We will use the notation from wikipedia
# http://en.wikipedia.org/wiki/Conic_section
# http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections
#-----------------------
# MATRIX REPRESENTATION
#-----------------------
# The matrix representation of a conic is:
(A, B2, B2_, C) = Z.flatten()
(D, E, F) = (0, 0, 1)
B = B2 * 2
assert B2 == B2_, 'matrix should be symmetric'
print('--------------------------------')
print('Now, using wikipedia\'s matrix representation of a conic.')
con = np.array(((' A', 'B / 2', 'D / 2'),
('B / 2', ' C', 'E / 2'),
('D / 2', 'E / 2', ' F')))
helpers.horiz_print('A matrix A_Q = ', con)
# A_Q is our conic section (aka ellipse matrix)
A_Q = np.array((( A, B / 2, D / 2),
(B / 2, C, E / 2),
(D / 2, E / 2, F)))
helpers.horiz_print('A_Q = ', A_Q)
#-----------------------
# DEGENERATE CONICS
#-----------------------
print('----------------------------------')
print('As long as det(A_Q) != 0, it is not degenerate.')
print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
assert np.linalg.det(A_Q) != 0, 'degenerate conic'
A_33 = np.array((( A, B / 2),
(B / 2, C)))
helpers.horiz_print('A_33 = ', A_33)
#-----------------------
# CONIC CLASSIFICATION
#-----------------------
print('----------------------------------')
print('The determinant of the minor classifies the type of conic it is')
print('(det == 0): parabola, (det < 0): hyperbola, (det > 0): ellipse')
print('det(A_33) = %s' % str(np.linalg.det(A_33)))
assert np.linalg.det(A_33) > 0, 'conic is not an ellipse'
print('... this is indeed an ellipse')
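# --- Added sketch (not part of the original script) ---
# The same minor-determinant test wrapped as a tiny helper for clarity;
# the name `classify_conic` is hypothetical, purely for illustration:
def classify_conic(A, B, C):
    det_A33 = A * C - (B / 2.0) ** 2
    if det_A33 > 0:
        return 'ellipse'
    return 'parabola' if det_A33 == 0 else 'hyperbola'
# e.g. classify_conic(1, 0, 1) == 'ellipse', classify_conic(1, 0, -1) == 'hyperbola'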
#-----------------------
# CONIC CENTER
#-----------------------
print('----------------------------------')
print('the center of the ellipse is obtained by: ')
print('x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)')
print('y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)')
# The center is obtained by solving for where the gradient of the quadratic
# becomes 0. Without going through the derivation, the calculation is below.
# These should be (0, 0) since the keypoint is centered at the origin here
# (in general they would be the original x, y coordinates of the keypoint).
# The calculation is done purely as a sanity check.
x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)
y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)
helpers.horiz_print('x_center = ', x_center)
helpers.horiz_print('y_center = ', y_center)
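# --- Added check (a sketch, not part of the original script) ---
# The center formulas come from setting the gradient of
# Q(x, y) = A*x**2 + B*x*y + C*y**2 + D*x + E*y + F to zero, i.e. solving
# [[2A, B], [B, 2C]] . [x, y] = [-D, -E]; numpy should agree:
_center = np.linalg.solve(np.array([[2 * A, B], [B, 2 * C]]), np.array([-D, -E]))
assert np.allclose(_center, [x_center, y_center])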
#-----------------------
# MAJOR AND MINOR AXES
#-----------------------
# Now we are going to determine the major and minor axes
# of this beast. They are just the center augmented by the eigenvectors.
print('----------------------------------')
# The angle between the major axis and our x axis is:
l1, l2, v1, v2 = _2x2_eig(A_33)
x_axis = np.array([1, 0])
theta = np.arccos(x_axis.dot(v1))
# The eccentricity is determined by the formula below
# (nu = 1 here since we already verified this conic is an ellipse):
nu = 1
numer = 2 * np.sqrt((A - C) ** 2 + B ** 2)
denom = nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2)
eccentricity = np.sqrt(numer / denom)
from scipy.special import ellipeinc
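# --- Added sketch (not part of the original script) ---
# The scipy import above hints at computing ellipse arc length. A minimal
# sketch, assuming the semi-axes follow from the eigenvalues of Z (points
# with x.T.dot(Z).dot(x) = 1 lie at distance 1/sqrt(eigenvalue) along each
# eigenvector):
from scipy.special import ellipe
_el1, _el2, _, _ = _2x2_eig(Z)
_a, _b = sorted([1 / np.sqrt(_el1), 1 / np.sqrt(_el2)], reverse=True)
# perimeter of an ellipse: 4 * a * E(m) with parameter m = 1 - (b/a)**2
_perimeter = 4 * _a * ellipe(1 - (_b / _a) ** 2)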
#-----------------------
# DRAWING
#-----------------------
# Let's start off by drawing the ellipse that we are going to work with
# Create unit circle sample
# Draw the keypoint using the tried and true df2
# Other things should subsequently align
df2.draw_kpts2(np.array([(0, 0, ia11, ia21, ia22)]), ell_linewidth=4,
ell_color=df2.DEEP_PINK, ell_alpha=1, arrow=True, rect=True)
# Plot ellipse points
_plotpts(ellipse_pts1, 0, df2.YELLOW, label='invV.dot(circle_pts.T).T')
# Plot ellipse axis
# !HELP!
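# --- Added sketch (not part of the original script) ---
# One way to draw the axes, assuming the eigenvectors of Z give the axis
# directions and 1/sqrt(eigenvalue) the semi-axis lengths; this reuses the
# _plotarrow helper defined with the plotting setup and is an illustration,
# not the original author's solution:
_zl1, _zl2, _zv1, _zv2 = _2x2_eig(Z)
for _lam, _vec in [(_zl1, _zv1), (_zl2, _zv2)]:
    _dx, _dy = _vec / np.sqrt(_lam)
    _plotarrow(0, 0, _dx, _dy, color=df2.YELLOW, label='ellipse axis')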
|
from hotspotter import draw_func2 as df2
np.set_printoptions(precision=8)
tau = 2 * np.pi
df2.reset()
df2.figure(9003, docla=True, doclf=True)
ax = df2.gca()
ax.invert_yaxis()
def _plotpts(data, px, color=df2.BLUE, label=''):
#df2.figure(9003, docla=True, pnum=(1, 1, px))
df2.plot2(data.T[0], data.T[1], '.', '', color=color, label=label)
df2.update()
def _plotarrow(x, y, dx, dy, color=df2.BLUE, label=''):
ax = df2.gca()
arrowargs = dict(head_width=.5, length_includes_head=True, label=label)
arrow = df2.FancyArrow(x, y, dx, dy, **arrowargs)
arrow.set_edgecolor(color)
arrow.set_facecolor(color)
ax.add_patch(arrow)
|
identifier_body
|
|
main.rs
|
impl FnOnce<()> for Closure {
type Output = u32;
extern "rust-call" fn call_once(self, args: ()) -> u32 {
println!("call it FnOnce()");
self.env_var + 2
}
}
impl FnMut<()> for Closure {
extern "rust-call" fn call_mut(&mut self, args: ()) -> u32 {
println!("call it FnMut()");
self.env_var + 2
}
}
impl Fn<()> for Closure {
extern "rust-call" fn call(&self, args: ()) -> u32 {
println!("call it Fn()");
self.env_var + 2
}
}
fn call_it<F: Fn() -> u32>(f: &F) -> u32 { f() }
fn call_it_mut<F: FnMut() -> u32>(f: &mut F) -> u32 { f() }
fn call_it_once<F: FnOnce() -> u32>(f: F) -> u32 { f() }
// Case where the closure is translated into an anonymous struct and trait
struct Closure2<'a> {
env_var: &'a u32,
}
impl<'a> FnOnce<()> for Closure2<'a> {
type Output = ();
extern "rust-call" fn call_once(self, args: ()) -> () {
println!("{:?}", self.env_var);
}
}
impl<'a> FnMut<()> for Closure2<'a> {
extern "rust-call" fn call_mut(&mut self, args: ()) -> () {
println!("{:?}", self.env_var);
}
}
impl<'a> Fn<()> for Closure2<'a> {
extern "rust-call" fn call(&self, args: ()) -> () {
println!("{:?}", self.env_var);
}
}
// Use an `FnOnce()` closure as a parameter
// Execute the closure inside the function body to check whether its ownership is moved
fn call<F: FnOnce()>(f: F) { f() }
fn boxed_closure(c: &mut Vec<Box<Fn()>>) {
let s = "second";
c.push(Box::new(|| println!("first")));
// Captures the environment variable `s` immutably,
// but here the closure needs to be boxed for later use in an iterator,
// so the `move` keyword is required to transfer ownership of `s` into the closure;
// since `s` is a copy-semantics type, the closure captures a copy of the original `s`
c.push(Box::new(move || println!("{}", s)));
c.push(Box::new(|| println!("third")));
}
// `Fn` is not subject to the orphan rule, so this import is optional
// use std::ops::Fn;
// Implement the any method via a trait bound
// This custom Any is different from the standard library's Any
// The generic `F` of this function is bounded by `Fn(u32) -> bool`,
// unlike the ordinary generic bound `<F: Fn<u32, bool>>`
trait Any {
fn any<F>(&self, f: F) -> bool
where
Self: Sized,
F: Fn(u32) -> bool;
}
impl Any for Vec<u32> {
fn any<F>(&self, f: F) -> bool
where
// The Sized bound keeps this method from being called dynamically; this is an optimization
Self: Sized,
F: Fn(u32) -> bool,
{
// Iterate and invoke the passed closure on each element
for &x in self {
if f(x) {
return true;
}
}
false
}
}
// Function pointers can also be used as closure parameters
fn call_ptr<F>(closure: F) -> i32
where
F: Fn(i32) -> i32,
{
closure(1)
}
fn counter_ptr(i: i32) -> i32 { i + 1 }
// Dispatch the closure dynamically as a trait object
trait AnyDyn {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool;
}
impl AnyDyn for Vec<u32> {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool {
for &x in self.iter() {
if f(x) {
return true;
}
}
false
}
}
// Return a closure from a function
// `Fn` can be called multiple times
fn square() -> Box<Fn(i32) -> i32> { Box::new(|i| i * i) }
// Specify the returned closure as `FnOnce`
fn square_once() -> Box<FnOnce(i32) -> i32> { Box::new(|i| i * i) }
// impl Trait example
// Placing the closure trait after the impl keyword lets us return an `FnOnce` trait directly
fn square_impl() -> impl FnOnce(i32) -> i32 { |i| i * i }
// Lifetime parameters when a generic trait is used as a trait object
trait DoSomething<T> {
fn do_sth(&self, value: T);
}
// Implement this trait for references to usize
impl<'a, T: Debug> DoSomething<T> for &'a usize {
fn do_sth(&self, value: T) {
println!("{:?}", value);
}
}
// The usize comes from outside and is not directly related to the foo function
// fn foo<'a>(b: Box<DoSomething<&'a usize>>) {
// let s: usize = 10;
// // s is dropped when the call ends
// // &s would become a dangling pointer
// b.do_sth(&s)
// }
// Use a higher-ranked lifetime: the `for<>` syntax
fn bar<'a>(b: Box<for<'f> DoSomething<&'f usize>>) {
let s: usize = 10;
// s is dropped when the call ends
// &s would become a dangling pointer
b.do_sth(&s)
}
// Case where the closure's parameter and return value are both references
struct Pick<F> {
data: (u32, u32),
func: F,
}
// The compiler fills in the lifetime parameters automatically
// impl<F> Pick<F>
// where
// F: Fn(&(u32, u32)) -> &u32,
// {
// fn call(&self) -> &u32 { (self.func)(&self.data) }
// }
// The actual lifetimes
impl<F> Pick<F>
where
F: for<'f> Fn(&'f (u32, u32)) -> &'f u32,
{
fn call(&self) -> &u32 { (self.func)(&self.data) }
}
fn max(data: &(u32, u32)) -> &u32 {
if data.0 > data.1 {
&data.0
} else {
&data.1
}
}
fn main() {
//
let f = counter(3);
assert_eq!(4, f(1));
// Closure parameters can be of any type
// a: function pointer, (b, c): tuple; the tuple elements are inferred as i32 from the function pointer type
let add = |a: fn() -> i32, (b, c)| (a)() + b + c;
let r = add(val, (2, 3));
assert_eq!(r, 10);
// Two closures with identical definitions still have different types
// Fixed in Rust 2018
let c1 = || {};
let c2 = || {};
let v = [c1, c2];
// Inspect the closure's type
// let c1: () = || println!("i'm a closure");
// | -- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found closure
// Simulate the compiler's implementation of closures
let env_var = 1;
let mut c = Closure { env_var: env_var };
// Call the instance
// actually implemented via the "rust-call" ABI
c();
// A unit value must be passed explicitly as the argument
c.call(());
// A unit value must be passed explicitly as the argument
c.call_mut(());
// A unit value must be passed explicitly as the argument
// After `call_once`, ownership of the instance is moved and it cannot be used again.
c.call_once(());
let mut c = Closure { env_var: env_var };
{
assert_eq!(3, call_it(&c));
}
{
assert_eq!(3, call_it_mut(&mut c));
}
{
assert_eq!(3, call_it_once(c));
}
// A closure example equivalent to the above
let env_var = 1;
let c = || env_var
|
env_var: u32,
}
|
random_line_split
|
|
main.rs
|
fn call(&self, args: ()) -> () {
println!("{:?}", self.env_var);
}
}
// Use an `FnOnce()` closure as a parameter
// Execute the closure inside the function body to check whether its ownership is moved
fn call<F: FnOnce()>(f: F) { f() }
fn boxed_closure(c: &mut Vec<Box<Fn()>>) {
let s = "second";
c.push(Box::new(|| println!("first")));
// Captures the environment variable `s` immutably,
// but here the closure needs
|
for later use in an iterator,
// so the `move` keyword is required to transfer ownership of `s` into the closure;
// since `s` is a copy-semantics type, the closure captures a copy of the original `s`
c.push(Box::new(move || println!("{}", s)));
c.push(Box::new(|| println!("third")));
}
// `Fn` is not subject to the orphan rule, so this import is optional
// use std::ops::Fn;
// Implement the any method via a trait bound
// This custom Any is different from the standard library's Any
// The generic `F` of this function is bounded by `Fn(u32) -> bool`,
// unlike the ordinary generic bound `<F: Fn<u32, bool>>`
trait Any {
fn any<F>(&self, f: F) -> bool
where
Self: Sized,
F: Fn(u32) -> bool;
}
impl Any for Vec<u32> {
fn any<F>(&self, f: F) -> bool
where
// The Sized bound keeps this method from being called dynamically; this is an optimization
Self: Sized,
F: Fn(u32) -> bool,
{
// Iterate and invoke the passed closure on each element
for &x in self {
if f(x) {
return true;
}
}
false
}
}
// Function pointers can also be used as closure parameters
fn call_ptr<F>(closure: F) -> i32
where
F: Fn(i32) -> i32,
{
closure(1)
}
fn counter_ptr(i: i32) -> i32 { i + 1 }
// Dispatch the closure dynamically as a trait object
trait AnyDyn {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool;
}
impl AnyDyn for Vec<u32> {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool {
for &x in self.iter() {
if f(x) {
return true;
}
}
false
}
}
// Return a closure from a function
// `Fn` can be called multiple times
fn square() -> Box<Fn(i32) -> i32> { Box::new(|i| i * i) }
// Specify the returned closure as `FnOnce`
fn square_once() -> Box<FnOnce(i32) -> i32> { Box::new(|i| i * i) }
// impl Trait example
// Placing the closure trait after the impl keyword lets us return an `FnOnce` trait directly
fn square_impl() -> impl FnOnce(i32) -> i32 { |i| i * i }
// Lifetime parameters when a generic trait is used as a trait object
trait DoSomething<T> {
fn do_sth(&self, value: T);
}
// Implement this trait for references to usize
impl<'a, T: Debug> DoSomething<T> for &'a usize {
fn do_sth(&self, value: T) {
println!("{:?}", value);
}
}
// The usize comes from outside and is not directly related to the foo function
// fn foo<'a>(b: Box<DoSomething<&'a usize>>) {
// let s: usize = 10;
// // s is dropped when the call ends
// // &s would become a dangling pointer
// b.do_sth(&s)
// }
// Use a higher-ranked lifetime: the `for<>` syntax
fn bar<'a>(b: Box<for<'f> DoSomething<&'f usize>>) {
let s: usize = 10;
// s is dropped when the call ends
// &s would become a dangling pointer
b.do_sth(&s)
}
// Case where the closure's parameter and return value are both references
struct Pick<F> {
data: (u32, u32),
func: F,
}
// The compiler fills in the lifetime parameters automatically
// impl<F> Pick<F>
// where
// F: Fn(&(u32, u32)) -> &u32,
// {
// fn call(&self) -> &u32 { (self.func)(&self.data) }
// }
// The actual lifetimes
impl<F> Pick<F>
where
F: for<'f> Fn(&'f (u32, u32)) -> &'f u32,
{
fn call(&self) -> &u32 { (self.func)(&self.data) }
}
fn max(data: &(u32, u32)) -> &u32 {
if data.0 > data.1 {
&data.0
} else {
&data.1
}
}
fn main() {
//
let f = counter(3);
assert_eq!(4, f(1));
// Closure parameters can be of any type
// a: function pointer, (b, c): tuple; the tuple elements are inferred as i32 from the function pointer type
let add = |a: fn() -> i32, (b, c)| (a)() + b + c;
let r = add(val, (2, 3));
assert_eq!(r, 10);
// Two closures with identical definitions still have different types
// Fixed in Rust 2018
let c1 = || {};
let c2 = || {};
let v = [c1, c2];
// Inspect the closure's type
// let c1: () = || println!("i'm a closure");
// | -- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found closure
// Simulate the compiler's implementation of closures
let env_var = 1;
let mut c = Closure { env_var: env_var };
// Call the instance
// actually implemented via the "rust-call" ABI
c();
// A unit value must be passed explicitly as the argument
c.call(());
// A unit value must be passed explicitly as the argument
c.call_mut(());
// A unit value must be passed explicitly as the argument
// After `call_once`, ownership of the instance is moved and it cannot be used again.
c.call_once(());
let mut c = Closure { env_var: env_var };
{
assert_eq!(3, call_it(&c));
}
{
assert_eq!(3, call_it_mut(&mut c));
}
{
assert_eq!(3, call_it_once(c));
}
// A closure example equivalent to the above
let env_var = 1;
let c = || env_var + 2;
assert_eq!(3, c());
// Explicitly specify the closure's type
let env_var = 1;
// The type is a trait object; a trait object must be used here
let c: Box<Fn() -> i32> = Box::new(|| env_var + 2);
assert_eq!(3, c());
// Copy-semantics types automatically implement `Fn`
// Bound to a string literal, which has copy semantics
let s = "hello";
// The closure captures `s` as an immutable reference
// It automatically implements the `Fn` trait and captures the free variables in the environment by immutable borrow
let c = || println!("{:?}", s);
c();
// Closure c can be called multiple times, which shows the struct instance the compiler generated for the closure expression has not lost ownership.
c();
// Take another immutable borrow of s
println!("{:?}", s);
// Case where the closure is translated into an anonymous struct and trait
// The closure is translated into the struct `Closure<'a>` because the environment variable is captured immutably
let env_var = 42;
let mut c = Closure2 { env_var: &env_var };
c();
c.call_mut(());
c.call_once(());
// A closure implementing `Fn` can also explicitly call `call_mut` and `call_once`
let s = "hello";
let mut c = || println!("{:?}", s);
c();
c();
// Relies on
|
to be boxed
|
identifier_name
|
main.rs
|
" fn call(&self, args: ()) -> () {
println!("{:?}", self.env_var);
}
}
// Use an `FnOnce()` closure as a parameter
// Execute the closure inside the function body to check whether its ownership is moved
fn call<F: FnOnce()>(f: F) { f() }
fn boxed_closure(c: &mut Vec<Box<Fn()>>) {
let s = "second";
c.push(Box::new(|| println!("first")));
// Captures the environment variable `s` immutably,
// but here the closure needs to be boxed for later use in an iterator,
// so the `move` keyword is required to transfer ownership of `s` into the closure;
// since `s` is a copy-semantics type, the closure captures a copy of the original `s`
c.push(Box::new(move || println!("{}", s)));
c.push(Box::new(|| println!("third")));
}
// `Fn` is not subject to the orphan rule, so this import is optional
// use std::ops::Fn;
// Implement the any method via a trait bound
// This custom Any is different from the standard library's Any
// The generic `F` of this function is bounded by `Fn(u32) -> bool`,
// unlike the ordinary generic bound `<F: Fn<u32, bool>>`
trait Any {
fn any<F>(&self, f: F) -> bool
where
Self: Sized,
F: Fn(u32) -> bool;
}
impl Any for Vec<u32> {
fn any<F>(&self, f: F) -> bool
where
// The Sized bound keeps this method from being called dynamically; this is an optimization
Self: Sized,
F: Fn(u32) -> bool,
{
// Iterate and invoke the passed closure on each element
for &x in self {
if f(x) {
return true;
}
}
false
}
}
// Function pointers can also be used as closure parameters
fn call_ptr<F>(closure: F) -> i32
where
F: Fn(i32) -> i32,
{
closure(1)
}
fn counter_ptr(i: i32) -> i32 { i + 1 }
// Dispatch the closure dynamically as a trait object
trait AnyDyn {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool;
}
impl AnyDyn for Vec<u32> {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool {
for &x in self.iter() {
if f(x) {
return true;
}
}
false
}
}
// Return a closure from a function
// `Fn` can be called multiple times
fn square() -> Box<Fn(i32) -> i32> { Box::new(|i| i * i) }
// Specify the returned closure as `FnOnce`
fn square_once() -> Box<FnOnce(i32) -> i32> { Box::new(|i| i * i) }
// impl Trait example
// Placing the closure trait after the impl keyword lets us return an `FnOnce` trait directly
fn square_impl() -> impl FnOnce(i32) -> i32 { |i| i * i }
// Lifetime parameters when a generic trait is used as a trait object
trait DoSomething<T> {
fn do_sth(&self, value: T);
}
// Implement this trait for references to usize
impl<'a, T: Debug> DoSomething<T> for &'a usize {
fn do_sth(&self, value: T) {
println!("{:?}", value);
}
}
// The usize comes from outside and is not directly related to the foo function
// fn foo<'a>(b: Box<DoSomething<&'a usize>>) {
// let s: usize = 10;
// // s is dropped when the call ends
// // &s would become a dangling pointer
// b.do_sth(&s)
// }
// Use a higher-ranked lifetime: the `for<>` syntax
fn bar<'a>(b: Box<for<'f> DoSomething<&'f usize>>) {
let s: usize = 10;
// s is dropped when the call ends
// &s would become a dangling pointer
b.do_sth(&s)
}
// Case where the closure's parameter and return value are both references
struct Pick<F> {
data: (u32, u32),
func: F,
}
// The compiler
|
Pick<F>
// where
// F: Fn(&(u32, u32)) -> &u32,
// {
// fn call(&self) -> &u32 { (self.func)(&self.data) }
// }
// The actual lifetimes
impl<F> Pick<F>
where
F: for<'f> Fn(&'f (u32, u32)) -> &'f u32,
{
fn call(&self) -> &u32 { (self.func)(&self.data) }
}
fn max(data: &(u32, u32)) -> &u32 {
if data.0 > data.1 {
&data.0
} else {
&data.1
}
}
fn main() {
//
let f = counter(3);
assert_eq!(4, f(1));
// Closure parameters can be of any type
// a: function pointer, (b, c): tuple; the tuple elements are inferred as i32 from the function pointer type
let add = |a: fn() -> i32, (b, c)| (a)() + b + c;
let r = add(val, (2, 3));
assert_eq!(r, 10);
// Two closures with identical definitions still have different types
// Fixed in Rust 2018
let c1 = || {};
let c2 = || {};
let v = [c1, c2];
// Inspect the closure's type
// let c1: () = || println!("i'm a closure");
// | -- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found closure
// Simulate the compiler's implementation of closures
let env_var = 1;
let mut c = Closure { env_var: env_var };
// Call the instance
// actually implemented via the "rust-call" ABI
c();
// A unit value must be passed explicitly as the argument
c.call(());
// A unit value must be passed explicitly as the argument
c.call_mut(());
// A unit value must be passed explicitly as the argument
// After `call_once`, ownership of the instance is moved and it cannot be used again.
c.call_once(());
let mut c = Closure { env_var: env_var };
{
assert_eq!(3, call_it(&c));
}
{
assert_eq!(3, call_it_mut(&mut c));
}
{
assert_eq!(3, call_it_once(c));
}
// A closure example equivalent to the above
let env_var = 1;
let c = || env_var + 2;
assert_eq!(3, c());
// Explicitly specify the closure's type
let env_var = 1;
// The type is a trait object; a trait object must be used here
let c: Box<Fn() -> i32> = Box::new(|| env_var + 2);
assert_eq!(3, c());
// Copy-semantics types automatically implement `Fn`
// Bound to a string literal, which has copy semantics
let s = "hello";
// The closure captures `s` as an immutable reference
// It automatically implements the `Fn` trait and captures the free variables in the environment by immutable borrow
let c = || println!("{:?}", s);
c();
// Closure c can be called multiple times, which shows the struct instance the compiler generated for the closure expression has not lost ownership.
c();
// Take another immutable borrow of s
println!("{:?}", s);
// Case where the closure is translated into an anonymous struct and trait
// The closure is translated into the struct `Closure<'a>` because the environment variable is captured immutably
let env_var = 42;
let mut c = Closure2 { env_var: &env_var };
c();
c.call_mut(());
c.call_once(());
// A closure implementing `Fn` can also explicitly call `call_mut` and `call_once`
let s = "hello";
let mut c = || println!("{:?}", s);
c();
c();
// Relies on `
|
fills in the lifetime parameters automatically
// impl<F>
|
identifier_body
|
main.rs
|
fn call(&self, args: ()) -> () {
println!("{:?}", self.env_var);
}
}
// Use an `FnOnce()` closure as a parameter
// Execute the closure inside the function body to check whether its ownership is moved
fn call<F: FnOnce()>(f: F) { f() }
fn boxed_closure(c: &mut Vec<Box<Fn()>>) {
let s = "second";
c.push(Box::new(|| println!("first")));
// Captures the environment variable `s` immutably,
// but here the closure needs to be boxed for later use in an iterator,
// so the `move` keyword is required to transfer ownership of `s` into the closure;
// since `s` is a copy-semantics type, the closure captures a copy of the original `s`
c.push(Box::new(move || println!("{}", s)));
c.push(Box::new(|| println!("third")));
}
// `Fn` is not subject to the orphan rule, so this import is optional
// use std::ops::Fn;
// Implement the any method via a trait bound
// This custom Any is different from the standard library's Any
// The generic `F` of this function is bounded by `Fn(u32) -> bool`,
// unlike the ordinary generic bound `<F: Fn<u32, bool>>`
trait Any {
fn any<F>(&self, f: F) -> bool
where
Self: Sized,
F: Fn(u32) -> bool;
}
impl Any for Vec<u32> {
fn any<F>(&self, f: F) -> bool
where
// The Sized bound keeps this method from being called dynamically; this is an optimization
Self: Sized,
F: Fn(u32) -> bool,
{
// Iterate and invoke the passed closure on each element
for &x in self {
if f(x) {
return true;
}
}
false
}
}
// Function pointers can also be used as closure parameters
fn call_ptr<F>(closure: F) -> i32
where
F: Fn(i32) -> i32,
{
closure(1)
}
fn counter_ptr(i: i32) -> i32 { i + 1 }
// Dispatch the closure dynamically as a trait object
trait AnyDyn {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool;
}
impl AnyDyn for Vec<u32> {
fn any_dyn(&self, f: &(Fn(u32) -> bool)) -> bool {
for &x in self.iter() {
if f(x) {
return true;
}
}
false
}
}
// Return a closure from a function
// `Fn` can be called multiple times
fn square() -> Box<Fn(i32) -> i32> { Box::new(|i| i * i) }
// Specify the returned closure as `FnOnce`
fn square_once() -> Box<FnOnce(i32) -> i32> { Box::new(|i| i * i) }
// impl Trait example
// Placing the closure trait after the impl keyword lets us return an `FnOnce` trait directly
fn square_impl() -> impl FnOnce(i32) -> i32 { |i| i * i }
// Lifetime parameters when a generic trait is used as a trait object
trait DoSomething<T> {
fn do_sth(&self, value: T);
}
// Implement this trait for references to usize
impl<'a, T: Debug> DoSomething<T> for &'a usize {
fn do_sth(&self, value: T) {
println!("{:?}", value);
}
}
// The usize comes from outside and is not directly related to the foo function
// fn foo<'a>(b: Box<DoSomething<&'a usize>>) {
// let s: usize = 10;
// // s is dropped when the call ends
// // &s would become a dangling pointer
// b.do_sth(&s)
// }
// Use a higher-ranked lifetime: the `for<>` syntax
fn bar<'a>(b: Box<for<'f> DoSomething<&'f usize>>) {
let s: usize = 10;
// s is dropped when the call ends
// &s would become a dangling pointer
b.do_sth(&s)
}
// Case where the closure's parameter and return value are both references
struct Pick<F> {
data: (u32, u32),
func: F,
}
// The compiler fills in the lifetime parameters automatically
// impl<F> Pick<F>
// where
// F: Fn(&(u32, u32)) -> &u32,
// {
// fn call(&self) -> &u32 { (self.func)(&self.data) }
// }
// The actual lifetimes
impl<F> Pick<F>
where
F: for<'f> Fn(&'f (u32, u32)) -> &'f u32,
{
fn call(&self) -> &u32 { (self.func)(&self.data) }
}
fn max(data: &(u32, u32)) -> &u32 {
if data.0 > data.1 {
&data.0
} else {
&data.1
}
}
fn main() {
//
let f = counter(3);
assert_eq!(4, f(1));
// Closure parameters can be of any type
// a: function pointer, (b, c): tuple; the tuple elements are inferred as i32 from the function pointer type
let add = |a: fn() -> i32, (b, c)| (a)() + b + c;
let r = add(val, (2, 3));
assert_eq!(r, 10);
// Two closures with identical definitions still have different types
// Fixed in Rust 2018
let c1 = || {};
let c2 = || {};
let v = [c1, c2];
// Inspect the closure's type
// let c1: () = || println!("i'm a closure");
// | -- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found closure
// Simulate the compiler's implementation of closures
let env_var = 1;
let mut c = Closure { env_var: env_var };
// Call the instance
// actually implemented via the "rust-call" ABI
c();
// A unit value must be passed explicitly as the argument
c.call(());
// A unit value must be passed explicitly as the argument
c.call_mut(());
// A unit value must be passed explicitly as the argument
// After `call_once`, ownership of the instance is moved and it cannot be used again.
c.call_once(());
let mut c = Closure { env_var: env_var };
{
assert_eq!(3, call_it(&c));
}
{
assert_eq!(3, call_it_mut(&mut c));
}
{
assert_eq!(3, call_it_once(c));
|
let env_var = 1;
let c = || env_var + 2;
assert_eq!(3, c());
// Explicitly specify the closure's type
let env_var = 1;
// The type is a trait object; a trait object must be used here
let c: Box<Fn() -> i32> = Box::new(|| env_var + 2);
assert_eq!(3, c());
// Copy-semantics types automatically implement `Fn`
// Bound to a string literal, which has copy semantics
let s = "hello";
// The closure captures `s` as an immutable reference
// It automatically implements the `Fn` trait and captures the free variables in the environment by immutable borrow
let c = || println!("{:?}", s);
c();
// Closure c can be called multiple times, which shows the struct instance the compiler generated for the closure expression has not lost ownership.
c();
// Take another immutable borrow of s
println!("{:?}", s);
// Case where the closure is translated into an anonymous struct and trait
// The closure is translated into the struct `Closure<'a>` because the environment variable is captured immutably
let env_var = 42;
let mut c = Closure2 { env_var: &env_var };
c();
c.call_mut(());
c.call_once(());
// A closure implementing `Fn` can also explicitly call `call_mut` and `call_once`
let s = "hello";
let mut c = || println!("{:?}", s);
c();
c();
// Relies on `
|
}
// A closure example equivalent to the above
|
conditional_block
|
onsite_create_calibration_file.py
|
27017/")
optional.add_argument('-y', '--yes', action="store_true", help='Do not ask interactively for permissions, assume true')
optional.add_argument('--no_pro_symlink', action="store_true",
help='Do not update the pro dir symbolic link, assume true')
optional.add_argument(
'--flatfield-heuristic', action='store_const', const=True, dest="use_flatfield_heuristic",
help=(
"If given, try to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
optional.add_argument(
'--no-flatfield-heuristic', action='store_const', const=False, dest="use_flatfield_heuristic",
help=(
"If given, do not to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
def main():
args, remaining_args = parser.parse_known_args()
run = args.run_number
prod_id = args.prod_version
stat_events = args.statistics
time_run = args.time_run
sys_date = args.sys_date
no_sys_correction = args.no_sys_correction
output_base_name = args.output_base_name
sub_run = args.sub_run
tel_id = args.tel_id
config_file = args.config
yes = args.yes
pro_symlink = not args.no_pro_symlink
# looks for the filter values in the database if not given
if args.filters is None:
filters = search_filter(run, args.mongodb)
else:
filters = args.filters
if filters is None:
sys.exit(f"Missing filter value for run {run}. \n")
# define the FF selection cuts
if args.min_ff is None or args.max_ff is None:
min_ff, max_ff = define_FF_selection_range(filters)
else:
min_ff, max_ff = args.min_ff, args.max_ff
print(f"\n--> Start calculating calibration from run {run}, filters {filters}")
# verify config file
if not config_file.exists():
raise IOError(f"Config file {config_file} does not exists. \n")
print(f"\n--> Config file {config_file}")
# verify input file
r0_dir = args.r0_dir or args.base_dir / 'R0'
input_file = find_r0_subrun(run, sub_run, r0_dir)
date = input_file.parent.name
print(f"\n--> Input file: {input_file}")
# verify output dir
calib_dir = args.base_dir / LEVEL_A_PIXEL_DIR
output_dir = calib_dir / "calibration" / date / prod_id
if not output_dir.exists():
print(f"--> Create directory {output_dir}")
output_dir.mkdir(parents=True, exist_ok=True)
if pro_symlink:
pro = "pro"
create_pro_symlink(output_dir)
else:
pro = prod_id
# make log dir
log_dir = output_dir / "log"
if not log_dir.exists():
print(f"--> Create directory {log_dir}")
log_dir.mkdir(parents=True, exist_ok=True)
# search the summary file info
run_summary_path = find_run_summary(date, args.base_dir)
print(f"\n--> Use run summary {run_summary_path}")
pedestal_file = find_pedestal_file(pro, args.pedestal_run, date=date, base_dir=args.base_dir)
print(f"\n--> Pedestal file: {pedestal_file}")
# search for time calibration file
time_file = find_time_calibration_file(pro, run, time_run, args.base_dir)
print(f"\n--> Time calibration file: {time_file}")
# define systematic correction file
if no_sys_correction:
systematics_file = None
else:
systematics_file = find_systematics_correction_file(pro, date, sys_date, args.base_dir)
print(f"\n--> F-factor systematics correction file: {systematics_file}")
# define charge file names
print("\n***** PRODUCE CHARGE CALIBRATION FILE ***** ")
if filters is not None:
filter_info = f"_filters_{filters}"
else:
filter_info = ""
# remember there are no systematic corrections
prefix = "no_sys_corrected_" if no_sys_correction else ""
output_name = f"{prefix}{output_base_name}{filter_info}.Run{run:05d}.{sub_run:04d}"
output_file = output_dir / f'{output_name}.h5'
print(f"\n--> Output file {output_file}")
log_file = log_dir / f"{output_name}.log"
print(f"\n--> Log file {log_file}")
if output_file.exists():
remove = False
if not yes and os.getenv('SLURM_JOB_ID') is None:
remove = query_yes_no(">>> Output file exists already. Do you want to remove it?")
if yes or remove:
os.remove(output_file)
os.remove(log_file)
else:
print("\n--> Output file exists already. Stop")
exit(1)
#
# produce ff calibration file
#
cmd = [
"lstchain_create_calibration_file",
f"--input_file={input_file}",
f"--output_file={output_file}",
"--LSTEventSource.default_trigger_type=tib",
f"--EventSource.min_flatfield_adc={min_ff}",
f"--EventSource.max_flatfield_adc={max_ff}",
f"--LSTCalibrationCalculator.systematic_correction_path={systematics_file}",
f"--LSTEventSource.EventTimeCalculator.run_summary_path={run_summary_path}",
f"--LSTEventSource.LSTR0Corrections.drs4_time_calibration_path={time_file}",
f"--LSTEventSource.LSTR0Corrections.drs4_pedestal_path={pedestal_file}",
f"--LSTEventSource.use_flatfield_heuristic={args.use_flatfield_heuristic}",
f"--FlatFieldCalculator.sample_size={stat_events}",
f"--PedestalCalculator.sample_size={stat_events}",
f"--config={config_file}",
f"--log-file={log_file}",
"--log-file-level=INFO",
*remaining_args,
]
print("\n--> RUNNING...")
subprocess.run(cmd, check=True)
# plot and save some results
plot_file = f"{output_dir}/log/{output_name}.pdf"
print(f"\n--> PRODUCING PLOTS in {plot_file} ...")
mon = read_calibration_file(output_file, tel_id)
calib.plot_calibration_results(mon.pedestal, mon.flatfield, mon.calibration, run, plot_file)
print("\n--> END")
def search_filter(run, database_url):
"""read the employed filters form mongodb"""
# there was a change of Mongo DB data names on 5/12/2022
NEW_DB_NAMES_DATE = Time("2022-12-04T00:00:00")
filters = None
try:
myclient = pymongo.MongoClient(database_url)
mydb = myclient["CACO"]
mycol = mydb["RUN_INFORMATION"]
mydoc = mycol.find({"run_number": {"$eq": run}})
for x in mydoc:
date = Time(x["start_time"])
if date < NEW_DB_NAMES_DATE:
w1 = int(x["cbox"]["wheel1 position"])
w2 = int(x["cbox"]["wheel2 position"])
else:
w1 = int(x["cbox"]["CBOX_WheelPosition1"])
w2 = int(x["cbox"]["CBOX_WheelPosition2"])
filters = f"{w1:1d}{w2:1d}"
except Exception as e:
print(f"\n >>> Exception: {e}")
raise IOError(
"--> No mongo DB filter information."
" You must pass the filters by argument: -f [filters]"
)
return filters
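# --- Added usage note (not part of the original script) ---
# For a run recorded with wheel positions 5 and 2 this returns the two-digit
# string "52"; if the database query fails an IOError is raised, and if the
# run is simply not found, None is returned.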
def define_FF_selection_range(filters):
""" return the range of charges to select the FF events """
try:
if filters is None:
raise ValueError("Filters are not defined")
# give standard values if standard filters
if filters == '52':
min_ff = 3000
max_ff = 12000
else:
# ... retrieve the transmission value of all the filters
transm_file = os.path.join(os.path.dirname(__file__), "../../data/filters_transmission.dat")
# use a context manager so the file is closed properly
with open(transm_file, 'r') as f:
# skip header
f.readline()
trasm = {}
for line in f:
columns = line.split()
trasm[columns[0]] = float(columns[1])
if trasm[filters] > 0.001:
min_ff = 4000
max_ff = 1000000
elif trasm[filters] <= 0.001 and trasm[filters] > 0.0005:
min_ff = 1200
max_ff = 12000
else:
min_ff = 200
max_ff = 5000
except Exception as e:
print(f"\n >>> Exception: {e}")
raise IOError("--> No FF selection range information")
return min_ff, max_ff
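# --- Added usage note (not part of the original script) ---
# define_FF_selection_range('52') returns the standard cuts (3000, 12000);
# for other filters the cuts follow the tabulated transmission, e.g. a
# transmission above 0.001 yields (4000, 1000000).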
|
random_line_split
|
||
onsite_create_calibration_file.py
|
output file name (change only for debugging)",
default="calibration")
optional.add_argument('--sub_run', help="sub-run to be processed.", type=int, default=0)
optional.add_argument('--min_ff', help="Min FF intensity cut in ADC.", type=float)
optional.add_argument('--max_ff', help="Max FF intensity cut in ADC.", type=float)
optional.add_argument('-f', '--filters', help="Calibox filters")
optional.add_argument('--tel_id', help="telescope id. Default = 1", type=int, default=1)
optional.add_argument('--config', help="Config file", default=DEFAULT_CONFIG, type=Path)
optional.add_argument('--mongodb', help="Mongo data-base connection", default="mongodb://10.200.10.101:27017/")
optional.add_argument('-y', '--yes', action="store_true", help='Do not ask interactively for permissions, assume true')
optional.add_argument('--no_pro_symlink', action="store_true",
help='Do not update the pro dir symbolic link, assume true')
optional.add_argument(
'--flatfield-heuristic', action='store_const', const=True, dest="use_flatfield_heuristic",
help=(
"If given, try to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
optional.add_argument(
'--no-flatfield-heuristic', action='store_const', const=False, dest="use_flatfield_heuristic",
help=(
"If given, do not to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
def main():
args, remaining_args = parser.parse_known_args()
run = args.run_number
prod_id = args.prod_version
stat_events = args.statistics
time_run = args.time_run
sys_date = args.sys_date
no_sys_correction = args.no_sys_correction
output_base_name = args.output_base_name
sub_run = args.sub_run
tel_id = args.tel_id
config_file = args.config
yes = args.yes
pro_symlink = not args.no_pro_symlink
# looks for the filter values in the database if not given
if args.filters is None:
filters = search_filter(run, args.mongodb)
else:
filters = args.filters
if filters is None:
sys.exit(f"Missing filter value for run {run}. \n")
# define the FF selection cuts
if args.min_ff is None or args.max_ff is None:
min_ff, max_ff = define_FF_selection_range(filters)
else:
min_ff, max_ff = args.min_ff, args.max_ff
print(f"\n--> Start calculating calibration from run {run}, filters {filters}")
# verify config file
if not config_file.exists():
raise IOError(f"Config file {config_file} does not exists. \n")
print(f"\n--> Config file {config_file}")
# verify input file
r0_dir = args.r0_dir or args.base_dir / 'R0'
input_file = find_r0_subrun(run, sub_run, r0_dir)
date = input_file.parent.name
print(f"\n--> Input file: {input_file}")
# verify output dir
calib_dir = args.base_dir / LEVEL_A_PIXEL_DIR
output_dir = calib_dir / "calibration" / date / prod_id
if not output_dir.exists():
print(f"--> Create directory {output_dir}")
output_dir.mkdir(parents=True, exist_ok=True)
if pro_symlink:
pro = "pro"
create_pro_symlink(output_dir)
else:
pro = prod_id
# make log dir
log_dir = output_dir / "log"
if not log_dir.exists():
print(f"--> Create directory {log_dir}")
log_dir.mkdir(parents=True, exist_ok=True)
# search the summary file info
run_summary_path = find_run_summary(date, args.base_dir)
print(f"\n--> Use run summary {run_summary_path}")
pedestal_file = find_pedestal_file(pro, args.pedestal_run, date=date, base_dir=args.base_dir)
print(f"\n--> Pedestal file: {pedestal_file}")
# search for time calibration file
time_file = find_time_calibration_file(pro, run, time_run, args.base_dir)
print(f"\n--> Time calibration file: {time_file}")
# define systematic correction file
if no_sys_correction:
systematics_file = None
else:
systematics_file = find_systematics_correction_file(pro, date, sys_date, args.base_dir)
print(f"\n--> F-factor systematics correction file: {systematics_file}")
# define charge file names
print("\n***** PRODUCE CHARGE CALIBRATION FILE ***** ")
if filters is not None:
filter_info = f"_filters_{filters}"
else:
filter_info = ""
# remember there are no systematic corrections
prefix = "no_sys_corrected_" if no_sys_correction else ""
output_name = f"{prefix}{output_base_name}{filter_info}.Run{run:05d}.{sub_run:04d}"
output_file = output_dir / f'{output_name}.h5'
print(f"\n--> Output file {output_file}")
log_file = log_dir / f"{output_name}.log"
print(f"\n--> Log file {log_file}")
if output_file.exists():
remove = False
if not yes and os.getenv('SLURM_JOB_ID') is None:
remove = query_yes_no(">>> Output file exists already. Do you want to remove it?")
if yes or remove:
os.remove(output_file)
os.remove(log_file)
else:
print("\n--> Output file exists already. Stop")
exit(1)
#
# produce ff calibration file
#
cmd = [
"lstchain_create_calibration_file",
f"--input_file={input_file}",
f"--output_file={output_file}",
"--LSTEventSource.default_trigger_type=tib",
f"--EventSource.min_flatfield_adc={min_ff}",
f"--EventSource.max_flatfield_adc={max_ff}",
f"--LSTCalibrationCalculator.systematic_correction_path={systematics_file}",
f"--LSTEventSource.EventTimeCalculator.run_summary_path={run_summary_path}",
f"--LSTEventSource.LSTR0Corrections.drs4_time_calibration_path={time_file}",
f"--LSTEventSource.LSTR0Corrections.drs4_pedestal_path={pedestal_file}",
f"--LSTEventSource.use_flatfield_heuristic={args.use_flatfield_heuristic}",
f"--FlatFieldCalculator.sample_size={stat_events}",
f"--PedestalCalculator.sample_size={stat_events}",
f"--config={config_file}",
f"--log-file={log_file}",
"--log-file-level=INFO",
*remaining_args,
]
print("\n--> RUNNING...")
subprocess.run(cmd, check=True)
# plot and save some results
plot_file = f"{output_dir}/log/{output_name}.pdf"
print(f"\n--> PRODUCING PLOTS in {plot_file} ...")
mon = read_calibration_file(output_file, tel_id)
calib.plot_calibration_results(mon.pedestal, mon.flatfield, mon.calibration, run, plot_file)
print("\n--> END")
def search_filter(run, database_url):
"""read the employed filters form mongodb"""
# there was a change of Mongo DB data names on 5/12/2022
NEW_DB_NAMES_DATE = Time("2022-12-04T00:00:00")
filters = None
try:
myclient = pymongo.MongoClient(database_url)
mydb = myclient["CACO"]
mycol = mydb["RUN_INFORMATION"]
mydoc = mycol.find({"run_number": {"$eq": run}})
for x in mydoc:
date = Time(x["start_time"])
if date < NEW_DB_NAMES_DATE:
w1 = int(x["cbox"]["wheel1 position"])
w2 = int(x["cbox"]["wheel2 position"])
else:
w1 = int(x["cbox"]["CBOX_WheelPosition1"])
w2 = int(x["cbox"]["CBOX_WheelPosition2"])
filters = f"{w1:1d}{w2:1d}"
except Exception as e:
print(f"\n >>> Exception: {e}")
raise IOError(
"--> No mongo DB filter information."
" You must pass the filters by argument: -f [filters]"
)
return filters
def define_FF_selection_range(filters):
|
""" return the range of charges to select the FF events """
try:
if filters is None:
raise ValueError("Filters are not defined")
# give standard values if standard filters
if filters == '52':
min_ff = 3000
max_ff = 12000
else:
# ... retrieve the transmission value of all the filters
transm_file = os.path.join(os.path.dirname(__file__), "../../data/filters_transmission.dat")
# use a context manager so the file is closed properly
with open(transm_file, 'r') as f:
# skip header
f.readline()
trasm = {}
for line in f:
|
identifier_body
|
|
onsite_create_calibration_file.py
|
run_number', help="Run number if the flat-field data",
type=int, required=True)
optional.add_argument('-p', '--pedestal_run',
help="Pedestal run to be used. If None, it looks for the pedestal run of the date of the FF data.",
type=int)
version = lstchain.__version__
optional.add_argument('-v', '--prod_version', help="Version of the production",
default=f"v{version}")
optional.add_argument('-s', '--statistics', help="Number of events for the flat-field and pedestal statistics",
type=int, default=10000)
optional.add_argument('-b', '--base_dir', help="Root dir for the output directory tree", type=Path,
default=DEFAULT_BASE_PATH)
optional.add_argument('--r0-dir', help="Root dir for the input r0 tree. By default, <base_dir>/R0 will be used",
type=Path)
optional.add_argument('--time_run',
help="Run for time calibration. If None, search the last time run before or equal the FF run",
type=int)
optional.add_argument(
'--sys_date',
help=(
"Date of systematic correction file (format YYYYMMDD). \n"
"Default: automatically search the best date \n"
),
)
optional.add_argument('--no_sys_correction',
help="Systematic corrections are not applied. \n",
action='store_true',
default=False)
optional.add_argument('--output_base_name', help="Base of output file name (change only for debugging)",
default="calibration")
optional.add_argument('--sub_run', help="sub-run to be processed.", type=int, default=0)
optional.add_argument('--min_ff', help="Min FF intensity cut in ADC.", type=float)
optional.add_argument('--max_ff', help="Max FF intensity cut in ADC.", type=float)
optional.add_argument('-f', '--filters', help="Calibox filters")
optional.add_argument('--tel_id', help="telescope id. Default = 1", type=int, default=1)
optional.add_argument('--config', help="Config file", default=DEFAULT_CONFIG, type=Path)
optional.add_argument('--mongodb', help="Mongo data-base connection", default="mongodb://10.200.10.101:27017/")
optional.add_argument('-y', '--yes', action="store_true", help='Do not ask interactively for permissions, assume true')
optional.add_argument('--no_pro_symlink', action="store_true",
help='Do not update the pro dir symbolic link, assume true')
optional.add_argument(
'--flatfield-heuristic', action='store_const', const=True, dest="use_flatfield_heuristic",
help=(
"If given, try to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
optional.add_argument(
'--no-flatfield-heuristic', action='store_const', const=False, dest="use_flatfield_heuristic",
help=(
"If given, do not to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
def
|
():
args, remaining_args = parser.parse_known_args()
run = args.run_number
prod_id = args.prod_version
stat_events = args.statistics
time_run = args.time_run
sys_date = args.sys_date
no_sys_correction = args.no_sys_correction
output_base_name = args.output_base_name
sub_run = args.sub_run
tel_id = args.tel_id
config_file = args.config
yes = args.yes
pro_symlink = not args.no_pro_symlink
# looks for the filter values in the database if not given
if args.filters is None:
filters = search_filter(run, args.mongodb)
else:
filters = args.filters
if filters is None:
sys.exit(f"Missing filter value for run {run}. \n")
# define the FF selection cuts
if args.min_ff is None or args.max_ff is None:
min_ff, max_ff = define_FF_selection_range(filters)
else:
min_ff, max_ff = args.min_ff, args.max_ff
print(f"\n--> Start calculating calibration from run {run}, filters {filters}")
# verify config file
if not config_file.exists():
raise IOError(f"Config file {config_file} does not exists. \n")
print(f"\n--> Config file {config_file}")
# verify input file
r0_dir = args.r0_dir or args.base_dir / 'R0'
input_file = find_r0_subrun(run, sub_run, r0_dir)
date = input_file.parent.name
print(f"\n--> Input file: {input_file}")
# verify output dir
calib_dir = args.base_dir / LEVEL_A_PIXEL_DIR
output_dir = calib_dir / "calibration" / date / prod_id
if not output_dir.exists():
print(f"--> Create directory {output_dir}")
output_dir.mkdir(parents=True, exist_ok=True)
if pro_symlink:
pro = "pro"
create_pro_symlink(output_dir)
else:
pro = prod_id
# make log dir
log_dir = output_dir / "log"
if not log_dir.exists():
print(f"--> Create directory {log_dir}")
log_dir.mkdir(parents=True, exist_ok=True)
# search the summary file info
run_summary_path = find_run_summary(date, args.base_dir)
print(f"\n--> Use run summary {run_summary_path}")
pedestal_file = find_pedestal_file(pro, args.pedestal_run, date=date, base_dir=args.base_dir)
print(f"\n--> Pedestal file: {pedestal_file}")
# search for time calibration file
time_file = find_time_calibration_file(pro, run, time_run, args.base_dir)
print(f"\n--> Time calibration file: {time_file}")
# define systematic correction file
if no_sys_correction:
systematics_file = None
else:
systematics_file = find_systematics_correction_file(pro, date, sys_date, args.base_dir)
print(f"\n--> F-factor systematics correction file: {systematics_file}")
# define charge file names
print("\n***** PRODUCE CHARGE CALIBRATION FILE ***** ")
if filters is not None:
filter_info = f"_filters_{filters}"
else:
filter_info = ""
# remember there are no systematic corrections
prefix = "no_sys_corrected_" if no_sys_correction else ""
output_name = f"{prefix}{output_base_name}{filter_info}.Run{run:05d}.{sub_run:04d}"
output_file = output_dir / f'{output_name}.h5'
print(f"\n--> Output file {output_file}")
log_file = log_dir / f"{output_name}.log"
print(f"\n--> Log file {log_file}")
if output_file.exists():
remove = False
if not yes and os.getenv('SLURM_JOB_ID') is None:
remove = query_yes_no(">>> Output file exists already. Do you want to remove it?")
if yes or remove:
os.remove(output_file)
os.remove(log_file)
else:
print("\n--> Output file exists already. Stop")
exit(1)
#
# produce ff calibration file
#
cmd = [
"lstchain_create_calibration_file",
f"--input_file={input_file}",
f"--output_file={output_file}",
"--LSTEventSource.default_trigger_type=tib",
f"--EventSource.min_flatfield_adc={min_ff}",
f"--EventSource.max_flatfield_adc={max_ff}",
f"--LSTCalibrationCalculator.systematic_correction_path={systematics_file}",
f"--LSTEventSource.EventTimeCalculator.run_summary_path={run_summary_path}",
f"--LSTEventSource.LSTR0Corrections.drs4_time_calibration_path={time_file}",
f"--LSTEventSource.LSTR0Corrections.drs4_pedestal_path={pedestal_file}",
f"--LSTEventSource.use_flatfield_heuristic={args.use_flatfield_heuristic}",
f"--FlatFieldCalculator.sample_size={stat_events}",
f"--PedestalCalculator.sample_size={stat_events}",
f"--config={config_file}",
f"--log-file={log_file}",
"--log-file-level=INFO",
*remaining_args,
]
print("\n--> RUNNING...")
subprocess.run(cmd, check=True)
# plot and save some results
plot_file = f"{output_dir}/log/{output_name}.pdf"
print(f"\n--> PRODUCING PLOTS in {plot_file} ...")
mon = read_calibration_file(output_file, tel_id)
calib.plot_calibration_results(mon.pedestal, mon.flatfield, mon.calibration, run, plot_file)
print("\n--> END")
def search_filter(run, database_url):
"""read the employed filters form mongodb"""
# there was a change of Mongo DB data names on 5/12/2022
NEW_DB_NAMES_DATE = Time("2022-12-04T00:00:00")
filters = None
try:
myclient = pymongo.MongoClient(database_url)
mydb = myclient["CACO"]
mycol = mydb["RUN_INFORMATION"]
|
main
|
identifier_name
|
onsite_create_calibration_file.py
|
. If None, search the last time run before or equal to the FF run",
type=int)
optional.add_argument(
'--sys_date',
help=(
"Date of systematic correction file (format YYYYMMDD). \n"
"Default: automatically search the best date \n"
),
)
optional.add_argument('--no_sys_correction',
help="Systematic corrections are not applied. \n",
action='store_true',
default=False)
optional.add_argument('--output_base_name', help="Base of output file name (change only for debugging)",
default="calibration")
optional.add_argument('--sub_run', help="sub-run to be processed.", type=int, default=0)
optional.add_argument('--min_ff', help="Min FF intensity cut in ADC.", type=float)
optional.add_argument('--max_ff', help="Max FF intensity cut in ADC.", type=float)
optional.add_argument('-f', '--filters', help="Calibox filters")
optional.add_argument('--tel_id', help="telescope id. Default = 1", type=int, default=1)
optional.add_argument('--config', help="Config file", default=DEFAULT_CONFIG, type=Path)
optional.add_argument('--mongodb', help="Mongo data-base connection", default="mongodb://10.200.10.101:27017/")
optional.add_argument('-y', '--yes', action="store_true", help='Do not ask interactively for permissions, assume true')
optional.add_argument('--no_pro_symlink', action="store_true",
help='Do not update the pro dir symbolic link, assume true')
optional.add_argument(
'--flatfield-heuristic', action='store_const', const=True, dest="use_flatfield_heuristic",
help=(
"If given, try to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
optional.add_argument(
'--no-flatfield-heuristic', action='store_const', const=False, dest="use_flatfield_heuristic",
help=(
"If given, do not to identify flatfield events from the raw data."
" Should be used only for data from before 2022"
)
)
def main():
args, remaining_args = parser.parse_known_args()
run = args.run_number
prod_id = args.prod_version
stat_events = args.statistics
time_run = args.time_run
sys_date = args.sys_date
no_sys_correction = args.no_sys_correction
output_base_name = args.output_base_name
sub_run = args.sub_run
tel_id = args.tel_id
config_file = args.config
yes = args.yes
pro_symlink = not args.no_pro_symlink
# looks for the filter values in the database if not given
if args.filters is None:
filters = search_filter(run, args.mongodb)
else:
filters = args.filters
if filters is None:
sys.exit(f"Missing filter value for run {run}. \n")
# define the FF selection cuts
if args.min_ff is None or args.max_ff is None:
min_ff, max_ff = define_FF_selection_range(filters)
else:
min_ff, max_ff = args.min_ff, args.max_ff
print(f"\n--> Start calculating calibration from run {run}, filters {filters}")
# verify config file
if not config_file.exists():
raise IOError(f"Config file {config_file} does not exists. \n")
print(f"\n--> Config file {config_file}")
# verify input file
r0_dir = args.r0_dir or args.base_dir / 'R0'
input_file = find_r0_subrun(run, sub_run, r0_dir)
date = input_file.parent.name
print(f"\n--> Input file: {input_file}")
# verify output dir
calib_dir = args.base_dir / LEVEL_A_PIXEL_DIR
output_dir = calib_dir / "calibration" / date / prod_id
if not output_dir.exists():
print(f"--> Create directory {output_dir}")
output_dir.mkdir(parents=True, exist_ok=True)
if pro_symlink:
pro = "pro"
create_pro_symlink(output_dir)
else:
pro = prod_id
# make log dir
log_dir = output_dir / "log"
if not log_dir.exists():
print(f"--> Create directory {log_dir}")
log_dir.mkdir(parents=True, exist_ok=True)
# search the summary file info
run_summary_path = find_run_summary(date, args.base_dir)
print(f"\n--> Use run summary {run_summary_path}")
pedestal_file = find_pedestal_file(pro, args.pedestal_run, date=date, base_dir=args.base_dir)
print(f"\n--> Pedestal file: {pedestal_file}")
# search for time calibration file
time_file = find_time_calibration_file(pro, run, time_run, args.base_dir)
print(f"\n--> Time calibration file: {time_file}")
# define systematic correction file
if no_sys_correction:
systematics_file = None
else:
systematics_file = find_systematics_correction_file(pro, date, sys_date, args.base_dir)
print(f"\n--> F-factor systematics correction file: {systematics_file}")
# define charge file names
print("\n***** PRODUCE CHARGE CALIBRATION FILE ***** ")
if filters is not None:
filter_info = f"_filters_{filters}"
else:
filter_info = ""
# remember there are no systematic corrections
prefix = "no_sys_corrected_" if no_sys_correction else ""
output_name = f"{prefix}{output_base_name}{filter_info}.Run{run:05d}.{sub_run:04d}"
output_file = output_dir / f'{output_name}.h5'
print(f"\n--> Output file {output_file}")
log_file = log_dir / f"{output_name}.log"
print(f"\n--> Log file {log_file}")
if output_file.exists():
remove = False
if not yes and os.getenv('SLURM_JOB_ID') is None:
remove = query_yes_no(">>> Output file exists already. Do you want to remove it?")
if yes or remove:
os.remove(output_file)
os.remove(log_file)
else:
print("\n--> Output file exists already. Stop")
exit(1)
#
# produce ff calibration file
#
cmd = [
"lstchain_create_calibration_file",
f"--input_file={input_file}",
f"--output_file={output_file}",
"--LSTEventSource.default_trigger_type=tib",
f"--EventSource.min_flatfield_adc={min_ff}",
f"--EventSource.max_flatfield_adc={max_ff}",
f"--LSTCalibrationCalculator.systematic_correction_path={systematics_file}",
f"--LSTEventSource.EventTimeCalculator.run_summary_path={run_summary_path}",
f"--LSTEventSource.LSTR0Corrections.drs4_time_calibration_path={time_file}",
f"--LSTEventSource.LSTR0Corrections.drs4_pedestal_path={pedestal_file}",
f"--LSTEventSource.use_flatfield_heuristic={args.use_flatfield_heuristic}",
f"--FlatFieldCalculator.sample_size={stat_events}",
f"--PedestalCalculator.sample_size={stat_events}",
f"--config={config_file}",
f"--log-file={log_file}",
"--log-file-level=INFO",
*remaining_args,
]
print("\n--> RUNNING...")
subprocess.run(cmd, check=True)
# plot and save some results
plot_file = f"{output_dir}/log/{output_name}.pdf"
print(f"\n--> PRODUCING PLOTS in {plot_file} ...")
mon = read_calibration_file(output_file, tel_id)
calib.plot_calibration_results(mon.pedestal, mon.flatfield, mon.calibration, run, plot_file)
print("\n--> END")
def search_filter(run, database_url):
"""read the employed filters form mongodb"""
# there was a change of Mongo DB data names on 5/12/2022
NEW_DB_NAMES_DATE = Time("2022-12-04T00:00:00")
filters = None
try:
myclient = pymongo.MongoClient(database_url)
mydb = myclient["CACO"]
mycol = mydb["RUN_INFORMATION"]
mydoc = mycol.find({"run_number": {"$eq": run}})
for x in mydoc:
date = Time(x["start_time"])
if date < NEW_DB_NAMES_DATE:
w1 = int(x["cbox"]["wheel1 position"])
w2 = int(x["cbox"]["wheel2 position"])
else:
w1 = int(x["cbox"]["CBOX_WheelPosition1"])
w2 = int(x["cbox"]["CBOX_WheelPosition2"])
filters = f"{w1:1d}{w2:1d}"
except Exception as e:
print(f"\n >>> Exception: {e}")
raise IOError(
"--> No mongo DB filter information."
" You must pass the filters by argument: -f [filters]"
)
return filters
def define_FF_selection_range(filters):
""" return the range of charges to select the FF events """
try:
if filters is None:
|
raise ValueError("Filters are not defined")
|
conditional_block
|
|
auto-py-torrent.py
|
ER = '\033[1m\033[31m'
def get_parser():
"""Load parser for command line arguments.
It parses argv/input into the args variable.
"""
# Parent and only parser.
parser = argparse.ArgumentParser(
add_help=True,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('mode', action='store',
choices=range(len(MODES)),
type=int,
help='Select mode of file download.\n'
' e.g: 0(rated) or 1(list).')
parser.add_argument('torr_page', action='store',
choices=range(len(TORRENTS)),
type=int,
help='Select tracking page to download from.\n'
' e.g: 0 to .. ' + str(len(TORRENTS)-1) + '.')
parser.add_argument('str_search', action='store',
type=str,
help='Input torrent string to search.\n'
' e.g: "String search"')
return parser
def is_num(var):
"""Check if var string is num. Should use it only with strings."""
try:
int(var)
return True
except ValueError:
return False
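# --- Added usage note (not part of the original script) ---
# is_num('42') -> True; is_num('4.2') -> False (int() rejects the dot);
# is_num('abc') -> False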
class AutoPy:
"""AutoPy class for instance variables."""
def __init__(self, args, string_search, mode_search,
page, key_search, torrent_page, domain):
"""Args not entered will be defaulted."""
self.args = args
self.back_to_menu = False
self.content_page = None
self.domain = domain
self.elements = None
self.found_torrents = False
self.hrefs = []
self.keep_search = True
self.key_search = key_search
self.magnet = ""
self.mode_search = mode_search
self.page = page
self.picked_choice = False
self.selected = ""
self.string_search = string_search
self.table = None
self.torrent = ""
self.torrent_page = torrent_page
self.url = ""
self.movieName = ""
self.retries = 0
def get_magnet(self, url):
"""Get magnet from torrent page. Url already got domain."""
#print(url+ '\ndomain: '+self.domain)
if 'magnet' in url:
url = url.replace(self.domain, '')
self.url = url
if not os.path.isfile('movieMagnets.txt'):
with open('movieMagnets.txt', 'w'):
pass
with open('movieMagnets.txt', 'a') as linkFile:
linkFile.write(url+'\n')
def download_torrent(self):
"""Download torrent.
Rated implies download the unique best rated torrent found.
Otherwise: get the magnet and download it.
"""
try:
if self.back_to_menu is True:
return
if self.found_torrents is False:
print('Nothing found.')
return
elif self.mode_search == 'list':
if self.selected is not None:
# t_p, pirate and 1337x got magnet inside, else direct.
if self.page in ['the_pirate_bay',
'torrent_project',
'1337x',
'isohunt']:
url = self.hrefs[int(self.selected)]
self.get_magnet(url)
print('Downloading movie: '+self.movieName+' from url: '+url)
else:
print('Bad selected page.')
else:
print('Nothing selected.')
sys.exit(1)
except Exception:
print(traceback.format_exc())
sys.exit(0)
def build_table(self):
"""Build table."""
headers = ['Title', 'Seeders', 'Leechers', 'Age', 'Size']
titles = []
seeders = []
leechers = []
ages = []
sizes = []
if self.page == 'the_pirate_bay':
for elem in self.elements[0]:
title = elem.find('a', {'class': 'detLink'}).get_text()
titles.append(title)
font_text = elem.find(
'font', {'class': 'detDesc'}).get_text()
dammit = UnicodeDammit(font_text)
age, size = dammit.unicode_markup.split(',')[:-1]
ages.append(age)
sizes.append(size)
# Torrent
href = self.domain + \
elem.find('a', title=re.compile('magnet'))['href']
self.hrefs.append(str(href))
seeders = [elem.get_text() for elem in self.elements[1]]
leechers = [elem.get_text() for elem in self.elements[2]]
else:
print('Error page')
self.table = [[Colors.BOLD +
UnicodeDammit(titles[i][:75].strip(), ["utf-8"]).unicode_markup +
Colors.ENDC
if (i + 1) % 2 == 0
else UnicodeDammit(
titles[i][:75].strip()).unicode_markup,
Colors.SEEDER + seeders[i].strip() + Colors.ENDC
if (i + 1) % 2 == 0
else Colors.LGREEN + seeders[i].strip() + Colors.ENDC,
Colors.LEECHER + leechers[i].strip() + Colors.ENDC
if (i + 1) % 2 == 0
else Colors.LRED + leechers[i].strip() + Colors.ENDC,
Colors.LIGHTBLUE + ages[i].strip() + Colors.ENDC
if (i + 1) % 2 == 0
else Colors.BLUE + ages[i].strip() + Colors.ENDC,
Colors.PINK + sizes[i].strip() + Colors.ENDC
if (i + 1) % 2 == 0
else Colors.PURPLE + sizes[i].strip() + Colors.ENDC]
for i in range(len(self.hrefs))]
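# --- Added note (not part of the original script) ---
# The conditional expressions above alternate bold and plain colors on every
# other row. The same idea as a hypothetical helper:
#   def _alt(i, bold, plain, text):
#       return (bold if (i + 1) % 2 == 0 else plain) + text + Colors.ENDC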
def soupify(self):
"""Get proper torrent/magnet information.
If search_mode is rated then get torrent/magnet.
If not, get all the elements to build the table.
There are different ways for each page.
"""
soup = BeautifulSoup(self.content_page.content, 'lxml')
if self.page == 'the_pirate_bay':
main = soup.find('table', {'id': 'searchResult'})
if self.mode_search == 'best_rated':
rated_url = self.domain + \
main.find('a', href=re.compile('torrent'))['href']
self.get_magnet(rated_url)
else:
try:
trs = main.find_all('tr', limit=30)[1:]
self.elements = list(
zip(*[tr.find_all('td', recursive=False)[1:]
for tr in trs])) # Magnets
except:
if main is None:
print('Failed to get data for movie: '+self.movieName+' retrying attempt '+str(self.retries+1)+' out of 5')
self.retries += 1
time.sleep(5)
if self.retries < 5:
self.soupify()
else:
# failed to get the url for that movie
# output to failed movies .txt
if not os.path.isfile('failedMovies.txt'):
with open('failedMovies.txt', 'w'):
pass
with open('failedMovies.txt', 'a') as file:
file.write(self.movieName + '\n')
else:
print('Cannot soupify current page. Try again.')
    def handle_select(self):
        """Handle the user's input in list mode."""
        # Selection is pinned to the first result for unattended runs;
        # re-enable the input() call below for interactive use.
        # self.selected = input('>> ')
        self.selected = '0'
        if self.selected in ['Q', 'q']:
            sys.exit(1)
        elif self.selected in ['B', 'b']:
            self.back_to_menu = True
            return True
        elif is_num(self.selected):
            if 0 <= int(self.selected) <= len(self.hrefs) - 1:
                self.back_to_menu = False
                return True
            else:
                print(Colors.FAIL +
                      'Wrong index. ' +
                      'Please select an appropriate one or other option.' +
                      Colors.ENDC)
                return False
        else:
            print(Colors.FAIL +
                  'Invalid input. ' +
                  'Please select an appropriate one or other option.' +
                  Colors.ENDC)
            return False
    def select_torrent(self):
        """Select a torrent.
        First check whether the not-found marker appears in content_page.
        In best_rated mode the magnet/torrent is fetched directly;
        otherwise build the table and let the user pick an entry.
        """
        try:
            self.found_torrents = self.key_search not in self.content_page.text
            if not self.found_torrents:
                print('-----------------No torrents found.--------------------')
                sys.exit(1)
            self.soupify()
            if self.mode_search == 'list':
                self.build_table()
                while not self.picked_choice:
                    self.picked_choice = self.handle_select()
        except Exception:
            print('ERROR select_torrent: ')
            print('Could not download movie: ' + self.movieName)
            logging.error(traceback.format_exc())
            sys.exit(1)
GREEN = '\033[42m'
CYAN = '\033[36m'
RED = '\033[41m'
PINK = '\033[95m'
PURPLE = '\033[35m'
LIGHTBLUE = '\033[94m'
LGREEN = '\033[0m\033[32m'
LIGHTCYAN = '\033[0m\033[36m'
LRED = '\033[0m\033[31m'
LIGHTPURPLE = '\033[0m\033[35m'
SEEDER = '\033[1m\033[32m'
LEECHER = '\033[1m\033[31m'
def get_parser():
    """Build the parser for command line arguments.
    It parses argv into the args variable.
    """
    # Parent and only parser.
    parser = argparse.ArgumentParser(
        add_help=True,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('mode', action='store',
                        choices=range(len(MODES)),
                        type=int,
                        help='Select mode of file download.\n'
                             ' e.g.: 0 (rated) or 1 (list).')
    parser.add_argument('torr_page', action='store',
                        choices=range(len(TORRENTS)),
                        type=int,
                        help='Select tracking page to download from.\n'
                             ' e.g.: 0 to ' + str(len(TORRENTS) - 1) + '.')
    parser.add_argument('str_search', action='store',
                        type=str,
                        help='Input torrent string to search.\n'
                             ' e.g.: "String search"')
    return parser
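# Quick sanity sketch for the parser above (hypothetical, not part of this
# project); it assumes MODES has at least two entries and TORRENTS at least
# one, both defined elsewhere in this module.
def _demo_parser():
    args = get_parser().parse_args(['1', '0', 'big buck bunny'])
    print(args.mode, args.torr_page, args.str_search)  # -> 1 0 big buck bunny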
def is_num(var):
    """Return True if the string var parses as an integer."""
    try:
        int(var)
        return True
    except ValueError:
        return False
class AutoPy:
"""AutoPy class for instance variables."""
def __init__(self, args, string_search, mode_search,
page, key_search, torrent_page, domain):
"""Args not entered will be defaulted."""
self.args = args
self.back_to_menu = False
self.content_page = None
self.domain = domain
self.elements = None
self.found_torrents = False
self.hrefs = []
self.keep_search = True
self.key_search = key_search
self.magnet = ""
self.mode_search = mode_search
self.page = page
self.picked_choice = False
self.selected = ""
self.string_search = string_search
self.table = None
self.torrent = ""
self.torrent_page = torrent_page
self.url = ""
self.movieName = ""
self.retries = 0
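# A minimal wiring sketch with hypothetical values (not the project's real
# configuration); the actual entry point derives these settings from the
# parsed arguments and the per-page settings defined elsewhere in this module.
def example_autopy():
    return AutoPy(args=None,
                  string_search='big buck bunny',
                  mode_search='list',             # or 'best_rated'
                  page='the_pirate_bay',
                  key_search='No results found',  # assumed not-found marker
                  torrent_page=0,                 # illustrative
                  domain='https://thepiratebay.example')  # illustrative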
filter.go
// MultiConditionFilter has convenience methods to add conditions
type MultiConditionFilter interface {
	Filter
	// Condition adds filters to the condition
	Condition(...Filter) MultiConditionFilter
}
type AndFilter interface{ MultiConditionFilter }
type OrFilter interface{ MultiConditionFilter }
// FilterOp enum of filter operations that must be implemented by plugins - the string value is
// used in the core string formatting method (for logging etc.)
type FilterOp string
const (
	// FilterOpAnd and
	FilterOpAnd FilterOp = "&&"
	// FilterOpOr or
	FilterOpOr FilterOp = "||"
	// FilterOpEq equal
	FilterOpEq FilterOp = "=="
	// FilterOpNe not equal
	FilterOpNe FilterOp = "!="
	// FilterOpIn in list of values
	FilterOpIn FilterOp = "IN"
	// FilterOpNotIn not in list of values
	FilterOpNotIn FilterOp = "NI"
	// FilterOpGt greater than
	FilterOpGt FilterOp = ">"
	// FilterOpLt less than
	FilterOpLt FilterOp = "<"
	// FilterOpGte greater than or equal
	FilterOpGte FilterOp = ">="
	// FilterOpLte less than or equal
	FilterOpLte FilterOp = "<="
	// FilterOpCont contains the specified text, case sensitive
	FilterOpCont FilterOp = "%="
	// FilterOpNotCont does not contain the specified text, case sensitive
	FilterOpNotCont FilterOp = "%!"
	// FilterOpICont contains the specified text, case insensitive
	FilterOpICont FilterOp = "^="
	// FilterOpNotICont does not contain the specified text, case insensitive
	FilterOpNotICont FilterOp = "^!"
)
// FilterBuilder is the syntax used to build the filter, where And() and Or() can be nested
type FilterBuilder interface {
	// Fields is the list of available fields
	Fields() []string
	// And requires all sub-filters to match
	And(and ...Filter) AndFilter
	// Or requires any of the sub-filters to match
	Or(or ...Filter) OrFilter
	// Eq equal
	Eq(name string, value driver.Value) Filter
	// Neq not equal
	Neq(name string, value driver.Value) Filter
	// In one of an array of values
	In(name string, values []driver.Value) Filter
	// NotIn not one of an array of values
	NotIn(name string, values []driver.Value) Filter
	// Lt less than
	Lt(name string, value driver.Value) Filter
	// Gt greater than
	Gt(name string, value driver.Value) Filter
	// Gte greater than or equal
	Gte(name string, value driver.Value) Filter
	// Lte less than or equal
	Lte(name string, value driver.Value) Filter
	// Contains allows the string anywhere - case sensitive
	Contains(name string, value driver.Value) Filter
	// NotContains disallows the string anywhere - case sensitive
	NotContains(name string, value driver.Value) Filter
	// IContains allows the string anywhere - case insensitive
	IContains(name string, value driver.Value) Filter
	// NotIContains disallows the string anywhere - case insensitive
	NotIContains(name string, value driver.Value) Filter
}
// NullBehavior specifies whether to sort nulls first or last in a query
type NullBehavior int
const (
NullsDefault NullBehavior = iota
NullsFirst
NullsLast
)
// SortField is field+direction for sorting
type SortField struct {
Field string
Descending bool
Nulls NullBehavior
}
// FilterInfo is the structure returned by Finalize to the plugin, to serialize this filter
// into the underlying database mechanism's filter language
type FilterInfo struct {
Sort []*SortField
Skip uint64
Limit uint64
Count bool
Field string
Op FilterOp
Values []FieldSerialization
Value FieldSerialization
Children []*FilterInfo
}
// FilterResult has additional info, if requested on the query - currently only the total count
type FilterResult struct {
TotalCount *int64
}
func valueString(f FieldSerialization) string {
v, _ := f.Value()
switch tv := v.(type) {
case nil:
return "null"
case []byte:
if tv == nil {
return "null"
}
return fmt.Sprintf("'%s'", tv)
case int64:
return strconv.FormatInt(tv, 10)
case bool:
return fmt.Sprintf("%t", tv)
default:
return fmt.Sprintf("'%s'", tv)
}
}
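// For reference, valueString renders (illustrative inputs):
//
//	nil           -> null
//	[]byte("ab")  -> 'ab'
//	int64(42)     -> 42
//	true          -> true
//	anything else -> its %s form, single-quoted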
func (f *FilterInfo) filterString() string {
switch f.Op {
case FilterOpAnd, FilterOpOr:
cs := make([]string, len(f.Children))
for i, c := range f.Children {
cs[i] = fmt.Sprintf("( %s )", c.filterString())
}
return strings.Join(cs, fmt.Sprintf(" %s ", f.Op))
case FilterOpIn, FilterOpNotIn:
strValues := make([]string, len(f.Values))
for i, v := range f.Values {
strValues[i] = valueString(v)
}
return fmt.Sprintf("%s %s [%s]", f.Field, f.Op, strings.Join(strValues, ","))
default:
return fmt.Sprintf("%s %s %s", f.Field, f.Op, valueString(f.Value))
}
}
func (f *FilterInfo) String() string {
var val strings.Builder
val.WriteString(f.filterString())
if len(f.Sort) > 0 {
fields := make([]string, len(f.Sort))
for i, s := range f.Sort {
if s.Descending {
fields[i] = "-"
}
fields[i] += s.Field
}
val.WriteString(fmt.Sprintf(" sort=%s", strings.Join(fields, ",")))
}
if f.Skip > 0 {
val.WriteString(fmt.Sprintf(" skip=%d", f.Skip))
}
if f.Limit > 0 {
val.WriteString(fmt.Sprintf(" limit=%d", f.Limit))
}
if f.Count {
val.WriteString(" count=true")
}
return val.String()
}
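// For orientation, a sketch of what String() yields for a finalized filter
// with illustrative field names:
//
//	( author == 'bob' ) && ( created > 0 ) sort=-created limit=10
//
// i.e. children are each parenthesized and joined with the operator, then
// the sort/skip/limit/count qualifiers are appended when set.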
func (fb *filterBuilder) Fields() []string {
keys := make([]string, len(fb.queryFields))
i := 0
for k := range fb.queryFields {
keys[i] = k
i++
}
return keys
}
type filterBuilder struct {
ctx context.Context
queryFields queryFields
sort []*SortField
skip uint64
limit uint64
count bool
forceAscending bool
forceDescending bool
}
type baseFilter struct {
fb *filterBuilder
children []Filter
op FilterOp
field string
value interface{}
}
func (f *baseFilter) Builder() FilterBuilder {
return f.fb
}
func (f *baseFilter) Finalize() (fi *FilterInfo, err error) {
var children []*FilterInfo
var value FieldSerialization
var values []FieldSerialization
switch f.op {
case FilterOpAnd, FilterOpOr:
children = make([]*FilterInfo, len(f.children))
for i, c := range f.children {
if children[i], err = c.Finalize(); err != nil {
return nil, err
}
}
case FilterOpIn, FilterOpNotIn:
fValues := f.value.([]driver.Value)
values = make([]FieldSerialization, len(fValues))
name := strings.ToLower(f.field)
field, ok := f.fb.queryFields[name]
if !ok {
return nil, i18n.NewError(f.fb.ctx, i18n.MsgInvalidFilterField, name)
}
for i, fv := range fValues {
values[i] = field.getSerialization()
if err = values[i].Scan(fv); err != nil {
return nil, i18n.WrapError(f.fb.ctx, err, i18n.MsgInvalidValueForFilterField, name)
}
}
default:
name := strings.ToLower(f.field)
field, ok := f.fb.queryFields[name]
if !ok {
return nil, i18n.NewError(f.fb.ctx, i18n.MsgInvalidFilterField, name)
}
value = field.getSerialization()
if err = value.Scan(f.value); err != nil {
return nil, i18n.WrapError(f.fb.ctx, err, i18n.MsgInvalidValueForFilterField, name)
}
}
if f.fb.forceDescending {
for _, sf := range f.fb.sort {
sf.Descending = true
}
} else if f.fb.forceAscending {
for _, sf := range f.fb.sort {
sf.Descending = false
}
}
return &FilterInfo{
Children: children,
Op: f.op,
Field: f.field,
Values: values,
Value: value,
Sort: f.fb.sort,
Skip: f.fb.skip,
Limit: f.fb.limit,
Count: f.fb.count,
}, nil
}
func (f *baseFilter) Sort(fields ...string) Filter {
for _, field := range fields {
descending := false
if strings.HasPrefix(field, "-") {
field = strings.TrimPrefix(field, "-")
descending = true
}
if _, ok := f.fb.queryFields[field]; ok {
f.fb.sort = append(f.fb.sort, &SortField{
Field: field,
Descending: descending,
})
}
}
return f
}
func (f *baseFilter) Skip(skip uint64) Filter {
f.fb.skip = skip
return f
}
func (f *baseFilter) Limit(limit uint64) Filter {
f.fb.limit = limit
return f
}
func (f *baseFilter) Count(c bool) Filter {
f.fb.count = c
return f
}
func (f *baseFilter) Ascending() Filter {
f.fb.forceAscending = true
return f
}
func (f *baseFilter) Descending() Filter {
f.fb.forceDescending = true
return f
}
type andFilter struct {
baseFilter
}
func (fb *andFilter) Condition(children ...Filter) MultiConditionFilter {
fb.children = append(fb.children, children...)
return fb
}
func (fb *filterBuilder) And(and ...Filter) AndFilter {
return &andFilter{
baseFilter: baseFilter{
fb: fb,
op: FilterOpAnd,
children: and,
},
}
}
type orFilter struct {
baseFilter
}
func (fb *orFilter) Condition(children ...Filter) MultiConditionFilter {
fb.children = append(fb.children, children...)
return fb
}
func (fb *filterBuilder) Or(or ...Filter) OrFilter {
return &orFilter{
baseFilter: baseFilter{
fb: fb,
op: FilterOpOr,
children: or,
},
}
}
func (fb *filterBuilder) Eq(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpEq, name, value)
}
func (fb *filterBuilder) Neq(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpNe, name, value)
}
func (fb *filterBuilder) In(name string, values []driver.Value) Filter {
return fb.fieldFilter(FilterOpIn, name, values)
}
func (fb *filterBuilder) NotIn(name string, values []driver.Value) Filter {
return fb.fieldFilter(FilterOpNotIn, name, values)
}
func (fb *filterBuilder) Lt(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpLt, name, value)
}
func (fb *filterBuilder) Gt(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpGt, name, value)
}
func (fb *filterBuilder) Gte(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpGte, name, value)
}
func (fb *filterBuilder) Lte(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpLte, name, value)
}
func (fb *filterBuilder) Contains(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpCont, name, value)
}
func (fb *filterBuilder) NotContains(name string, value driver.Value) Filter {
return fb.fieldFilter(FilterOpNotCont, name, value)
}
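// A usage sketch (same package). It assumes the Filter interface, defined
// elsewhere in this codebase, exposes the Sort/Skip/Limit/Finalize methods
// that baseFilter implements above; the field names are illustrative and
// must exist in the builder's queryFields.
func exampleQuery(fb FilterBuilder) (*FilterInfo, error) {
	// author == 'bob' && created > 0, newest first, first page of 10
	f := fb.And(
		fb.Eq("author", "bob"),
		fb.Gt("created", int64(0)),
	).Sort("-created").Skip(0).Limit(10)
	return f.Finalize()
}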
wdpost_dispute_test.go
{
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
	kit.Expensive(t)
	kit.QuietMiningLogs()
	blocktime := 2 * time.Millisecond
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var (
		client     kit.TestFullNode
		chainMiner kit.TestMiner
		evilMiner  kit.TestMiner
)
// First, we configure two miners. After sealing, we're going to turn off the first miner so
// it doesn't submit proofs.
//
// Then we're going to manually submit bad proofs.
opts := []kit.NodeOpt{kit.WithAllSubsystems()}
ens := kit.NewEnsemble(t, kit.MockProofs()).
FullNode(&client, opts...).
Miner(&chainMiner, &client, opts...).
Miner(&evilMiner, &client, append(opts, kit.PresealSectors(0))...).
Start()
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
// Mine with the _second_ node (the good one).
ens.InterconnectAll().BeginMining(blocktime, &chainMiner)
// Give the chain miner enough sectors to win every block.
chainMiner.PledgeSectors(ctx, 10, 0, nil)
// And the evil one 1 sector. No cookie for you.
evilMiner.PledgeSectors(ctx, 1, 0, nil)
// Let the evil miner's sectors gain power.
evilMinerAddr, err := evilMiner.ActorAddress(ctx)
require.NoError(t, err)
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
t.Logf("Running one proving period\n")
waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
t.Logf("End for head.Height > %d", waitUntil)
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr)
require.NoError(t, err)
// make sure it has gained power.
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
//stm: @MINER_SECTOR_LIST_001
evilSectors, err := evilMiner.SectorsListNonGenesis(ctx)
require.NoError(t, err)
evilSectorNo := evilSectors[0] // only one.
//stm: @CHAIN_STATE_SECTOR_PARTITION_001
evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
require.NoError(t, err)
t.Log("evil miner stopping")
// Now stop the evil miner, and start manually submitting bad proofs.
require.NoError(t, evilMiner.Stop(ctx))
t.Log("evil miner stopped")
// Wait until we need to prove our sector.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.Open > 1 {
break
}
build.Clock.Sleep(blocktime)
}
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.NoError(t, err, "evil proof not accepted")
// Wait until after the proving period.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index != evilSectorLoc.Deadline {
break
}
build.Clock.Sleep(blocktime)
}
t.Log("accepted evil proof")
//stm: @CHAIN_STATE_MINER_POWER_001
// Make sure the evil node didn't lose any power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
// OBJECTION! The good miner files a DISPUTE!!!!
{
params := &minertypes.DisputeWindowedPoStParams{
Deadline: evilSectorLoc.Deadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: builtin.MethodsMiner.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
t.Log("waiting dispute")
//stm: @CHAIN_STATE_WAIT_MSG_001
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
}
//stm: @CHAIN_STATE_MINER_POWER_001
// Objection SUSTAINED!
// Make sure the evil node lost power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
	require.True(t, p.MinerPower.RawBytePower.IsZero())
	// Now we begin the redemption arc.
// First, recover the sector.
{
//stm: @CHAIN_STATE_MINER_INFO_001
minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
params := &minertypes.DeclareFaultsRecoveredParams{
Recoveries: []minertypes.RecoveryDeclaration{{
Deadline: evilSectorLoc.Deadline,
Partition: evilSectorLoc.Partition,
Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
}},
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: builtin.MethodsMiner.DeclareFaultsRecovered,
Params: enc,
Value: types.FromFil(30), // repay debt.
From: minerInfo.Owner,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
}
// Then wait for the deadline.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.Open > 1 {
break
}
build.Clock.Sleep(blocktime)
}
// Now try to be evil again
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.Error(t, err)
require.Contains(t, err.Error(), "invalid post was submitted")
require.Contains(t, err.Error(), "(RetCode=16)")
// It didn't work because we're recovering.
}
func TestWindowPostDisputeFails(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_STATE_MINER_GET_DEADLINES_001
kit.Expensive(t)
kit.QuietMiningLogs()
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
miner.PledgeSectors(ctx, 10, 0, nil)
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
t.Log("Running one proving period")
waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
t.Logf("End for head.Height > %d", waitUntil)
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10))
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// make sure it has gained power.
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
// Wait until a proof has been submitted.
var targetDeadline uint64
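// The labeled loop lets the inner range over deadlines break out of both
// loops at once, as soon as any deadline shows a PoSt submission.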
waitForProof:
for {
deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for dlIdx, dl := range deadlines {
empty, err := dl.PostSubmissions.IsEmpty()
require.NoError(t, err)
if !empty {
targetDeadline = uint64(dlIdx)
break waitForProof
}
}
build.Clock.Sleep(blocktime)
}
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// wait until the deadline finishes.
if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
break
}
build.Clock.Sleep(blocktime)
}
// Try to object to the proof. This should fail.
{
params := &minertypes.DisputeWindowedPoStParams{
Deadline: targetDeadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: maddr,
Method: builtin.MethodsMiner.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
_, err := client.MpoolPushMessage(ctx, msg, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "failed to dispute valid post")
require.Contains(t, err.Error(), "(RetCode=16)")
}
}
func submitBadProof(
ctx context.Context,
client api.FullNode, owner address.Address, maddr address.Address,
di *dline.Info, dlIdx, partIdx uint64,
) error {
head, err := client.ChainHead(ctx)
if err != nil {
return err
}
//stm: @CHAIN_STATE_MINER_INFO_001
minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
if err != nil {
return err
}
//stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001
commEpoch := di.Open
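// The window PoSt commits to chain randomness drawn at the deadline's open
// epoch, under the PoStChainCommit domain separation tag.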
commRand, err := client.StateGetRandomnessFromTickets(
ctx, crypto.DomainSeparationTag_PoStChainCommit,
commEpoch, nil, head.Key(),
)
if err != nil {
|
return err
|
random_line_split
|
|
wdpost_dispute_test.go
|
_SECTOR_LIST_001
evilSectors, err := evilMiner.SectorsListNonGenesis(ctx)
require.NoError(t, err)
evilSectorNo := evilSectors[0] // only one.
//stm: @CHAIN_STATE_SECTOR_PARTITION_001
evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
require.NoError(t, err)
t.Log("evil miner stopping")
// Now stop the evil miner, and start manually submitting bad proofs.
require.NoError(t, evilMiner.Stop(ctx))
t.Log("evil miner stopped")
// Wait until we need to prove our sector.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.Open > 1 {
break
}
build.Clock.Sleep(blocktime)
}
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.NoError(t, err, "evil proof not accepted")
// Wait until after the proving period.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index != evilSectorLoc.Deadline {
break
}
build.Clock.Sleep(blocktime)
}
t.Log("accepted evil proof")
//stm: @CHAIN_STATE_MINER_POWER_001
// Make sure the evil node didn't lose any power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
// OBJECTION! The good miner files a DISPUTE!!!!
{
params := &minertypes.DisputeWindowedPoStParams{
Deadline: evilSectorLoc.Deadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: builtin.MethodsMiner.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
t.Log("waiting dispute")
//stm: @CHAIN_STATE_WAIT_MSG_001
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
}
//stm: @CHAIN_STATE_MINER_POWER_001
// Objection SUSTAINED!
// Make sure the evil node lost power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.True(t, p.MinerPower.RawBytePower.IsZero())
// Now we begin the redemption arc.
require.True(t, p.MinerPower.RawBytePower.IsZero())
// First, recover the sector.
{
//stm: @CHAIN_STATE_MINER_INFO_001
minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
params := &minertypes.DeclareFaultsRecoveredParams{
Recoveries: []minertypes.RecoveryDeclaration{{
Deadline: evilSectorLoc.Deadline,
Partition: evilSectorLoc.Partition,
Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
}},
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: builtin.MethodsMiner.DeclareFaultsRecovered,
Params: enc,
Value: types.FromFil(30), // repay debt.
From: minerInfo.Owner,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
}
// Then wait for the deadline.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.Open > 1 {
break
}
build.Clock.Sleep(blocktime)
}
// Now try to be evil again
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.Error(t, err)
require.Contains(t, err.Error(), "invalid post was submitted")
require.Contains(t, err.Error(), "(RetCode=16)")
// It didn't work because we're recovering.
}
func TestWindowPostDisputeFails(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_STATE_MINER_GET_DEADLINES_001
kit.Expensive(t)
kit.QuietMiningLogs()
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
miner.PledgeSectors(ctx, 10, 0, nil)
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
t.Log("Running one proving period")
waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
t.Logf("End for head.Height > %d", waitUntil)
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10))
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// make sure it has gained power.
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
// Wait until a proof has been submitted.
var targetDeadline uint64
waitForProof:
for {
deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for dlIdx, dl := range deadlines {
empty, err := dl.PostSubmissions.IsEmpty()
require.NoError(t, err)
if !empty {
targetDeadline = uint64(dlIdx)
break waitForProof
}
}
build.Clock.Sleep(blocktime)
}
for
|
{
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// wait until the deadline finishes.
if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
break
}
build.Clock.Sleep(blocktime)
}
|
conditional_block
|
|
wdpost_dispute_test.go
|
TSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.Open > 1 {
break
}
build.Clock.Sleep(blocktime)
}
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.NoError(t, err, "evil proof not accepted")
// Wait until after the proving period.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index != evilSectorLoc.Deadline {
break
}
build.Clock.Sleep(blocktime)
}
t.Log("accepted evil proof")
//stm: @CHAIN_STATE_MINER_POWER_001
// Make sure the evil node didn't lose any power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
// OBJECTION! The good miner files a DISPUTE!!!!
{
params := &minertypes.DisputeWindowedPoStParams{
Deadline: evilSectorLoc.Deadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: builtin.MethodsMiner.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
t.Log("waiting dispute")
//stm: @CHAIN_STATE_WAIT_MSG_001
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
}
//stm: @CHAIN_STATE_MINER_POWER_001
// Objection SUSTAINED!
// Make sure the evil node lost power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.True(t, p.MinerPower.RawBytePower.IsZero())
// Now we begin the redemption arc.
require.True(t, p.MinerPower.RawBytePower.IsZero())
// First, recover the sector.
{
//stm: @CHAIN_STATE_MINER_INFO_001
minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
params := &minertypes.DeclareFaultsRecoveredParams{
Recoveries: []minertypes.RecoveryDeclaration{{
Deadline: evilSectorLoc.Deadline,
Partition: evilSectorLoc.Partition,
Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
}},
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: builtin.MethodsMiner.DeclareFaultsRecovered,
Params: enc,
Value: types.FromFil(30), // repay debt.
From: minerInfo.Owner,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
}
// Then wait for the deadline.
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.Open > 1 {
break
}
build.Clock.Sleep(blocktime)
}
// Now try to be evil again
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.Error(t, err)
require.Contains(t, err.Error(), "invalid post was submitted")
require.Contains(t, err.Error(), "(RetCode=16)")
// It didn't work because we're recovering.
}
func TestWindowPostDisputeFails(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_STATE_MINER_GET_DEADLINES_001
kit.Expensive(t)
kit.QuietMiningLogs()
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
miner.PledgeSectors(ctx, 10, 0, nil)
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
t.Log("Running one proving period")
waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
t.Logf("End for head.Height > %d", waitUntil)
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10))
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// make sure it has gained power.
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
// Wait until a proof has been submitted.
var targetDeadline uint64
waitForProof:
for {
deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for dlIdx, dl := range deadlines {
empty, err := dl.PostSubmissions.IsEmpty()
require.NoError(t, err)
if !empty {
targetDeadline = uint64(dlIdx)
break waitForProof
}
}
build.Clock.Sleep(blocktime)
}
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// wait until the deadline finishes.
if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
break
}
build.Clock.Sleep(blocktime)
}
// Try to object to the proof. This should fail.
{
params := &minertypes.DisputeWindowedPoStParams{
Deadline: targetDeadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: maddr,
Method: builtin.MethodsMiner.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
_, err := client.MpoolPushMessage(ctx, msg, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "failed to dispute valid post")
require.Contains(t, err.Error(), "(RetCode=16)")
}
}
func
|
submitBadProof
|
identifier_name
|
|
deferred_call.rs
|
interrupts,
//! and lets a kernel component return up the function call stack to the scheduler,
//! to be called again automatically.
//!
//! Usage
//! -----
//!
//! The `DEFCALLS` array size determines how many
//! [DeferredCall](crate::deferred_call::DeferredCall)s
//! may be registered. By default this is set to 32.
//! To support more deferred calls, this file would need to be modified
//! to use a larger variable for BITMASK (e.g. BITMASK could be a u64
//! and the array size increased to 64).
//! If more than 32 deferred calls are created, the kernel will panic
//! at the beginning of the kernel loop.
//!
//! ```rust
//! use kernel::deferred_call::{DeferredCall, DeferredCallClient};
//! use kernel::static_init;
//!
//! struct SomeCapsule {
//! deferred_call: DeferredCall
//! }
//! impl SomeCapsule {
//! pub fn new() -> Self {
//! Self {
//! deferred_call: DeferredCall::new(),
//! }
//! }
//! }
//! impl DeferredCallClient for SomeCapsule {
//! fn handle_deferred_call(&self) {
//! // Your action here
//! }
//!
//! fn register(&'static self) {
//! self.deferred_call.register(self);
//! }
//! }
//!
//! // main.rs or your component must register the capsule with
//! // its deferred call.
//! // This should look like:
//! let some_capsule = unsafe { static_init!(SomeCapsule, SomeCapsule::new()) };
//! some_capsule.register();
//! ```
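//!
//! As a sketch of that widening (an editorial assumption, not shipped code),
//! a 64-slot configuration would look roughly like:
//!
//! ```rust,ignore
//! // Hypothetical 64-slot variant: widen the mask and the registry together.
//! static mut BITMASK: Cell<u64> = Cell::new(0);
//! static mut DEFCALLS: [OptionalCell<DynDefCallRef<'static>>; 64] = [EMPTY; 64];
//! ```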
use crate::utilities::cells::OptionalCell;
use core::cell::Cell;
use core::marker::Copy;
use core::marker::PhantomData;
// This trait is not intended to be used as a trait object;
// e.g. you should not create a `&dyn DeferredCallClient`.
// The `Sized` supertrait prevents this.
/// This trait should be implemented by clients which need to
/// receive DeferredCalls
pub trait DeferredCallClient: Sized {
fn handle_deferred_call(&self);
fn register(&'static self); // This function should be implemented as
// `self.deferred_call.register(&self);`
}
/// This struct serves as a lightweight alternative to the use of trait objects
/// (e.g. `&dyn DeferredCall`). Using a trait object, will include a 20 byte vtable
/// per instance, but this alternative stores only the data and function pointers,
/// 8 bytes per instance.
#[derive(Copy, Clone)]
struct DynDefCallRef<'a> {
data: *const (),
callback: fn(*const ()),
_lifetime: PhantomData<&'a ()>,
}
impl<'a> DynDefCallRef<'a> {
// SAFETY: We define the callback function as being a closure which casts
// the passed pointer to be the appropriate type (a pointer to `T`)
// and then calls `T::handle_deferred_call()`. In practice, the closure
// is optimized away by LLVM when the ABI of the closure and the underlying function
// are identical, making this zero-cost, but saving us from having to trust
// that `fn(*const ())` and `fn handle_deferred_call(&self)` will always have the same calling
// convention for any type.
fn new<T: DeferredCallClient>(x: &'a T) -> Self {
Self {
data: x as *const _ as *const (),
callback: |p| unsafe { T::handle_deferred_call(&*p.cast()) },
_lifetime: PhantomData,
}
}
}
impl DynDefCallRef<'_> {
// more efficient pass by `self` if we don't have to implement `DeferredCallClient` directly
fn handle_deferred_call(self) {
(self.callback)(self.data)
}
}
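// Editorial sketch (not part of the original file): a minimal check of the
// erasure layout described above. `Probe` is a made-up client type used only
// to exercise `DynDefCallRef::new`; the assertion confirms the two-pointer
// (data + callback) representation, with no vtable.
#[cfg(test)]
mod dyn_ref_sketch {
    use super::*;

    struct Probe;
    impl DeferredCallClient for Probe {
        fn handle_deferred_call(&self) { /* deferred work would run here */ }
        fn register(&'static self) { /* not needed for this sketch */ }
    }

    #[test]
    fn two_pointers_per_instance() {
        // One data pointer plus one function pointer per instance.
        assert_eq!(
            core::mem::size_of::<DynDefCallRef<'static>>(),
            2 * core::mem::size_of::<usize>()
        );
        let probe = Probe;
        // Dispatches through the stored fn pointer to Probe::handle_deferred_call.
        DynDefCallRef::new(&probe).handle_deferred_call();
    }
}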
// The below constant lets us get around Rust not allowing short array initialization
// for non-default types
const EMPTY: OptionalCell<DynDefCallRef<'static>> = OptionalCell::empty();
// All 3 of the below global statics are accessed only in this file, and all accesses
// are via immutable references. Tock is single threaded, so each will only ever be
// accessed via an immutable reference from the single kernel thread.
// TODO: Once Tock decides on an approach to replace `static mut` with some sort of
// `SyncCell`, migrate all three of these to that approach
// (https://github.com/tock/tock/issues/1545)
/// Counter for the number of deferred calls that have been created; it is
/// used to ensure that no more than 32 deferred calls are created.
static mut CTR: Cell<usize> = Cell::new(0);
/// This bitmask tracks which of the up to 32 existing deferred calls have been scheduled.
/// Any bit set in the mask indicates that the deferred call whose `idx` field
/// equals the index of that bit has been scheduled but not yet serviced.
static mut BITMASK: Cell<u32> = Cell::new(0);
// This is a 256 byte array, but at least resides in .bss
/// An array that stores references to up to 32 `DeferredCall`s via the low-cost
/// `DynDefCallRef`.
static mut DEFCALLS: [OptionalCell<DynDefCallRef<'static>>; 32] = [EMPTY; 32];
pub struct DeferredCall {
idx: usize,
}
impl DeferredCall {
/// Creates a new deferred call with a unique ID.
pub fn new() -> Self
|
// To reduce monomorphization bloat, the non-generic portion of register is moved into this
// function without generic parameters.
#[inline(never)]
fn register_internal_non_generic(&self, handler: DynDefCallRef<'static>) {
// SAFETY: No accesses to DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let defcalls = unsafe { &DEFCALLS };
if self.idx >= defcalls.len() {
// This error will be caught by the scheduler at the beginning of the kernel loop,
// which is much better than panicking here, before the debug writer is setup.
// Also allows a single panic for creating too many deferred calls instead
// of NUM_DCS panics (this function is monomorphized).
return;
}
defcalls[self.idx].set(handler);
}
/// This function registers the passed client with this deferred call, such
/// that calls to `DeferredCall::set()` will schedule a callback on the
/// `handle_deferred_call()` method of the passed client.
pub fn register<DC: DeferredCallClient>(&self, client: &'static DC) {
let handler = DynDefCallRef::new(client);
self.register_internal_non_generic(handler);
}
/// Schedule a deferred callback on the client associated with this deferred call
pub fn set(&self) {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.set(bitmask.get() | (1 << self.idx));
}
/// Check if a deferred callback has been set and not yet serviced on this deferred call.
pub fn is_pending(&self) -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() & (1 << self.idx) == 1
}
/// Services and clears the next pending `DeferredCall`, returns which index
/// was serviced
pub fn service_next_pending() -> Option<usize> {
// SAFETY: No accesses to BITMASK/DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
let defcalls = unsafe { &DEFCALLS };
let val = bitmask.get();
if val == 0 {
None
} else {
let bit = val.trailing_zeros() as usize;
let new_val = val & !(1 << bit);
bitmask.set(new_val);
defcalls[bit].map(|dc| {
dc.handle_deferred_call();
bit
})
}
}
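// Editorial note: on a plain u32, e.g. val = 0b1010_0100 (bits 2, 5 and 7
// pending), `trailing_zeros()` yields 2 and `val & !(1 << 2)` leaves
// 0b1010_0000, so the lowest-indexed pending call is always serviced first.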
/// Returns true if any deferred calls are waiting to be serviced,
/// false otherwise.
pub fn has_tasks() -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() != 0
}
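// Editorial usage note (an assumption about the call site, not code from
// this file): a scheduler would typically drain pending work with
// `while DeferredCall::has_tasks() { DeferredCall::service_next_pending(); }`
// before deciding to sleep.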
/// This function should be called at the beginning of the kernel loop
/// to verify that deferred calls have been
|
{
// SAFETY: No accesses to CTR are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let ctr = unsafe { &CTR };
let idx = ctr.get();
ctr.set(idx + 1);
DeferredCall { idx }
}
|
identifier_body
|
deferred_call.rs
|
important interrupts,
//! and lets a kernel component return up the function call stack to the scheduler,
//! to be called again automatically.
//!
//! Usage
//! -----
//!
//! The `DEFCALLS` array size determines how many
//! [DeferredCall](crate::deferred_call::DeferredCall)s
//! may be registered. By default this is set to 32.
//! To support more deferred calls, this file would need to be modified
//! to use a larger variable for BITMASK (e.g. BITMASK could be a u64
//! and the array size increased to 64).
//! If more than 32 deferred calls are created, the kernel will panic
//! at the beginning of the kernel loop.
//!
//! ```rust
//! use kernel::deferred_call::{DeferredCall, DeferredCallClient};
//! use kernel::static_init;
//!
//! struct SomeCapsule {
//! deferred_call: DeferredCall
//! }
//! impl SomeCapsule {
//! pub fn new() -> Self {
//! Self {
//! deferred_call: DeferredCall::new(),
//! }
//! }
//! }
//! impl DeferredCallClient for SomeCapsule {
//! fn handle_deferred_call(&self) {
//! // Your action here
//! }
//!
//! fn register(&'static self) {
//! self.deferred_call.register(self);
//! }
//! }
//!
//! // main.rs or your component must register the capsule with
//! // its deferred call.
//! // This should look like:
//! let some_capsule = unsafe { static_init!(SomeCapsule, SomeCapsule::new()) };
//! some_capsule.register();
//! ```
use crate::utilities::cells::OptionalCell;
use core::cell::Cell;
use core::marker::Copy;
use core::marker::PhantomData;
// This trait is not intended to be used as a trait object;
// e.g. you should not create a `&dyn DeferredCallClient`.
// The `Sized` supertrait prevents this.
/// This trait should be implemented by clients which need to
/// receive DeferredCalls
pub trait DeferredCallClient: Sized {
fn handle_deferred_call(&self);
fn register(&'static self); // This function should be implemented as
// `self.deferred_call.register(&self);`
}
/// This struct serves as a lightweight alternative to the use of trait objects
/// (e.g. `&dyn DeferredCall`). Using a trait object, will include a 20 byte vtable
/// per instance, but this alternative stores only the data and function pointers,
/// 8 bytes per instance.
#[derive(Copy, Clone)]
struct DynDefCallRef<'a> {
data: *const (),
callback: fn(*const ()),
_lifetime: PhantomData<&'a ()>,
}
impl<'a> DynDefCallRef<'a> {
// SAFETY: We define the callback function as being a closure which casts
// the passed pointer to be the appropriate type (a pointer to `T`)
// and then calls `T::handle_deferred_call()`. In practice, the closure
// is optimized away by LLVM when the ABI of the closure and the underlying function
// are identical, making this zero-cost, but saving us from having to trust
// that `fn(*const ())` and `fn handle_deferred_call(&self)` will always have the same calling
// convention for any type.
fn new<T: DeferredCallClient>(x: &'a T) -> Self {
Self {
data: x as *const _ as *const (),
callback: |p| unsafe { T::handle_deferred_call(&*p.cast()) },
_lifetime: PhantomData,
}
}
}
impl DynDefCallRef<'_> {
// more efficient pass by `self` if we don't have to implement `DeferredCallClient` directly
fn handle_deferred_call(self) {
(self.callback)(self.data)
}
}
// The below constant lets us get around Rust not allowing short array initialization
// for non-default types
const EMPTY: OptionalCell<DynDefCallRef<'static>> = OptionalCell::empty();
// All 3 of the below global statics are accessed only in this file, and all accesses
// are via immutable references. Tock is single threaded, so each will only ever be
// accessed via an immutable reference from the single kernel thread.
// TODO: Once Tock decides on an approach to replace `static mut` with some sort of
// `SyncCell`, migrate all three of these to that approach
// (https://github.com/tock/tock/issues/1545)
/// Counter for the number of deferred calls that have been created; it is
/// used to ensure that no more than 32 deferred calls are created.
static mut CTR: Cell<usize> = Cell::new(0);
/// This bitmask tracks which of the up to 32 existing deferred calls have been scheduled.
/// Any bit set in the mask indicates that the deferred call whose `idx` field
/// equals the index of that bit has been scheduled but not yet serviced.
static mut BITMASK: Cell<u32> = Cell::new(0);
// This is a 256 byte array, but at least resides in .bss
/// An array that stores references to up to 32 `DeferredCall`s via the low-cost
/// `DynDefCallRef`.
static mut DEFCALLS: [OptionalCell<DynDefCallRef<'static>>; 32] = [EMPTY; 32];
pub struct DeferredCall {
idx: usize,
}
impl DeferredCall {
/// Creates a new deferred call with a unique ID.
pub fn new() -> Self {
// SAFETY: No accesses to CTR are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let ctr = unsafe { &CTR };
let idx = ctr.get();
ctr.set(idx + 1);
DeferredCall { idx }
}
// To reduce monomorphization bloat, the non-generic portion of register is moved into this
// function without generic parameters.
#[inline(never)]
fn register_internal_non_generic(&self, handler: DynDefCallRef<'static>) {
// SAFETY: No accesses to DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let defcalls = unsafe { &DEFCALLS };
if self.idx >= defcalls.len() {
// This error will be caught by the scheduler at the beginning of the kernel loop,
// which is much better than panicking here, before the debug writer is setup.
// Also allows a single panic for creating too many deferred calls instead
// of NUM_DCS panics (this function is monomorphized).
return;
}
defcalls[self.idx].set(handler);
}
/// This function registers the passed client with this deferred call, such
/// that calls to `DeferredCall::set()` will schedule a callback on the
/// `handle_deferred_call()` method of the passed client.
pub fn register<DC: DeferredCallClient>(&self, client: &'static DC) {
let handler = DynDefCallRef::new(client);
self.register_internal_non_generic(handler);
}
/// Schedule a deferred callback on the client associated with this deferred call
pub fn set(&self) {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.set(bitmask.get() | (1 << self.idx));
}
/// Check if a deferred callback has been set and not yet serviced on this deferred call.
pub fn is_pending(&self) -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() & (1 << self.idx) == 1
}
/// Services and clears the next pending `DeferredCall`, returns which index
/// was serviced
pub fn service_next_pending() -> Option<usize> {
// SAFETY: No accesses to BITMASK/DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
let defcalls = unsafe { &DEFCALLS };
let val = bitmask.get();
if val == 0 {
None
} else {
let bit = val.trailing_zeros() as usize;
let new_val = val & !(1 << bit);
bitmask.set(new_val);
defcalls[bit].map(|dc| {
dc.handle_deferred_call();
bit
})
}
}
/// Returns true if any deferred calls are waiting to be serviced,
/// false otherwise.
pub fn
|
() -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() != 0
}
/// This function should be called at the beginning of the kernel loop
/// to verify that deferred calls have been
|
has_tasks
|
identifier_name
|
deferred_call.rs
|
should look like:
//! let some_capsule = unsafe { static_init!(SomeCapsule, SomeCapsule::new()) };
//! some_capsule.register();
//! ```
use crate::utilities::cells::OptionalCell;
use core::cell::Cell;
use core::marker::Copy;
use core::marker::PhantomData;
// This trait is not intended to be used as a trait object;
// e.g. you should not create a `&dyn DeferredCallClient`.
// The `Sized` supertrait prevents this.
/// This trait should be implemented by clients which need to
/// receive DeferredCalls
pub trait DeferredCallClient: Sized {
fn handle_deferred_call(&self);
fn register(&'static self); // This function should be implemented as
// `self.deferred_call.register(&self);`
}
/// This struct serves as a lightweight alternative to the use of trait objects
/// (e.g. `&dyn DeferredCall`). Using a trait object, will include a 20 byte vtable
/// per instance, but this alternative stores only the data and function pointers,
/// 8 bytes per instance.
#[derive(Copy, Clone)]
struct DynDefCallRef<'a> {
data: *const (),
callback: fn(*const ()),
_lifetime: PhantomData<&'a ()>,
}
impl<'a> DynDefCallRef<'a> {
// SAFETY: We define the callback function as being a closure which casts
// the passed pointer to be the appropriate type (a pointer to `T`)
// and then calls `T::handle_deferred_call()`. In practice, the closure
// is optimized away by LLVM when the ABI of the closure and the underlying function
// are identical, making this zero-cost, but saving us from having to trust
// that `fn(*const ())` and `fn handle_deferred_call(&self)` will always have the same calling
// convention for any type.
fn new<T: DeferredCallClient>(x: &'a T) -> Self {
Self {
data: x as *const _ as *const (),
callback: |p| unsafe { T::handle_deferred_call(&*p.cast()) },
_lifetime: PhantomData,
}
}
}
impl DynDefCallRef<'_> {
// more efficient pass by `self` if we don't have to implement `DeferredCallClient` directly
fn handle_deferred_call(self) {
(self.callback)(self.data)
}
}
// The below constant lets us get around Rust not allowing short array initialization
// for non-default types
const EMPTY: OptionalCell<DynDefCallRef<'static>> = OptionalCell::empty();
// All 3 of the below global statics are accessed only in this file, and all accesses
// are via immutable references. Tock is single threaded, so each will only ever be
// accessed via an immutable reference from the single kernel thread.
// TODO: Once Tock decides on an approach to replace `static mut` with some sort of
// `SyncCell`, migrate all three of these to that approach
// (https://github.com/tock/tock/issues/1545)
/// Counter for the number of deferred calls that have been created; it is
/// used to ensure that no more than 32 deferred calls are created.
static mut CTR: Cell<usize> = Cell::new(0);
/// This bitmask tracks which of the up to 32 existing deferred calls have been scheduled.
/// Any bit set in the mask indicates that the deferred call whose `idx` field
/// equals the index of that bit has been scheduled but not yet serviced.
static mut BITMASK: Cell<u32> = Cell::new(0);
// This is a 256 byte array, but at least resides in .bss
/// An array that stores references to up to 32 `DeferredCall`s via the low-cost
/// `DynDefCallRef`.
static mut DEFCALLS: [OptionalCell<DynDefCallRef<'static>>; 32] = [EMPTY; 32];
pub struct DeferredCall {
idx: usize,
}
impl DeferredCall {
/// Creates a new deferred call with a unique ID.
pub fn new() -> Self {
// SAFETY: No accesses to CTR are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let ctr = unsafe { &CTR };
let idx = ctr.get();
ctr.set(idx + 1);
DeferredCall { idx }
}
// To reduce monomorphization bloat, the non-generic portion of register is moved into this
// function without generic parameters.
#[inline(never)]
fn register_internal_non_generic(&self, handler: DynDefCallRef<'static>) {
// SAFETY: No accesses to DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let defcalls = unsafe { &DEFCALLS };
if self.idx >= defcalls.len() {
// This error will be caught by the scheduler at the beginning of the kernel loop,
// which is much better than panicking here, before the debug writer is setup.
// Also allows a single panic for creating too many deferred calls instead
// of NUM_DCS panics (this function is monomorphized).
return;
}
defcalls[self.idx].set(handler);
}
/// This function registers the passed client with this deferred call, such
/// that calls to `DeferredCall::set()` will schedule a callback on the
/// `handle_deferred_call()` method of the passed client.
pub fn register<DC: DeferredCallClient>(&self, client: &'static DC) {
let handler = DynDefCallRef::new(client);
self.register_internal_non_generic(handler);
}
/// Schedule a deferred callback on the client associated with this deferred call
pub fn set(&self) {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.set(bitmask.get() | (1 << self.idx));
}
/// Check if a deferred callback has been set and not yet serviced on this deferred call.
pub fn is_pending(&self) -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() & (1 << self.idx) == 1
}
/// Services and clears the next pending `DeferredCall`, returns which index
/// was serviced
pub fn service_next_pending() -> Option<usize> {
// SAFETY: No accesses to BITMASK/DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
let defcalls = unsafe { &DEFCALLS };
let val = bitmask.get();
if val == 0 {
None
} else {
let bit = val.trailing_zeros() as usize;
let new_val = val & !(1 << bit);
bitmask.set(new_val);
defcalls[bit].map(|dc| {
dc.handle_deferred_call();
bit
})
}
}
/// Returns true if any deferred calls are waiting to be serviced,
/// false otherwise.
pub fn has_tasks() -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() != 0
}
/// This function should be called at the beginning of the kernel loop
/// to verify that deferred calls have been correctly initialized. This function
/// verifies two things:
/// 1. That <= `DEFCALLS.len()` deferred calls have been created, which is the
/// maximum this interface supports
/// 2. That exactly as many deferred calls were registered as were created, which helps to
/// catch bugs if board maintainers forget to call `register()` on a created `DeferredCall`.
/// Neither of these checks are necessary for soundness, but they are necessary for confirming
/// that DeferredCalls will actually be delivered as expected. This function costs about 300
/// bytes, so you can remove it if you are confident your setup will not exceed 32 deferred
/// calls, and that all of your components register their deferred calls.
pub fn verify_setup() {
// SAFETY: No accesses to CTR/DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let ctr = unsafe { &CTR };
let defcalls = unsafe { &DEFCALLS };
let num_deferred_calls = ctr.get();
if num_deferred_calls >= defcalls.len()
|| defcalls.iter().filter(|opt| opt.is_some()).count() != num_deferred_calls
|
{
panic!(
"ERROR: > 32 deferred calls, or a component forgot to register a deferred call."
);
}
|
conditional_block
|
|
deferred_call.rs
|
important interrupts,
//! and lets a kernel component return up the function call stack to the scheduler,
//! to be called again automatically.
//!
//! Usage
//! -----
//!
//! The `DEFCALLS` array size determines how many
//! [DeferredCall](crate::deferred_call::DeferredCall)s
//! may be registered. By default this is set to 32.
//! To support more deferred calls, this file would need to be modified
//! to use a larger variable for BITMASK (e.g. BITMASK could be a u64
//! and the array size increased to 64).
//! If more than 32 deferred calls are created, the kernel will panic
//! at the beginning of the kernel loop.
//!
//! ```rust
//! use kernel::deferred_call::{DeferredCall, DeferredCallClient};
//! use kernel::static_init;
//!
//! struct SomeCapsule {
//! deferred_call: DeferredCall
//! }
//! impl SomeCapsule {
//! pub fn new() -> Self {
//! Self {
//! deferred_call: DeferredCall::new(),
//! }
//! }
//! }
//! impl DeferredCallClient for SomeCapsule {
//! fn handle_deferred_call(&self) {
//! // Your action here
//! }
//!
//! fn register(&'static self) {
//! self.deferred_call.register(self);
//! }
//! }
//!
//! // main.rs or your component must register the capsule with
//! // its deferred call.
//! // This should look like:
//! let some_capsule = unsafe { static_init!(SomeCapsule, SomeCapsule::new()) };
//! some_capsule.register();
//! ```
use crate::utilities::cells::OptionalCell;
use core::cell::Cell;
use core::marker::Copy;
use core::marker::PhantomData;
// This trait is not intended to be used as a trait object;
// e.g. you should not create a `&dyn DeferredCallClient`.
// The `Sized` supertrait prevents this.
/// This trait should be implemented by clients which need to
/// receive DeferredCalls
pub trait DeferredCallClient: Sized {
fn handle_deferred_call(&self);
fn register(&'static self); // This function should be implemented as
// `self.deferred_call.register(&self);`
}
/// This struct serves as a lightweight alternative to the use of trait objects
/// (e.g. `&dyn DeferredCall`). Using a trait object, will include a 20 byte vtable
/// per instance, but this alternative stores only the data and function pointers,
/// 8 bytes per instance.
#[derive(Copy, Clone)]
struct DynDefCallRef<'a> {
data: *const (),
callback: fn(*const ()),
_lifetime: PhantomData<&'a ()>,
}
impl<'a> DynDefCallRef<'a> {
// SAFETY: We define the callback function as being a closure which casts
// the passed pointer to be the appropriate type (a pointer to `T`)
// and then calls `T::handle_deferred_call()`. In practice, the closure
// is optimized away by LLVM when the ABI of the closure and the underlying function
// are identical, making this zero-cost, but saving us from having to trust
// that `fn(*const ())` and `fn handle_deferred_call(&self)` will always have the same calling
// convention for any type.
fn new<T: DeferredCallClient>(x: &'a T) -> Self {
Self {
data: x as *const _ as *const (),
callback: |p| unsafe { T::handle_deferred_call(&*p.cast()) },
_lifetime: PhantomData,
}
}
}
impl DynDefCallRef<'_> {
// more efficient pass by `self` if we don't have to implement `DeferredCallClient` directly
fn handle_deferred_call(self) {
(self.callback)(self.data)
}
}
// The below constant lets us get around Rust not allowing short array initialization
// for non-default types
const EMPTY: OptionalCell<DynDefCallRef<'static>> = OptionalCell::empty();
// All 3 of the below global statics are accessed only in this file, and all accesses
// are via immutable references. Tock is single threaded, so each will only ever be
// accessed via an immutable reference from the single kernel thread.
// TODO: Once Tock decides on an approach to replace `static mut` with some sort of
// `SyncCell`, migrate all three of these to that approach
// (https://github.com/tock/tock/issues/1545)
/// Counter for the number of deferred calls that have been created; it is
/// used to ensure that no more than 32 deferred calls are created.
static mut CTR: Cell<usize> = Cell::new(0);
/// This bitmask tracks which of the up to 32 existing deferred calls have been scheduled.
/// Any bit set in the mask indicates that the deferred call whose `idx` field
/// equals the index of that bit has been scheduled but not yet serviced.
static mut BITMASK: Cell<u32> = Cell::new(0);
// This is a 256 byte array, but at least resides in .bss
/// An array that stores references to up to 32 `DeferredCall`s via the low-cost
/// `DynDefCallRef`.
static mut DEFCALLS: [OptionalCell<DynDefCallRef<'static>>; 32] = [EMPTY; 32];
pub struct DeferredCall {
idx: usize,
}
impl DeferredCall {
/// Creates a new deferred call with a unique ID.
pub fn new() -> Self {
|
DeferredCall { idx }
}
// To reduce monomorphization bloat, the non-generic portion of register is moved into this
// function without generic parameters.
#[inline(never)]
fn register_internal_non_generic(&self, handler: DynDefCallRef<'static>) {
// SAFETY: No accesses to DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let defcalls = unsafe { &DEFCALLS };
if self.idx >= defcalls.len() {
// This error will be caught by the scheduler at the beginning of the kernel loop,
// which is much better than panicking here, before the debug writer is setup.
// Also allows a single panic for creating too many deferred calls instead
// of NUM_DCS panics (this function is monomorphized).
return;
}
defcalls[self.idx].set(handler);
}
/// This function registers the passed client with this deferred call, such
/// that calls to `DeferredCall::set()` will schedule a callback on the
/// `handle_deferred_call()` method of the passed client.
pub fn register<DC: DeferredCallClient>(&self, client: &'static DC) {
let handler = DynDefCallRef::new(client);
self.register_internal_non_generic(handler);
}
/// Schedule a deferred callback on the client associated with this deferred call
pub fn set(&self) {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.set(bitmask.get() | (1 << self.idx));
}
/// Check if a deferred callback has been set and not yet serviced on this deferred call.
pub fn is_pending(&self) -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() & (1 << self.idx) != 0
}
/// Services and clears the next pending `DeferredCall`, returns which index
/// was serviced
pub fn service_next_pending() -> Option<usize> {
// SAFETY: No accesses to BITMASK/DEFCALLS are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
let defcalls = unsafe { &DEFCALLS };
let val = bitmask.get();
if val == 0 {
None
} else {
let bit = val.trailing_zeros() as usize;
let new_val = val & !(1 << bit);
bitmask.set(new_val);
defcalls[bit].map(|dc| {
dc.handle_deferred_call();
bit
})
}
}
/// Returns true if any deferred calls are waiting to be serviced,
/// false otherwise.
pub fn has_tasks() -> bool {
// SAFETY: No accesses to BITMASK are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let bitmask = unsafe { &BITMASK };
bitmask.get() != 0
}
/// This function should be called at the beginning of the kernel loop
/// to verify that deferred calls have been correctly
|
// SAFETY: No accesses to CTR are via an &mut, and the Tock kernel is
// single-threaded so all accesses will occur from this thread.
let ctr = unsafe { &CTR };
let idx = ctr.get();
ctr.set(idx + 1);
|
random_line_split
|
cut_plane.py
|
_1 maximum on cropping")
if crop_x2:
if crop_x2[0] < min(df_sub[x2]):
raise Exception("Invalid x_2 minimum on cropping")
if crop_x2[1] > max(df_sub[x2]):
raise Exception("Invalid x_2 maximum on cropping")
# If cropping x1 do it now
# if crop_x1:
# df_sub = df_sub[(df_sub[x1] >= crop_x1[0]) & (df_sub[x1] <= crop_x1[1])]
# if crop_x2:
# df_sub = df_sub[(df_sub[x2] >= crop_x2[0]) & (df_sub[x2] <= crop_x2[1])]
# Store the relevant values
self.x1_in = df_sub[x1]
self.x2_in = df_sub[x2]
self.u_in = df_sub['u']
self.v_in = df_sub['v']
self.w_in = df_sub['w']
# Save the desired resolution
self.res = resolution
# Grid the data; use the cropping bounds if provided
if crop_x1:
# self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
self.x1_lin = np.linspace(crop_x1[0], crop_x1[1], resolution)
else:
self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
if crop_x2:
# self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
self.x2_lin = np.linspace(crop_x2[0], crop_x2[1], resolution)
else:
self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
# Mesh and interpolate u, v and w
# print(self.x1_lin)
# print(sorted(self.x1_in))
self.x1_mesh, self.x2_mesh = np.meshgrid(self.x1_lin, self.x2_lin)
self.u_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.u_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.v_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.v_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.w_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.w_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
# Save flat vectors
self.x1_flat = self.x1_mesh.flatten()
self.x2_flat = self.x2_mesh.flatten()
# Save u-cubed
self.u_cubed = self.u_mesh ** 3
# Save re-centering points for visualization
self.x1_center = x1_center
self.x2_center = x2_center
# If inverting, invert x1, and x1_center
if invert_x1:
self.x1_mesh = self.x1_mesh * -1
self.x1_lin = self.x1_lin * -1
self.x1_flat = self.x1_flat * -1
self.x1_center = self.x1_center * -1
self.v_mesh = self.v_mesh * -1
# Set the diameter which will be used in visualization
# Analysis in D or in meters?
if D is None:
self.plot_in_D = False
self.D = 1.
else:
self.plot_in_D = True
self.D = D
def subtract(self,ctSub):
""" Subtract another cut through from self (assume matching resolution) and return the difference
"""
# First confirm eligible for subtraction
if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):
raise Exception("Can't subtract because not meshed the same")
ctResult = copy.deepcopy(ctSub)  # copy the instance
# Original method
# ctResult.u = self.u - ctSub.u
# ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')
# New method
ctResult.u_mesh = self.u_mesh - ctSub.u_mesh
ctResult.v_mesh = self.v_mesh - ctSub.v_mesh
ctResult.w_mesh = self.w_mesh - ctSub.w_mesh
ctResult.u_cubed = self.u_cubed - ctSub.u_cubed
return ctResult
def visualize(self,ax=None,minSpeed=None,maxSpeed=None):
""" Visualize the scan
Args:
ax: axes for plotting; if None, a new figure and axes are created
minSpeed, maxSpeed: color limits for plotting; if not provided, use the data min/max
"""
if not ax:
fig, ax = plt.subplots()
if minSpeed is None:
minSpeed = self.u_mesh.min()
if maxSpeed is None:
maxSpeed = self.u_mesh.max()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
# Plot the cut-through
# print((self.x1_lin-self.x1_center) /self.D)
# print(minSpeed,maxSpeed)
im = ax.pcolormesh((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center) /self.D, Zm, cmap='coolwarm',vmin=minSpeed,vmax=maxSpeed)
# Make equal axis
ax.set_aspect('equal')
return im
def lineContour(self,ax=None,levels=None,colors=None,**kwargs):
""" Visualize the scan as a simple contour
Args:
ax: axes for plotting; if None, a new figure and axes are created
levels, colors: optional contour levels and colors, passed through to ax.contour
"""
if not ax:
fig, ax = plt.subplots()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Plot the cut-through
if levels:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,**kwargs)
else:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,**kwargs)
# Invert the x-axis
# ax.invert_xaxis()
# Make equal axis
ax.set_aspect('equal')
# Define horizontal subclass
class HorPlane(_CutPlane):
def __init__(self, df_flow, z_value, resolution=100, x1_center=0.0,x2_center=0.0, D=None):
# Set up call super
super().__init__(df_flow, x1='x', x2='y', x3_value=z_value,resolution=resolution,x1_center=x1_center,x2_center=x2_center, D=D, invert_x1=False)
# Define cross plane subclass
class CrossPlane(_CutPlane):
def __init__(self, df_flow, x_value, y_center, z_center, D, resolution=100, crop_y=None,crop_z=None,invert_x1=True):
# Set up call super
super().__init__(df_flow, x1='y', x2='z', x3_value=x_value,resolution=resolution,x1_center=y_center,x2_center=z_center, D=D, invert_x1=invert_x1, crop_x1 = crop_y, crop_x2=crop_z)
def calculate_wind_speed(self,x1_loc,x2_loc,R):
# Make a distance column
distance = np.sqrt((self.x1_flat - x1_loc)**2 + (self.x2_flat - x2_loc)**2)
# Return the power-weighted mean wind speed (cube root of the mean of u**3)
return np.cbrt(np.mean(self.u_cubed[distance<R]))
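# Editorial note on calculate_wind_speed above: this is a power-weighted
# average -- it returns (mean of u_i**3 over points with d_i < R) ** (1/3),
# preserving the cubic dependence of wind power on speed, rather than a
# plain arithmetic mean of u.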
def get_profile(self,resolution=10):
x1_locs = np.linspace(min(self.x1_flat), max(self.x1_flat), resolution)
v_array = np.array([self.calculate_wind_speed(x1_loc,self.x2_center,self.D/2.) for x1_loc in x1_locs])
return ((x1_locs - self.x1_center)/self.D,v_array)
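# Editorial note: get_profile samples the rotor-averaged speed at `resolution`
# stations across the plane, each averaged over a rotor-radius (D/2) disc at
# hub height (x2_center), and returns distances normalized by D.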
def
|
paper_plot
|
identifier_name
|
|
cut_plane.py
|
# Assign the axis names
self.x1_name = x1
self.x2_name = x2
self.x3_name = [x3 for x3 in ['x','y','z'] if x3 not in [x1,x2]][0]
# Find the nearest value in 3rd dimension
search_values = np.array(sorted(df_flow[self.x3_name].unique()))
nearest_idx = (np.abs(search_values-x3_value)).argmin()
nearest_value = search_values[nearest_idx]
print('Nearest value in %s to %.2f is %.2f' % (self.x3_name, x3_value, nearest_value))
# Get a sub-frame of only this 3rd dimension value
df_sub = df_flow[df_flow[self.x3_name]==nearest_value]
# Make sure cropping is valid
if crop_x1:
if crop_x1[0] < min(df_sub[x1]):
raise Exception("Invalid x_1 minimum on cropping")
if crop_x1[1] > max(df_sub[x1]):
raise Exception("Invalid x_1 maximum on cropping")
if crop_x2:
if crop_x2[0] < min(df_sub[x2]):
raise Exception("Invalid x_2 minimum on cropping")
if crop_x2[1] > max(df_sub[x2]):
raise Exception("Invalid x_2 maximum on cropping")
# If cropping x1 do it now
# if crop_x1:
# df_sub = df_sub[(df_sub[x1] >= crop_x1[0]) & (df_sub[x1] <= crop_x1[1])]
# if crop_x2:
# df_sub = df_sub[(df_sub[x2] >= crop_x2[0]) & (df_sub[x2] <= crop_x2[1])]
# Store the relevant values
self.x1_in = df_sub[x1]
self.x2_in = df_sub[x2]
self.u_in = df_sub['u']
self.v_in = df_sub['v']
self.w_in = df_sub['w']
# Save the desired resolution
self.res = resolution
# Grid the data; use the cropping bounds if provided
if crop_x1:
# self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
self.x1_lin = np.linspace(crop_x1[0], crop_x1[1], resolution)
else:
self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
if crop_x2:
# self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
self.x2_lin = np.linspace(crop_x2[0], crop_x2[1], resolution)
else:
self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
# Mesh and interpolate u, v and w
# print(self.x1_lin)
# print(sorted(self.x1_in))
self.x1_mesh, self.x2_mesh = np.meshgrid(self.x1_lin, self.x2_lin)
self.u_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.u_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.v_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.v_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.w_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.w_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
# Save flat vectors
self.x1_flat = self.x1_mesh.flatten()
self.x2_flat = self.x2_mesh.flatten()
# Save u-cubed
self.u_cubed = self.u_mesh ** 3
# Save re-centering points for visualization
self.x1_center = x1_center
self.x2_center = x2_center
# If inverting, invert x1, and x1_center
if invert_x1:
self.x1_mesh = self.x1_mesh * -1
self.x1_lin = self.x1_lin * -1
self.x1_flat = self.x1_flat * -1
self.x1_center = self.x1_center * -1
self.v_mesh = self.v_mesh * -1
# Set the diameter, which will be used in visualization
# Analysis in D or meters?
if D is None:
self.plot_in_D = False
self.D = 1.
else:
self.plot_in_D = True
self.D = D
def subtract(self,ctSub):
""" Subtract another cut through from self (assume matching resolution) and return the difference
"""
# First confirm eligible for subtraction
if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):
raise Exception("Can't subtract because not meshed the same")
ctResult = copy.deepcopy(ctSub)# copy the class
# Original method
# ctResult.u = self.u - ctSub.u
# ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')
# New method
ctResult.u_mesh = self.u_mesh - ctSub.u_mesh
ctResult.v_mesh = self.v_mesh - ctSub.v_mesh
ctResult.w_mesh = self.w_mesh - ctSub.w_mesh
ctResult.u_cubed = self.u_cubed - ctSub.u_cubed
return ctResult
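# Hedged usage sketch (plane_base and plane_waked are hypothetical names):
# subtract() is the natural way to form a wake deficit by differencing a
# waked plane from a baseline plane built on the same grid.
#
# deficit = plane_base.subtract(plane_waked)
# # deficit.u_mesh == plane_base.u_mesh - plane_waked.u_mesh on the shared
# # mesh; planes with mismatched meshes raise an Exception instead.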
def visualize(self,ax=None,minSpeed=None,maxSpeed=None):
""" Visualize the scan
Args:
ax: axes for plotting, if none, create a new one
minSpeed, maxSpeed: values used for plotting; if not provided, default to the data min/max
"""
if not ax:
fig, ax = plt.subplots()
if minSpeed is None:
minSpeed = self.u_mesh.min()
if maxSpeed is None:
maxSpeed = self.u_mesh.max()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
# Plot the cut-through
# print((self.x1_lin-self.x1_center) /self.D)
# print(minSpeed,maxSpeed)
im = ax.pcolormesh((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center) /self.D, Zm, cmap='coolwarm',vmin=minSpeed,vmax=maxSpeed)
# Make equal axis
ax.set_aspect('equal')
return im
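# Usage sketch (hypothetical handles): visualize() returns the pcolormesh
# artist, so the caller can attach a colorbar.
#
# fig, ax = plt.subplots()
# im = plane.visualize(ax=ax, minSpeed=4.0, maxSpeed=10.0)
# fig.colorbar(im, ax=ax, label='u (m/s)')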
def lineContour(self,ax=None,levels=None,colors=None,**kwargs):
""" Visualize the scan as a simple contour
Args:
ax: axes for plotting, if none, create a new one
levels, colors: optional contour levels and colors; additional kwargs are passed through to ax.contour
"""
if not ax:
fig, ax = plt.subplots()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Plot the cut-through
if levels:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,**kwargs)
else:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,**kwargs)
# Invert the x-axis
# ax.invert_xaxis()
# Make equal axis
ax.set_aspect('equal')
# Define horizontal subclass
class HorPlane(_CutPlane):
def __init__(self, df_flow, z_value, resolution=100, x1_center=0.0,x2_center=0.0, D=None):
# Call the parent constructor
super().__init__(df_flow, x1='x', x2='y', x3_value=z_value,resolution=resolution,x1_center=x1_center,x2_center=x2_center, D=D, invert_x1=False)
# Define cross plane subclass
class CrossPlane(_CutPlane):
def __init__(self, df_flow, x_value, y_center, z_center, D
|
output:
flow_file: full path name of flow file"""
|
random_line_split
|
|
cut_plane.py
|
self.x1_name = x1
self.x2_name = x2
self.x3_name = [x3 for x3 in ['x','y','z'] if x3 not in [x1,x2]][0]
# Find the nearest value in 3rd dimension
search_values = np.array(sorted(df_flow[self.x3_name].unique()))
nearest_idx = (np.abs(search_values-x3_value)).argmin()
nearest_value = search_values[nearest_idx]
print('Nearest value in %s to %.2f is %.2f' % (self.x3_name, x3_value, nearest_value))
# Get a sub-frame of only this 3rd dimension value
df_sub = df_flow[df_flow[self.x3_name]==nearest_value]
# Make sure cropping is valid
if crop_x1:
if crop_x1[0] < min(df_sub[x1]):
raise Exception("Invalid x_1 minimum on cropping")
if crop_x1[1] > max(df_sub[x1]):
|
if crop_x2:
if crop_x2[0] < min(df_sub[x2]):
raise Exception("Invalid x_2 minimum on cropping")
if crop_x2[1] > max(df_sub[x2]):
raise Exception("Invalid x_2 maximum on cropping")
# If cropping x1 do it now
# if crop_x1:
# df_sub = df_sub[(df_sub[x1] >= crop_x1[0]) & (df_sub[x1] <= crop_x1[1])]
# if crop_x2:
# df_sub = df_sub[(df_sub[x2] >= crop_x2[0]) & (df_sub[x2] <= crop_x2[1])]
# Store the relevant values
self.x1_in = df_sub[x1]
self.x2_in = df_sub[x2]
self.u_in = df_sub['u']
self.v_in = df_sub['v']
self.w_in = df_sub['w']
# Save the desired resolution
self.res = resolution
# Grid the data; if cropping is specified, use the crop bounds
if crop_x1:
# self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
self.x1_lin = np.linspace(crop_x1[0], crop_x1[1], resolution)
else:
self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
if crop_x2:
# self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
self.x2_lin = np.linspace(crop_x2[0], crop_x2[1], resolution)
else:
self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
# Mesh and interpolate u, v and w
# print(self.x1_lin)
# print(sorted(self.x1_in))
self.x1_mesh, self.x2_mesh = np.meshgrid(self.x1_lin, self.x2_lin)
self.u_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.u_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.v_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.v_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.w_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.w_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
# Save flat vectors
self.x1_flat = self.x1_mesh.flatten()
self.x2_flat = self.x2_mesh.flatten()
# Save u-cubed
self.u_cubed = self.u_mesh ** 3
# Save re-centering points for visualization
self.x1_center = x1_center
self.x2_center = x2_center
# If inverting, invert x1, and x1_center
if invert_x1:
self.x1_mesh = self.x1_mesh * -1
self.x1_lin = self.x1_lin * -1
self.x1_flat = self.x1_flat * -1
self.x1_center = self.x1_center * -1
self.v_mesh = self.v_mesh * -1
# Set the diameter, which will be used in visualization
# Analysis in D or meters?
if D is None:
self.plot_in_D = False
self.D = 1.
else:
self.plot_in_D = True
self.D = D
def subtract(self,ctSub):
""" Subtract another cut through from self (assume matching resolution) and return the difference
"""
# First confirm eligible for subtraction
if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):
raise Exception("Can't subtract because not meshed the same")
ctResult = copy.deepcopy(ctSub)# copy the class
# Original method
# ctResult.u = self.u - ctSub.u
# ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')
# New method
ctResult.u_mesh = self.u_mesh - ctSub.u_mesh
ctResult.v_mesh = self.v_mesh - ctSub.v_mesh
ctResult.w_mesh = self.w_mesh - ctSub.w_mesh
ctResult.u_cubed = self.u_cubed - ctSub.u_cubed
return ctResult
def visualize(self,ax=None,minSpeed=None,maxSpeed=None):
""" Visualize the scan
Args:
ax: axes for plotting, if none, create a new one
minSpeed, maxSpeed: values used for plotting; if not provided, default to the data min/max
"""
if not ax:
fig, ax = plt.subplots()
if minSpeed is None:
minSpeed = self.u_mesh.min()
if maxSpeed is None:
maxSpeed = self.u_mesh.max()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
# Plot the cut-through
# print((self.x1_lin-self.x1_center) /self.D)
# print(minSpeed,maxSpeed)
im = ax.pcolormesh((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center) /self.D, Zm, cmap='coolwarm',vmin=minSpeed,vmax=maxSpeed)
# Make equal axis
ax.set_aspect('equal')
return im
def lineContour(self,ax=None,levels=None,colors=None,**kwargs):
""" Visualize the scan as a simple contour
Args:
ax: axes for plotting, if none, create a new one
levels, colors: optional contour levels and colors; additional kwargs are passed through to ax.contour
"""
if not ax:
fig, ax = plt.subplots()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Plot the cut-through
if levels:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,**kwargs)
else:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,**kwargs)
# Invert the x-axis
# ax.invert_xaxis()
# Make equal axis
ax.set_aspect('equal')
# Define horizontal subclass
class HorPlane(_CutPlane):
def __init__(self, df_flow, z_value, resolution=100, x1_center=0.0,x2_center=0.0, D=None):
# Call the parent constructor
super().__init__(df_flow, x1='x', x2='y', x3_value=z_value,resolution=resolution,x1_center=x1_center,x2_center=x2_center, D=D, invert_x1=False)
# Define cross plane subclass
class CrossPlane(_CutPlane):
def __init__(self, df_flow, x_value, y_center, z_center, D, resolution=100, crop_y=None,crop_z=None,invert_x1=True):
#
|
raise Exception("Invalid x_1 maximum on cropping")
|
conditional_block
|
cut_plane.py
|
self.x1_name = x1
self.x2_name = x2
self.x3_name = [x3 for x3 in ['x','y','z'] if x3 not in [x1,x2]][0]
# Find the nearest value in 3rd dimension
search_values = np.array(sorted(df_flow[self.x3_name].unique()))
nearest_idx = (np.abs(search_values-x3_value)).argmin()
nearest_value = search_values[nearest_idx]
print('Nearest value in %s to %.2f is %.2f' % (self.x3_name, x3_value, nearest_value))
# Get a sub-frame of only this 3rd dimension value
df_sub = df_flow[df_flow[self.x3_name]==nearest_value]
# Make sure cropping is valid
if crop_x1:
if crop_x1[0] < min(df_sub[x1]):
raise Exception("Invalid x_1 minimum on cropping")
if crop_x1[1] > max(df_sub[x1]):
raise Exception("Invalid x_1 maximum on cropping")
if crop_x2:
if crop_x2[0] < min(df_sub[x2]):
raise Exception("Invalid x_2 minimum on cropping")
if crop_x2[1] > max(df_sub[x2]):
raise Exception("Invalid x_2 maximum on cropping")
# If cropping x1 do it now
# if crop_x1:
# df_sub = df_sub[(df_sub[x1] >= crop_x1[0]) & (df_sub[x1] <= crop_x1[1])]
# if crop_x2:
# df_sub = df_sub[(df_sub[x2] >= crop_x2[0]) & (df_sub[x2] <= crop_x2[1])]
# Store the relevant values
self.x1_in = df_sub[x1]
self.x2_in = df_sub[x2]
self.u_in = df_sub['u']
self.v_in = df_sub['v']
self.w_in = df_sub['w']
# Save the desired resolution
self.res = resolution
# Grid the data; if cropping is specified, use the crop bounds
if crop_x1:
# self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
self.x1_lin = np.linspace(crop_x1[0], crop_x1[1], resolution)
else:
self.x1_lin = np.linspace(min(self.x1_in), max(self.x1_in), resolution)
if crop_x2:
# self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
self.x2_lin = np.linspace(crop_x2[0], crop_x2[1], resolution)
else:
self.x2_lin = np.linspace(min(self.x2_in), max(self.x2_in), resolution)
# Mesh and interpolate u, v and w
# print(self.x1_lin)
# print(sorted(self.x1_in))
self.x1_mesh, self.x2_mesh = np.meshgrid(self.x1_lin, self.x2_lin)
self.u_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.u_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.v_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.v_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
self.w_mesh = griddata(np.column_stack([self.x1_in, self.x2_in]), self.w_in,(self.x1_mesh.flatten(), self.x2_mesh.flatten()), method='cubic')
# Save flat vectors
self.x1_flat = self.x1_mesh.flatten()
self.x2_flat = self.x2_mesh.flatten()
# Save u-cubed
self.u_cubed = self.u_mesh ** 3
# Save re-centering points for visualization
self.x1_center = x1_center
self.x2_center = x2_center
# If inverting, invert x1, and x1_center
if invert_x1:
self.x1_mesh = self.x1_mesh * -1
self.x1_lin = self.x1_lin * -1
self.x1_flat = self.x1_flat * -1
self.x1_center = self.x1_center * -1
self.v_mesh = self.v_mesh * -1
# Set the diameter, which will be used in visualization
# Analysis in D or meters?
if D is None:
self.plot_in_D = False
self.D = 1.
else:
self.plot_in_D = True
self.D = D
def subtract(self,ctSub):
""" Subtract another cut through from self (assume matching resolution) and return the difference
"""
# First confirm eligible for subtraction
if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):
raise Exception("Can't subtract because not meshed the same")
ctResult = copy.deepcopy(ctSub)# copy the class
# Original method
# ctResult.u = self.u - ctSub.u
# ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')
# New method
ctResult.u_mesh = self.u_mesh - ctSub.u_mesh
ctResult.v_mesh = self.v_mesh - ctSub.v_mesh
ctResult.w_mesh = self.w_mesh - ctSub.w_mesh
ctResult.u_cubed = self.u_cubed - ctSub.u_cubed
return ctResult
def visualize(self,ax=None,minSpeed=None,maxSpeed=None):
""" Visualize the scan
Args:
ax: axes for plotting, if none, create a new one
minSpeed, maxSpeed: values used for plotting; if not provided, default to the data min/max
"""
if not ax:
fig, ax = plt.subplots()
if minSpeed is None:
minSpeed = self.u_mesh.min()
if maxSpeed is None:
maxSpeed = self.u_mesh.max()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
# Plot the cut-through
# print((self.x1_lin-self.x1_center) /self.D)
# print(minSpeed,maxSpeed)
im = ax.pcolormesh((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center) /self.D, Zm, cmap='coolwarm',vmin=minSpeed,vmax=maxSpeed)
# Make equal axis
ax.set_aspect('equal')
return im
def lineContour(self,ax=None,levels=None,colors=None,**kwargs):
""" Visualize the scan as a simple contour
Args:
ax: axes for plotting, if none, create a new one
levels, colors: optional contour levels and colors; additional kwargs are passed through to ax.contour
"""
if not ax:
fig, ax = plt.subplots()
# Reshape UMesh internally
u_mesh = self.u_mesh.reshape(self.res,self.res)
Zm = np.ma.masked_where(np.isnan(u_mesh),u_mesh)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Plot the cut-through
if levels:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,levels=levels,**kwargs)
else:
if colors:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,colors=colors,**kwargs)
else:
ax.contour((self.x1_lin-self.x1_center) /self.D, (self.x2_lin-self.x2_center)/self.D, Zm,**kwargs)
# Invert the x-axis
# ax.invert_xaxis()
# Make equal axis
ax.set_aspect('equal')
# Define horizontal subclass
class HorPlane(_CutPlane):
|
# Define cross plane subclass
class CrossPlane(_CutPlane):
def __init__(self, df_flow, x_value, y_center, z_center, D, resolution=100, crop_y=None,crop_z=None,invert_x1=True):
#
|
def __init__(self, df_flow, z_value, resolution=100, x1_center=0.0,x2_center=0.0, D=None):
# Call the parent constructor
super().__init__(df_flow, x1='x', x2='y', x3_value=z_value,resolution=resolution,x1_center=x1_center,x2_center=x2_center, D=D, invert_x1=False)
|
identifier_body
|
ground_glass.py
|
.set(contrast_data0)
s1.place(x= 300,y = 310)
s2 = tk.Scale(window,label='对比度1',from_=0.0 , to = 2.5,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 0.5, resolution = 0.1,command = chang_contrast1)
s2.set(contrast_data1)
s2.place(x= 300,y = 390)
s3 = tk.Scale(window,label='去噪',from_=0.0 , to = 20.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_denosing_data)
s3.set(denosing_data)
s3.place(x= 300,y = 470)
s3 = tk.Scale(window,label='开运算系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_expansion_data)
s3.set(expansion_data)
s3.place(x= 300,y = 550)
s4 = tk.Scale(window,label='腐蚀系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_rust_data)
s4.set(rust_data)
s4.place(x= 300,y = 630)
b1 = tk.Button(window,text = '开始处理', width = 20,height = 3 , command = opencv)
b1.place(x= 340,y = 710)
def resize(w, h, w_box, h_box, pil_image):
f1 = 1.0 * w_box / w # 1.0 forces float division in Python2
f2 = 1.0 * h_box / h
factor = min([f1, f2])
# print(f1, f2, factor) # test
# use best down-sizing filter
width = int(w * factor)
height = int(h * factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
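# Worked example (illustrative numbers): fitting a 3264x2448 frame into a
# 400x300 box gives f1 = 400/3264 ~ 0.1225 and f2 = 300/2448 ~ 0.1225, so
# factor ~ 0.1225 and the result is 400x300; for a 1000x400 image in the
# same box, factor = min(0.4, 0.75) = 0.4, yielding 400x160. Taking the
# smaller ratio guarantees the whole image fits without distortion.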
def file_open():
global a
a = tkinter.filedialog.askopenfilename(filetypes=[("图片", ".jpg")])
def folder1():
global folder
folder = 1
def folder2():
global folder
folder = 2
def folder3():
global folder
folder = 3
def folder4():
global folder
folder = 4
def folder5():
global folder
folder = 5
def folder6():
global folder
folder = 6
def creat_bottom():
top2 = tk.Toplevel()
top2.title('设定存储文件夹')
top2.geometry('400x220')
r1 = tk.Button(top2, text='1',width = 10,
command = folder1)
r1.pack()
r2 = tk.Button(top2, text='2',width = 10,
command = folder2)
r2.pack()
r3 = tk.Button(top2, text='3',width = 10,
command = folder3)
r3.pack()
r4 = tk.Button(top2, text='4',width = 10,
command = folder4)
r4.pack()
r5 = tk.Button(top2, text='5',width = 10,
command = folder5)
r5.pack()
r6 = tk.Button(top2, text='test',width = 10,
command = folder6)
r6.pack()
r7 = tk.Button(top2, text='确认',width = 20,
command = top2.destroy)
r7.pack()
top2.mainloop()
def creat_menu():
menubar = tk.Menu(window)
filemenu = tk.Menu(menubar, tearoff = 0)
menubar.add_cascade(label='文件', menu=filemenu)
helpmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='帮助', menu=helpmenu)
filemenu.add_cascade(label='选择文件夹', command=creat_bottom)
filemenu.add_cascade(label='摄像头',command = photograph)
filemenu.add_cascade(label='切割图像', command=screenshots)
helpmenu.add_cascade(label='关于',command = about_creat)
window.config(menu=menubar)
def exposure_change(input):
global exposure
exposure = input
def logic_change(input):
global logic
logic = input
def photograph():
global address0
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FOURCC, 1196444237)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 3264) # set the capture resolution
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 2448)
#cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC, cv.CV_FOURCC('M', 'J', 'P', 'G'))
#844715353
#CAP_PROP_FOURCC
#1196444237
#cv.VideoWriter_fourcc(*'MJPG')
cv.namedWindow("摄像头", 0)
cv.resizeWindow("摄像头", 800, 600)
cv.createTrackbar("更改曝光","摄像头", 0, 15, exposure_change)
switch = '0:OFF\n1:ON'
cv.createTrackbar(switch, '摄像头', 0, 1, logic_change)
cap.set(cv.CAP_PROP_FOURCC,cv.COLOR_YUV2BGR_YUY2)
while True:
# get a frame
if logic == 0:
cap.set(cv.CAP_PROP_AUTO_EXPOSURE, logic)
cap.set(cv.CAP_PROP_EXPOSURE, exposure - 15)
ret, frame = cap.read()
# show a frame
cv.imshow("摄像头", frame)
key = cv.waitKey(1) & 0xFF  # poll the keyboard once per frame
if key == ord('q'):
cv.imwrite("output/%s/input.jpg" % (folder), frame)
address0 = "output/%s/input.jpg" % (folder)
cavans_creat()
break
elif key == ord('c'):
break
cap.release()
cv.destroyAllWindows()
def cavans_creat():
global photo0
global photo1
#address0 = "output/%s/cutted.jpg" % (folder)
img0 = Image.open(address0)
img0 = resize(3264, 2448, 400, 300, img0)
photo0 = ImageTk.PhotoImage(img0) # open with PIL's PhotoImage
img1 = Image.open(address1)
img1 = resize(3264, 2448, 400, 300, img1)
photo1 = ImageTk.PhotoImage(img1) # open with PIL's PhotoImage
canvas0 = tk.Canvas(window, bg ='white',height=300,width=400)
canvas0.create_image(0,0,anchor = 'nw',image = photo0)
canvas0.place(x= 0, y= 0)
canvas1 = tk.Canvas(window, bg ='white',height=300,width=400)
canvas1.create_image(0,0,anchor = 'nw',image = photo1)
canvas1.place(x= 410, y= 0)
def about_creat():
top1=tk.Toplevel()
top1.title('关于本程序')
top1.geometry('300x200')
image = Image.open('code_image\\111.jpg')
img = ImageTk.PhotoImage(image)
word_box = tk.Label(top1, text='毛玻璃清晰化处理软件\r版本:1.7\r编写者:张逸航')
canvas1 = tk.Canvas(top1, width = 80 ,height = 80, bg = 'white')
canvas1.create_image(0,0,image =
|
img,anchor="nw")
canvas1.create_image(image.width,0,image = img,anchor="nw")
canvas1.pack()
word_box.pack()
top1.mainloop()
def opencv():
global address1
src = cv.imread('output/%s/cutted.jpg'%(folder))
src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
cv.imwrite('output/%s/gray.jpg' % (folder),src)
#wiener_change(src)
#image_out(src, 600, 800, "input_image")
out = contrast(src)
#image_out(out, 600, 600, "out")
cv.imwrite('output/%s/output1.jpg'%(folder),ou
|
identifier_body
|
|
ground_glass.py
|
None:
n = height / float(h)
newsize = (int(n * w), height)
else:
n = width / float(w)
newsize = (width, int(h * n))
# resize the image
newimage = cv.resize(image, newsize, interpolation=inter)
return newimage
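# Note on the hard-coded *4 in on_mouse below: screenshots() displays the
# frame via resizeImage(img, 816, 612), i.e. the 3264x2448 capture scaled
# down by a factor of 4 per axis, so click coordinates are multiplied by 4
# to map back to full-resolution pixels.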
def on_mouse(event, x, y, flags, param):
global img, img1, point1, point2, point1_dis, point2_dis
img2 = img1.copy()
if event == cv.EVENT_LBUTTONDOWN: # left button pressed
point1 = (x*4, y*4)
point1_dis = (x, y)
cv.circle(img2, point1_dis, 10, (0,255,0), 5)
cv.imshow('image', img2)
elif event == cv.EVENT_MOUSEMOVE and (flags & cv.EVENT_FLAG_LBUTTON): # dragging with the left button held
cv.rectangle(img2, point1_dis, (x, y), (255, 0, 0), 5)
cv.imshow('image', img2)
elif event == cv.EVENT_LBUTTONUP: # left button released
point2 = (x*4, y*4)
point2_dis = (x,y)
cv.rectangle(img2, point1_dis, point2_dis, (0,0,255), 5)
cv.imshow('image', img2)
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] -point2[1])
cut_img = img[min_y:min_y+height, min_x:min_x+width]
cv.imwrite("output/%s/cutted.jpg"%(folder), cut_img)
def screenshots():
global img, img1
img = cv.imread("output/%s/input.jpg"%(folder))
img1 = resizeImage(img, 816, 612)
cv.namedWindow('image',1)
cv.setMouseCallback('image', on_mouse)
cv.imshow('image', img1)
cv.waitKey(0)
def motion_process(image_size, motion_angle):
PSF = np.zeros(image_size)
print(image_size)
center_position = (image_size[0] - 1) / 2
print(center_position)
slope_tan = math.tan(motion_angle * math.pi / 180)
slope_cot = 1 / slope_tan
if slope_tan <= 1:
for i in range(15):
offset = round(i * slope_tan) # ((center_position-i)*slope_tan)
PSF[int(center_position + offset), int(center_position - offset)] = 1
return PSF / PSF.sum() # normalize the point spread function's brightness
else:
for i in range(15):
offset = round(i * slope_cot)
PSF[int(center_position - offset), int(center_position + offset)] = 1
return PSF / PSF.sum()
def wiener(input,PSF,eps,K=0.01): # Wiener filter, K=0.01
input_fft=fft.fft2(input)
PSF_fft=fft.fft2(PSF) +eps
PSF_fft_1=np.conj(PSF_fft) /(np.abs(PSF_fft)**2 + K)
b = input_fft * PSF_fft_1
result=fft.ifft2(b)
result=np.abs(fft.fftshift(result))
return result
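# Self-contained sketch of the restoration step above (synthetic data, not
# from this program; assumes this module's numpy/fft/math imports): blur an
# image by multiplying spectra with the motion PSF, then invert it with
# wiener(), which applies conj(H) / (|H|^2 + K) to the blurred spectrum.
#
# test = np.zeros((64, 64)); test[24:40, 24:40] = 1.0  # synthetic square
# psf = motion_process((64, 64), 60)                   # 60-degree motion PSF
# blurred = np.abs(fft.ifft2(fft.fft2(test) * fft.fft2(psf)))
# restored = wiener(blurred, psf, eps=1e-3)            # deblurred estimate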
def wiener_change(image):
img_h = image.shape[0]
img_w = image.shape[1]
#graph.figure(0)
#graph.xlabel("Original Image")
#graph.gray()
#graph.imshow(image)
graph.figure(1)
graph.gray()
# apply motion-blur processing
PSF = motion_process((img_h, img_w), 60)
out = wiener(image, PSF, winner_data)
#graph.subplot(236)
#graph.xlabel("wiener deblurred(k=0.01)")
graph.imshow(out)
graph.axis('off')
graph.savefig('output/%s/winner_out.jpg'%(folder))
graph.show()
def image_out(image, x, y, word):
cv.namedWindow(word, 0)
cv.resizeWindow(word, x, y)
cv.imshow(word, image)
def contrast(image):
dst = image
img_h = image.shape[0]
img_w = image.shape[1]
graph.figure(1)
graph.gray()
# apply motion-blur processing
PSF = motion_process((img_h, img_w), 60)
out = wiener(image, PSF, 1e-3)
graph.imshow(out)
graph.axis('off')
graph.savefig('output/%s/winner_in.jpg'%(folder))
graph.show()
if contrast_data0 != 0:
clache = cv.createCLAHE(clipLimit=contrast_data0, tileGridSize=(8, 8))
dst = clache.apply(dst)
if denosing_data != 0:
dst = cv.fastNlMeansDenoising(dst,None ,denosing_data, 7, 21)
if contrast_data1!=0:
clache = cv.createCLAHE(clipLimit=contrast_data1, tileGridSize=(8, 8))
dst = clache.apply(dst)
if expansion_data != 0:
kernel = cv.getStructuringElement(cv.MORPH_RECT, (expansion_data, expansion_data))
dst = cv.morphologyEx(dst, cv.MORPH_OPEN, kernel) # morphological opening
if rust_data != 0:
kernel = np.ones((rust_data, rust_data), np.uint8)
dst = cv.erode(dst, kernel) # erosion
wiener_change(dst)
return dst
def sharpen(image):
kernel = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]], np.float32)
dst = cv.filter2D(image , -1 , kernel=kernel)
cv.namedWindow("median", 0)
cv.resizeWindow("median", 600, 600)
cv.imshow("median",dst)
def chang_contrast0(input):
global contrast_data0
contrast_data0 = float(input)
def chang_contrast1(input):
global contrast_data1
contrast_data1 = float(input)
def chang_denosing_data(input):
global denosing_data
denosing_data = float(input)
def change_expansion_data(input):
global expansion_data
expansion_data = int(input)
def change_rust_data(input):
global rust_data
rust_data = int(input)
def change_winner_data(input):
global winner_data
winner_data = float(input)
def scale_creat():
s1 = tk.Scale(window,label='对比度0',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_contrast0)
s1.set(contrast_data0)
s1.place(x= 300,y = 310)
s2 = tk.Scale(window,label='对比度1',from_=0.0 , to = 2.5,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 0.5, resolution = 0.1,command = chang_contrast1)
s2.set(contrast_data1)
s2.place(x= 300,y = 390)
s3 = tk.Scale(window,label='去噪',from_=0.0 , to = 20.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_denosing_data)
s3.set(denosing_data)
s3.place(x= 300,y = 470)
s3 = tk.Scale(window,label='开运算系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_expansion_data)
s3.set(expansion_data)
s3.place(x= 300,y = 550)
s4 = tk.Scale(window,label='腐蚀系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_rust_data)
s4.set(rust_data)
s4.place(x= 300,y = 630)
b1 = tk.Button(window,text = '开始处理', width = 20,height = 3 , command = opencv)
b1.place(x= 340,y = 710)
|
if width is
|
conditional_block
|
|
ground_glass.py
|
int1, point2, point1_dis, point2_dis
img2 = img1.copy()
if event == cv.EVENT_LBUTTONDOWN: # left button pressed
point1 = (x*4, y*4)
point1_dis = (x, y)
cv.circle(img2, point1_dis, 10, (0,255,0), 5)
cv.imshow('image', img2)
elif event == cv.EVENT_MOUSEMOVE and (flags & cv.EVENT_FLAG_LBUTTON): # dragging with the left button held
cv.rectangle(img2, point1_dis, (x, y), (255, 0, 0), 5)
cv.imshow('image', img2)
elif event == cv.EVENT_LBUTTONUP: # left button released
point2 = (x*4, y*4)
point2_dis = (x,y)
cv.rectangle(img2, point1_dis, point2_dis, (0,0,255), 5)
cv.imshow('image', img2)
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] -point2[1])
cut_img = img[min_y:min_y+height, min_x:min_x+width]
cv.imwrite("output/%s/cutted.jpg"%(folder), cut_img)
def screenshots():
global img, img1
img = cv.imread("output/%s/input.jpg"%(folder))
img1 = resizeImage(img, 816, 612)
cv.namedWindow('image',1)
cv.setMouseCallback('image', on_mouse)
cv.imshow('image', img1)
cv.waitKey(0)
def motion_process(image_size, motion_angle):
PSF = np.zeros(image_size)
print(image_size)
center_position = (image_size[0] - 1) / 2
print(center_position)
slope_tan = math.tan(motion_angle * math.pi / 180)
slope_cot = 1 / slope_tan
if slope_tan <= 1:
for i in range(15):
offset = round(i * slope_tan) # ((center_position-i)*slope_tan)
PSF[int(center_position + offset), int(center_position - offset)] = 1
return PSF / PSF.sum() # normalize the point spread function's brightness
else:
for i in range(15):
offset = round(i * slope_cot)
PSF[int(center_position - offset), int(center_position + offset)] = 1
return PSF / PSF.sum()
def wiener(input,PSF,eps,K=0.01): # Wiener filter, K=0.01
input_fft=fft.fft2(input)
PSF_fft=fft.fft2(PSF) +eps
PSF_fft_1=np.conj(PSF_fft) /(np.abs(PSF_fft)**2 + K)
b = input_fft * PSF_fft_1
result=fft.ifft2(b)
result=np.abs(fft.fftshift(result))
return result
def wiener_change(image):
img_h = image.shape[0]
img_w = image.shape[1]
#graph.figure(0)
#graph.xlabel("Original Image")
#graph.gray()
#graph.imshow(image)
graph.figure(1)
graph.gray()
# apply motion-blur processing
PSF = motion_process((img_h, img_w), 60)
out = wiener(image, PSF, winner_data)
#graph.subplot(236)
#graph.xlabel("wiener deblurred(k=0.01)")
graph.imshow(out)
graph.axis('off')
graph.savefig('output/%s/winner_out.jpg'%(folder))
graph.show()
def image_out(image, x, y, word):
cv.namedWindow(word, 0)
cv.resizeWindow(word, x, y)
cv.imshow(word, image)
def contrast(image):
dst = image
img_h = image.shape[0]
img_w = image.shape[1]
graph.figure(1)
graph.gray()
# apply motion-blur processing
PSF = motion_process((img_h, img_w), 60)
out = wiener(image, PSF, 1e-3)
graph.imshow(out)
graph.axis('off')
graph.savefig('output/%s/winner_in.jpg'%(folder))
graph.show()
if contrast_data0 != 0:
clache = cv.createCLAHE(clipLimit=contrast_data0, tileGridSize=(8, 8))
dst = clache.apply(dst)
if denosing_data != 0:
dst = cv.fastNlMeansDenoising(dst,None ,denosing_data, 7, 21)
if contrast_data1!=0:
clache = cv.createCLAHE(clipLimit=contrast_data1, tileGridSize=(8, 8))
dst = clache.apply(dst)
if expansion_data != 0:
kernel = cv.getStructuringElement(cv.MORPH_RECT, (expansion_data, expansion_data))
dst = cv.morphologyEx(dst, cv.MORPH_OPEN, kernel) # morphological opening
if rust_data != 0:
kernel = np.ones((rust_data, rust_data), np.uint8)
dst = cv.erode(dst, kernel) # erosion
wiener_change(dst)
return dst
def sharpen(image):
kernel = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]], np.float32)
dst = cv.filter2D(image , -1 , kernel=kernel)
cv.namedWindow("median", 0)
cv.resizeWindow("median", 600, 600)
cv.imshow("median",dst)
def chang_contrast0(input):
global contrast_data0
contrast_data0 = float(input)
def chang_contrast1(input):
global contrast_data1
contrast_data1 = float(input)
def chang_denosing_data(input):
global denosing_data
denosing_data = float(input)
def change_expansion_data(input):
global expansion_data
expansion_data = int(input)
def change_rust_data(input):
global rust_data
rust_data = int(input)
def change_winner_data(input):
global winner_data
winner_data = float(input)
def scale_creat():
s1 = tk.Scale(window,label='对比度0',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_contrast0)
s1.set(contrast_data0)
s1.place(x= 300,y = 310)
s2 = tk.Scale(window,label='对比度1',from_=0.0 , to = 2.5,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 0.5, resolution = 0.1,command = chang_contrast1)
s2.set(contrast_data1)
s2.place(x= 300,y = 390)
s3 = tk.Scale(window,label='去噪',from_=0.0 , to = 20.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_denosing_data)
s3.set(denosing_data)
s3.place(x= 300,y = 470)
s3 = tk.Scale(window,label='开运算系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_expansion_data)
s3.set(expansion_data)
s3.place(x= 300,y = 550)
s4 = tk.Scale(window,label='腐蚀系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_rust_data)
s4.set(rust_data)
s4.place(x= 300,y = 630)
b1 = tk.Button(window,text = '开始处理', width = 20,height = 3 , command = opencv)
b1.place(x= 340,y = 710)
def resize(w, h, w_box, h_box, pil_image):
f1 = 1.0 * w_box / w # 1.0 forces float division in Python2
f2 = 1.0 * h_box / h
factor = min([f1, f2])
# print(f1, f2, factor) # test
# use best down-sizing filter
width =
|
img1, po
|
identifier_name
|
|
ground_glass.py
|
osing_data = float(input)
def change_expansion_data(input):
global expansion_data
expansion_data = int(input)
def change_rust_data(input):
global rust_data
rust_data = int(input)
def change_winner_data(input):
global winner_data
winner_data = float(input)
def scale_creat():
s1 = tk.Scale(window,label='对比度0',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_contrast0)
s1.set(contrast_data0)
s1.place(x= 300,y = 310)
s2 = tk.Scale(window,label='对比度1',from_=0.0 , to = 2.5,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 0.5, resolution = 0.1,command = chang_contrast1)
s2.set(contrast_data1)
s2.place(x= 300,y = 390)
s3 = tk.Scale(window,label='去噪',from_=0.0 , to = 20.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 5, resolution = 1,command = chang_denosing_data)
s3.set(denosing_data)
s3.place(x= 300,y = 470)
s3 = tk.Scale(window,label='开运算系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_expansion_data)
s3.set(expansion_data)
s3.place(x= 300,y = 550)
s4 = tk.Scale(window,label='腐蚀系数',from_=0.0 , to = 50.0,orient = tk.HORIZONTAL,
length = 250, showvalue = 1, tickinterval = 10, resolution = 1,command = change_rust_data)
s4.set(rust_data)
s4.place(x= 300,y = 630)
b1 = tk.Button(window,text = '开始处理', width = 20,height = 3 , command = opencv)
b1.place(x= 340,y = 710)
def resize(w, h, w_box, h_box, pil_image):
f1 = 1.0 * w_box / w # 1.0 forces float division in Python2
f2 = 1.0 * h_box / h
factor = min([f1, f2])
# print(f1, f2, factor) # test
# use best down-sizing filter
width = int(w * factor)
height = int(h * factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
def file_open():
global a
a = tkinter.filedialog.askopenfilename(filetypes=[("图片", ".jpg")])
def folder1():
global folder
folder = 1
def folder2():
global folder
folder = 2
def folder3():
global folder
folder = 3
def folder4():
global folder
folder = 4
def folder5():
global folder
folder = 5
def folder6():
global folder
folder = 6
def creat_bottom():
top2 = tk.Toplevel()
top2.title('设定存储文件夹')
top2.geometry('400x220')
r1 = tk.Button(top2, text='1',width = 10,
command = folder1)
r1.pack()
r2 = tk.Button(top2, text='2',width = 10,
command = folder2)
r2.pack()
r3 = tk.Button(top2, text='3',width = 10,
command = folder3)
r3.pack()
r4 = tk.Button(top2, text='4',width = 10,
command = folder4)
r4.pack()
r5 = tk.Button(top2, text='5',width = 10,
command = folder5)
r5.pack()
r6 = tk.Button(top2, text='test',width = 10,
command = folder6)
r6.pack()
r7 = tk.Button(top2, text='确认',width = 20,
command = top2.destroy)
r7.pack()
top2.mainloop()
def creat_menu():
menubar = tk.Menu(window)
filemenu = tk.Menu(menubar, tearoff = 0)
menubar.add_cascade(label='文件', menu=filemenu)
helpmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='帮助', menu=helpmenu)
filemenu.add_cascade(label='选择文件夹', command=creat_bottom)
filemenu.add_cascade(label='摄像头',command = photograph)
filemenu.add_cascade(label='切割图像', command=screenshots)
helpmenu.add_cascade(label='关于',command = about_creat)
window.config(menu=menubar)
def exposure_change(input):
global exposure
exposure = input
def logic_change(input):
global logic
logic = input
def photograph():
global address0
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FOURCC, 1196444237)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 3264) # set the capture resolution
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 2448)
#cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC, cv.CV_FOURCC('M', 'J', 'P', 'G'))
#844715353
#CAP_PROP_FOURCC
#1196444237
#cv.VideoWriter_fourcc(*'MJPG')
cv.namedWindow("摄像头", 0)
cv.resizeWindow("摄像头", 800, 600)
cv.createTrackbar("更改曝光","摄像头", 0, 15, exposure_change)
switch = '0:OFF\n1:ON'
cv.createTrackbar(switch, '摄像头', 0, 1, logic_change)
cap.set(cv.CAP_PROP_FOURCC,cv.COLOR_YUV2BGR_YUY2)
while True:
# get a frame
if logic == 0:
cap.set(cv.CAP_PROP_AUTO_EXPOSURE, logic)
cap.set(cv.CAP_PROP_EXPOSURE, exposure - 15)
ret, frame = cap.read()
# show a frame
cv.imshow("摄像头", frame)
key = cv.waitKey(1) & 0xFF  # poll the keyboard once per frame
if key == ord('q'):
cv.imwrite("output/%s/input.jpg" % (folder), frame)
address0 = "output/%s/input.jpg" % (folder)
cavans_creat()
break
elif key == ord('c'):
break
cap.release()
cv.destroyAllWindows()
def cavans_creat():
global photo0
global photo1
#address0 = "output/%s/cutted.jpg" % (folder)
img0 = Image.open(address0)
img0 = resize(3264, 2448, 400, 300, img0)
photo0 = ImageTk.PhotoImage(img0) # open with PIL's PhotoImage
img1 = Image.open(address1)
img1 = resize(3264, 2448, 400, 300, img1)
photo1 = ImageTk.PhotoImage(img1) # open with PIL's PhotoImage
canvas0 = tk.Canvas(window, bg ='white',height=300,width=400)
canvas0.create_image(0,0,anchor = 'nw',image = photo0)
canvas0.place(x= 0, y= 0)
canvas1 = tk.Canvas(window, bg ='white',height=300,width=400)
canvas1.create_image(0,0,anchor = 'nw',image = photo1)
canvas1.place(x= 410, y= 0)
def about_creat():
top1=tk.Toplevel()
top1.title('关于本程序')
top1.geometry('300x200')
image = Image.open('code_image\\111.jpg')
img = ImageTk.PhotoImage(image)
word_box = tk.Label(top1, text='毛玻璃清晰化处理软件\r版本:1.7\r编写者:张逸航')
canvas1 = tk.Canvas(top1, width = 80 ,height = 80, bg = 'white')
canvas1.create_image(0,0,image = img,anchor="nw")
|
canvas1.create_image(image.width,0,image = img,anchor="nw")
|
random_line_split
|
|
operator.go
|
.ApproximateSize
to.RegionCount++
}
// AddLearner is an OperatorStep that adds a region learner peer.
type AddLearner struct {
ToStore, PeerID uint64
}
func (al AddLearner) String() string {
return fmt.Sprintf("add learner peer %v on store %v", al.PeerID, al.ToStore)
}
// IsFinish checks if current step is finished.
func (al AddLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreLearner(al.ToStore); p != nil {
if p.GetId() != al.PeerID {
log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId())
return false
}
return region.GetPendingLearner(p.GetId()) == nil
}
return false
}
// Influence calculates the store difference that the current step makes
func (al AddLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
to := opInfluence.GetStoreInfluence(al.ToStore)
to.RegionSize += region.ApproximateSize
to.RegionCount++
}
// PromoteLearner is an OperatorStep that promotes a region learner peer to normal voter.
type PromoteLearner struct {
ToStore, PeerID uint64
}
func (pl PromoteLearner) String() string {
return fmt.Sprintf("promote learner peer %v on store %v to voter", pl.PeerID, pl.ToStore)
}
// IsFinish checks if current step is finished.
func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreVoter(pl.ToStore); p != nil {
if p.GetId() != pl.PeerID {
log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId())
}
return p.GetId() == pl.PeerID
}
return false
}
// Influence calculates the store difference that the current step makes
func (pl PromoteLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) {}
// RemovePeer is an OperatorStep that removes a region peer.
type RemovePeer struct {
FromStore uint64
}
func (rp RemovePeer) String() string {
return fmt.Sprintf("remove peer on store %v", rp.FromStore)
}
// IsFinish checks if current step is finished.
func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool {
return region.GetStorePeer(rp.FromStore) == nil
}
// Influence calculates the store difference that the current step makes
func (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
from := opInfluence.GetStoreInfluence(rp.FromStore)
from.RegionSize -= region.ApproximateSize
from.RegionCount--
}
// MergeRegion is an OperatorStep that merge two regions.
type MergeRegion struct {
FromRegion *metapb.Region
ToRegion *metapb.Region
// there are two regions involved in the merge process,
// so to keep them away from other schedulers,
// both of them should add a MergeRegion operator step.
// But actually, TiKV only needs the region that is being merged to receive the merge request,
// so an IsPassive flag is used to indicate that
// this region doesn't need to send a merge request to TiKV.
IsPassive bool
}
func (mr MergeRegion) String() string {
return fmt.Sprintf("merge region %v into region %v", mr.FromRegion.GetId(), mr.ToRegion.GetId())
}
// IsFinish checks if current step is finished
func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool {
if mr.IsPassive {
return bytes.Compare(region.Region.StartKey, mr.ToRegion.StartKey) != 0 || bytes.Compare(region.Region.EndKey, mr.ToRegion.EndKey) != 0
}
return false
}
// Influence calculates the store difference that the current step makes
func (mr MergeRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
if mr.IsPassive {
for _, p := range region.GetPeers() {
o := opInfluence.GetStoreInfluence(p.GetStoreId())
o.RegionCount--
if region.Leader.GetId() == p.GetId() {
o.LeaderCount--
}
}
}
}
// SplitRegion is an OperatorStep that splits a region.
type SplitRegion struct {
StartKey, EndKey []byte
}
func (sr SplitRegion) String() string {
return "split region"
}
// IsFinish checks if current step is finished.
func (sr SplitRegion) IsFinish(region *core.RegionInfo) bool {
return !bytes.Equal(region.StartKey, sr.StartKey) || !bytes.Equal(region.EndKey, sr.EndKey)
}
// Influence calculates the store difference that the current step makes.
func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
for _, p := range region.Peers {
inf := opInfluence.GetStoreInfluence(p.GetStoreId())
inf.RegionCount++
if region.Leader.GetId() == p.GetId() {
inf.LeaderCount++
}
}
}
// Operator contains execution steps generated by scheduler.
type Operator struct {
desc string
regionID uint64
regionEpoch *metapb.RegionEpoch
kind OperatorKind
steps []OperatorStep
currentStep int32
createTime time.Time
stepTime int64
level core.PriorityLevel
}
// NewOperator creates a new operator.
func NewOperator(desc string, regionID uint64, regionEpoch *metapb.RegionEpoch, kind OperatorKind, steps ...OperatorStep) *Operator {
return &Operator{
desc: desc,
regionID: regionID,
regionEpoch: regionEpoch,
kind: kind,
steps: steps,
createTime: time.Now(),
stepTime: time.Now().UnixNano(),
level: core.NormalPriority,
}
}
func (o *Operator) String() string {
s := fmt.Sprintf("%s (kind:%s, region:%v(%v,%v), createAt:%s, currentStep:%v, steps:%+v) ", o.desc, o.kind, o.regionID, o.regionEpoch.GetVersion(), o.regionEpoch.GetConfVer(), o.createTime, atomic.LoadInt32(&o.currentStep), o.steps)
if o.IsTimeout() {
s = s + "timeout"
}
if o.IsFinish() {
s = s + "finished"
}
return s
}
// MarshalJSON serializes custom types to JSON
func (o *Operator) MarshalJSON() ([]byte, error) {
return []byte(`"` + o.String() + `"`), nil
}
// Desc returns the operator's short description.
func (o *Operator) Desc() string {
return o.desc
}
// SetDesc sets the description for the operator.
func (o *Operator) SetDesc(desc string) {
o.desc = desc
}
// AttachKind attaches an operator kind for the operator.
func (o *Operator) AttachKind(kind OperatorKind) {
o.kind |= kind
}
// RegionID returns the region that operator is targeted.
func (o *Operator) RegionID() uint64 {
return o.regionID
}
// RegionEpoch returns the region's epoch that is attached to the operator.
func (o *Operator) RegionEpoch() *metapb.RegionEpoch {
return o.regionEpoch
}
// Kind returns operator's kind.
func (o *Operator) Kind() OperatorKind {
return o.kind
}
// ElapsedTime returns duration since it was created.
func (o *Operator) ElapsedTime() time.Duration {
return time.Since(o.createTime)
}
// Len returns the operator's steps count.
func (o *Operator) Len() int {
return len(o.steps)
}
// Step returns the i-th step.
func (o *Operator) Step(i int) OperatorStep {
if i >= 0 && i < len(o.steps) {
return o.steps[i]
}
return nil
}
// Check checks if current step is finished, returns next step to take action.
// It's safe to be called by multiple goroutines concurrently.
func (o *Operator) Check(region *core.RegionInfo) OperatorStep {
for step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {
if o.steps[int(step)].IsFinish(region) {
operatorStepDuration.WithLabelValues(reflect.TypeOf(o.steps[int(step)]).Name()).
Observe(time.Since(time.Unix(0, atomic.LoadInt64(&o.stepTime))).Seconds())
atomic.StoreInt32(&o.currentStep, step+1)
atomic.StoreInt64(&o.stepTime, time.Now().UnixNano())
} else {
return o.steps[int(step)]
}
}
return nil
}
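// A minimal, self-contained sketch (hypothetical types, not part of this
// package) of the atomic step-advance pattern used by Check above:
// finished steps are skipped by advancing currentStep atomically, and the
// first unfinished step is returned; nil means the operator is done.
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type step struct{ done bool }
//
//	type op struct {
//		steps       []step
//		currentStep int32
//	}
//
//	func (o *op) check() *step {
//		for i := atomic.LoadInt32(&o.currentStep); int(i) < len(o.steps); i++ {
//			if o.steps[int(i)].done {
//				atomic.StoreInt32(&o.currentStep, i+1) // skip the finished step
//			} else {
//				return &o.steps[int(i)]
//			}
//		}
//		return nil // all steps finished
//	}
//
//	func main() {
//		o := &op{steps: []step{{done: true}, {done: false}}}
//		fmt.Println(o.check() != nil) // true: the second step is still pending
//	}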
// SetPriorityLevel sets the priority level for the operator
func (o *Operator) SetPriorityLevel(level core.PriorityLevel)
|
// GetPriorityLevel gets the priority level
func (o *Operator) GetPriorityLevel() core.PriorityLevel {
return o.level
}
// IsFinish checks if all steps are finished.
func (o *Operator) IsFinish() bool {
return atomic.LoadInt32(&o.currentStep) >= int32(len(o.steps))
}
// IsTimeout checks the operator's create time and determines if it
|
{
o.level = level
}
|
identifier_body
|
operator.go
|
region.ApproximateSize
to.RegionCount++
}
// AddLearner is an OperatorStep that adds a region learner peer.
type AddLearner struct {
ToStore, PeerID uint64
}
func (al AddLearner) String() string {
return fmt.Sprintf("add learner peer %v on store %v", al.PeerID, al.ToStore)
}
// IsFinish checks if current step is finished.
func (al AddLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreLearner(al.ToStore); p != nil {
if p.GetId() != al.PeerID {
log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId())
return false
}
return region.GetPendingLearner(p.GetId()) == nil
}
return false
}
// Influence calculates the store difference that the current step makes
func (al AddLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
to := opInfluence.GetStoreInfluence(al.ToStore)
to.RegionSize += region.ApproximateSize
to.RegionCount++
}
// PromoteLearner is an OperatorStep that promotes a region learner peer to normal voter.
type PromoteLearner struct {
ToStore, PeerID uint64
}
func (pl PromoteLearner) String() string {
return fmt.Sprintf("promote learner peer %v on store %v to voter", pl.PeerID, pl.ToStore)
}
// IsFinish checks if current step is finished.
func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreVoter(pl.ToStore); p != nil {
if p.GetId() != pl.PeerID {
log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId())
}
return p.GetId() == pl.PeerID
}
return false
}
// Influence calculates the store difference that the current step makes
func (pl PromoteLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) {}
// RemovePeer is an OperatorStep that removes a region peer.
type RemovePeer struct {
FromStore uint64
}
func (rp RemovePeer) String() string {
return fmt.Sprintf("remove peer on store %v", rp.FromStore)
}
// IsFinish checks if current step is finished.
func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool {
return region.GetStorePeer(rp.FromStore) == nil
}
// Influence calculates the store difference that the current step makes
func (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
from := opInfluence.GetStoreInfluence(rp.FromStore)
from.RegionSize -= region.ApproximateSize
from.RegionCount--
}
// MergeRegion is an OperatorStep that merge two regions.
type MergeRegion struct {
FromRegion *metapb.Region
ToRegion *metapb.Region
// there are two regions involved in the merge process,
// so to keep them away from other schedulers,
// both of them should add a MergeRegion operator step.
// But actually, TiKV only needs the region that is being merged to receive the merge request,
// so an IsPassive flag is used to indicate that
// this region doesn't need to send a merge request to TiKV.
IsPassive bool
}
func (mr MergeRegion) String() string {
return fmt.Sprintf("merge region %v into region %v", mr.FromRegion.GetId(), mr.ToRegion.GetId())
}
// IsFinish checks if current step is finished
func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool {
if mr.IsPassive {
return bytes.Compare(region.Region.StartKey, mr.ToRegion.StartKey) != 0 || bytes.Compare(region.Region.EndKey, mr.ToRegion.EndKey) != 0
}
return false
}
// Influence calculates the store difference that the current step makes
func (mr MergeRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
if mr.IsPassive {
for _, p := range region.GetPeers() {
o := opInfluence.GetStoreInfluence(p.GetStoreId())
o.RegionCount--
if region.Leader.GetId() == p.GetId() {
o.LeaderCount--
}
}
}
}
// SplitRegion is an OperatorStep that splits a region.
type SplitRegion struct {
StartKey, EndKey []byte
}
|
return "split region"
}
// IsFinish checks if current step is finished.
func (sr SplitRegion) IsFinish(region *core.RegionInfo) bool {
return !bytes.Equal(region.StartKey, sr.StartKey) || !bytes.Equal(region.EndKey, sr.EndKey)
}
// Influence calculates the store difference that the current step makes.
func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
for _, p := range region.Peers {
inf := opInfluence.GetStoreInfluence(p.GetStoreId())
inf.RegionCount++
if region.Leader.GetId() == p.GetId() {
inf.LeaderCount++
}
}
}
// Operator contains execution steps generated by scheduler.
type Operator struct {
desc string
regionID uint64
regionEpoch *metapb.RegionEpoch
kind OperatorKind
steps []OperatorStep
currentStep int32
createTime time.Time
stepTime int64
level core.PriorityLevel
}
// NewOperator creates a new operator.
func NewOperator(desc string, regionID uint64, regionEpoch *metapb.RegionEpoch, kind OperatorKind, steps ...OperatorStep) *Operator {
return &Operator{
desc: desc,
regionID: regionID,
regionEpoch: regionEpoch,
kind: kind,
steps: steps,
createTime: time.Now(),
stepTime: time.Now().UnixNano(),
level: core.NormalPriority,
}
}
func (o *Operator) String() string {
s := fmt.Sprintf("%s (kind:%s, region:%v(%v,%v), createAt:%s, currentStep:%v, steps:%+v) ", o.desc, o.kind, o.regionID, o.regionEpoch.GetVersion(), o.regionEpoch.GetConfVer(), o.createTime, atomic.LoadInt32(&o.currentStep), o.steps)
if o.IsTimeout() {
s = s + "timeout"
}
if o.IsFinish() {
s = s + "finished"
}
return s
}
// MarshalJSON serializes custom types to JSON
func (o *Operator) MarshalJSON() ([]byte, error) {
return []byte(`"` + o.String() + `"`), nil
}
// Desc returns the operator's short description.
func (o *Operator) Desc() string {
return o.desc
}
// SetDesc sets the description for the operator.
func (o *Operator) SetDesc(desc string) {
o.desc = desc
}
// AttachKind attaches an operator kind for the operator.
func (o *Operator) AttachKind(kind OperatorKind) {
o.kind |= kind
}
// RegionID returns the region that operator is targeted.
func (o *Operator) RegionID() uint64 {
return o.regionID
}
// RegionEpoch returns the region's epoch that is attached to the operator.
func (o *Operator) RegionEpoch() *metapb.RegionEpoch {
return o.regionEpoch
}
// Kind returns operator's kind.
func (o *Operator) Kind() OperatorKind {
return o.kind
}
// ElapsedTime returns duration since it was created.
func (o *Operator) ElapsedTime() time.Duration {
return time.Since(o.createTime)
}
// Len returns the operator's steps count.
func (o *Operator) Len() int {
return len(o.steps)
}
// Step returns the i-th step.
func (o *Operator) Step(i int) OperatorStep {
if i >= 0 && i < len(o.steps) {
return o.steps[i]
}
return nil
}
// Check checks if current step is finished, returns next step to take action.
// It's safe to be called by multiple goroutines concurrently.
func (o *Operator) Check(region *core.RegionInfo) OperatorStep {
for step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {
if o.steps[int(step)].IsFinish(region) {
operatorStepDuration.WithLabelValues(reflect.TypeOf(o.steps[int(step)]).Name()).
Observe(time.Since(time.Unix(0, atomic.LoadInt64(&o.stepTime))).Seconds())
atomic.StoreInt32(&o.currentStep, step+1)
atomic.StoreInt64(&o.stepTime, time.Now().UnixNano())
} else {
return o.steps[int(step)]
}
}
return nil
}
// SetPriorityLevel sets the priority level for the operator
func (o *Operator) SetPriorityLevel(level core.PriorityLevel) {
o.level = level
}
// GetPriorityLevel gets the priority level
func (o *Operator) GetPriorityLevel() core.PriorityLevel {
return o.level
}
// IsFinish checks if all steps are finished.
func (o *Operator) IsFinish() bool {
return atomic.LoadInt32(&o.currentStep) >= int32(len(o.steps))
}
// IsTimeout checks the operator's create time and determines if it
|
func (sr SplitRegion) String() string {
|
random_line_split
|
operator.go
|
Operator) SetDesc(desc string) {
o.desc = desc
}
// AttachKind attaches an operator kind for the operator.
func (o *Operator) AttachKind(kind OperatorKind) {
o.kind |= kind
}
// RegionID returns the region that operator is targeted.
func (o *Operator) RegionID() uint64 {
return o.regionID
}
// RegionEpoch returns the region's epoch that is attached to the operator.
func (o *Operator) RegionEpoch() *metapb.RegionEpoch {
return o.regionEpoch
}
// Kind returns operator's kind.
func (o *Operator) Kind() OperatorKind {
return o.kind
}
// ElapsedTime returns duration since it was created.
func (o *Operator) ElapsedTime() time.Duration {
return time.Since(o.createTime)
}
// Len returns the operator's steps count.
func (o *Operator) Len() int {
return len(o.steps)
}
// Step returns the i-th step.
func (o *Operator) Step(i int) OperatorStep {
if i >= 0 && i < len(o.steps) {
return o.steps[i]
}
return nil
}
// Check checks if current step is finished, returns next step to take action.
// It's safe to be called by multiple goroutines concurrently.
func (o *Operator) Check(region *core.RegionInfo) OperatorStep {
for step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {
if o.steps[int(step)].IsFinish(region) {
operatorStepDuration.WithLabelValues(reflect.TypeOf(o.steps[int(step)]).Name()).
Observe(time.Since(time.Unix(0, atomic.LoadInt64(&o.stepTime))).Seconds())
atomic.StoreInt32(&o.currentStep, step+1)
atomic.StoreInt64(&o.stepTime, time.Now().UnixNano())
} else {
return o.steps[int(step)]
}
}
return nil
}
// SetPriorityLevel sets the priority level for the operator
func (o *Operator) SetPriorityLevel(level core.PriorityLevel) {
o.level = level
}
// GetPriorityLevel gets the priority level
func (o *Operator) GetPriorityLevel() core.PriorityLevel {
return o.level
}
// IsFinish checks if all steps are finished.
func (o *Operator) IsFinish() bool {
return atomic.LoadInt32(&o.currentStep) >= int32(len(o.steps))
}
// IsTimeout checks the operator's create time and determines if it has timed out.
func (o *Operator) IsTimeout() bool {
if o.IsFinish() {
return false
}
if o.kind&OpRegion != 0 {
return time.Since(o.createTime) > RegionOperatorWaitTime
}
return time.Since(o.createTime) > LeaderOperatorWaitTime
}
// Influence calculates the store difference that unfinished operator steps make
func (o *Operator) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
for step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {
if !o.steps[int(step)].IsFinish(region) {
o.steps[int(step)].Influence(opInfluence, region)
}
}
}
// OperatorHistory is used to log and visualize completed operators.
type OperatorHistory struct {
FinishTime time.Time
From, To uint64
Kind core.ResourceKind
}
// History transfers the operator's steps to operator histories.
func (o *Operator) History() []OperatorHistory {
now := time.Now()
var histories []OperatorHistory
var addPeerStores, removePeerStores []uint64
for _, step := range o.steps {
switch s := step.(type) {
case TransferLeader:
histories = append(histories, OperatorHistory{
FinishTime: now,
From: s.FromStore,
To: s.ToStore,
Kind: core.LeaderKind,
})
case AddPeer:
addPeerStores = append(addPeerStores, s.ToStore)
case AddLearner:
addPeerStores = append(addPeerStores, s.ToStore)
case RemovePeer:
removePeerStores = append(removePeerStores, s.FromStore)
}
}
for i := range addPeerStores {
if i < len(removePeerStores) {
histories = append(histories, OperatorHistory{
FinishTime: now,
From: removePeerStores[i],
To: addPeerStores[i],
Kind: core.RegionKind,
})
}
}
return histories
}
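// Note on the pairing above: History assumes add-peer and remove-peer steps
// appear in matched order, so the i-th added store becomes the destination
// of a region movement whose source is the i-th removed store; surplus adds
// with no matching removal produce no history entry.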
// CreateRemovePeerOperator creates an Operator that removes a peer from region.
func CreateRemovePeerOperator(desc string, cluster Cluster, kind OperatorKind, region *core.RegionInfo, storeID uint64) *Operator {
removeKind, steps := removePeerSteps(cluster, region, storeID)
return NewOperator(desc, region.GetId(), region.GetRegionEpoch(), removeKind|kind, steps...)
}
// CreateMovePeerOperator creates an Operator that replaces an old peer with a new peer.
func CreateMovePeerOperator(desc string, cluster Cluster, region *core.RegionInfo, kind OperatorKind, oldStore, newStore uint64, peerID uint64) *Operator {
removeKind, steps := removePeerSteps(cluster, region, oldStore)
var st []OperatorStep
if cluster.IsRaftLearnerEnabled() {
st = []OperatorStep{
AddLearner{ToStore: newStore, PeerID: peerID},
PromoteLearner{ToStore: newStore, PeerID: peerID},
}
} else {
st = []OperatorStep{
AddPeer{ToStore: newStore, PeerID: peerID},
}
}
steps = append(st, steps...)
return NewOperator(desc, region.GetId(), region.GetRegionEpoch(), removeKind|kind|OpRegion, steps...)
}
// removePeerSteps returns the steps to safely remove a peer. It avoids removing the leader by transferring its leadership away first.
func removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64) (kind OperatorKind, steps []OperatorStep) {
if region.Leader != nil && region.Leader.GetStoreId() == storeID {
for id := range region.GetFollowers() {
follower := cluster.GetStore(id)
if follower != nil && !cluster.CheckLabelProperty(RejectLeader, follower.Labels) {
steps = append(steps, TransferLeader{FromStore: storeID, ToStore: id})
kind = OpLeader
break
}
}
}
steps = append(steps, RemovePeer{FromStore: storeID})
kind |= OpRegion
return
}
// CreateMergeRegionOperator creates an Operator that merges two regions into one.
func CreateMergeRegionOperator(desc string, cluster Cluster, source *core.RegionInfo, target *core.RegionInfo, kind OperatorKind) (*Operator, *Operator, error) {
steps, kinds, err := matchPeerSteps(cluster, source, target)
if err != nil {
return nil, nil, errors.Trace(err)
}
steps = append(steps, MergeRegion{
FromRegion: source.Region,
ToRegion: target.Region,
IsPassive: false,
})
op1 := NewOperator(desc, source.GetId(), source.GetRegionEpoch(), kinds|kind, steps...)
op2 := NewOperator(desc, target.GetId(), target.GetRegionEpoch(), kind, MergeRegion{
FromRegion: source.Region,
ToRegion: target.Region,
IsPassive: true,
})
return op1, op2, nil
}
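// Illustrative sketch (not from the source): a merge is carried out by two coordinated
// operators — op1 on the source region actively sends the merge request, while op2 only
// parks the target region (IsPassive) so other schedulers leave both regions alone.
//
//	op1, op2, err := CreateMergeRegionOperator("merge-region", cluster, source, target, kind)
//	if err == nil {
//		addOperator(op1) // hypothetical dispatcher; both operators must run together
//		addOperator(op2)
//	}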
// matchPeerSteps returns the steps to match the location of peer stores of source region with target's.
func matchPeerSteps(cluster Cluster, source *core.RegionInfo, target *core.RegionInfo) ([]OperatorStep, OperatorKind, error) {
storeIDs := make(map[uint64]struct{})
var steps []OperatorStep
var kind OperatorKind
sourcePeers := source.Region.GetPeers()
targetPeers := target.Region.GetPeers()
for _, peer := range targetPeers {
storeIDs[peer.GetStoreId()] = struct{}{}
}
// Add missing peers.
for id := range storeIDs {
if source.GetStorePeer(id) != nil {
continue
}
peer, err := cluster.AllocPeer(id)
if err != nil {
log.Debugf("peer alloc failed: %v", err)
return nil, kind, errors.Trace(err)
}
if cluster.IsRaftLearnerEnabled() {
steps = append(steps,
AddLearner{ToStore: id, PeerID: peer.Id},
PromoteLearner{ToStore: id, PeerID: peer.Id},
)
} else {
steps = append(steps, AddPeer{ToStore: id, PeerID: peer.Id})
}
kind |= OpRegion
}
// Check whether to transfer leader or not
intersection := getIntersectionStores(sourcePeers, targetPeers)
leaderID := source.Leader.GetStoreId()
isFound := false
for _, storeID := range intersection {
if storeID == leaderID {
isFound = true
break
}
}
if !isFound {
steps = append(steps, TransferLeader{FromStore: source.Leader.GetStoreId(), ToStore: target.Leader.GetStoreId()})
kind |= OpLeader
}
// The original tail of matchPeerSteps is truncated in this excerpt; the natural
// completion is to return the accumulated steps and kind.
return steps, kind, nil
}
// The definition of AddPeer (struct, String, IsFinish) is elided from this excerpt; only
// the tail of its Influence method survives below. It is completed here to mirror
// AddLearner.Influence.
func (ap AddPeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
to := opInfluence.GetStoreInfluence(ap.ToStore)
to.RegionSize += region.ApproximateSize
to.RegionCount++
}
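// getIntersectionStores is called in matchPeerSteps above, but its body is not part of
// this excerpt; the sketch below is reconstructed from the call site and returns the IDs
// of stores that host a peer in both lists.
func getIntersectionStores(a []*metapb.Peer, b []*metapb.Peer) []uint64 {
set := make(map[uint64]struct{}, len(a))
for _, p := range a {
set[p.GetStoreId()] = struct{}{}
}
var intersection []uint64
for _, p := range b {
if _, ok := set[p.GetStoreId()]; ok {
intersection = append(intersection, p.GetStoreId())
}
}
return intersection
}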
// AddLearner is an OperatorStep that adds a region learner peer.
type AddLearner struct {
ToStore, PeerID uint64
}
func (al AddLearner) String() string {
return fmt.Sprintf("add learner peer %v on store %v", al.PeerID, al.ToStore)
}
// IsFinish checks if current step is finished.
func (al AddLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreLearner(al.ToStore); p != nil {
if p.GetId() != al.PeerID {
log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId())
return false
}
return region.GetPendingLearner(p.GetId()) == nil
}
return false
}
// Influence calculates the store difference that the current step makes.
func (al AddLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
to := opInfluence.GetStoreInfluence(al.ToStore)
to.RegionSize += region.ApproximateSize
to.RegionCount++
}
// PromoteLearner is an OperatorStep that promotes a region learner peer to normal voter.
type PromoteLearner struct {
ToStore, PeerID uint64
}
func (pl PromoteLearner) String() string {
return fmt.Sprintf("promote learner peer %v on store %v to voter", pl.PeerID, pl.ToStore)
}
// IsFinish checks if current step is finished.
func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreVoter(pl.ToStore); p != nil {
if p.GetId() != pl.PeerID {
log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId())
}
return p.GetId() == pl.PeerID
}
return false
}
// Influence calculates the store difference that the current step makes.
func (pl PromoteLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) {}
// RemovePeer is an OperatorStep that removes a region peer.
type RemovePeer struct {
FromStore uint64
}
func (rp RemovePeer) String() string {
return fmt.Sprintf("remove peer on store %v", rp.FromStore)
}
// IsFinish checks if current step is finished.
func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool {
return region.GetStorePeer(rp.FromStore) == nil
}
// Influence calculates the store difference that the current step makes.
func (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
from := opInfluence.GetStoreInfluence(rp.FromStore)
from.RegionSize -= region.ApproximateSize
from.RegionCount--
}
// MergeRegion is an OperatorStep that merges two regions.
type MergeRegion struct {
FromRegion *metapb.Region
ToRegion *metapb.Region
// There are two regions involved in a merge, and both should carry a MergeRegion
// step so that other schedulers keep their hands off them. However, TiKV only needs
// the merge request on the region being merged from, so the IsPassive flag marks
// the region that does not need to send a merge request to TiKV.
IsPassive bool
}
func (mr MergeRegion) String() string {
return fmt.Sprintf("merge region %v into region %v", mr.FromRegion.GetId(), mr.ToRegion.GetId())
}
// IsFinish checks if current step is finished
func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool {
if mr.IsPassive {
return !bytes.Equal(region.Region.StartKey, mr.ToRegion.StartKey) || !bytes.Equal(region.Region.EndKey, mr.ToRegion.EndKey)
}
return false
}
// Influence calculates the store difference that the current step makes.
func (mr MergeRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
if mr.IsPassive {
for _, p := range region.GetPeers() {
o := opInfluence.GetStoreInfluence(p.GetStoreId())
o.RegionCount--
if region.Leader.GetId() == p.GetId() {
o.LeaderCount--
}
}
}
}
// SplitRegion is an OperatorStep that splits a region.
type SplitRegion struct {
StartKey, EndKey []byte
}
func (sr SplitRegion) String() string {
return "split region"
}
// IsFinish checks if current step is finished.
func (sr SplitRegion) IsFinish(region *core.RegionInfo) bool {
return !bytes.Equal(region.StartKey, sr.StartKey) || !bytes.Equal(region.EndKey, sr.EndKey)
}
// Influence calculates the store difference that the current step makes.
func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) {
for _, p := range region.Peers {
inf := opInfluence.GetStoreInfluence(p.GetStoreId())
inf.RegionCount++
if region.Leader.GetId() == p.GetId() {
inf.LeaderCount++
}
}
}
// Operator contains execution steps generated by scheduler.
type Operator struct {
desc string
regionID uint64
regionEpoch *metapb.RegionEpoch
kind OperatorKind
steps []OperatorStep
currentStep int32
createTime time.Time
stepTime int64
level core.PriorityLevel
}
// NewOperator creates a new operator.
func NewOperator(desc string, regionID uint64, regionEpoch *metapb.RegionEpoch, kind OperatorKind, steps ...OperatorStep) *Operator {
return &Operator{
desc: desc,
regionID: regionID,
regionEpoch: regionEpoch,
kind: kind,
steps: steps,
createTime: time.Now(),
stepTime: time.Now().UnixNano(),
level: core.NormalPriority,
}
}
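// Illustrative sketch (not from the source): building an operator by hand. The store IDs
// are made up for the example.
func exampleTransferLeaderOperator(region *core.RegionInfo) *Operator {
step := TransferLeader{FromStore: 1, ToStore: 2}
return NewOperator("example-transfer-leader", region.GetId(), region.GetRegionEpoch(), OpLeader, step)
}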
func (o *Operator) String() string {
s := fmt.Sprintf("%s (kind:%s, region:%v(%v,%v), createAt:%s, currentStep:%v, steps:%+v) ", o.desc, o.kind, o.regionID, o.regionEpoch.GetVersion(), o.regionEpoch.GetConfVer(), o.createTime, atomic.LoadInt32(&o.currentStep), o.steps)
if o.IsTimeout() {
s = s + "timeout"
}
if o.IsFinish() {
s = s + "finished"
}
return s
}
// MarshalJSON serializes the operator to JSON.
func (o *Operator) MarshalJSON() ([]byte, error) {
return []byte(`"` + o.String() + `"`), nil
}
// Desc returns the operator's short description.
func (o *Operator) Desc() string {
return o.desc
}
// SetDesc sets the description for the operator.
func (o *Operator) SetDesc(desc string) {
o.desc = desc
}
// AttachKind attaches an operator kind for the operator.
func (o *Operator) AttachKind(kind OperatorKind) {
o.kind |= kind
}
// RegionID returns the ID of the region the operator targets.
func (o *Operator) RegionID() uint64 {
return o.regionID
}
// RegionEpoch returns the region's epoch that is attached to the operator.
func (o *Operator) RegionEpoch() *metapb.RegionEpoch {
return o.regionEpoch
}
// Kind returns operator's kind.
func (o *Operator) Kind() OperatorKind {
return o.kind
}
// ElapsedTime returns duration since it was created.
func (o *Operator) ElapsedTime() time.Duration {
return time.Since(o.createTime)
}
date.rs
static RFC_5322_HELP_STRING: &str = "output date and time in RFC 5322 format.
Example: Mon, 14 Aug 2006 02:34:56 -0600";
static RFC_3339_HELP_STRING: &str = "output date/time in RFC 3339 format.
FMT='date', 'seconds', or 'ns'
for date and time to the indicated precision.
Example: 2006-08-14 02:34:56-06:00";
#[cfg(not(any(target_os = "macos", target_os = "redox")))]
static OPT_SET_HELP_STRING: &str = "set time described by STRING";
#[cfg(target_os = "macos")]
static OPT_SET_HELP_STRING: &str = "set time described by STRING (not available on mac yet)";
#[cfg(target_os = "redox")]
static OPT_SET_HELP_STRING: &str = "set time described by STRING (not available on redox yet)";
/// Settings for this program, parsed from the command line
struct Settings {
utc: bool,
format: Format,
date_source: DateSource,
set_to: Option<DateTime<FixedOffset>>,
}
/// Various ways of displaying the date
enum Format {
Iso8601(Iso8601Format),
Rfc5322,
Rfc3339(Rfc3339Format),
Custom(String),
Default,
}
/// Various places that dates can come from
enum DateSource {
Now,
Custom(String),
File(PathBuf),
Human(Duration),
}
enum Iso8601Format {
Date,
Hours,
Minutes,
Seconds,
Ns,
}
impl<'a> From<&'a str> for Iso8601Format {
fn from(s: &str) -> Self {
match s {
HOURS => Self::Hours,
MINUTES => Self::Minutes,
SECONDS => Self::Seconds,
NS => Self::Ns,
DATE => Self::Date,
// Note: This is caught by clap via `possible_values`
_ => unreachable!(),
}
}
}
enum Rfc3339Format {
Date,
Seconds,
Ns,
}
impl<'a> From<&'a str> for Rfc3339Format {
fn from(s: &str) -> Self {
match s {
DATE => Self::Date,
SECONDS => Self::Seconds,
NS => Self::Ns,
// Should be caught by clap
_ => panic!("Invalid format: {s}"),
}
}
}
#[uucore::main]
#[allow(clippy::cognitive_complexity)]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let matches = uu_app().try_get_matches_from(args)?;
let format = if let Some(form) = matches.get_one::<String>(OPT_FORMAT) {
if !form.starts_with('+') {
return Err(USimpleError::new(
1,
format!("invalid date {}", form.quote()),
));
}
let form = form[1..].to_string();
Format::Custom(form)
} else if let Some(fmt) = matches
.get_many::<String>(OPT_ISO_8601)
.map(|mut iter| iter.next().unwrap_or(&DATE.to_string()).as_str().into())
{
Format::Iso8601(fmt)
} else if matches.get_flag(OPT_RFC_EMAIL) {
Format::Rfc5322
} else if let Some(fmt) = matches
.get_one::<String>(OPT_RFC_3339)
.map(|s| s.as_str().into())
{
Format::Rfc3339(fmt)
} else {
Format::Default
};
let date_source = if let Some(date) = matches.get_one::<String>(OPT_DATE) {
if let Ok(duration) = parse_datetime::from_str(date.as_str()) {
DateSource::Human(duration)
} else {
DateSource::Custom(date.into())
}
} else if let Some(file) = matches.get_one::<String>(OPT_FILE) {
DateSource::File(file.into())
} else {
DateSource::Now
};
let set_to = match matches.get_one::<String>(OPT_SET).map(parse_date) {
None => None,
Some(Err((input, _err))) => {
return Err(USimpleError::new(
1,
format!("invalid date {}", input.quote()),
));
}
Some(Ok(date)) => Some(date),
};
let settings = Settings {
utc: matches.get_flag(OPT_UNIVERSAL),
format,
date_source,
set_to,
};
if let Some(date) = settings.set_to {
// All set time functions expect UTC datetimes.
let date: DateTime<Utc> = if settings.utc {
date.with_timezone(&Utc)
} else {
date.into()
};
return set_system_datetime(date);
} else {
// Get the current time, either in the local time zone or UTC.
let now: DateTime<FixedOffset> = if settings.utc {
let now = Utc::now();
now.with_timezone(&now.offset().fix())
} else {
let now = Local::now();
now.with_timezone(now.offset())
};
// Iterate over all dates - whether it's a single date or a file.
let dates: Box<dyn Iterator<Item = _>> = match settings.date_source {
DateSource::Custom(ref input) => {
let date = parse_date(input.clone());
let iter = std::iter::once(date);
Box::new(iter)
}
DateSource::Human(relative_time) => {
// Get the current DateTime<FixedOffset> for things like "1 year ago"
let current_time = DateTime::<FixedOffset>::from(Local::now());
// Double-check that current_time + relative_time does not overflow, since an
// out-of-range result would make chrono's DateTime addition panic.
match current_time.checked_add_signed(relative_time) {
Some(date) => {
let iter = std::iter::once(Ok(date));
Box::new(iter)
}
None => {
return Err(USimpleError::new(
1,
format!("invalid date {}", relative_time),
));
}
}
}
DateSource::File(ref path) => {
if path.is_dir() {
return Err(USimpleError::new(
2,
format!("expected file, got directory {}", path.quote()),
));
}
let file = File::open(path)
.map_err_context(|| path.as_os_str().to_string_lossy().to_string())?;
let lines = BufReader::new(file).lines();
let iter = lines.map_while(Result::ok).map(parse_date);
Box::new(iter)
}
DateSource::Now => {
let iter = std::iter::once(Ok(now));
Box::new(iter)
}
};
let format_string = make_format_string(&settings);
// Format all the dates
for date in dates {
match date {
Ok(date) => {
// GNU `date` uses `%N` for nanoseconds, whereas the chrono crate uses `%f`
let format_string = &format_string.replace("%N", "%f");
// Refuse to pass this string to chrono, since it currently crashes inside the crate
if format_string.contains("%#z") {
return Err(USimpleError::new(
1,
format!("invalid format {}", format_string.replace("%f", "%N")),
));
}
// Hack to work around panic in chrono,
// TODO - remove when a fix for https://github.com/chronotope/chrono/issues/623 is released
let format_items = StrftimeItems::new(format_string);
if format_items.clone().any(|i| i == Item::Error) {
return Err(USimpleError::new(
1,
format!("invalid format {}", format_string.replace("%f", "%N")),
));
}
let formatted = date
.format_with_items(format_items)
.to_string()
.replace("%f", "%N");
println!("{formatted}");
}
Err((input, _err)) => show!(USimpleError::new(
1,
format!("invalid date {}", input.quote())
)),
}
}
}
Ok(())
}
pub fn uu_app() -> Command {
Command::new(uucore::util_name())
.version(crate_version!())
.about(ABOUT)
.override_usage(format_usage(USAGE))
.infer_long_args(true)
.arg(
Arg::new(OPT_DATE)
.short('d')
.long(OPT_DATE)
.value_name("STRING")
.help("display time described by STRING, not 'now'"),
)
.arg(
Arg::new(OPT_FILE)
.short('f')
.long(OPT_FILE)
.value_name("DATEFILE")
.value_hint(clap::ValueHint::FilePath)
.help("like --date; once for each line of DATEFILE"),
)
.arg(
Arg::new(OPT_ISO_8601)
.short('I')
.long(OPT_ISO_8601)
.value_name("FMT")
.value_parser(ShortcutValueParser::new([
DATE, HOURS, MINUTES, SECONDS, NS,
]))
.num_args(0..=1)
.default_missing_value(OPT_DATE)
.help(ISO_8601_HELP_STRING),
)
.arg(
Arg::new(OPT_RFC_EMAIL)
.short('R')
.long(OPT_RFC_EMAIL)
.help(RFC_5322_HELP_STRING)
.action(ArgAction::SetTrue),
)
.arg(
Arg::new(OPT_RFC_3339)
.long(OPT_RFC_3339)
.value_name("FMT")
.value_parser(ShortcutValueParser::new([DATE, SECONDS, NS]))
.help(RFC_3339_HELP_STRING),
)
.arg(
Arg::new(OPT_DEBUG)
.long(OPT_DEBUG)
.help("annotate the parsed date, and warn about questionable usage to stderr")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new(OPT_REFERENCE)
.short('r')
.long(OPT_REFERENCE)
.value_name("FILE")
.value_hint(clap::ValueHint::AnyPath)
.help("display the last modification time of FILE"),
)
.arg(
Arg::new(OPT_SET)
.short('s')
.long(OPT_SET)
.value_name("STRING")
.help(OPT_SET_HELP_STRING),
)
.arg(
Arg::new(OPT_UNIVERSAL)
.short('u')
.long(OPT_UNIVERSAL)
.alias(OPT_UNIVERSAL_2)
.help("print or set Coordinated Universal Time (UTC)")
.action(ArgAction::SetTrue),
)
.arg(Arg::new(OPT_FORMAT))
}
/// Return the appropriate format string for the given settings.
fn make_format_string(settings: &Settings) -> &str {
match settings.format {
Format::Iso8601(ref fmt) => match *fmt {
Iso8601Format::Date => "%F",
Iso8601Format::Hours => "%FT%H%:z",
Iso8601Format::Minutes => "%FT%H:%M%:z",
Iso8601Format::Seconds => "%FT%T%:z",
Iso8601Format::Ns => "%FT%T,%f%:z",
},
Format::Rfc5322 => "%a, %d %h %Y %T %z",
Format::Rfc3339(ref fmt) => match *fmt {
Rfc3339Format::Date => "%F",
Rfc3339Format::Seconds => "%F %T%:z",
Rfc3339Format::Ns => "%F %T.%f%:z",
},
Format::Custom(ref fmt) => fmt,
Format::Default => "%c",
}
}
/// Parse a `String` into a `DateTime`.
/// If it fails, return a tuple of the `String` along with its `ParseError`.
fn parse_date<S: AsRef<str> + Clone>(
s: S,
) -> Result<DateTime<FixedOffset>, (String, chrono::format::ParseError)> {
// TODO: The GNU date command can parse a wide variety of inputs.
s.as_ref().parse().map_err(|e| (s.as_ref().into(), e))
}
#[cfg(not(any(unix, windows)))]
fn set_system_datetime(_date: DateTime<Utc>) -> UResult<()> {
unimplemented!("setting date not implemented (unsupported target)");
}
#[cfg(target_os = "macos")]
fn set_system_datetime(_date: DateTime<Utc>) -> UResult<()> {
    // The original body is elided in this excerpt; a minimal stub consistent with the
    // "not available on mac yet" help text above would report an error:
    Err(USimpleError::new(1, "setting the date is not supported on macOS"))
}
registry.go
// To show service is healthy or not. Default value is true.
Healthy bool
// To show service is isolate or not. Default value is false.
Isolate bool
// TTL timeout. Required if the node reports liveness via heartbeat; if unset, the server returns ErrorCode-400141.
TTL int
// Optional. Timeout for a single query; defaults to the global config.
// The total time budget is (1+RetryCount) * Timeout.
Timeout time.Duration
// Optional. Retry count; defaults to the global config.
RetryCount int
}
// RegistryOption is polaris option.
type RegistryOption func(o *registryOptions)
// Registry is polaris registry.
type Registry struct {
opt registryOptions
provider polaris.ProviderAPI
consumer polaris.ConsumerAPI
}
// WithRegistryServiceToken with ServiceToken option.
func WithRegistryServiceToken(serviceToken string) RegistryOption {
return func(o *registryOptions) { o.ServiceToken = serviceToken }
}
// WithRegistryWeight with Weight option.
func WithRegistryWeight(weight int) RegistryOption {
return func(o *registryOptions) { o.Weight = weight }
}
// WithRegistryHealthy with Healthy option.
func WithRegistryHealthy(healthy bool) RegistryOption {
return func(o *registryOptions) { o.Healthy = healthy }
}
// WithRegistryIsolate with Isolate option.
func WithRegistryIsolate(isolate bool) RegistryOption {
return func(o *registryOptions) { o.Isolate = isolate }
}
// WithRegistryTTL with TTL option.
func WithRegistryTTL(TTL int) RegistryOption {
return func(o *registryOptions) { o.TTL = TTL }
}
// WithRegistryTimeout with Timeout option.
func WithRegistryTimeout(timeout time.Duration) RegistryOption {
return func(o *registryOptions) { o.Timeout = timeout }
}
// WithRegistryRetryCount with RetryCount option.
func WithRegistryRetryCount(retryCount int) RegistryOption {
return func(o *registryOptions) { o.RetryCount = retryCount }
}
// Register registers the instance with all of its endpoints.
func (r *Registry) Register(_ context.Context, instance *registry.ServiceInstance) error {
id := uuid.NewString()
for _, endpoint := range instance.Endpoints {
u, err := url.Parse(endpoint)
if err != nil {
return err
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return err
}
portNum, err := strconv.Atoi(port)
if err != nil {
return err
}
// metadata
if instance.Metadata == nil {
instance.Metadata = make(map[string]string)
}
instance.Metadata["merge"] = id
if _, ok := instance.Metadata["weight"]; !ok {
instance.Metadata["weight"] = strconv.Itoa(r.opt.Weight)
}
weight, _ := strconv.Atoi(instance.Metadata["weight"])
_, err = r.provider.RegisterInstance(
&polaris.InstanceRegisterRequest{
InstanceRegisterRequest: model.InstanceRegisterRequest{
Service: instance.Name,
ServiceToken: r.opt.ServiceToken,
Namespace: r.opt.Namespace,
Host: host,
Port: portNum,
Protocol: &u.Scheme,
Weight: &weight,
Priority: &r.opt.Priority,
Version: &instance.Version,
Metadata: instance.Metadata,
Healthy: &r.opt.Healthy,
Isolate: &r.opt.Isolate,
TTL: &r.opt.TTL,
Timeout: &r.opt.Timeout,
RetryCount: &r.opt.RetryCount,
},
},
)
if err != nil {
return err
}
}
return nil
}
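// Illustrative sketch (not from the source): registering an instance whose endpoint URLs
// carry the scheme, host, and port that Register parses out above.
//
//	inst := &registry.ServiceInstance{
//		Name:      "helloworld",
//		Version:   "v1.0.0",
//		Endpoints: []string{"grpc://127.0.0.1:9000"},
//	}
//	err := r.Register(context.Background(), inst) // r is a configured *Registry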
// Deregister removes the registration for all of the instance's endpoints.
func (r *Registry) Deregister(_ context.Context, serviceInstance *registry.ServiceInstance) error {
for _, endpoint := range serviceInstance.Endpoints {
// get url
u, err := url.Parse(endpoint)
if err != nil {
return err
}
// get host and port
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return err
}
// port to int
portNum, err := strconv.Atoi(port)
if err != nil {
return err
}
// Deregister
err = r.provider.Deregister(
&polaris.InstanceDeRegisterRequest{
InstanceDeRegisterRequest: model.InstanceDeRegisterRequest{
Service: serviceInstance.Name,
ServiceToken: r.opt.ServiceToken,
Namespace: r.opt.Namespace,
Host: host,
Port: portNum,
Timeout: &r.opt.Timeout,
RetryCount: &r.opt.RetryCount,
},
},
)
if err != nil {
return err
}
}
return nil
}
// GetService returns the in-memory service instances for the given service name.
func (r *Registry) GetService(_ context.Context, serviceName string) ([]*registry.ServiceInstance, error) {
// get all instances
instancesResponse, err := r.consumer.GetInstances(&polaris.GetInstancesRequest{
GetInstancesRequest: model.GetInstancesRequest{
Service: serviceName,
Namespace: r.opt.Namespace,
Timeout: &r.opt.Timeout,
RetryCount: &r.opt.RetryCount,
SkipRouteFilter: true,
},
})
if err != nil {
return nil, err
}
serviceInstances := instancesToServiceInstances(merge(instancesResponse.GetInstances()))
return serviceInstances, nil
}
func merge(instances []model.Instance) map[string][]model.Instance {
m := make(map[string][]model.Instance)
for _, instance := range instances {
if v, ok := m[instance.GetMetadata()["merge"]]; ok {
m[instance.GetMetadata()["merge"]] = append(v, instance)
} else {
m[instance.GetMetadata()["merge"]] = []model.Instance{instance}
}
}
return m
}
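// Illustrative sketch (not from the source): Register stamps every endpoint of one
// ServiceInstance with the same random "merge" metadata value, so merge() groups the
// per-endpoint Polaris instances back into logical instances.
//
//	grouped := merge(instancesResponse.GetInstances())
//	for id, endpoints := range grouped {
//		fmt.Printf("logical instance %s has %d endpoints\n", id, len(endpoints))
//	}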
// Watch creates a watcher according to the service name.
func (r *Registry) Watch(ctx context.Context, serviceName string) (registry.Watcher, error) {
return newWatcher(ctx, r.opt.Namespace, serviceName, r.consumer)
}
// Watcher watches service instance changes via a Polaris subscription.
type Watcher struct {
ServiceName string
Namespace string
Ctx context.Context
Cancel context.CancelFunc
Channel <-chan model.SubScribeEvent
service *model.InstancesResponse
ServiceInstances map[string][]model.Instance
first bool
}
func newWatcher(ctx context.Context, namespace string, serviceName string, consumer polaris.ConsumerAPI) (*Watcher, error) {
watchServiceResponse, err := consumer.WatchService(&polaris.WatchServiceRequest{
WatchServiceRequest: model.WatchServiceRequest{
Key: model.ServiceKey{
Namespace: namespace,
Service: serviceName,
},
},
})
if err != nil {
return nil, err
}
w := &Watcher{
Namespace: namespace,
ServiceName: serviceName,
Channel: watchServiceResponse.EventChannel,
service: watchServiceResponse.GetAllInstancesResp,
ServiceInstances: merge(watchServiceResponse.GetAllInstancesResp.GetInstances()),
}
w.Ctx, w.Cancel = context.WithCancel(ctx)
return w, nil
}
// Next returns services in the following two cases:
// 1. the first time it is called, if the service instance list is not empty.
// 2. whenever any service instance change is detected.
// If neither condition is met, it blocks until the context deadline is exceeded or the context is canceled.
func (w *Watcher) Next() ([]*registry.ServiceInstance, error) {
if !w.first {
w.first = true
if len(w.ServiceInstances) > 0 {
return instancesToServiceInstances(w.ServiceInstances), nil
}
}
select {
case <-w.Ctx.Done():
return nil, w.Ctx.Err()
case event := <-w.Channel:
if event.GetSubScribeEventType() == model.EventInstance {
// This is always true today, but check it in case the set of event types ever changes.
if instanceEvent, ok := event.(*model.InstanceEvent); ok {
// handle DeleteEvent
if instanceEvent.DeleteEvent != nil {
for _, instance := range instanceEvent.DeleteEvent.Instances {
delete(w.ServiceInstances, instance.GetMetadata()["merge"])
}
}
// handle UpdateEvent
if instanceEvent.UpdateEvent != nil {
for _, update := range instanceEvent.UpdateEvent.UpdateList {
if v, ok := w.ServiceInstances[update.After.GetMetadata()["merge"]]; ok {
var nv []model.Instance
m := map[string]model.Instance{}
for _, ins := range v {
m[ins.GetId()] = ins
}
m[update.After.GetId()] = update.After
for _, ins := range m {
if ins.IsHealthy() {
nv = append(nv, ins)
}
}
w.ServiceInstances[update.After.GetMetadata()["merge"]] = nv
if len(nv) == 0 {
delete(w.ServiceInstances, update.After.GetMetadata()["merge"])
}
} else {
if update.After.IsHealthy() {
w.ServiceInstances[update.After.GetMetadata()["merge"]] = []model.Instance{update.After}
}
}
}
}
// handle AddEvent
if instanceEvent.AddEvent != nil {
for _, instance := range instanceEvent.AddEvent.Instances {
if v, ok := w.ServiceInstances[instance.GetMetadata()["merge"]]; ok {
var nv []model.Instance
m := map[string]model.Instance{}
for _, ins := range v {
m[ins.GetId()] = ins
}
m[instance.GetId()] = instance
for _, ins := range m {
if ins.IsHealthy() {
nv = append(nv, ins)
}
}
if len(nv) != 0 {
w.ServiceInstances[instance.GetMetadata()["merge"]] = nv
}
} else {
if instance.IsHealthy() {
w.ServiceInstances[instance.GetMetadata()["merge"]] = []model.Instance{instance}
}
}
}
}
}
return instancesToServiceInstances(w.ServiceInstances), nil
}
}
return instancesToServiceInstances(w.ServiceInstances), nil
}
// Stop close the watcher.
func (w *Watcher) Stop() error {
w.Cancel()
return nil
}
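// Illustrative sketch (not from the source): a typical consumer loop over the watcher.
//
//	watcher, _ := r.Watch(ctx, "helloworld")
//	defer watcher.Stop()
//	for {
//		instances, err := watcher.Next() // blocks until the instance set changes
//		if err != nil {
//			break // context canceled or deadline exceeded
//		}
//		updateEndpoints(instances) // hypothetical consumer-side handler
//	}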
func instancesToServiceInstances(instances map[string][]model.Instance) []*registry.ServiceInstance {
serviceInstances := make([]*registry.ServiceInstance, 0, len(instances))
for _, inss := range instances {
if len(inss) == 0 {
continue
}
ins := &registry.ServiceInstance{
ID: inss[0].GetId(),
Name: inss[0].GetService(),
Version: inss[0].GetVersion(),
Metadata: inss[0].GetMetadata(),
}
for _, item := range inss {
if item.IsHealthy() {
ins.Endpoints = append(ins.Endpoints, fmt.Sprintf("%s://%s:%d", item.GetProtocol(), item.GetHost(), item.GetPort()))
}
}
if len(ins.Endpoints) != 0 {
serviceInstances = append(serviceInstances, ins)
}
}
// The tail of this function is elided in the excerpt; the natural completion returns
// the accumulated instances.
return serviceInstances
}
device_route.go
// AddPort augments the current set of routes with new routes corresponding to the logical port "lp". If the routes have
// not been built yet then use logical ports "lps" to compute all current routes (lps includes lp). The receiver in the
// signature below is reconstructed; this excerpt starts mid-declaration.
func (dr *DeviceRoutes) AddPort(ctx context.Context, lp *voltha.LogicalPort, lps []*voltha.LogicalPort) error {
logger.Debugw("add-port-to-routes", log.Fields{"port": lp, "len-logical-ports": len(lps)})
dr.routeBuildLock.Lock()
if len(dr.Routes) == 0 {
dr.routeBuildLock.Unlock()
return dr.ComputeRoutes(ctx, lps)
}
// A set of routes exists
if err := dr.addPortAndVerifyPrecondition(lp); err != nil {
dr.reset()
dr.routeBuildLock.Unlock()
return err
}
defer dr.routeBuildLock.Unlock()
// Update the set of root ports, if applicable
if lp.RootPort {
dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
}
var copyFromNNIPort *voltha.LogicalPort
// Setup the physical ports to logical ports map
nniPorts := make([]*voltha.LogicalPort, 0)
for _, lport := range dr.logicalPorts {
if lport.RootPort {
nniPorts = append(nniPorts, lport)
if copyFromNNIPort == nil && lport.OfpPort.PortNo != lp.OfpPort.PortNo {
copyFromNNIPort = lport
}
}
}
if copyFromNNIPort == nil {
// Trying to add the same NNI port. Just return
return nil
}
// Adding NNI Port? If we are here we already have an NNI port with a set of routes. Just copy the existing
// routes using an existing NNI port
if lp.RootPort {
dr.copyFromExistingNNIRoutes(lp, copyFromNNIPort)
return nil
}
// Adding a UNI port
for _, nniPort := range nniPorts {
childPonPorts := dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
// Ports may not have been cached yet - get the device info which sets the PON port cache
if _, err := dr.getDevice(ctx, lp.DeviceId); err != nil {
dr.reset()
return err
}
childPonPorts = dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
dr.reset()
return status.Errorf(codes.FailedPrecondition, "no-pon-ports-%s", lp.DeviceId)
}
}
// We use the first PON port on the child device
childPonPort := childPonPorts[0]
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}] = []Hop{
{DeviceID: nniPort.DeviceId, Ingress: nniPort.DevicePortNo, Egress: childPonPort.Peers[0].PortNo},
{DeviceID: lp.DeviceId, Ingress: childPonPort.PortNo, Egress: lp.DevicePortNo},
}
dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}])
}
return nil
}
// Print prints routes
func (dr *DeviceRoutes) Print() error {
logger.Debugw("Print", log.Fields{"logical-device-id": dr.logicalDeviceID, "logical-ports": dr.logicalPorts})
if logger.V(log.DebugLevel) {
output := ""
routeNumber := 1
for k, v := range dr.Routes {
key := fmt.Sprintf("LP:%d->LP:%d", k.Ingress, k.Egress)
val := ""
for _, i := range v {
val += fmt.Sprintf("{%d->%s->%d},", i.Ingress, i.DeviceID, i.Egress)
}
val = val[:len(val)-1]
output += fmt.Sprintf("%d:{%s=>%s} ", routeNumber, key, fmt.Sprintf("[%s]", val))
routeNumber++
}
if len(dr.Routes) == 0 {
logger.Debugw("no-routes-found", log.Fields{"logical-device-id": dr.logicalDeviceID})
} else {
logger.Debugw("graph_routes", log.Fields{"lDeviceId": dr.logicalDeviceID, "Routes": output})
}
}
return nil
}
// IsUpToDate returns true if the routes are up to date with the logical device.
func (dr *DeviceRoutes) IsUpToDate(ld *voltha.LogicalDevice) bool {
dr.routeBuildLock.Lock()
defer dr.routeBuildLock.Unlock()
numNNI, numUNI := 0, 0
if ld != nil {
if len(dr.logicalPorts) != len(ld.Ports) {
return false
}
numNNI = len(dr.RootPorts)
numUNI = len(ld.Ports) - numNNI
}
return len(dr.Routes) == numNNI*numUNI*2
}
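// Illustrative note (not from the source): the expected count above is one forward and
// one reverse route per NNI/UNI pair — e.g. 2 NNI ports and 8 UNI ports give 2*8*2 = 32.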
// getDevicePonPorts returns all the PON ports of a device whose peer device ID is peerDeviceID
func (dr *DeviceRoutes) getDevicePonPorts(deviceID string, peerDeviceID string) []*voltha.Port {
dr.devicesPonPortsLock.RLock()
defer dr.devicesPonPortsLock.RUnlock()
ponPorts := make([]*voltha.Port, 0)
ports, exist := dr.devicesPonPorts[deviceID]
if !exist {
return ponPorts
}
//fmt.Println("getDevicePonPorts", deviceID, peerDeviceID, ports)
for _, port := range ports {
for _, peer := range port.Peers {
if peer.DeviceId == peerDeviceID {
ponPorts = append(ponPorts, port)
}
}
}
return ponPorts
}
//getDevice returns the device from the model and updates the PON ports map of that device.
func (dr *DeviceRoutes) getDevice(ctx context.Context, deviceID string) (*voltha.Device, error) {
device, err := dr.getDeviceFromModel(ctx, deviceID)
if err != nil {
logger.Errorw("device-not-found", log.Fields{"deviceId": deviceID, "error": err})
return nil, err
}
dr.devicesPonPortsLock.Lock()
defer dr.devicesPonPortsLock.Unlock()
for _, port := range device.Ports {
if port.Type == voltha.Port_PON_ONU || port.Type == voltha.Port_PON_OLT {
dr.devicesPonPorts[device.Id] = append(dr.devicesPonPorts[device.Id], port)
}
}
return device, nil
}
//copyFromExistingNNIRoutes copies routes from an existing set of NNI routes
func (dr *DeviceRoutes) copyFromExistingNNIRoutes(newNNIPort *voltha.LogicalPort, copyFromNNIPort *voltha.LogicalPort) {
updatedRoutes := make(map[PathID][]Hop)
for key, val := range dr.Routes {
if key.Ingress == copyFromNNIPort.OfpPort.PortNo {
updatedRoutes[PathID{Ingress: newNNIPort.OfpPort.PortNo, Egress: key.Egress}] = []Hop{
{DeviceID: newNNIPort.DeviceId, Ingress: newNNIPort.DevicePortNo, Egress: val[0].Egress},
val[1],
}
}
if key.Egress == copyFromNNIPort.OfpPort.PortNo {
updatedRoutes[PathID{Ingress: key.Ingress, Egress: newNNIPort.OfpPort.PortNo}] = []Hop{
val[0],
{DeviceID: newNNIPort.DeviceId, Ingress: val[1].Ingress, Egress: newNNIPort.DevicePortNo},
}
}
updatedRoutes[key] = val
}
dr.Routes = updatedRoutes
}
// reset cleans up the device graph
func (dr *DeviceRoutes) reset() {
dr.rootPortsLock.Lock()
dr.RootPorts = make(map[uint32]uint32)
dr.rootPortsLock.Unlock()
	// Do not lock Routes and logicalPorts here; the calling function already holds routeBuildLock.
dr.Routes = make(map[PathID][]Hop)
dr.logicalPorts = make([]*voltha.LogicalPort, 0)
dr.devicesPonPortsLock.Lock()
dr.devicesPonPorts = make(map[string][]*voltha.Port)
dr.devicesPonPortsLock.Unlock()
}
//concatDeviceIDPortID formats a port ID using the device ID and the port number
func concatDeviceIDPortID(deviceID string, portNo uint32) string {
return fmt.Sprintf("%s:%d", deviceID, portNo)
}
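// e.g. concatDeviceIDPortID("olt-1", 65536) -> "olt-1:65536"; ComputeRoutes uses
// this as the key of its physical-to-logical port map.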
//getReverseRoute returns the reverse of the route
func getReverseRoute(route []Hop) []Hop {
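	// The body is not present in this extract; the sketch below is inferred from
	// how the function is used above (reverse the hop order and swap each hop's
	// ingress/egress ports), not the verbatim upstream implementation.
	reverse := make([]Hop, len(route))
	for idx, hop := range route {
		reverse[len(route)-1-idx] = Hop{DeviceID: hop.DeviceID, Ingress: hop.Egress, Egress: hop.Ingress}
	}
	return reverse
}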
device_route.go
ni-ports-combination")
}
return nil
}
// AddPort augments the current set of routes with new routes corresponding to the logical port "lp". If the routes have
// not been built yet then use logical port "lps" to compute all current routes (lps includes lp)
func (dr *DeviceRoutes) AddPort(ctx context.Context, lp *voltha.LogicalPort, lps []*voltha.LogicalPort) error {
logger.Debugw("add-port-to-routes", log.Fields{"port": lp, "len-logical-ports": len(lps)})
dr.routeBuildLock.Lock()
if len(dr.Routes) == 0 {
dr.routeBuildLock.Unlock()
return dr.ComputeRoutes(ctx, lps)
}
// A set of routes exists
if err := dr.addPortAndVerifyPrecondition(lp); err != nil {
dr.reset()
dr.routeBuildLock.Unlock()
return err
}
defer dr.routeBuildLock.Unlock()
// Update the set of root ports, if applicable
if lp.RootPort {
dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
}
var copyFromNNIPort *voltha.LogicalPort
// Setup the physical ports to logical ports map
nniPorts := make([]*voltha.LogicalPort, 0)
for _, lport := range dr.logicalPorts {
if lport.RootPort {
nniPorts = append(nniPorts, lport)
if copyFromNNIPort == nil && lport.OfpPort.PortNo != lp.OfpPort.PortNo {
copyFromNNIPort = lport
}
}
}
if copyFromNNIPort == nil {
// Trying to add the same NNI port. Just return
return nil
}
// Adding NNI Port? If we are here we already have an NNI port with a set of routes. Just copy the existing
// routes using an existing NNI port
if lp.RootPort {
dr.copyFromExistingNNIRoutes(lp, copyFromNNIPort)
return nil
}
// Adding a UNI port
for _, nniPort := range nniPorts {
childPonPorts := dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
// Ports may not have been cached yet - get the device info which sets the PON port cache
if _, err := dr.getDevice(ctx, lp.DeviceId); err != nil {
dr.reset()
return err
}
childPonPorts = dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
dr.reset()
return status.Errorf(codes.FailedPrecondition, "no-pon-ports-%s", lp.DeviceId)
}
}
// We use the first PON port on the child device
childPonPort := childPonPorts[0]
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}] = []Hop{
{DeviceID: nniPort.DeviceId, Ingress: nniPort.DevicePortNo, Egress: childPonPort.Peers[0].PortNo},
{DeviceID: lp.DeviceId, Ingress: childPonPort.PortNo, Egress: lp.DevicePortNo},
}
dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}])
}
return nil
}
// Print prints routes
func (dr *DeviceRoutes) Print() error {
logger.Debugw("Print", log.Fields{"logical-device-id": dr.logicalDeviceID, "logical-ports": dr.logicalPorts})
if logger.V(log.DebugLevel) {
output := ""
routeNumber := 1
for k, v := range dr.Routes {
key := fmt.Sprintf("LP:%d->LP:%d", k.Ingress, k.Egress)
val := ""
for _, i := range v {
val += fmt.Sprintf("{%d->%s->%d},", i.Ingress, i.DeviceID, i.Egress)
}
val = val[:len(val)-1]
output += fmt.Sprintf("%d:{%s=>%s} ", routeNumber, key, fmt.Sprintf("[%s]", val))
routeNumber++
}
if len(dr.Routes) == 0 {
logger.Debugw("no-routes-found", log.Fields{"logical-device-id": dr.logicalDeviceID})
} else {
logger.Debugw("graph_routes", log.Fields{"lDeviceId": dr.logicalDeviceID, "Routes": output})
}
}
return nil
}
// IsUpToDate returns true if the stored routes cover every NNI/UNI port pair of the given logical device
func (dr *DeviceRoutes) IsUpToDate(ld *voltha.LogicalDevice) bool {
dr.routeBuildLock.Lock()
defer dr.routeBuildLock.Unlock()
numNNI, numUNI := 0, 0
if ld != nil {
if len(dr.logicalPorts) != len(ld.Ports) {
return false
}
numNNI = len(dr.RootPorts)
numUNI = len(ld.Ports) - numNNI
}
return len(dr.Routes) == numNNI*numUNI*2
}
// getDevicePonPorts returns all the PON ports of a device whose peer device ID is peerDeviceID
func (dr *DeviceRoutes) getDevicePonPorts(deviceID string, peerDeviceID string) []*voltha.Port {
dr.devicesPonPortsLock.RLock()
defer dr.devicesPonPortsLock.RUnlock()
ponPorts := make([]*voltha.Port, 0)
ports, exist := dr.devicesPonPorts[deviceID]
if !exist {
return ponPorts
}
//fmt.Println("getDevicePonPorts", deviceID, peerDeviceID, ports)
for _, port := range ports {
for _, peer := range port.Peers {
if peer.DeviceId == peerDeviceID {
ponPorts = append(ponPorts, port)
}
}
}
return ponPorts
}
//getDevice returns the device from the model and updates the PON ports map of that device.
func (dr *DeviceRoutes) getDevice(ctx context.Context, deviceID string) (*voltha.Device, error) {
device, err := dr.getDeviceFromModel(ctx, deviceID)
if err != nil {
logger.Errorw("device-not-found", log.Fields{"deviceId": deviceID, "error": err})
return nil, err
}
dr.devicesPonPortsLock.Lock()
defer dr.devicesPonPortsLock.Unlock()
for _, port := range device.Ports {
if port.Type == voltha.Port_PON_ONU || port.Type == voltha.Port_PON_OLT {
dr.devicesPonPorts[device.Id] = append(dr.devicesPonPorts[device.Id], port)
}
}
return device, nil
}
//copyFromExistingNNIRoutes copies routes from an existing set of NNI routes
func (dr *DeviceRoutes) copyFromExistingNNIRoutes(newNNIPort *voltha.LogicalPort, copyFromNNIPort *voltha.LogicalPort) {
updatedRoutes := make(map[PathID][]Hop)
for key, val := range dr.Routes {
if key.Ingress == copyFromNNIPort.OfpPort.PortNo {
updatedRoutes[PathID{Ingress: newNNIPort.OfpPort.PortNo, Egress: key.Egress}] = []Hop{
{DeviceID: newNNIPort.DeviceId, Ingress: newNNIPort.DevicePortNo, Egress: val[0].Egress},
val[1],
}
}
if key.Egress == copyFromNNIPort.OfpPort.PortNo {
updatedRoutes[PathID{Ingress: key.Ingress, Egress: newNNIPort.OfpPort.PortNo}] = []Hop{
val[0],
{DeviceID: newNNIPort.DeviceId, Ingress: val[1].Ingress, Egress: newNNIPort.DevicePortNo},
}
}
updatedRoutes[key] = val
}
dr.Routes = updatedRoutes
}
// reset cleans up the device graph
func (dr *DeviceRoutes) reset() {
dr.rootPortsLock.Lock()
dr.RootPorts = make(map[uint32]uint32)
dr.rootPortsLock.Unlock()
	// Do not lock Routes and logicalPorts here; the calling function already holds routeBuildLock.
dr.Routes = make(map[PathID][]Hop)
dr.logicalPorts = make([]*voltha.LogicalPort, 0)
dr.devicesPonPortsLock.Lock()
dr.devicesPonPorts = make(map[string][]*voltha.Port)
dr.devicesPonPortsLock.Unlock()
}
device_route.go
// ComputeRoutes calculates all the routes between the logical ports. This will clear up any existing route
func (dr *DeviceRoutes) ComputeRoutes(ctx context.Context, lps []*voltha.LogicalPort) error {
dr.routeBuildLock.Lock()
defer dr.routeBuildLock.Unlock()
logger.Debugw("computing-all-routes", log.Fields{"len-logical-ports": len(lps)})
var err error
defer func() {
// On error, clear the routes - any flow request or a port add/delete will trigger the rebuild
if err != nil {
dr.reset()
}
}()
if len(lps) < 2 {
return status.Error(codes.FailedPrecondition, "not-enough-logical-ports")
}
dr.reset()
dr.logicalPorts = append(dr.logicalPorts, lps...)
// Setup the physical ports to logical ports map, the nni ports as well as the root ports map
physPortToLogicalPortMap := make(map[string]uint32)
nniPorts := make([]*voltha.LogicalPort, 0)
for _, lp := range lps {
physPortToLogicalPortMap[concatDeviceIDPortID(lp.DeviceId, lp.DevicePortNo)] = lp.OfpPort.PortNo
if lp.RootPort {
nniPorts = append(nniPorts, lp)
dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
}
}
if len(nniPorts) == 0 {
err = status.Error(codes.FailedPrecondition, "no nni port")
return err
}
var rootDevice *voltha.Device
var childDevice *voltha.Device
var copyFromNNIPort *voltha.LogicalPort
for idx, nniPort := range nniPorts {
if idx == 0 {
copyFromNNIPort = nniPort
} else if len(dr.Routes) > 0 {
dr.copyFromExistingNNIRoutes(nniPort, copyFromNNIPort)
return nil
}
// Get root device
rootDevice, err = dr.getDevice(ctx, nniPort.DeviceId)
if err != nil {
return err
}
if len(rootDevice.Ports) == 0 {
err = status.Errorf(codes.FailedPrecondition, "no-port-%s", rootDevice.Id)
return err
}
for _, rootDevicePort := range rootDevice.Ports {
if rootDevicePort.Type == voltha.Port_PON_OLT {
logger.Debugw("peers", log.Fields{"root-device-id": rootDevice.Id, "port-no": rootDevicePort.PortNo, "len-peers": len(rootDevicePort.Peers)})
for _, rootDevicePeer := range rootDevicePort.Peers {
childDevice, err = dr.getDevice(ctx, rootDevicePeer.DeviceId)
if err != nil {
return err
}
childPonPorts := dr.getDevicePonPorts(childDevice.Id, nniPort.DeviceId)
if len(childPonPorts) < 1 {
err = status.Errorf(codes.Aborted, "no-child-pon-port-%s", childDevice.Id)
return err
}
// We use the first PON port on the ONU whose parent is the root device.
childPonPort := childPonPorts[0].PortNo
for _, childDevicePort := range childDevice.Ports {
if childDevicePort.Type == voltha.Port_ETHERNET_UNI {
childLogicalPort, exist := physPortToLogicalPortMap[concatDeviceIDPortID(childDevice.Id, childDevicePort.PortNo)]
if !exist {
// This can happen if this logical port has not been created yet for that device
continue
}
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: childLogicalPort}] = []Hop{
{DeviceID: rootDevice.Id, Ingress: nniPort.DevicePortNo, Egress: rootDevicePort.PortNo},
{DeviceID: childDevice.Id, Ingress: childPonPort, Egress: childDevicePort.PortNo},
}
dr.Routes[PathID{Ingress: childLogicalPort, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: childLogicalPort}])
}
}
}
}
}
}
return nil
}
// addPortAndVerifyPrecondition verifies that the preconditions are met before proceeding with the addition of the new logical port
func (dr *DeviceRoutes) addPortAndVerifyPrecondition(lp *voltha.LogicalPort) error {
var exist, nniLogicalPortExist, uniLogicalPortExist bool
for _, existingLogicalPort := range dr.logicalPorts {
nniLogicalPortExist = nniLogicalPortExist || existingLogicalPort.RootPort
uniLogicalPortExist = uniLogicalPortExist || !existingLogicalPort.RootPort
exist = exist || existingLogicalPort.OfpPort.PortNo == lp.OfpPort.PortNo
if nniLogicalPortExist && uniLogicalPortExist && exist {
break
}
}
if !exist {
dr.logicalPorts = append(dr.logicalPorts, lp)
nniLogicalPortExist = nniLogicalPortExist || lp.RootPort
uniLogicalPortExist = uniLogicalPortExist || !lp.RootPort
}
// If we do not have both NNI and UNI ports then return an error
if !(nniLogicalPortExist && uniLogicalPortExist) {
fmt.Println("errors", nniLogicalPortExist, uniLogicalPortExist)
return status.Error(codes.FailedPrecondition, "no-uni-and-nni-ports-combination")
}
return nil
}
// AddPort augments the current set of routes with new routes corresponding to the logical port "lp". If the routes have
// not been built yet then use logical port "lps" to compute all current routes (lps includes lp)
func (dr *DeviceRoutes) AddPort(ctx context.Context, lp *voltha.LogicalPort, lps []*voltha.LogicalPort) error {
logger.Debugw("add-port-to-routes", log.Fields{"port": lp, "len-logical-ports": len(lps)})
dr.routeBuildLock.Lock()
if len(dr.Routes) == 0 {
dr.routeBuildLock.Unlock()
return dr.ComputeRoutes(ctx, lps)
}
// A set of routes exists
if err := dr.addPortAndVerifyPrecondition(lp); err != nil {
dr.reset()
dr.routeBuildLock.Unlock()
return err
}
defer dr.routeBuildLock.Unlock()
// Update the set of root ports, if applicable
if lp.RootPort {
dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
}
var copyFromNNIPort *voltha.LogicalPort
// Setup the physical ports to logical ports map
nniPorts := make([]*voltha.LogicalPort, 0)
for _, lport := range dr.logicalPorts {
if lport.RootPort {
nniPorts = append(nniPorts, lport)
if copyFromNNIPort == nil && lport.OfpPort.PortNo != lp.OfpPort.PortNo
|
}
}
if copyFromNNIPort == nil {
// Trying to add the same NNI port. Just return
return nil
}
// Adding NNI Port? If we are here we already have an NNI port with a set of routes. Just copy the existing
// routes using an existing NNI port
if lp.RootPort {
dr.copyFromExistingNNIRoutes(lp, copyFromNNIPort)
return nil
}
// Adding a UNI port
for _, nniPort := range nniPorts {
childPonPorts := dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
// Ports may not have been cached yet - get the device info which sets the PON port cache
if _, err := dr.getDevice(ctx, lp.DeviceId); err != nil {
dr.reset()
return err
}
childPonPorts = dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
dr.reset()
return status.Errorf(codes.FailedPrecondition, "no-pon-ports-%s", lp.DeviceId)
}
}
// We use the first PON port on the child device
childPonPort := childPonPorts[0]
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}] = []Hop{
{DeviceID: nniPort.DeviceId, Ingress: nniPort.DevicePortNo, Egress: childPonPort.Peers[0].PortNo},
			{DeviceID: lp.DeviceId, Ingress: childPonPort.PortNo, Egress: lp.DevicePortNo},
		}
		dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
			dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}])
	}
	return nil
}

device_route.go
// ComputeRoutes calculates the routes between the logical ports. This will clear up any existing route
func (dr *DeviceRoutes) ComputeRoutes(ctx context.Context, lps []*voltha.LogicalPort) error {
dr.routeBuildLock.Lock()
defer dr.routeBuildLock.Unlock()
logger.Debugw("computing-all-routes", log.Fields{"len-logical-ports": len(lps)})
var err error
defer func() {
// On error, clear the routes - any flow request or a port add/delete will trigger the rebuild
if err != nil {
dr.reset()
}
}()
if len(lps) < 2 {
return status.Error(codes.FailedPrecondition, "not-enough-logical-ports")
}
dr.reset()
dr.logicalPorts = append(dr.logicalPorts, lps...)
// Setup the physical ports to logical ports map, the nni ports as well as the root ports map
physPortToLogicalPortMap := make(map[string]uint32)
nniPorts := make([]*voltha.LogicalPort, 0)
for _, lp := range lps {
physPortToLogicalPortMap[concatDeviceIDPortID(lp.DeviceId, lp.DevicePortNo)] = lp.OfpPort.PortNo
if lp.RootPort {
nniPorts = append(nniPorts, lp)
dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
}
}
if len(nniPorts) == 0 {
err = status.Error(codes.FailedPrecondition, "no nni port")
return err
}
var rootDevice *voltha.Device
var childDevice *voltha.Device
var copyFromNNIPort *voltha.LogicalPort
for idx, nniPort := range nniPorts {
if idx == 0 {
copyFromNNIPort = nniPort
} else if len(dr.Routes) > 0 {
dr.copyFromExistingNNIRoutes(nniPort, copyFromNNIPort)
return nil
}
// Get root device
rootDevice, err = dr.getDevice(ctx, nniPort.DeviceId)
if err != nil {
return err
}
if len(rootDevice.Ports) == 0 {
err = status.Errorf(codes.FailedPrecondition, "no-port-%s", rootDevice.Id)
return err
}
for _, rootDevicePort := range rootDevice.Ports {
if rootDevicePort.Type == voltha.Port_PON_OLT {
logger.Debugw("peers", log.Fields{"root-device-id": rootDevice.Id, "port-no": rootDevicePort.PortNo, "len-peers": len(rootDevicePort.Peers)})
for _, rootDevicePeer := range rootDevicePort.Peers {
childDevice, err = dr.getDevice(ctx, rootDevicePeer.DeviceId)
if err != nil {
return err
}
childPonPorts := dr.getDevicePonPorts(childDevice.Id, nniPort.DeviceId)
if len(childPonPorts) < 1 {
err = status.Errorf(codes.Aborted, "no-child-pon-port-%s", childDevice.Id)
return err
}
// We use the first PON port on the ONU whose parent is the root device.
childPonPort := childPonPorts[0].PortNo
for _, childDevicePort := range childDevice.Ports {
if childDevicePort.Type == voltha.Port_ETHERNET_UNI {
childLogicalPort, exist := physPortToLogicalPortMap[concatDeviceIDPortID(childDevice.Id, childDevicePort.PortNo)]
if !exist {
// This can happen if this logical port has not been created yet for that device
continue
}
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: childLogicalPort}] = []Hop{
{DeviceID: rootDevice.Id, Ingress: nniPort.DevicePortNo, Egress: rootDevicePort.PortNo},
{DeviceID: childDevice.Id, Ingress: childPonPort, Egress: childDevicePort.PortNo},
}
dr.Routes[PathID{Ingress: childLogicalPort, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: childLogicalPort}])
}
}
}
}
}
}
return nil
}
// addPortAndVerifyPrecondition verifies that the preconditions are met before proceeding with the addition of the new logical port
func (dr *DeviceRoutes) addPortAndVerifyPrecondition(lp *voltha.LogicalPort) error {
var exist, nniLogicalPortExist, uniLogicalPortExist bool
for _, existingLogicalPort := range dr.logicalPorts {
nniLogicalPortExist = nniLogicalPortExist || existingLogicalPort.RootPort
uniLogicalPortExist = uniLogicalPortExist || !existingLogicalPort.RootPort
exist = exist || existingLogicalPort.OfpPort.PortNo == lp.OfpPort.PortNo
if nniLogicalPortExist && uniLogicalPortExist && exist {
			break
		}
	}
	if !exist {
dr.logicalPorts = append(dr.logicalPorts, lp)
nniLogicalPortExist = nniLogicalPortExist || lp.RootPort
uniLogicalPortExist = uniLogicalPortExist || !lp.RootPort
}
// If we do not have both NNI and UNI ports then return an error
if !(nniLogicalPortExist && uniLogicalPortExist) {
fmt.Println("errors", nniLogicalPortExist, uniLogicalPortExist)
return status.Error(codes.FailedPrecondition, "no-uni-and-nni-ports-combination")
}
return nil
}
// AddPort augments the current set of routes with new routes corresponding to the logical port "lp". If the routes have
// not been built yet then use logical port "lps" to compute all current routes (lps includes lp)
func (dr *DeviceRoutes) AddPort(ctx context.Context, lp *voltha.LogicalPort, lps []*voltha.LogicalPort) error {
logger.Debugw("add-port-to-routes", log.Fields{"port": lp, "len-logical-ports": len(lps)})
dr.routeBuildLock.Lock()
if len(dr.Routes) == 0 {
dr.routeBuildLock.Unlock()
return dr.ComputeRoutes(ctx, lps)
}
// A set of routes exists
if err := dr.addPortAndVerifyPrecondition(lp); err != nil {
dr.reset()
dr.routeBuildLock.Unlock()
return err
}
defer dr.routeBuildLock.Unlock()
// Update the set of root ports, if applicable
if lp.RootPort {
dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
}
var copyFromNNIPort *voltha.LogicalPort
// Setup the physical ports to logical ports map
nniPorts := make([]*voltha.LogicalPort, 0)
for _, lport := range dr.logicalPorts {
if lport.RootPort {
nniPorts = append(nniPorts, lport)
if copyFromNNIPort == nil && lport.OfpPort.PortNo != lp.OfpPort.PortNo {
copyFromNNIPort = lport
}
}
}
if copyFromNNIPort == nil {
// Trying to add the same NNI port. Just return
return nil
}
// Adding NNI Port? If we are here we already have an NNI port with a set of routes. Just copy the existing
// routes using an existing NNI port
if lp.RootPort {
dr.copyFromExistingNNIRoutes(lp, copyFromNNIPort)
return nil
}
// Adding a UNI port
for _, nniPort := range nniPorts {
childPonPorts := dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
// Ports may not have been cached yet - get the device info which sets the PON port cache
if _, err := dr.getDevice(ctx, lp.DeviceId); err != nil {
dr.reset()
return err
}
childPonPorts = dr.getDevicePonPorts(lp.DeviceId, nniPort.DeviceId)
if len(childPonPorts) == 0 || len(childPonPorts[0].Peers) == 0 {
dr.reset()
return status.Errorf(codes.FailedPrecondition, "no-pon-ports-%s", lp.DeviceId)
}
}
// We use the first PON port on the child device
childPonPort := childPonPorts[0]
dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}] = []Hop{
{DeviceID: nniPort.DeviceId, Ingress: nniPort.DevicePortNo, Egress: childPonPort.Peers[0].PortNo},
			{DeviceID: lp.DeviceId, Ingress: childPonPort.PortNo, Egress: lp.DevicePortNo},
		}
		dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
			dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}])
	}
	return nil
}

controller.go
ert-manager/pkg/metrics"
"github.com/jetstack/cert-manager/pkg/util"
utilfeature "github.com/jetstack/cert-manager/pkg/util/feature"
"github.com/jetstack/cert-manager/pkg/util/profiling"
)
const controllerAgentName = "cert-manager"
// This sets the informer's resync period to 10 hours
// following the controller-runtime defaults
// and following discussion: https://github.com/kubernetes-sigs/controller-runtime/pull/88#issuecomment-408500629
const resyncPeriod = 10 * time.Hour
func Run(opts *options.ControllerOptions, stopCh <-chan struct{}) error {
rootCtx := cmdutil.ContextWithStopCh(context.Background(), stopCh)
rootCtx, cancelContext := context.WithCancel(rootCtx)
defer cancelContext()
g, rootCtx := errgroup.WithContext(rootCtx)
rootCtx = logf.NewContext(rootCtx, nil, "controller")
log := logf.FromContext(rootCtx)
ctx, kubeCfg, err := buildControllerContext(rootCtx, opts)
if err != nil {
return fmt.Errorf("error building controller context (options %v): %v", opts, err)
}
enabledControllers := opts.EnabledControllers()
log.Info(fmt.Sprintf("enabled controllers: %s", enabledControllers.List()))
// Start metrics server
metricsLn, err := net.Listen("tcp", opts.MetricsListenAddress)
if err != nil {
return fmt.Errorf("failed to listen on prometheus address %s: %v", opts.MetricsListenAddress, err)
}
metricsServer := ctx.Metrics.NewServer(metricsLn)
g.Go(func() error {
<-rootCtx.Done()
// allow a timeout for graceful shutdown
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := metricsServer.Shutdown(ctx); err != nil {
return err
}
return nil
})
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting metrics server", "address", metricsLn.Addr())
if err := metricsServer.Serve(metricsLn); err != http.ErrServerClosed {
return err
}
return nil
})
// Start profiler if it is enabled
if opts.EnablePprof {
profilerLn, err := net.Listen("tcp", opts.PprofAddress)
if err != nil {
return fmt.Errorf("failed to listen on profiler address %s: %v", opts.PprofAddress, err)
}
profilerMux := http.NewServeMux()
// Add pprof endpoints to this mux
profiling.Install(profilerMux)
profilerServer := &http.Server{
Handler: profilerMux,
}
g.Go(func() error {
<-rootCtx.Done()
// allow a timeout for graceful shutdown
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
			if err := profilerServer.Shutdown(ctx); err != nil {
				return err
}
return nil
})
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting profiler", "address", profilerLn.Addr())
if err := profilerServer.Serve(profilerLn); err != http.ErrServerClosed {
return err
}
return nil
})
}
elected := make(chan struct{})
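	// The controllers below start only after this channel closes, i.e. once this
	// instance wins leader election (or immediately when LeaderElect is off).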
if opts.LeaderElect {
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting leader election")
leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(kubeCfg, "leader-election"))
if err != nil {
return fmt.Errorf("error creating leader election client: %v", err)
}
errorCh := make(chan error, 1)
if err := startLeaderElection(rootCtx, opts, leaderElectionClient, ctx.Recorder, leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
close(elected)
},
OnStoppedLeading: func() {
select {
case <-rootCtx.Done():
// context was canceled, just return
return
default:
errorCh <- errors.New("leader election lost")
}
},
}); err != nil {
return err
}
select {
case err := <-errorCh:
return err
default:
return nil
}
})
} else {
close(elected)
}
select {
case <-rootCtx.Done(): // Exit early if we are shutting down or if the errgroup has already exited with an error
// Wait for error group to complete and return
return g.Wait()
case <-elected: // Don't launch the controllers unless we have been elected leader
// Continue with setting up controller
}
for n, fn := range controller.Known() {
log := log.WithValues("controller", n)
// only run a controller if it's been enabled
if !enabledControllers.Has(n) {
log.V(logf.InfoLevel).Info("not starting controller as it's disabled")
continue
}
// don't run clusterissuers controller if scoped to a single namespace
if ctx.Namespace != "" && n == clusterissuers.ControllerName {
log.V(logf.InfoLevel).Info("not starting controller as cert-manager has been scoped to a single namespace")
continue
}
iface, err := fn(ctx)
if err != nil {
err = fmt.Errorf("error starting controller: %v", err)
cancelContext()
err2 := g.Wait() // Don't process errors, we already have an error
if err2 != nil {
return utilerrors.NewAggregate([]error{err, err2})
}
return err
}
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting controller")
// TODO: make this either a constant or a command line flag
workers := 5
return iface.Run(workers, rootCtx.Done())
})
}
log.V(logf.DebugLevel).Info("starting shared informer factories")
ctx.SharedInformerFactory.Start(rootCtx.Done())
ctx.KubeSharedInformerFactory.Start(rootCtx.Done())
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
ctx.GWShared.Start(rootCtx.Done())
}
err = g.Wait()
if err != nil {
return fmt.Errorf("error starting controller: %v", err)
}
log.V(logf.InfoLevel).Info("control loops exited")
return nil
}
func buildControllerContext(ctx context.Context, opts *options.ControllerOptions) (*controller.Context, *rest.Config, error) {
log := logf.FromContext(ctx, "build-context")
	// Load the user's Kubernetes config
kubeCfg, err := clientcmd.BuildConfigFromFlags(opts.APIServerHost, opts.Kubeconfig)
if err != nil {
return nil, nil, fmt.Errorf("error creating rest config: %s", err.Error())
}
kubeCfg.QPS = opts.KubernetesAPIQPS
kubeCfg.Burst = opts.KubernetesAPIBurst
// Add User-Agent to client
kubeCfg = rest.AddUserAgent(kubeCfg, util.CertManagerUserAgent)
// Create a cert-manager api client
intcl, err := clientset.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating internal group client: %s", err.Error())
}
// Create a Kubernetes api client
cl, err := kubernetes.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
var gatewayAvailable bool
// Check if the Gateway API feature gate was enabled
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
// check if the gateway API CRDs are available. If they are not found return an error
// which will cause cert-manager to crashloopbackoff
d := cl.Discovery()
resources, err := d.ServerResourcesForGroupVersion(gwapi.GroupVersion.String())
var GatewayAPINotAvailable = "the Gateway API CRDs do not seem to be present, but " + feature.ExperimentalGatewayAPISupport +
" is set to true. Please install the gateway-api CRDs."
switch {
case apierrors.IsNotFound(err):
return nil, nil, fmt.Errorf("%s (%w)", GatewayAPINotAvailable, err)
case err != nil:
return nil, nil, fmt.Errorf("while checking if the Gateway API CRD is installed: %w", err)
case len(resources.APIResources) == 0:
return nil, nil, fmt.Errorf("%s (found %d APIResources in %s)", GatewayAPINotAvailable, len(resources.APIResources), gwapi.GroupVersion.String())
default:
gatewayAvailable = true
}
}
// Create a GatewayAPI client.
gwcl, err := gwclient.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
nameservers := opts.DNS01RecursiveNameservers
if len(nameservers) == 0 {
nameservers = dnsutil.RecursiveNameservers
}
log.V(logf.InfoLevel).WithValues("nameservers", nameservers).Info("configured acme
|
if err := profilerServer.Shutdown(ctx); err != nil {
return err
|
random_line_split
|
controller.go
|
return utilerrors.NewAggregate([]error{err, err2})
}
return err
}
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting controller")
// TODO: make this either a constant or a command line flag
workers := 5
return iface.Run(workers, rootCtx.Done())
})
}
log.V(logf.DebugLevel).Info("starting shared informer factories")
ctx.SharedInformerFactory.Start(rootCtx.Done())
ctx.KubeSharedInformerFactory.Start(rootCtx.Done())
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
ctx.GWShared.Start(rootCtx.Done())
}
err = g.Wait()
if err != nil {
return fmt.Errorf("error starting controller: %v", err)
}
log.V(logf.InfoLevel).Info("control loops exited")
return nil
}
func buildControllerContext(ctx context.Context, opts *options.ControllerOptions) (*controller.Context, *rest.Config, error) {
log := logf.FromContext(ctx, "build-context")
	// Load the user's Kubernetes config
kubeCfg, err := clientcmd.BuildConfigFromFlags(opts.APIServerHost, opts.Kubeconfig)
if err != nil {
return nil, nil, fmt.Errorf("error creating rest config: %s", err.Error())
}
kubeCfg.QPS = opts.KubernetesAPIQPS
kubeCfg.Burst = opts.KubernetesAPIBurst
// Add User-Agent to client
kubeCfg = rest.AddUserAgent(kubeCfg, util.CertManagerUserAgent)
// Create a cert-manager api client
intcl, err := clientset.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating internal group client: %s", err.Error())
}
// Create a Kubernetes api client
cl, err := kubernetes.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
var gatewayAvailable bool
// Check if the Gateway API feature gate was enabled
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
// check if the gateway API CRDs are available. If they are not found return an error
// which will cause cert-manager to crashloopbackoff
d := cl.Discovery()
resources, err := d.ServerResourcesForGroupVersion(gwapi.GroupVersion.String())
var GatewayAPINotAvailable = "the Gateway API CRDs do not seem to be present, but " + feature.ExperimentalGatewayAPISupport +
" is set to true. Please install the gateway-api CRDs."
switch {
case apierrors.IsNotFound(err):
return nil, nil, fmt.Errorf("%s (%w)", GatewayAPINotAvailable, err)
case err != nil:
return nil, nil, fmt.Errorf("while checking if the Gateway API CRD is installed: %w", err)
case len(resources.APIResources) == 0:
return nil, nil, fmt.Errorf("%s (found %d APIResources in %s)", GatewayAPINotAvailable, len(resources.APIResources), gwapi.GroupVersion.String())
default:
gatewayAvailable = true
}
}
// Create a GatewayAPI client.
gwcl, err := gwclient.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
nameservers := opts.DNS01RecursiveNameservers
if len(nameservers) == 0 {
nameservers = dnsutil.RecursiveNameservers
}
log.V(logf.InfoLevel).WithValues("nameservers", nameservers).Info("configured acme dns01 nameservers")
HTTP01SolverResourceRequestCPU, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceRequestCPU)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceRequestCPU: %s", err.Error())
}
HTTP01SolverResourceRequestMemory, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceRequestMemory)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceRequestMemory: %s", err.Error())
}
HTTP01SolverResourceLimitsCPU, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceLimitsCPU)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceLimitsCPU: %s", err.Error())
}
HTTP01SolverResourceLimitsMemory, err := resource.ParseQuantity(opts.ACMEHTTP01SolverResourceLimitsMemory)
if err != nil {
return nil, nil, fmt.Errorf("error parsing ACMEHTTP01SolverResourceLimitsMemory: %s", err.Error())
}
// Create event broadcaster
// Add cert-manager types to the default Kubernetes Scheme so Events can be
// logged properly
intscheme.AddToScheme(scheme.Scheme)
gwscheme.AddToScheme(scheme.Scheme)
log.V(logf.DebugLevel).Info("creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(logf.WithInfof(log.V(logf.DebugLevel)).Infof)
eventBroadcaster.StartRecordingToSink(&clientv1.EventSinkImpl{Interface: cl.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(intcl, resyncPeriod, informers.WithNamespace(opts.Namespace))
kubeSharedInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(cl, resyncPeriod, kubeinformers.WithNamespace(opts.Namespace))
gwSharedInformerFactory := gwinformers.NewSharedInformerFactoryWithOptions(gwcl, resyncPeriod, gwinformers.WithNamespace(opts.Namespace))
acmeAccountRegistry := accounts.NewDefaultRegistry()
return &controller.Context{
RootContext: ctx,
StopCh: ctx.Done(),
RESTConfig: kubeCfg,
Client: cl,
CMClient: intcl,
GWClient: gwcl,
DiscoveryClient: cl.Discovery(),
Recorder: recorder,
KubeSharedInformerFactory: kubeSharedInformerFactory,
SharedInformerFactory: sharedInformerFactory,
GWShared: gwSharedInformerFactory,
GatewaySolverEnabled: gatewayAvailable,
Namespace: opts.Namespace,
Clock: clock.RealClock{},
Metrics: metrics.New(log, clock.RealClock{}),
ACMEOptions: controller.ACMEOptions{
HTTP01SolverImage: opts.ACMEHTTP01SolverImage,
HTTP01SolverResourceRequestCPU: HTTP01SolverResourceRequestCPU,
HTTP01SolverResourceRequestMemory: HTTP01SolverResourceRequestMemory,
HTTP01SolverResourceLimitsCPU: HTTP01SolverResourceLimitsCPU,
HTTP01SolverResourceLimitsMemory: HTTP01SolverResourceLimitsMemory,
DNS01CheckAuthoritative: !opts.DNS01RecursiveNameserversOnly,
DNS01Nameservers: nameservers,
AccountRegistry: acmeAccountRegistry,
DNS01CheckRetryPeriod: opts.DNS01CheckRetryPeriod,
},
IssuerOptions: controller.IssuerOptions{
ClusterIssuerAmbientCredentials: opts.ClusterIssuerAmbientCredentials,
IssuerAmbientCredentials: opts.IssuerAmbientCredentials,
ClusterResourceNamespace: opts.ClusterResourceNamespace,
},
IngressShimOptions: controller.IngressShimOptions{
DefaultIssuerName: opts.DefaultIssuerName,
DefaultIssuerKind: opts.DefaultIssuerKind,
DefaultIssuerGroup: opts.DefaultIssuerGroup,
DefaultAutoCertificateAnnotations: opts.DefaultAutoCertificateAnnotations,
},
CertificateOptions: controller.CertificateOptions{
EnableOwnerRef: opts.EnableCertificateOwnerRef,
CopiedAnnotationPrefixes: opts.CopiedAnnotationPrefixes,
},
SchedulerOptions: controller.SchedulerOptions{
MaxConcurrentChallenges: opts.MaxConcurrentChallenges,
},
}, kubeCfg, nil
}
func startLeaderElection(ctx context.Context, opts *options.ControllerOptions, leaderElectionClient kubernetes.Interface, recorder record.EventRecorder, callbacks leaderelection.LeaderCallbacks) error {
// Identity used to distinguish between multiple controller manager instances
id, err := os.Hostname()
if err != nil {
return fmt.Errorf("error getting hostname: %v", err)
}
// Set up Multilock for leader election. This Multilock is here for the
// transitionary period from configmaps to leases see
// https://github.com/kubernetes-sigs/controller-runtime/pull/1144#discussion_r480173688
lockName := "cert-manager-controller"
lc := resourcelock.ResourceLockConfig{
Identity: id + "-external-cert-manager-controller",
EventRecorder: recorder,
}
ml, err := resourcelock.New(resourcelock.ConfigMapsLeasesResourceLock,
opts.LeaderElectionNamespace,
lockName,
leaderElectionClient.CoreV1(),
leaderElectionClient.CoordinationV1(),
		lc,
	)
	if err != nil {
		return fmt.Errorf("error creating leader election lock: %v", err)
	}
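	// The extract ends at the resourcelock.New call; the remainder below is a
	// hedged sketch of the usual client-go leader-election pattern, not verbatim
	// upstream code. Field names come from k8s.io/client-go/tools/leaderelection;
	// the opts.LeaderElection* duration fields are assumptions.
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          ml,
		LeaseDuration: opts.LeaderElectionLeaseDuration,
		RenewDeadline: opts.LeaderElectionRenewDeadline,
		RetryPeriod:   opts.LeaderElectionRetryPeriod,
		Callbacks:     callbacks,
	})
	return nil
}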
controller.go
-manager/pkg/metrics"
"github.com/jetstack/cert-manager/pkg/util"
utilfeature "github.com/jetstack/cert-manager/pkg/util/feature"
"github.com/jetstack/cert-manager/pkg/util/profiling"
)
const controllerAgentName = "cert-manager"
// This sets the informer's resync period to 10 hours
// following the controller-runtime defaults
// and following discussion: https://github.com/kubernetes-sigs/controller-runtime/pull/88#issuecomment-408500629
const resyncPeriod = 10 * time.Hour
func Run(opts *options.ControllerOptions, stopCh <-chan struct{}) error {
rootCtx := cmdutil.ContextWithStopCh(context.Background(), stopCh)
rootCtx, cancelContext := context.WithCancel(rootCtx)
defer cancelContext()
g, rootCtx := errgroup.WithContext(rootCtx)
rootCtx = logf.NewContext(rootCtx, nil, "controller")
log := logf.FromContext(rootCtx)
ctx, kubeCfg, err := buildControllerContext(rootCtx, opts)
if err != nil {
return fmt.Errorf("error building controller context (options %v): %v", opts, err)
}
enabledControllers := opts.EnabledControllers()
log.Info(fmt.Sprintf("enabled controllers: %s", enabledControllers.List()))
// Start metrics server
metricsLn, err := net.Listen("tcp", opts.MetricsListenAddress)
if err != nil {
return fmt.Errorf("failed to listen on prometheus address %s: %v", opts.MetricsListenAddress, err)
}
metricsServer := ctx.Metrics.NewServer(metricsLn)
g.Go(func() error {
<-rootCtx.Done()
// allow a timeout for graceful shutdown
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := metricsServer.Shutdown(ctx); err != nil {
return err
}
return nil
})
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting metrics server", "address", metricsLn.Addr())
if err := metricsServer.Serve(metricsLn); err != http.ErrServerClosed {
return err
}
return nil
})
// Start profiler if it is enabled
if opts.EnablePprof {
profilerLn, err := net.Listen("tcp", opts.PprofAddress)
if err != nil {
return fmt.Errorf("failed to listen on profiler address %s: %v", opts.PprofAddress, err)
}
profilerMux := http.NewServeMux()
// Add pprof endpoints to this mux
profiling.Install(profilerMux)
profilerServer := &http.Server{
Handler: profilerMux,
}
g.Go(func() error {
<-rootCtx.Done()
// allow a timeout for graceful shutdown
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := profilerServer.Shutdown(ctx); err != nil {
return err
}
return nil
})
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting profiler", "address", profilerLn.Addr())
if err := profilerServer.Serve(profilerLn); err != http.ErrServerClosed {
return err
}
return nil
})
}
elected := make(chan struct{})
if opts.LeaderElect {
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting leader election")
leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(kubeCfg, "leader-election"))
if err != nil {
return fmt.Errorf("error creating leader election client: %v", err)
}
errorCh := make(chan error, 1)
if err := startLeaderElection(rootCtx, opts, leaderElectionClient, ctx.Recorder, leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
close(elected)
},
OnStoppedLeading: func() {
select {
case <-rootCtx.Done():
// context was canceled, just return
return
default:
errorCh <- errors.New("leader election lost")
}
},
}); err != nil {
return err
}
select {
case err := <-errorCh:
return err
default:
return nil
}
})
} else {
close(elected)
}
select {
case <-rootCtx.Done(): // Exit early if we are shutting down or if the errgroup has already exited with an error
// Wait for error group to complete and return
return g.Wait()
case <-elected: // Don't launch the controllers unless we have been elected leader
// Continue with setting up controller
}
for n, fn := range controller.Known() {
log := log.WithValues("controller", n)
// only run a controller if it's been enabled
if !enabledControllers.Has(n) {
log.V(logf.InfoLevel).Info("not starting controller as it's disabled")
continue
}
// don't run clusterissuers controller if scoped to a single namespace
if ctx.Namespace != "" && n == clusterissuers.ControllerName {
log.V(logf.InfoLevel).Info("not starting controller as cert-manager has been scoped to a single namespace")
continue
}
iface, err := fn(ctx)
if err != nil {
err = fmt.Errorf("error starting controller: %v", err)
cancelContext()
err2 := g.Wait() // Don't process errors, we already have an error
if err2 != nil {
return utilerrors.NewAggregate([]error{err, err2})
}
return err
}
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting controller")
// TODO: make this either a constant or a command line flag
workers := 5
return iface.Run(workers, rootCtx.Done())
})
}
log.V(logf.DebugLevel).Info("starting shared informer factories")
ctx.SharedInformerFactory.Start(rootCtx.Done())
ctx.KubeSharedInformerFactory.Start(rootCtx.Done())
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
ctx.GWShared.Start(rootCtx.Done())
}
err = g.Wait()
if err != nil {
return fmt.Errorf("error starting controller: %v", err)
}
log.V(logf.InfoLevel).Info("control loops exited")
return nil
}
func buildControllerContext(ctx context.Context, opts *options.ControllerOptions) (*controller.Context, *rest.Config, error) {
log := logf.FromContext(ctx, "build-context")
	// Load the user's Kubernetes config
kubeCfg, err := clientcmd.BuildConfigFromFlags(opts.APIServerHost, opts.Kubeconfig)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating rest config: %s", err.Error())
	}
kubeCfg.QPS = opts.KubernetesAPIQPS
kubeCfg.Burst = opts.KubernetesAPIBurst
// Add User-Agent to client
kubeCfg = rest.AddUserAgent(kubeCfg, util.CertManagerUserAgent)
// Create a cert-manager api client
intcl, err := clientset.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating internal group client: %s", err.Error())
}
// Create a Kubernetes api client
cl, err := kubernetes.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
var gatewayAvailable bool
// Check if the Gateway API feature gate was enabled
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
// check if the gateway API CRDs are available. If they are not found return an error
// which will cause cert-manager to crashloopbackoff
d := cl.Discovery()
resources, err := d.ServerResourcesForGroupVersion(gwapi.GroupVersion.String())
var GatewayAPINotAvailable = "the Gateway API CRDs do not seem to be present, but " + feature.ExperimentalGatewayAPISupport +
" is set to true. Please install the gateway-api CRDs."
switch {
case apierrors.IsNotFound(err):
return nil, nil, fmt.Errorf("%s (%w)", GatewayAPINotAvailable, err)
case err != nil:
return nil, nil, fmt.Errorf("while checking if the Gateway API CRD is installed: %w", err)
case len(resources.APIResources) == 0:
return nil, nil, fmt.Errorf("%s (found %d APIResources in %s)", GatewayAPINotAvailable, len(resources.APIResources), gwapi.GroupVersion.String())
default:
gatewayAvailable = true
}
}
// Create a GatewayAPI client.
gwcl, err := gwclient.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
nameservers := opts.DNS01RecursiveNameservers
if len(nameservers) == 0 {
nameservers = dnsutil.RecursiveNameservers
}
	log.V(logf.InfoLevel).WithValues("nameservers", nameservers).Info("configured acme dns01 nameservers")
controller.go
-manager/pkg/metrics"
"github.com/jetstack/cert-manager/pkg/util"
utilfeature "github.com/jetstack/cert-manager/pkg/util/feature"
"github.com/jetstack/cert-manager/pkg/util/profiling"
)
const controllerAgentName = "cert-manager"
// This sets the informer's resync period to 10 hours
// following the controller-runtime defaults
// and following discussion: https://github.com/kubernetes-sigs/controller-runtime/pull/88#issuecomment-408500629
const resyncPeriod = 10 * time.Hour
func Run(opts *options.ControllerOptions, stopCh <-chan struct{}) error {
rootCtx := cmdutil.ContextWithStopCh(context.Background(), stopCh)
rootCtx, cancelContext := context.WithCancel(rootCtx)
defer cancelContext()
g, rootCtx := errgroup.WithContext(rootCtx)
rootCtx = logf.NewContext(rootCtx, nil, "controller")
log := logf.FromContext(rootCtx)
ctx, kubeCfg, err := buildControllerContext(rootCtx, opts)
if err != nil {
return fmt.Errorf("error building controller context (options %v): %v", opts, err)
}
enabledControllers := opts.EnabledControllers()
log.Info(fmt.Sprintf("enabled controllers: %s", enabledControllers.List()))
// Start metrics server
metricsLn, err := net.Listen("tcp", opts.MetricsListenAddress)
if err != nil {
return fmt.Errorf("failed to listen on prometheus address %s: %v", opts.MetricsListenAddress, err)
}
metricsServer := ctx.Metrics.NewServer(metricsLn)
g.Go(func() error {
<-rootCtx.Done()
// allow a timeout for graceful shutdown
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := metricsServer.Shutdown(ctx); err != nil {
return err
}
return nil
})
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting metrics server", "address", metricsLn.Addr())
if err := metricsServer.Serve(metricsLn); err != http.ErrServerClosed {
return err
}
return nil
})
// Start profiler if it is enabled
if opts.EnablePprof {
profilerLn, err := net.Listen("tcp", opts.PprofAddress)
if err != nil {
return fmt.Errorf("failed to listen on profiler address %s: %v", opts.PprofAddress, err)
}
profilerMux := http.NewServeMux()
// Add pprof endpoints to this mux
profiling.Install(profilerMux)
profilerServer := &http.Server{
Handler: profilerMux,
}
g.Go(func() error {
<-rootCtx.Done()
// allow a timeout for graceful shutdown
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := profilerServer.Shutdown(ctx); err != nil {
return err
}
return nil
})
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting profiler", "address", profilerLn.Addr())
if err := profilerServer.Serve(profilerLn); err != http.ErrServerClosed {
return err
}
return nil
})
}
elected := make(chan struct{})
if opts.LeaderElect {
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting leader election")
leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(kubeCfg, "leader-election"))
if err != nil {
return fmt.Errorf("error creating leader election client: %v", err)
}
errorCh := make(chan error, 1)
if err := startLeaderElection(rootCtx, opts, leaderElectionClient, ctx.Recorder, leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
close(elected)
},
OnStoppedLeading: func() {
select {
case <-rootCtx.Done():
// context was canceled, just return
return
default:
errorCh <- errors.New("leader election lost")
}
},
}); err != nil {
return err
}
select {
case err := <-errorCh:
return err
default:
return nil
}
})
} else {
close(elected)
}
select {
case <-rootCtx.Done(): // Exit early if we are shutting down or if the errgroup has already exited with an error
// Wait for error group to complete and return
return g.Wait()
case <-elected: // Don't launch the controllers unless we have been elected leader
// Continue with setting up controller
}
for n, fn := range controller.Known() {
log := log.WithValues("controller", n)
// only run a controller if it's been enabled
if !enabledControllers.Has(n) {
log.V(logf.InfoLevel).Info("not starting controller as it's disabled")
continue
}
// don't run clusterissuers controller if scoped to a single namespace
if ctx.Namespace != "" && n == clusterissuers.ControllerName {
log.V(logf.InfoLevel).Info("not starting controller as cert-manager has been scoped to a single namespace")
continue
}
iface, err := fn(ctx)
if err != nil {
err = fmt.Errorf("error starting controller: %v", err)
cancelContext()
err2 := g.Wait() // Don't process errors, we already have an error
if err2 != nil {
return utilerrors.NewAggregate([]error{err, err2})
}
return err
}
g.Go(func() error {
log.V(logf.InfoLevel).Info("starting controller")
// TODO: make this either a constant or a command line flag
workers := 5
return iface.Run(workers, rootCtx.Done())
})
}
log.V(logf.DebugLevel).Info("starting shared informer factories")
ctx.SharedInformerFactory.Start(rootCtx.Done())
ctx.KubeSharedInformerFactory.Start(rootCtx.Done())
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
ctx.GWShared.Start(rootCtx.Done())
}
err = g.Wait()
if err != nil {
return fmt.Errorf("error starting controller: %v", err)
}
log.V(logf.InfoLevel).Info("control loops exited")
return nil
}
func buildControllerContext(ctx context.Context, opts *options.ControllerOptions) (*controller.Context, *rest.Config, error) {
log := logf.FromContext(ctx, "build-context")
	// Load the user's Kubernetes config
kubeCfg, err := clientcmd.BuildConfigFromFlags(opts.APIServerHost, opts.Kubeconfig)
if err != nil {
return nil, nil, fmt.Errorf("error creating rest config: %s", err.Error())
}
kubeCfg.QPS = opts.KubernetesAPIQPS
kubeCfg.Burst = opts.KubernetesAPIBurst
// Add User-Agent to client
kubeCfg = rest.AddUserAgent(kubeCfg, util.CertManagerUserAgent)
// Create a cert-manager api client
intcl, err := clientset.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating internal group client: %s", err.Error())
}
// Create a Kubernetes api client
cl, err := kubernetes.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
var gatewayAvailable bool
// Check if the Gateway API feature gate was enabled
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalGatewayAPISupport) {
// check if the gateway API CRDs are available. If they are not found return an error
// which will cause cert-manager to crashloopbackoff
d := cl.Discovery()
resources, err := d.ServerResourcesForGroupVersion(gwapi.GroupVersion.String())
var GatewayAPINotAvailable = "the Gateway API CRDs do not seem to be present, but " + feature.ExperimentalGatewayAPISupport +
" is set to true. Please install the gateway-api CRDs."
switch {
case apierrors.IsNotFound(err):
return nil, nil, fmt.Errorf("%s (%w)", GatewayAPINotAvailable, err)
case err != nil:
return nil, nil, fmt.Errorf("while checking if the Gateway API CRD is installed: %w", err)
case len(resources.APIResources) == 0:
return nil, nil, fmt.Errorf("%s (found %d APIResources in %s)", GatewayAPINotAvailable, len(resources.APIResources), gwapi.GroupVersion.String())
default:
gatewayAvailable = true
}
}
// Create a GatewayAPI client.
gwcl, err := gwclient.NewForConfig(kubeCfg)
if err != nil {
return nil, nil, fmt.Errorf("error creating kubernetes client: %s", err.Error())
}
nameservers := opts.DNS01RecursiveNameservers
if len(nameservers) == 0 {
nameservers = dnsutil.RecursiveNameservers
}
	log.V(logf.InfoLevel).WithValues("nameservers", nameservers).Info("configured acme dns01 nameservers")
agvCtrl.py
# getPoint resolves a configured alias for a location point; the original def
# line was truncated in this extract and is reconstructed from the unit tests below.
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
    if originPoint in g_point and g_point[originPoint] is not None:
        return g_point[originPoint]
    return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = "point.cfg"
if filePath:
fileName = filePath + "/" + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId,scanId):
scanId = scanId.strip()
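    # checkCart verifies that the cart the AGV actually scanned matches the cart
    # id expected at this location; first sightings are persisted to cart.cfg.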
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = "cart.cfg"
if p:
pp = p+"/"+pp
g_carts = json_codec.load_file(pp)
    def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = "cart.cfg"
if p:
pp = p+"/"+pp
json_codec.dump_file(pp,g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return "unknown"
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
raise Exception("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
else:
g_carts[cartId] = scanId
saveCart()
# finishCallback signature: finishCallback(obj)
# obj is automatically populated with the following three keys:
# obj["agv"] = agvId
# obj["result"] = 0
# obj["resultDesc"] = "success"
def _run(func,args,callback,obj):
def threadFunc(func,args,callback,obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj["result"] = -1
obj["resultDesc"] = str(e)
log.exception("agvCtrl:",e)
if "agv" in obj:
agvId= obj["agv"]
log.debug("小车:"+agvId+",出现未经处理的异常,正在返航 ")
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc,args=(func,args,callback,obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj,agvId):
obj["agv"] = agvId
obj["result"] = 0
obj["resultDesc"] = "success"
def _call(agvId,locId):
if api.isCartLoc(locId):
api.move(agvId,locId+".1")
lockStockA(agvId,locId)
try:
            api.mission(agvId,1) # rotate -> slide under the cart -> scan -> return the cart id
except Exception as e:
unlockStockA(agvId,locId)
raise e
else:
api.move(agvId,locId)
def apply(locId):
locId=getOriginPoint(locId)
return api.apply(locId+'.1')
def call(agvId,locId,finishCallback,obj):
_initObj(obj,agvId)
locId=getOriginPoint(locId)
try:
_run(func=_call,args=(agvId,locId),callback=finishCallback,obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId,srcLoc,locId,cartId):
try:
        c = api.mission(agvId,2) # lift mission; this also returns the cart ID
if c:
checkCart(cartId,c)
api.move(agvId,srcLoc+".2")
except Exception as e:
#TODO:ycat api.move(agvId,srcLoc+".2")
#TODO:ycat raise e
pass
finally:
unlockStockA(agvId,srcLoc)
loc,type = api.getMissionType("get","",srcLoc)
    api.mission(agvId,type) # mission 3 moves the AGV and cart rightward in tandem; mission 4 moves them leftward
loc,type = api.getMissionType("put",srcLoc,locId)
api.move(agvId,loc+".3")
    api.mission(agvId,type) # mission 3 moves the AGV and cart rightward in tandem; mission 4 moves them leftward
lockStockA(agvId,locId)
try:
api.move(agvId,locId+".4")
        api.mission(agvId,5) # put the cart down
        api.move(agvId,locId+".5") # return to base
finally:
unlockStockA(agvId,locId)
freeAgv(agvId)
# transport with a cart
def moveCart(agvId,cartId,srcLoc,locId,finishCallback,obj):
_initObj(obj,agvId)
assert api.isCartLoc(cartId)
    # before moving a cart, the AGV must be in the locked state
    #assert api.isLocked(agvId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart,args=(agvId,srcLoc,locId,cartId),callback=finishCallback,obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
# transport without a cart
def move(agvId,locId,finishCallback,obj):
_initObj(obj,agvId)
    # before moving, the AGV must be in the locked state
    #assert api.isLocked(agvId)
try:
locId=getOriginPoint(locId)
_run(func=api.move,args=(agvId,locId),callback=finishCallback,obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
# release the AGV so it can be used by others
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception("freeAgv",e)
# send the AGV back to the turntable
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
def Init():
import interface.dashboard.dashboardApi
locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)
time.sleep(3)
################# unit test #################
def testgetPoint():
resulta= getPoint("StockA_row7_col4")
assert resulta== "begin_1"
resultb= getPoint("StockA_row8_col4")
assert resultb == "begin_2"
def testgetOrginPoint():
resulta= getOriginPoint("begin_1")
assert resulta== "StockA_row7_col4"
resultb= getOriginPoint("begin_2")
assert resultb == "StockA_row8_col4"
resultc = getOriginPoint("hhahahaa")
assert resultc == "hhahahaa"
def testgetStockA():
assert getStockA("stockA_row10_col3") == 9003
assert getStockA("stockA_row10_col4") == 9004
assert getStockA("stockA_row1_col1") == 1001
assert getStockA("stockA_row2_col2") == 1002
assert getStockA("stockA_row3_col2") == 3002
assert getStockA("stockA_row4_col2") == 3002
assert getStockA("stockA_row4_col2.1") == 3002
assert getStockA("stockB_row4_col2.1") == None
assert getStockA("begin_1") == None
assert getStockA("seat_1") == None
def testcheckCart():
    global g_carts
    g_carts = None
    checkCart("CART9001", "591")
    checkCart("CART9002", "592")
    gg = json_codec.load_file("cart.cfg")
    assert "CART9001" in gg
    assert "CART9002" in gg
    assert gg["CART9001"] == "591"
    assert gg["CART9002"] == "592"
    checkCart("CART9002", "592")
    checkCart("CART9001", "591")
    try:
        checkCart("CART9002", "591")
        assert 0
    except Exception as e:
        s = str(e)
        assert s.find("Incorrect rack id. Expected rack: CART9002, actual rack: CART9001") != -1
import counter
@counter.count
def saveCart():
    ...  # body truncated in the source
@lock.lock(g_lock)
def getPoint(originPoint):
    global g_point
    loadPoint()
    if g_point.get(originPoint) is not None:
        return g_point[originPoint]
    return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
    global g_point
    filePath = os.path.dirname(__file__)
    fileName = "point.cfg"
    if filePath:
        fileName = os.path.join(filePath, fileName)
    g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
    scanId = scanId.strip()
    def loadCart():
        global g_carts
        p = os.path.dirname(__file__)
        pp = "cart.cfg"
        if p:
            pp = os.path.join(p, pp)
        g_carts = json_codec.load_file(pp)
    def saveCart():
        global g_carts
        p = os.path.dirname(__file__)
        pp = "cart.cfg"
        if p:
            pp = os.path.join(p, pp)
        json_codec.dump_file(pp, g_carts)
    def findCart(scanId):
        global g_carts
        for c in g_carts:
            if g_carts[c] == scanId:
                return c
        return "unknown"
    global g_carts
    if g_carts is None:
        loadCart()
    if cartId in g_carts:
        if scanId != g_carts[cartId]:
            msg = "Incorrect rack id. Expected rack: " + cartId + ", actual rack: " + findCart(scanId)
            log.error(msg)
            raise Exception(msg)
    else:
        g_carts[cartId] = scanId
        saveCart()
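# Illustrative sketch (not in the original source): the registry semantics of
# checkCart. The first scan of a rack id binds it to the scanned barcode; any
# later scan must match, otherwise an exception is raised. "CART9003"/"593"
# are made-up example values.
def _exampleCheckCartUsage():
    checkCart("CART9003", "593")      # first sighting: registered and persisted
    checkCart("CART9003", "593")      # same pairing: passes silently
    try:
        checkCart("CART9003", "594")  # mismatch: raises "Incorrect rack id..."
    except Exception as e:
        log.error(str(e))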
# Map a StockA location name such as "stockA_row4_col2" (an optional ".1"
# suffix is ignored) to the numeric lock index shared by an aisle pair of
# rows. Only the tail of this function survived in the source; the body is
# reconstructed from the expectations in testgetStockA() above, and assumes
# `re` is imported in the file header (not shown in this excerpt).
def getStockA(loc):
    m = re.match(r'stocka_row(\d+)_col(\d+)', loc.strip().lower())
    if m is None:
        return None
    row = int(m.group(1))
    col = int(m.group(2))
    if row % 2 != 1:
        row -= 1
    return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
    global g_stockLock
    if index in g_stockLock:
        if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
            unlockStockA(agvId, loc)
            log.warning("delete timeout locked", index)
# Prevent two AGVs meeting head-on inside a StockA aisle
def lockStockA(agvId, loc):
    global g_stockLock
    index = getStockA(loc)
    if index is None:
        return
    if index in g_stockLock:
        checkTimeout(index, agvId, loc)
        log.warning(agvId, loc, "is locked, wait for unlock")
        for i in range(60 * 5):
            if index not in g_stockLock:
                break
            time.sleep(1)
        log.info(agvId, loc, "wait for unlock success")
    global g_lock
    log.debug(agvId, "lock", loc, index)
    g_lock.acquire()
    g_stockLock[index] = utility.ticks()
    g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
    global g_stockLock
    index = getStockA(loc)
    if index in g_stockLock:
        log.debug(agvId, "unlock", loc, index)
        del g_stockLock[index]
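# Illustrative sketch (not in the original source): the lock/unlock pairing
# used around StockA missions. Both rows of an aisle pair map to the same
# getStockA() index, so only one AGV can work the aisle at a time.
def _exampleStockALocking(agvId):
    loc = "StockA_row7_col4"
    lockStockA(agvId, loc)        # blocks (up to ~5 minutes) if the aisle is held
    try:
        api.mission(agvId, 1)     # do the work while holding the aisle
    finally:
        unlockStockA(agvId, loc)  # always release, mirroring _call/_moveCart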
Exam4x2.py
    def mostrar(self):
        system('cls')
        print('\033[1:31m ')
        print('|///////////////////|')
        print('|   SHOW PATIENTS   |')
        print('|///////////////////|')
        print('\033[0:0m ')
        return f'\nGender: {self.genero}\nName: {self.nombre}\nLast name: {self.apellido}' \
               f'\nDate of birth: {self.nacimiento}\nCountry: {self.pais}' \
               f'\nAge: {self.edad}\nStatus: {self.estatus}\n '
ListaP = list()
def pacientes():
pass
def ModStatus(patients):
    NuevoPaciente = paciente('', '', '', '', '', '')
    if not patients:
        print('No patients have been added')
        print('''Add one?
    > Yes
    > No''')
        Principal()
    else:
        while True:
            print('\033[1:33m ')
            print('|///////////////////////////|')
            print('|      MODIFY OPTION        |')
            print('|///////////////////////////|')
            print('\033[0:0m ')
            print('1-Make a diagnosis')
            print('2-Change status to Deceased')
            print('3-Change status to Cured')
            print('4-Change status manually (suspected/active/ruled out)')
            print('5-Back')
            try:
                opc = int(input('Option: '))
                if 0 < opc < 5:
                    nombre = input('Enter the name: ')
                    break
                elif opc == 5:
                    Principal()
            except ValueError:
                system('cls')
                print('Invalid option')
                input()
        Recorrer = 0
        while nombre != NuevoPaciente.nombre:
            try:
                NuevoPaciente = patients[Recorrer]
                Recorrer += 1
            except IndexError:
                system('cls')
                print('Not found!')
                input()
                print('')
                ModStatus(patients)
        if nombre == NuevoPaciente.nombre:
            print(NuevoPaciente.nombre)
            print('Found!')
            print('')
        if opc == 1:
            try:
                print('Next, ask the patient the following questions')
                print('Answer honestly:')
                print(' ')
                Tos = Diagnosticar(input('Do you have a dry cough? yes/no\n'))
                if not Tos:
                    Flema = Diagnosticar(input('Do you have a cough with phlegm? yes/no\n'))
                else:
                    Flema = False
                Respirar = Diagnosticar(input('Is it hard to breathe, or do you feel short of breath? yes/no\n'))
                Fiebre = Diagnosticar(input('Do you have a fever? yes/no\n'))
                if Tos and Respirar and Fiebre:
                    print('You have COVID-19')
                    NuevoPaciente.estatus = 'COVID-19 Active'
                elif Flema:  # phlegm, with or without fever
                    print('You MAY have COVID-19')
                    NuevoPaciente.estatus = 'Suspected (Flu)'
                else:
                    print('You probably do not have COVID-19')
                    NuevoPaciente.estatus = 'Ruled out (Allergy)'
            except Exception:
                ModStatus(patients)
        if opc == 2:
            while True:
                system('cls')
                respuesta = input('Did this patient pass away? yes/no\n')
                if respuesta.upper() == 'YES':
                    print('This patient has passed away D:')
                    NuevoPaciente.estatus = "Deceased"
                    ModStatus(patients)
                elif respuesta.upper() == 'NO':
                    ModStatus(patients)
                else:
                    print('Enter a valid option.\n')
        if opc == 3:
            while True:
                system('cls')
                respuesta = input('Did this patient recover? yes/no\n')
                if respuesta.upper() == 'YES':
                    NuevoPaciente.estatus = "Recovered"
                    print('Done!\n\n')
                    ModStatus(patients)
                elif respuesta.upper() == 'NO':
                    ModStatus(patients)
                else:
                    print('Enter a valid option.')
        if opc == 4:
            while True:
                system('cls')
                print('1-Suspected')
                print('2-Active')
                print('3-Ruled out (Flu/Fever)')
                print('4-Back')
                opc2 = input('Option: ')
                if opc2 == '1':
                    NuevoPaciente.estatus = "Suspected"
                    ModStatus(patients)
                elif opc2 == '2':
                    NuevoPaciente.estatus = "Has COVID-19"
                    ModStatus(patients)
                elif opc2 == '3':
                    while opc2 != '1' and opc2 != '2':
                        print('''1) Flu
    2) Fever''')
                        opc2 = input('Option: ')
                        if opc2 == '1':
                            NuevoPaciente.estatus = "Ruled out (Flu)"
                            ModStatus(patients)
                        elif opc2 == '2':
                            NuevoPaciente.estatus = "Ruled out (Fever)"
                            ModStatus(patients)
                        else:
                            print('Invalid option')
                            print('')
                elif opc2 == '4':
                    ModStatus(patients)
                else:
                    print('Invalid option')
    ModStatus(patients)
def AcercaDe():
    print('\033[1:30m ')
    print('COVID-19:')
    print('\033[0:0m ')
    print('''COVID-19 is an infectious disease caused by a new virus that
had not been detected in humans before. The virus causes a respiratory
illness similar to the flu (influenza) with various symptoms (cough, fever,
etc.) that, in severe cases, can produce pneumonia. To protect yourself you
can wash your hands regularly and avoid touching your face.''')
    print('')
    input('Next -->')
    system('cls')
    print('\033[1:30m ')
    print('How does it spread?')
    print('\033[0:0m ')
    print('''The new coronavirus spreads mainly through direct contact
(1 meter or 3 feet) with an infected person when they cough or sneeze, or
through contact with their respiratory droplets (saliva or nasal secretions).''')
    print('')
    input('Next -->')
    system('cls')
    print('\033[1:30m ')
    print('Symptoms:')
    print('\033[0:0m ')
    print('''COVID-19 is characterized by mild symptoms such as a runny nose,
sore throat, cough and fever. The illness can be more severe in some people
and cause pneumonia or breathing difficulties. More rarely it can be fatal.
Elderly people and people with other medical conditions (such as asthma,
diabetes or heart disease) may be more vulnerable and become seriously ill.''')
    print('')
    input('Next -->')
    system('cls')
    print('\033[1:30m ')
    print('Prevention (THERE IS CURRENTLY NO CURE FOR COVID-19)')
    print('\033[0:0m ')
    print('''You can reduce the risk of infection by:
 ~ Washing your hands regularly with soap and water or with an
   alcohol-based hand sanitizer
 ~ Covering your nose and mouth when coughing and sneezing with a
   disposable tissue or with the inside of your elbow
 ~ Avoiding direct contact (1 meter or 3 feet) with anyone with
   cold or flu (influenza) symptoms''')
    print('')
    input('End -->')
    Principal()
def Diagnosticar(respuesta):  # Interpret a yes/no answer when diagnosing an already-added patient
    if respuesta.upper() == 'YES':
        return True
    elif respuesta.upper() == 'NO':
        return False
    else:
        print('Please type a valid answer')
        input()
        ModStatus(ListaP)  # restart the modify flow over the stored patient list
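# Illustrative sketch (not in the original source): the decision rule applied
# by the opc == 1 branch of ModStatus, written as a pure function so the
# three possible outcomes are easy to see at a glance.
def _exampleDiagnosisRule(tos, respirar, fiebre, flema):
    if tos and respirar and fiebre:
        return 'COVID-19 Active'   # dry cough + short of breath + fever
    elif flema:
        return 'Suspected (Flu)'   # phlegm, with or without fever
    else:
        return 'Ruled out (Allergy)'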
def agregar():  # Add a new patient
    while True:
        system('cls')
        print('\033[1:36m ')
        print('|//////////////////////////|')
        print('|       ADD PATIENT        |')
        print('|//////////////////////////|')
        print('\033[0:0m ')
        genero = input('''Enter your gender:
    1) Male
    2) Female
    3) Back''')
        while genero != "1" and genero != "2":
            if genero == "3":
                Principal()
            genero = input('Invalid option: ')
        system('cls')
        nombre = input('Name: ')
        apellido = input('Last name: ')
        fecha = input('Date of birth: ')
        pais = input('Country of origin: ')
        while True:
            try:
                edad = int(input('Age: '))
                if edad > 0:
                    break
                else:
                    print('Enter a valid age')
                    print('')
            except ValueError:
                print('You typed a letter/character!')
        NuevoPaciente = paciente(nombre, apellido, fecha, pais, genero, edad)
        return NuevoPaciente
def Principal():
    opc = None
    while opc != '0':
        system('cls')
        print('\033[1:35m ')
        print('|///////////////////|')
        print('|     COVID-19      |')
        print('|///////////////////|')
        print('\033[0:0m ')
        print('''1) About the coronavirus
    2) Add a patient
    3) Show all patients
    4) Modify option
    0) Exit''')
        opc = input('Option: ')
        if opc == '1':
            AcercaDe()
        elif opc == '2':
            Paciente = agregar()
            ListaP.append(Paciente)
            system('cls')
        elif opc == '3':
            # The body of this branch is missing from the source; a minimal
            # reconstruction based on the menu text and mostrar() above:
            for p in ListaP:
                print(p.mostrar())
            input()
Simulated_Image_PD_points.py
import time
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
from pykrige.ok import OrdinaryKriging
# Compute the Padova points of degree n
def _pdpts(n):
    zn = np.cos(np.linspace(0, 1, n+1)*np.pi)
    zn1 = np.cos(np.linspace(0, 1, n+2)*np.pi)
    Pad1, Pad2 = np.meshgrid(zn, zn1)
    f1 = np.linspace(0, n, n+1)
    f2 = np.linspace(0, n+1, n+2)
    M1, M2 = np.meshgrid(f1, f2)
    h = np.array(np.mod(M1 + M2, 2))
    g = np.array(np.concatenate(h.T))
    findM = np.argwhere(g)
    Pad_x = np.matrix(np.concatenate(Pad1.T)[findM])
    Pad_y = np.matrix(np.concatenate(Pad2.T)[findM])
    return Pad_x, Pad_y
# Compute the coefficients for polynomial approximation
def _wamfit(deg, wam, pts, fval):
    both = np.vstack((wam, pts))
    rect = [np.min(both[:,0]), np.max(both[:,0]), np.min(both[:,1]), np.max(both[:,1])]
    Q, R1, R2 = _wamdop(deg, wam, rect)
    DOP = _wamdopeval(deg, R1, R2, pts, rect)
    cfs = np.matmul(Q.T, fval)
    lsp = np.matmul(DOP, cfs)
    return cfs, lsp
# Evaluate the approximant
def _wamdopeval(deg, R1, R2, pts, rect):
    W = _chebvand(deg, pts, rect)
    TT = np.linalg.solve(R1.T, W.T).T
    return np.linalg.solve(R2.T, TT.T).T
# Factorize the Vandermonde matrix
def _wamdop(deg, wam, rect):
    V = _chebvand(deg, wam, rect)
    Q1, R1 = np.linalg.qr(V)
    TT = np.linalg.solve(R1.T, V.T).T
    Q, R2 = np.array(np.linalg.qr(TT))
    return Q, R1, R2
# Construct the Chebyshev-Vandermonde matrix
def _chebvand(deg, wam, rect):
    j = np.linspace(0, deg, deg+1)
    j1, j2 = np.meshgrid(j, j)
    j11 = j1.T.flatten()
    j22 = j2.T.flatten()
    good = np.argwhere(j11+j22 < deg+1)
    couples = np.matrix(np.vstack((j11[good].T, j22[good].T)).T)
    a, b, c, d = rect
    mappa1 = (2.* wam[:, 0] - b - a) / (b - a)
    mappa2 = (2.* wam[:, 1] - d - c) / (d - c)
    mappa = np.vstack((mappa1.T, mappa2.T)).T
    V1 = np.cos(np.multiply(couples[:,0], np.arccos(mappa[:,0].T)))
    V2 = np.cos(np.multiply(couples[:,1], np.arccos(mappa[:,1].T)))
    V = np.multiply(V1, V2).T
    return V
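# Illustrative sketch (not in the original source): the Padova points of
# degree n number (n+1)(n+2)/2, which is exactly the dimension of the
# bivariate polynomial space of total degree n, so the fit at these nodes
# is well posed.
def _examplePadovaCount(n=5):
    Pad_x, Pad_y = _pdpts(n)
    assert Pad_x.shape[0] == (n + 1) * (n + 2) // 2
    return Pad_x.shape[0]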
# This function computes a number of Padova points for comparing VSDK and
# polynomial approximation
def _pdpts_vsdk(n, n1):
    Pad_x = np.array([0])
    while Pad_x.shape[0] < n1:
        zn = np.cos(np.linspace(0, 1, n+1) * np.pi)
        zn1 = np.cos(np.linspace(0, 1, n+2) * np.pi)
        Pad1, Pad2 = np.meshgrid(zn, zn1)
        M1, M2 = np.meshgrid(np.linspace(0, n, n+1), np.linspace(0, n+1, n+2))
        findM = np.argwhere(np.concatenate(np.mod(M1 + M2, 2).T))
        Pad_x = np.concatenate(Pad1.T)[findM]
        Pad_y = np.concatenate(Pad2.T)[findM]
        n += 1
    return Pad_x, Pad_y
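# Illustrative sketch (not in the original source): _pdpts_vsdk keeps raising
# the degree until the Padova set is at least as large as the requested n1,
# so the VSDK interpolant is built on no fewer nodes than the polynomial
# least-squares fit it is compared against.
def _exampleVsdkNodeCount(n1=100):
    Pad_x, Pad_y = _pdpts_vsdk(2, n1)
    assert Pad_x.shape[0] >= n1
    return Pad_x.shape[0]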
# Define the RBF
def _rbfm(ep, r):
return np.exp(-ep*r)
# Compute accuracy indicators (psnr and mse)
def psnr(im1, im2):
mse = np.mean(np.power(im1-im2, 2))
if mse == 0: return 100
pixelmax = 255.0
return 20 * np.log10(pixelmax / np.sqrt(mse))
def mse(im1, im2):
return np.mean(np.power(im1-im2, 2))
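# Illustrative sketch (not in the original source): a tiny check of the
# PSNR/MSE pair on made-up arrays. With the peak value of 255 hard-coded in
# psnr above, PSNR = 20*log10(255/sqrt(MSE)).
def _examplePsnrMse():
    a = np.zeros((2, 2))
    b = np.full((2, 2), 5.0)   # every pixel off by 5 -> MSE = 25
    assert mse(a, b) == 25.0
    assert abs(psnr(a, b) - 20 * np.log10(255.0 / 5.0)) < 1e-12
    return psnr(a, b)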
def plot_error(cr, err, name_cr, name_err, fig_name):
plt.plot(cr, err, 'o-')
plt.xlabel(name_cr)
plt.ylabel(name_err)
plt.savefig('%s.eps' %fig_name, bbox_inches='tight')
plt.savefig('%s.png' %fig_name, bbox_inches='tight')
plt.close()
def plot_variogram(krig, ylim, fig_name):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(krig.lags, krig.semivariance, 'k*')
plt.ylim(ylim)
plt.xlabel('Lag distance')
plt.ylabel('Semivariance')
plt.savefig('%s.eps'%fig_name)
plt.savefig('%s.png'%fig_name)
plt.close()
def plot_image(img, fig_name, pts=None):
    plt.imshow(img, cmap='gray', vmin=0, vmax=1)
    plt.axis('off')
    if pts is not None:
        plt.plot(pts[0], pts[1], 'r.')
    plt.savefig('%s.eps' % fig_name, bbox_inches='tight')
    plt.savefig('%s.png' % fig_name, bbox_inches='tight')
    plt.close()
# Load the image
matr = scipy.io.loadmat('sm_simulata.mat')
Image_large = matr["image_temp"]
plot_image(Image_large, 'Simulated_ImageLarge')
sx, sy = Image_large.shape
lx, rx, ly, ry = 170, 275, 79, 361
Image = Image_large[lx-1:rx, ly-1:ry]
m, n = Image.shape
# Define the polynomial degree
degs = [20,30,40,50,60,70,80,90]
# Initialize
CR_VSDK, CR_POLY = [], []
MSE_VSDK, MSE_POLY = [], []
PSNR_VSDK, PSNR_POLY = [], []
# Define the evaluation points
X, Y = np.meshgrid(range(n), range(m))
x = X.T.flatten()
y = Y.T.flatten()
pts = np.vstack((x,y)).T
ptsv = np.vstack((np.array(x/(n-1)), np.array(y/(m-1)))).T
fvalev = np.array([Image[y[i], x[i]] for i in range(x.shape[0])])
threshold = fvalev > 0
extra_ep = np.zeros(Image.flatten().shape)
extra_ep[threshold] = 1
epoints = np.hstack((ptsv, np.matrix(extra_ep).T))
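# Illustrative sketch (not in the original source): the VSDK idea used above.
# Each 2D point gets a third coordinate that flags which side of the
# discontinuity (here: pixel value > 0) it lies on, so the kernel sees points
# across the edge as far apart and the interpolant does not smear the jump.
def _exampleVsdkAugmentation():
    xy = np.array([[0.10, 0.20],
                   [0.11, 0.21]])          # two nearby points...
    vals = np.array([0.0, 0.9])            # ...on opposite sides of the edge
    flag = (vals > 0).astype(float)
    aug = np.hstack((xy, flag[:, None]))   # shape (2, 3), like dsites/epoints
    d = np.linalg.norm(aug[0] - aug[1])    # distance jumps from ~0.014 to ~1
    return aug, d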
for idx, deg in enumerate(degs):
    print('Testing polynomial of degree %d' % deg)
    t1 = time.time()
    # Compute Padova points
    Pad_x, Pad_y = _pdpts(int(np.floor(2 * deg * np.log(deg))))
    pts_x, pts_y = (Pad_x + 1) / 2., (Pad_y + 1) / 2.
    xpts, ypts = pts_x * (n-1), pts_y * (m-1)
    PDpts = np.vstack((xpts.T, ypts.T)).T
    # Compute the function values at the Padova points
    fval = [Image[int(np.floor(ypts[i])), int(np.floor(xpts[i]))] for i in range(xpts.shape[0])]
    fval = np.matrix(fval).T
    # Compute the polynomial approximant
    cfs, lsp = _wamfit(deg, PDpts, pts, fval)
    t2 = time.time()
    # Compute the nodes for the comparison with VSDKs
    Pad_xv, Pad_yv = _pdpts_vsdk(int(np.floor(np.sqrt(cfs.shape[0]))), cfs.shape[0])
    pts_xv, pts_yv = (Pad_xv+1) / 2, (Pad_yv+1) / 2
    xptsv, yptsv = pts_xv * (n-1), pts_yv * (m-1)
    PDptsv = np.matrix(np.vstack((np.array((xptsv/(n-1)).T), np.array((yptsv/(m-1)).T))).T)
    Pti_Mirko = PDptsv.copy()
    fvalv = [Image[int(np.floor(yptsv[i])), int(np.floor(xptsv[i]))] for i in range(xptsv.shape[0])]
    fvalv = np.matrix(fvalv).T
    fvalv_Mirko = fvalv.copy()
    threshold = fvalv > 0
    extra_ds = np.zeros(fvalv.shape)
    extra_ds[threshold] = 1
    dsites = np.hstack((PDptsv, np.matrix(extra_ds)))
    # Compute kernel and evaluation matrices
    NN = dsites.shape[0]
    DM = np.zeros((NN, NN))
    NN1 = epoints.shape[0]
    DM_eval = np.zeros((NN1, NN))
    for count in range(3):
        dr, cc = np.meshgrid(epoints[:,count], dsites[:,count])
        DM_eval = DM_eval + (np.power((dr-cc), 2)).T
        dr, cc = np.meshgrid(dsites[:,count], dsites[:,count])
        DM = DM + (np.power((dr-cc), 2)).T
    IM1 = _rbfm(1, np.sqrt(DM))
    EM1 = _rbfm(1, np.sqrt(DM_eval))
    # Compute and evaluate the VSDK interpolant
    coef = np.linalg.solve(IM1, fvalv)
    Pf = EM1.dot(coef)
    t3 = time.time()
    Imageapprox = np.reshape(lsp, (m, n), order='F')
    Imageapprox1 = np.reshape(Pf, (m, n), order='F')
    snr = psnr(Image, Imageapprox)
    snr1 = psnr(Image, Imageapprox1)
    rmse = mse(Imageapprox, Image)
    rmse1 = mse(Imageapprox1, Image)
    print('MSE for polynomials: %.3e' % rmse)
    print('MSE for VSDKs: %.3e' % rmse1)
    # Variograms: subsample the evaluation grid before kriging
    incr = 10
    fvalk, fvalkp, fvalkv, ptk1, ptk2 = [], [], [], [], []
    for i in range(0, pts.shape[0], incr):
        ptk1.append(pts[i, 1])
        ptk2.append(pts[i, 0])
        fvalk.append(fvalev[i])
        fvalkp.append(lsp[i, 0])
        fvalkv.append(Pf[i, 0])
    OK = OrdinaryKriging(ptk1, ptk2, fvalk, variogram_model='spherical', nlags=30)
    OK1 = OrdinaryKriging(ptk1, ptk2, fvalkp, variogram_model='spherical', nlags=30)
|
plt.plot(pts[0], pts[1], 'r.')
|
conditional_block
|
Simulated_Image_PD_points.py
|
(n):
zn = np.cos(np.linspace(0, 1, n+1)*np.pi)
zn1 = np.cos(np.linspace(0, 1, n+2)*np.pi)
Pad1, Pad2 = np.meshgrid(zn, zn1)
f1 = np.linspace(0, n, n+1)
f2 = np.linspace(0, n+1, n+2)
M1, M2 = np.meshgrid(f1,f2)
h = np.array(np.mod(M1 + M2, 2))
g = np.array(np.concatenate(h.T))
findM = np.argwhere(g)
Pad_x = np.matrix(np.concatenate(Pad1.T)[findM])
Pad_y = np.matrix(np.concatenate(Pad2.T)[findM])
return Pad_x, Pad_y
# Compute the coefficients for polynomial approximation
def _wamfit(deg, wam, pts, fval):
both = np.vstack((wam, pts))
rect = [np.min(both[:,0]), np.max(both[:,0]), np.min(both[:,1]), np.max(both[:,1])]
Q, R1, R2 = _wamdop(deg, wam, rect)
DOP = _wamdopeval(deg, R1, R2, pts, rect)
cfs = np.matmul(Q.T, fval)
lsp = np.matmul(DOP, cfs)
return cfs, lsp
# Evaluate the approximant
def _wamdopeval(deg,R1,R2,pts,rect):
W = _chebvand(deg, pts, rect)
TT = np.linalg.solve(R1.T, W.T).T
return np.linalg.solve(R2.T, TT.T).T
# Factorize the Vandermonde matrix
def _wamdop(deg,wam,rect):
V = _chebvand(deg,wam,rect)
Q1, R1 = np.linalg.qr(V)
TT = np.linalg.solve(R1.T,V.T).T
Q, R2 = np.array(np.linalg.qr(TT))
return Q, R1, R2
# Construct the Vandermonde matrix
def _chebvand(deg,wam,rect):
j = np.linspace(0, deg, deg+1)
j1, j2 = np.meshgrid(j, j)
j11 = j1.T.flatten()
j22 = j2.T.flatten()
good = np.argwhere(j11+j22 < deg+1)
couples = np.matrix(np.vstack((j11[good].T, j22[good].T)).T)
a, b, c, d = rect
mappa1 = (2.* wam[:, 0] - b - a) / (b - a)
mappa2 = (2.* wam[:, 1] - d - c) / (d - c)
mappa = np.vstack((mappa1.T, mappa2.T)).T
V1 = np.cos(np.multiply(couples[:,0], np.arccos(mappa[:,0].T)))
V2 = np.cos(np.multiply(couples[:,1], np.arccos(mappa[:,1].T)))
V = np.multiply(V1, V2).T
return V
# This function computes a number of Padova points for comparing VSDK and
# polynomial approximation
def _pdpts_vsdk(n, n1):
Pad_x = np.array([0])
while Pad_x.shape[0] < n1:
zn = np.cos(np.linspace(0, 1, n+1) * np.pi);
zn1 = np.cos(np.linspace(0, 1, n+2) * np.pi);
Pad1, Pad2 = np.meshgrid(zn, zn1)
M1, M2 = np.meshgrid(np.linspace(0, n, n+1), np.linspace(0, n+1, n+2))
findM = np.argwhere(np.concatenate(np.mod(M1 + M2, 2).T))
Pad_x = np.concatenate(Pad1.T)[findM]
Pad_y = np.concatenate(Pad2.T)[findM]
n += 1
return Pad_x, Pad_y
# Define the RBF
def _rbfm(ep, r):
return np.exp(-ep*r)
# Compute accuracy indicators (psnr and mse)
def psnr(im1, im2):
mse = np.mean(np.power(im1-im2, 2))
if mse == 0: return 100
pixelmax = 255.0
return 20 * np.log10(pixelmax / np.sqrt(mse))
def mse(im1, im2):
return np.mean(np.power(im1-im2, 2))
def plot_error(cr, err, name_cr, name_err, fig_name):
plt.plot(cr, err, 'o-')
plt.xlabel(name_cr)
plt.ylabel(name_err)
plt.savefig('%s.eps' %fig_name, bbox_inches='tight')
plt.savefig('%s.png' %fig_name, bbox_inches='tight')
plt.close()
def plot_variogram(krig, ylim, fig_name):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(krig.lags, krig.semivariance, 'k*')
plt.ylim(ylim)
plt.xlabel('Lag distance')
plt.ylabel('Semivariance')
plt.savefig('%s.eps'%fig_name)
plt.savefig('%s.png'%fig_name)
plt.close()
def plot_image(img, fig_name, pts=None):
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
plt.axis('off')
if pts:
plt.plot(pts[0], pts[1], 'r.')
plt.savefig('%s.eps' %fig_name,bbox_inches='tight')
plt.savefig('%s.png' %fig_name,bbox_inches='tight')
plt.close()
# Load the image
matr = scipy.io.loadmat('sm_simulata.mat')
Image_large = matr["image_temp"]
plot_image(Image_large, 'Simulated_ImageLarge')
sx, sy = Image_large.shape
lx, rx, ly, ry = 170, 275, 79, 361
Image = Image_large[lx-1:rx, ly-1:ry]
m, n = Image.shape
# Define the polynomial degree
degs = [20,30,40,50,60,70,80,90]
# Initialize
CR_VSDK, CR_POLY = [], []
MSE_VSDK, MSE_POLY = [], []
PSNR_VSDK, PSNR_POLY = [], []
# Define the evaluation points
X, Y = np.meshgrid(range(n), range(m));
x = X.T.flatten()
y = Y.T.flatten()
pts = np.vstack((x,y)).T
ptsv = np.vstack((np.array(x/(n-1)), np.array(y/(m-1)))).T
fvalev = np.array([Image[y[i], x[i]] for i in range(x.shape[0])])
threshold = fvalev > 0
extra_ep = np.zeros(Image.flatten().shape)
extra_ep[threshold] = 1
epoints = np.hstack((ptsv, np.matrix(extra_ep).T))
for idx, deg in enumerate(degs):
print('Testing polynomial of degree %d' %deg)
t1 = time.time()
# Compute Padova points
Pad_x, Pad_y = _pdpts(int(np.floor(2 * deg * np.log(deg))))
pts_x, pts_y = (Pad_x + 1) / 2., (Pad_y + 1) / 2.
xpts, ypts = pts_x * (n-1), pts_y * (m-1)
PDpts = np.vstack((xpts.T, ypts.T)).T
# Compute the function values at Padova points
fval = [Image[int(np.floor(ypts[i])), int(np.floor(xpts[i]))] for i in range(xpts.shape[0])]
fval = np.matrix(fval).T
# Compute the polynomial approximant
cfs, lsp = _wamfit(deg, PDpts, pts, fval)
t2 = time.time()
# Compute the nodes for comparisons with VSDKs
Pad_xv, Pad_yv = _pdpts_vsdk(np.floor(np.sqrt(cfs.shape[0])), cfs.shape[0])
pts_xv, pts_yv = (Pad_xv+1) / 2, (Pad_yv+1) / 2
xptsv, yptsv = pts_xv * (n-1), pts_yv * (m-1)
PDptsv = np.matrix(np.vstack((np.array((xptsv/(n-1)).T), np.array((yptsv/(m-1)).T))).T)
Pti_Mirko = PDptsv.copy()
fvalv = [Image[int(np.floor(yptsv[i])), int(np.floor(xptsv[i]))] for i in range(xptsv.shape[0])]
fvalv = np.matrix(fvalv).T
fvalv_Mirko =
|
_pdpts
|
identifier_name
|
|
Simulated_Image_PD_points.py
|
# Compute the coefficients for polynomial approximation
def _wamfit(deg, wam, pts, fval):
both = np.vstack((wam, pts))
rect = [np.min(both[:,0]), np.max(both[:,0]), np.min(both[:,1]), np.max(both[:,1])]
Q, R1, R2 = _wamdop(deg, wam, rect)
DOP = _wamdopeval(deg, R1, R2, pts, rect)
cfs = np.matmul(Q.T, fval)
lsp = np.matmul(DOP, cfs)
return cfs, lsp
# Evaluate the approximant
def _wamdopeval(deg,R1,R2,pts,rect):
W = _chebvand(deg, pts, rect)
TT = np.linalg.solve(R1.T, W.T).T
return np.linalg.solve(R2.T, TT.T).T
# Factorize the Vandermonde matrix
def _wamdop(deg,wam,rect):
V = _chebvand(deg,wam,rect)
Q1, R1 = np.linalg.qr(V)
TT = np.linalg.solve(R1.T,V.T).T
Q, R2 = np.array(np.linalg.qr(TT))
return Q, R1, R2
# Construct the Vandermonde matrix
def _chebvand(deg,wam,rect):
j = np.linspace(0, deg, deg+1)
j1, j2 = np.meshgrid(j, j)
j11 = j1.T.flatten()
j22 = j2.T.flatten()
good = np.argwhere(j11+j22 < deg+1)
couples = np.matrix(np.vstack((j11[good].T, j22[good].T)).T)
a, b, c, d = rect
mappa1 = (2.* wam[:, 0] - b - a) / (b - a)
mappa2 = (2.* wam[:, 1] - d - c) / (d - c)
mappa = np.vstack((mappa1.T, mappa2.T)).T
V1 = np.cos(np.multiply(couples[:,0], np.arccos(mappa[:,0].T)))
V2 = np.cos(np.multiply(couples[:,1], np.arccos(mappa[:,1].T)))
V = np.multiply(V1, V2).T
return V
# This function computes a number of Padova points for comparing VSDK and
# polynomial approximation
def _pdpts_vsdk(n, n1):
Pad_x = np.array([0])
while Pad_x.shape[0] < n1:
zn = np.cos(np.linspace(0, 1, n+1) * np.pi);
zn1 = np.cos(np.linspace(0, 1, n+2) * np.pi);
Pad1, Pad2 = np.meshgrid(zn, zn1)
M1, M2 = np.meshgrid(np.linspace(0, n, n+1), np.linspace(0, n+1, n+2))
findM = np.argwhere(np.concatenate(np.mod(M1 + M2, 2).T))
Pad_x = np.concatenate(Pad1.T)[findM]
Pad_y = np.concatenate(Pad2.T)[findM]
n += 1
return Pad_x, Pad_y
# Define the RBF
def _rbfm(ep, r):
return np.exp(-ep*r)
# Compute accuracy indicators (psnr and mse)
def psnr(im1, im2):
mse = np.mean(np.power(im1-im2, 2))
if mse == 0: return 100
pixelmax = 255.0
return 20 * np.log10(pixelmax / np.sqrt(mse))
def mse(im1, im2):
return np.mean(np.power(im1-im2, 2))
def plot_error(cr, err, name_cr, name_err, fig_name):
plt.plot(cr, err, 'o-')
plt.xlabel(name_cr)
plt.ylabel(name_err)
plt.savefig('%s.eps' %fig_name, bbox_inches='tight')
plt.savefig('%s.png' %fig_name, bbox_inches='tight')
plt.close()
def plot_variogram(krig, ylim, fig_name):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(krig.lags, krig.semivariance, 'k*')
plt.ylim(ylim)
plt.xlabel('Lag distance')
plt.ylabel('Semivariance')
plt.savefig('%s.eps'%fig_name)
plt.savefig('%s.png'%fig_name)
plt.close()
def plot_image(img, fig_name, pts=None):
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
plt.axis('off')
if pts:
plt.plot(pts[0], pts[1], 'r.')
plt.savefig('%s.eps' %fig_name,bbox_inches='tight')
plt.savefig('%s.png' %fig_name,bbox_inches='tight')
plt.close()
# Load the image
matr = scipy.io.loadmat('sm_simulata.mat')
Image_large = matr["image_temp"]
plot_image(Image_large, 'Simulated_ImageLarge')
sx, sy = Image_large.shape
lx, rx, ly, ry = 170, 275, 79, 361
Image = Image_large[lx-1:rx, ly-1:ry]
m, n = Image.shape
# Define the polynomial degree
degs = [20,30,40,50,60,70,80,90]
# Initialize
CR_VSDK, CR_POLY = [], []
MSE_VSDK, MSE_POLY = [], []
PSNR_VSDK, PSNR_POLY = [], []
# Define the evaluation points
X, Y = np.meshgrid(range(n), range(m));
x = X.T.flatten()
y = Y.T.flatten()
pts = np.vstack((x,y)).T
ptsv = np.vstack((np.array(x/(n-1)), np.array(y/(m-1)))).T
fvalev = np.array([Image[y[i], x[i]] for i in range(x.shape[0])])
threshold = fvalev > 0
extra_ep = np.zeros(Image.flatten().shape)
extra_ep[threshold] = 1
epoints = np.hstack((ptsv, np.matrix(extra_ep).T))
for idx, deg in enumerate(degs):
print('Testing polynomial of degree %d' %deg)
t1 = time.time()
# Compute Padova points
Pad_x, Pad_y = _pdpts(int(np.floor(2 * deg * np.log(deg))))
pts_x, pts_y = (Pad_x + 1) / 2., (Pad_y + 1) / 2.
xpts, ypts = pts_x * (n-1), pts_y * (m-1)
PDpts = np.vstack((xpts.T, ypts.T)).T
# Compute the function values at Padova points
fval = [Image[int(np.floor(ypts[i])), int(np.floor(xpts[i]))] for i in range(xpts.shape[0])]
fval = np.matrix(fval).T
# Compute the polynomial approximant
cfs, lsp = _wamfit(deg, PDpts, pts, fval)
t2 = time.time()
# Compute the nodes for comparisons with VSDKs
Pad_xv, Pad_yv = _pdpts_vsdk(np.floor(np.sqrt(cfs.shape[0])), cfs.shape[0])
pts_xv, pts_yv = (Pad_xv+1) / 2, (Pad_yv+1) / 2
xptsv, yptsv = pts_xv * (n-1), pts_yv * (m-1)
PDptsv = np.matrix(np.vstack((np.array((xptsv/(n-1)).T), np.array((yptsv/(m-1)).T))).T)
Pti_Mirko = PDptsv.copy()
fvalv = [Image[int(np.floor(yptsv[i])), int(np.floor(xptsv[i]))] for i in range(xptsv.shape[0])]
fvalv = np.matrix(fvalv).T
fvalv_Mirko = fvalv.copy()
|
zn = np.cos(np.linspace(0, 1, n+1)*np.pi)
zn1 = np.cos(np.linspace(0, 1, n+2)*np.pi)
Pad1, Pad2 = np.meshgrid(zn, zn1)
f1 = np.linspace(0, n, n+1)
f2 = np.linspace(0, n+1, n+2)
M1, M2 = np.meshgrid(f1,f2)
h = np.array(np.mod(M1 + M2, 2))
g = np.array(np.concatenate(h.T))
findM = np.argwhere(g)
Pad_x = np.matrix(np.concatenate(Pad1.T)[findM])
Pad_y = np.matrix(np.concatenate(Pad2.T)[findM])
return Pad_x, Pad_y
|
identifier_body
|
|
Simulated_Image_PD_points.py
|
(np.vstack((j11[good].T, j22[good].T)).T)
a, b, c, d = rect
mappa1 = (2.* wam[:, 0] - b - a) / (b - a)
mappa2 = (2.* wam[:, 1] - d - c) / (d - c)
mappa = np.vstack((mappa1.T, mappa2.T)).T
V1 = np.cos(np.multiply(couples[:,0], np.arccos(mappa[:,0].T)))
V2 = np.cos(np.multiply(couples[:,1], np.arccos(mappa[:,1].T)))
V = np.multiply(V1, V2).T
return V
# This function computes at least n1 Padova points, used to compare the
# VSDK and polynomial approximants on node sets of matching size
def _pdpts_vsdk(n, n1):
Pad_x = np.array([0])
while Pad_x.shape[0] < n1:
zn = np.cos(np.linspace(0, 1, n+1) * np.pi)
zn1 = np.cos(np.linspace(0, 1, n+2) * np.pi)
Pad1, Pad2 = np.meshgrid(zn, zn1)
M1, M2 = np.meshgrid(np.linspace(0, n, n+1), np.linspace(0, n+1, n+2))
findM = np.argwhere(np.concatenate(np.mod(M1 + M2, 2).T))
Pad_x = np.concatenate(Pad1.T)[findM]
Pad_y = np.concatenate(Pad2.T)[findM]
n += 1
return Pad_x, Pad_y
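# For degree n the Padova points have cardinality (n+1)*(n+2)/2 (e.g.
# 66 points for n = 10), so the loop above just raises n until at least
# n1 points are produced; starting near sqrt(n1), as the call site
# below does, keeps the number of growth iterations small.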
# Define the RBF
def _rbfm(ep, r):
return np.exp(-ep*r)
# Compute accuracy indicators (psnr and mse)
def psnr(im1, im2):
mse = np.mean(np.power(im1-im2, 2))
if mse == 0: return 100
pixelmax = 255.0
return 20 * np.log10(pixelmax / np.sqrt(mse))
def mse(im1, im2):
return np.mean(np.power(im1-im2, 2))
def plot_error(cr, err, name_cr, name_err, fig_name):
plt.plot(cr, err, 'o-')
plt.xlabel(name_cr)
plt.ylabel(name_err)
plt.savefig('%s.eps' %fig_name, bbox_inches='tight')
plt.savefig('%s.png' %fig_name, bbox_inches='tight')
plt.close()
def plot_variogram(krig, ylim, fig_name):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(krig.lags, krig.semivariance, 'k*')
plt.ylim(ylim)
plt.xlabel('Lag distance')
plt.ylabel('Semivariance')
plt.savefig('%s.eps'%fig_name)
plt.savefig('%s.png'%fig_name)
plt.close()
def plot_image(img, fig_name, pts=None):
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
plt.axis('off')
if pts is not None:
plt.plot(pts[0], pts[1], 'r.')
plt.savefig('%s.eps' %fig_name,bbox_inches='tight')
plt.savefig('%s.png' %fig_name,bbox_inches='tight')
plt.close()
# Load the image
matr = scipy.io.loadmat('sm_simulata.mat')
Image_large = matr["image_temp"]
plot_image(Image_large, 'Simulated_ImageLarge')
sx, sy = Image_large.shape
lx, rx, ly, ry = 170, 275, 79, 361
Image = Image_large[lx-1:rx, ly-1:ry]
m, n = Image.shape
# Define the polynomial degrees
degs = [20, 30, 40, 50, 60, 70, 80, 90]
# Initialize
CR_VSDK, CR_POLY = [], []
|
# Define the evaluation points
X, Y = np.meshgrid(range(n), range(m))
x = X.T.flatten()
y = Y.T.flatten()
pts = np.vstack((x,y)).T
ptsv = np.vstack((np.array(x/(n-1)), np.array(y/(m-1)))).T
fvalev = np.array([Image[y[i], x[i]] for i in range(x.shape[0])])
threshold = fvalev > 0
extra_ep = np.zeros(Image.flatten().shape)
extra_ep[threshold] = 1
epoints = np.hstack((ptsv, np.matrix(extra_ep).T))
for idx, deg in enumerate(degs):
print('Testing polynomial of degree %d' %deg)
t1 = time.time()
# Compute Padova points
Pad_x, Pad_y = _pdpts(int(np.floor(2 * deg * np.log(deg))))
pts_x, pts_y = (Pad_x + 1) / 2., (Pad_y + 1) / 2.
xpts, ypts = pts_x * (n-1), pts_y * (m-1)
PDpts = np.vstack((xpts.T, ypts.T)).T
# Compute the function values at Padova points
fval = [Image[int(np.floor(ypts[i])), int(np.floor(xpts[i]))] for i in range(xpts.shape[0])]
fval = np.matrix(fval).T
# Compute the polynomial approximant
cfs, lsp = _wamfit(deg, PDpts, pts, fval)
t2 = time.time()
# Compute the nodes for comparisons with VSDKs
Pad_xv, Pad_yv = _pdpts_vsdk(int(np.floor(np.sqrt(cfs.shape[0]))), cfs.shape[0])
pts_xv, pts_yv = (Pad_xv+1) / 2, (Pad_yv+1) / 2
xptsv, yptsv = pts_xv * (n-1), pts_yv * (m-1)
PDptsv = np.matrix(np.vstack((np.array((xptsv/(n-1)).T), np.array((yptsv/(m-1)).T))).T)
Pti_Mirko = PDptsv.copy()
fvalv = [Image[int(np.floor(yptsv[i])), int(np.floor(xptsv[i]))] for i in range(xptsv.shape[0])]
fvalv = np.matrix(fvalv).T
fvalv_Mirko = fvalv.copy()
threshold = fvalv > 0
extra_ds = np.zeros(fvalv.shape)
extra_ds[threshold] = 1
dsites = np.hstack((PDptsv, np.matrix(extra_ds)))
# Compute kernel and evaluation matrices
NN = dsites.shape[0]
DM = np.zeros((NN, NN))
NN1 = epoints.shape[0]
DM_eval = np.zeros((NN1, NN))
for count in range(3):
dr, cc = np.meshgrid(epoints[:,count], dsites[:,count])
DM_eval = DM_eval + (np.power((dr-cc), 2)).T
dr, cc = np.meshgrid(dsites[:,count], dsites[:,count])
DM = DM + (np.power((dr-cc),2)).T
IM1 = _rbfm(1, np.sqrt(DM))
EM1 = _rbfm(1, np.sqrt(DM_eval))
# Compute and evaluate the VSDKs interpolant
coef = np.linalg.solve(IM1, fvalv)
Pf = (EM1.dot(coef))
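# Here IM1 is the square kernel collocation matrix on the data sites
# and EM1 the rectangular evaluation matrix, so the two lines above
# solve IM1 @ coef = fvalv and evaluate the interpolant at every pixel;
# np.linalg.solve is preferable to forming the inverse explicitly, both
# for cost and for numerical stability.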
t3 = time.time()
Imageapprox = np.reshape(lsp, (m,n), order='F')
Imageapprox1 = np.reshape(Pf, (m,n), order='F')
snr = psnr(Image, Imageapprox)
snr1 = psnr(Image, Imageapprox1)
rmse = mse(Imageapprox, Image)
rmse1 = mse(Imageapprox1, Image)
#
print('MSE for polynomials: %.3e' %rmse)
print('MSE for VSDKs: %.3e' %rmse1)
#
# Variograms
incr = 10
fvalk, fvalkp, fvalkv, ptk1, ptk2 = [], [], [], [], []
for i in range(0, pts.shape[0], incr):
ptk1.append(pts[i, 1])
ptk2.append(pts[i, 0])
fvalk.append(fvalev[i])
fvalkp.append(lsp[i, 0])
fvalkv.append(Pf[i, 0])
OK = OrdinaryKriging(ptk1, ptk2, fvalk, variogram_model='spherical', nlags=30)
OK1 = OrdinaryKriging(ptk1, ptk2, fvalkp, variogram_model='spherical', nlags=30)
|
MSE_VSDK, MSE_POLY = [], []
PSNR_VSDK, PSNR_POLY = [], []
|
random_line_split
|
workerthread.rs
|
0, steal_fails: 0, sleep_us: 0, first_after: 1},
}
}
pub fn get_stealer(&self) -> Stealer<Task<Arg,Ret>> {
assert!(!self.started);
self.stealer.clone()
}
pub fn add_other_stealer(&mut self, stealer: Stealer<Task<Arg,Ret>>) {
assert!(!self.started);
self.other_stealers.push(stealer);
self.threadcount += 1;
}
pub fn spawn(mut self) -> thread_scoped::JoinGuard<'a, ()> {
assert!(!self.started);
self.started = true;
unsafe {
thread_scoped::scoped(move|| {
self.main_loop();
})
}
}
fn main_loop(mut self) {
loop {
match self.supervisor_port.recv() {
Err(_) => break, // PoolSupervisor has been dropped, let's quit.
Ok(_) => { // Supervisor instructs us to start working
loop {
self.process_queue();
match self.steal() {
Some(task) => self.execute_task(task),
None => break, // Give up for now
}
}
}
}
if self.supervisor_channel.send(SupervisorMsg::OutOfWork(self.id)).is_err() {
break; // Supervisor shut down, so we also shut down
}
}
}
fn process_queue(&mut self) {
while let Some(task) = self.deque.pop() {
self.execute_task(task);
}
}
fn execute_task(&mut self, task: Task<Arg, Ret>) {
let mut next_task: Option<Task<Arg,Ret>> = Some(task);
while let Some(task) = next_task {
if cfg!(feature = "threadstats") {self.stats.exec_tasks += 1;}
let fun = task.algo.fun;
match (fun)(task.arg) {
TaskResult::Done(ret) => {
self.handle_done(task.join, ret);
next_task = None;
},
TaskResult::Fork(args, joinarg) => {
next_task = self.handle_fork(task.algo, task.join, args, joinarg);
}
}
}
}
fn steal(&mut self) -> Option<Task<Arg,Ret>> {
if self.other_stealers.is_empty() {
None // No one to steal from
} else {
let mut backoff_sleep: u32 = BACKOFF_INC_US;
for try in 0.. {
match self.try_steal() {
Some(task) => {
if cfg!(feature = "threadstats") && self.stats.first_after == 1 {
self.stats.first_after = self.stats.sleep_us;
}
return Some(task);
}
None => if try > STEAL_TRIES_UNTIL_BACKOFF {
self.sleepers.fetch_add(1, Ordering::SeqCst); // Check number here and set special state if last worker
if cfg!(feature = "threadstats") {self.stats.sleep_us += backoff_sleep as usize;}
unsafe { usleep(backoff_sleep); }
backoff_sleep += BACKOFF_INC_US;
if self.threadcount == self.sleepers.load(Ordering::SeqCst) {
break; // Give up
} else {
if self.threadcount == self.sleepers.fetch_sub(1, Ordering::SeqCst) {
self.sleepers.fetch_add(1, Ordering::SeqCst);
break; // Also give up
}
}
},
}
}
None
}
}
/// Try to steal tasks from the other workers.
/// Starts at a random worker and tries every worker until a task is stolen or
/// every worker has been tried once.
fn try_steal(&mut self) -> Option<Task<Arg,Ret>> {
let len = self.other_stealers.len();
let start_victim = self.rng.gen_range(0, len);
for offset in 0..len {
match self.other_stealers[(start_victim + offset) % len].steal() {
Stolen::Data(task) => {
if cfg!(feature = "threadstats") {self.stats.steals += 1;}
return Some(task);
}
Stolen::Empty | Stolen::Abort => {
if cfg!(feature = "threadstats") {self.stats.steal_fails += 1;}
continue;
}
}
}
None
}
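// Worked example of the victim order above: with len == 4 and a
// random start_victim of 2, (start_victim + offset) % len visits
// stealers 2, 3, 0, 1 -- every worker exactly once, with the random
// start spreading contention across different victims per thread.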
fn handle_fork(&self,
algo: Algorithm<Arg, Ret>,
join: ResultReceiver<Ret>,
args: Vec<Arg>,
joinarg: Option<Ret>) -> Option<Task<Arg,Ret>>
{
let len: usize = args.len();
if len == 0 {
self.handle_fork_zero(algo, join, joinarg);
None
} else {
match algo.style {
AlgoStyle::Reduce(reducestyle) => {
let (vector, mut ptr_iter) = create_result_vec::<Ret>(len);
let mut sub_join = Box::new(JoinBarrier {
ret_counter: AtomicUsize::new(len),
joinfun: reducestyle,
joinarg: joinarg,
joinfunarg: vector,
parent: join,
});
let mut args_iter = args.into_iter();
let first_task = Task {
algo: algo.clone(),
arg: args_iter.next().unwrap(),
join: ResultReceiver::Join(ptr_iter.next().unwrap(), unsafe{Box::from_raw(&mut *sub_join)}),
};
loop {
match (args_iter.next(), ptr_iter.next()) {
(Some(arg), Some(ptr)) => {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: ResultReceiver::Join(ptr, unsafe{Box::from_raw(&mut *sub_join)}),
};
self.deque.push(forked_task);
},
_ => break,
}
}
mem::forget(sub_join); // Don't drop here, last task will take care of that in handle_done
Some(first_task)
},
AlgoStyle::Search => {
for arg in args.into_iter() {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: join.clone(),
};
self.deque.push(forked_task);
}
None
}
}
}
}
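// Sketch of the Reduce path above: for k forked arguments one
// JoinBarrier is allocated with ret_counter == k plus a k-slot result
// vector; each subtask receives a raw pointer to its own slot, the
// first subtask is returned for immediate execution and the remaining
// k - 1 are pushed on the local deque. The barrier allocation is kept
// alive with mem::forget and is finally dropped by whichever task
// brings ret_counter to zero in handle_done.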
fn handle_fork_zero(&self, algo: Algorithm<Arg, Ret>, join: ResultReceiver<Ret>, joinarg: Option<Ret>) {
match algo.style {
AlgoStyle::Reduce(ref reducestyle) => {
let joinres = match *reducestyle {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&Vec::new()[..]),
ReduceStyle::Arg(ref joinfun) => {
let arg = joinarg.unwrap();
(joinfun)(&arg, &Vec::new()[..])
}
};
self.handle_done(join, joinres);
},
_ => (),
}
}
fn handle_done(&self, join: ResultReceiver<Ret>, value: Ret) {
match join {
ResultReceiver::Join(ptr, joinbarrier) => {
unsafe { write(*ptr, value); } // Write without dropping; the slot holds uninitialized memory
if joinbarrier.ret_counter.fetch_sub(1, Ordering::SeqCst) == 1 {
let joinres = match joinbarrier.joinfun {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&joinbarrier.joinfunarg),
ReduceStyle::Arg(ref joinfun) => {
let joinarg = match joinbarrier.joinarg.as_ref() {
None => panic!("Algorithm has ReduceStyle::Arg, but no extra arg passed"),
Some(arg) => arg,
};
(joinfun)(joinarg, &joinbarrier.joinfunarg)
},
};
self.handle_done(joinbarrier.parent, joinres);
} else {
mem::forget(joinbarrier) // Don't drop if we are not last task
}
}
ResultReceiver::Channel(channel) => {
channel.lock().unwrap().send(value).unwrap();
}
}
}
}
#[cfg(feature = "threadstats")]
impl<Arg: Send, Ret: Send + Sync> Drop for WorkerThread<Arg, Ret> {
fn drop(&mut self) {
println!("Worker[{}] (t: {}, steals: {}, failed: {}, sleep: {}, first: {})",
self.id,
self.stats.exec_tasks,
self.stats.steals,
self.stats.steal_fails,
self.stats.sleep_us,
self.stats.first_after);
}
}
struct ThreadStats {
pub steals: usize,
pub steal_fails: usize,
pub exec_tasks: usize,
pub sleep_us: usize,
pub first_after: usize,
}
fn create_result_vec<Ret>(n: usize) -> (Vec<Ret>, PtrIter<Ret>) {
let mut rets: Vec<Ret> = Vec::with_capacity(n);
unsafe {
rets.set_len(n); // Claim the full length up front; the slots stay uninitialized until written
let ptr_0: *mut Ret = rets.get_unchecked_mut(0);
let ptr_iter = PtrIter {
ptr_0: ptr_0,
offset: 0,
};
(rets, ptr_iter)
}
}
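// Invariant for the unsafe code above: the Vec's length is set before
// any element is initialized, so every slot must be written exactly
// once (via the `write` call in handle_done) before the vector is read
// or dropped; PtrIter below hands out one distinct slot pointer per
// subtask to uphold this.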
struct P
|
trIter<
|
identifier_name
|
|
workerthread.rs
|
Sync> {
id: usize,
started: bool,
supervisor_port: Receiver<()>,
supervisor_channel: Sender<SupervisorMsg<Arg, Ret>>,
deque: Worker<Task<Arg, Ret>>,
stealer: Stealer<Task<Arg, Ret>>,
other_stealers: Vec<Stealer<Task<Arg, Ret>>>,
rng: XorShiftRng,
sleepers: Arc<AtomicUsize>,
threadcount: usize,
stats: ThreadStats,
}
impl<'a, Arg: Send + 'a, Ret: Send + Sync + 'a> WorkerThread<Arg,Ret> {
pub fn new(id: usize,
port: Receiver<()>,
channel: Sender<SupervisorMsg<Arg,Ret>>,
supervisor_queue: Stealer<Task<Arg, Ret>>,
sleepers: Arc<AtomicUsize>) -> WorkerThread<Arg,Ret> {
let (worker, stealer) = deque::new();
WorkerThread {
id: id,
started: false,
supervisor_port: port,
supervisor_channel: channel,
deque: worker,
stealer: stealer,
other_stealers: vec![supervisor_queue],
rng: weak_rng(),
sleepers: sleepers,
threadcount: 1, // Myself
stats: ThreadStats{exec_tasks: 0, steals: 0, steal_fails: 0, sleep_us: 0, first_after: 1},
}
}
pub fn get_stealer(&self) -> Stealer<Task<Arg,Ret>> {
assert!(!self.started);
self.stealer.clone()
}
pub fn add_other_stealer(&mut self, stealer: Stealer<Task<Arg,Ret>>) {
assert!(!self.started);
self.other_stealers.push(stealer);
self.threadcount += 1;
}
pub fn spawn(mut self) -> thread_scoped::JoinGuard<'a, ()> {
assert!(!self.started);
self.started = true;
unsafe {
thread_scoped::scoped(move|| {
self.main_loop();
})
}
}
fn main_loop(mut self) {
loop {
match self.supervisor_port.recv() {
Err(_) => break, // PoolSupervisor has been dropped, let's quit.
Ok(_) => { // Supervisor instructs us to start working
loop {
self.process_queue();
match self.steal() {
Some(task) => self.execute_task(task),
None => break, // Give up for now
}
}
}
}
if self.supervisor_channel.send(SupervisorMsg::OutOfWork(self.id)).is_err() {
break; // Supervisor shut down, so we also shut down
}
}
}
fn process_queue(&mut self) {
while let Some(task) = self.deque.pop() {
self.execute_task(task);
}
}
fn execute_task(&mut self, task: Task<Arg, Ret>) {
let mut next_task: Option<Task<Arg,Ret>> = Some(task);
while let Some(task) = next_task {
if cfg!(feature = "threadstats") {self.stats.exec_tasks += 1;}
let fun = task.algo.fun;
match (fun)(task.arg) {
TaskResult::Done(ret) => {
self.handle_done(task.join, ret);
next_task = None;
},
TaskResult::Fork(args, joinarg) => {
next_task = self.handle_fork(task.algo, task.join, args, joinarg);
}
}
}
}
fn steal(&mut self) -> Option<Task<Arg,Ret>> {
if self.other_stealers.is_empty() {
None // No one to steal from
} else {
let mut backoff_sleep: u32 = BACKOFF_INC_US;
for try in 0.. {
match self.try_steal() {
Some(task) => {
if cfg!(feature = "threadstats") && self.stats.first_after == 1 {
self.stats.first_after = self.stats.sleep_us;
}
return Some(task);
}
None => if try > STEAL_TRIES_UNTIL_BACKOFF {
self.sleepers.fetch_add(1, Ordering::SeqCst); // Check number here and set special state if last worker
if cfg!(feature = "threadstats") {self.stats.sleep_us += backoff_sleep as usize;}
unsafe { usleep(backoff_sleep); }
backoff_sleep += BACKOFF_INC_US;
if self.threadcount == self.sleepers.load(Ordering::SeqCst) {
break; // Give up
} else {
if self.threadcount == self.sleepers.fetch_sub(1, Ordering::SeqCst) {
self.sleepers.fetch_add(1, Ordering::SeqCst);
break; // Also give up
}
}
},
}
}
None
}
}
/// Try to steal tasks from the other workers.
/// Starts at a random worker and tries every worker until a task is stolen or
/// every worker has been tried once.
fn try_steal(&mut self) -> Option<Task<Arg,Ret>> {
let len = self.other_stealers.len();
let start_victim = self.rng.gen_range(0, len);
for offset in 0..len {
match self.other_stealers[(start_victim + offset) % len].steal() {
Stolen::Data(task) => {
if cfg!(feature = "threadstats") {self.stats.steals += 1;}
return Some(task);
}
Stolen::Empty | Stolen::Abort => {
if cfg!(feature = "threadstats") {self.stats.steal_fails += 1;}
continue;
}
}
}
None
}
fn handle_fork(&self,
algo: Algorithm<Arg, Ret>,
join: ResultReceiver<Ret>,
args: Vec<Arg>,
joinarg: Option<Ret>) -> Option<Task<Arg,Ret>>
{
let len: usize = args.len();
if len == 0 {
self.handle_fork_zero(algo, join, joinarg);
None
} else {
match algo.style {
AlgoStyle::Reduce(reducestyle) => {
let (vector, mut ptr_iter) = create_result_vec::<Ret>(len);
let mut sub_join = Box::new(JoinBarrier {
ret_counter: AtomicUsize::new(len),
joinfun: reducestyle,
joinarg: joinarg,
joinfunarg: vector,
parent: join,
});
let mut args_iter = args.into_iter();
let first_task = Task {
algo: algo.clone(),
arg: args_iter.next().unwrap(),
join: ResultReceiver::Join(ptr_iter.next().unwrap(), unsafe{Box::from_raw(&mut *sub_join)}),
};
loop {
match (args_iter.next(), ptr_iter.next()) {
(Some(arg), Some(ptr)) => {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: ResultReceiver::Join(ptr, unsafe{Box::from_raw(&mut *sub_join)}),
};
self.deque.push(forked_task);
},
_ => break,
}
}
mem::forget(sub_join); // Don't drop here, last task will take care of that in handle_done
Some(first_task)
},
AlgoStyle::Search => {
for arg in args.into_iter() {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: join.clone(),
};
self.deque.push(forked_task);
}
None
}
}
}
}
fn handle_fork_zero(&self, algo: Algorithm<Arg, Ret>, join: ResultReceiver<Ret>, joinarg: Option<Ret>) {
match algo.style {
AlgoStyle::Reduce(ref reducestyle) => {
let joinres = match *reducestyle {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&Vec::new()[..]),
ReduceStyle::Arg(ref joinfun) => {
let arg = joinarg.unwrap();
(joinfun)(&arg, &Vec::new()[..])
}
};
self.handle_done(join, joinres);
},
_ => (),
}
}
fn handle_done(&self, join: ResultReceiver<Ret>, value: Ret) {
match join {
ResultReceiver::Join(ptr, joinbarrier) => {
unsafe { write(*ptr, value); } // Write without dropping; the slot holds uninitialized memory
if joinbarrier.ret_counter.fetch_sub(1, Ordering::SeqCst) == 1 {
let joinres = match joinbarrier.joinfun {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&joinbarrier.joinfunarg),
ReduceStyle::Arg(ref joinfun) => {
let joinarg = match joinbarrier.joinarg.as_ref() {
None => panic!("Algorithm has ReduceStyle::Arg, but no extra arg passed"),
Some(arg) => arg,
};
(joinfun)(joinarg, &joinbarrier.joinfunarg)
},
};
self.handle_done(joinbarrier.parent, joinres);
} else {
|
mem::forget(joinbarrier) // Don't drop if we are not last task
}
|
conditional_block
|
|
workerthread.rs
|
{
assert!(!self.started);
self.other_stealers.push(stealer);
self.threadcount += 1;
}
pub fn spawn(mut self) -> thread_scoped::JoinGuard<'a, ()> {
assert!(!self.started);
self.started = true;
unsafe {
thread_scoped::scoped(move|| {
self.main_loop();
})
}
}
fn main_loop(mut self) {
loop {
match self.supervisor_port.recv() {
Err(_) => break, // PoolSupervisor has been dropped, let's quit.
Ok(_) => { // Supervisor instructs us to start working
loop {
self.process_queue();
match self.steal() {
Some(task) => self.execute_task(task),
None => break, // Give up for now
}
}
}
}
if self.supervisor_channel.send(SupervisorMsg::OutOfWork(self.id)).is_err() {
break; // Supervisor shut down, so we also shut down
}
}
}
fn process_queue(&mut self) {
while let Some(task) = self.deque.pop() {
self.execute_task(task);
}
}
fn execute_task(&mut self, task: Task<Arg, Ret>) {
let mut next_task: Option<Task<Arg,Ret>> = Some(task);
while let Some(task) = next_task {
if cfg!(feature = "threadstats") {self.stats.exec_tasks += 1;}
let fun = task.algo.fun;
match (fun)(task.arg) {
TaskResult::Done(ret) => {
self.handle_done(task.join, ret);
next_task = None;
},
TaskResult::Fork(args, joinarg) => {
next_task = self.handle_fork(task.algo, task.join, args, joinarg);
}
}
}
}
fn steal(&mut self) -> Option<Task<Arg,Ret>> {
if self.other_stealers.is_empty() {
None // No one to steal from
} else {
let mut backoff_sleep: u32 = BACKOFF_INC_US;
for try in 0.. {
match self.try_steal() {
Some(task) => {
if cfg!(feature = "threadstats") && self.stats.first_after == 1 {
self.stats.first_after = self.stats.sleep_us;
}
return Some(task);
}
None => if try > STEAL_TRIES_UNTIL_BACKOFF {
self.sleepers.fetch_add(1, Ordering::SeqCst); // Check number here and set special state if last worker
if cfg!(feature = "threadstats") {self.stats.sleep_us += backoff_sleep as usize;}
unsafe { usleep(backoff_sleep); }
backoff_sleep += BACKOFF_INC_US;
if self.threadcount == self.sleepers.load(Ordering::SeqCst) {
break; // Give up
} else {
if self.threadcount == self.sleepers.fetch_sub(1, Ordering::SeqCst) {
self.sleepers.fetch_add(1, Ordering::SeqCst);
break; // Also give up
}
}
},
}
}
None
}
}
/// Try to steal tasks from the other workers.
/// Starts at a random worker and tries every worker until a task is stolen or
/// every worker has been tried once.
fn try_steal(&mut self) -> Option<Task<Arg,Ret>> {
let len = self.other_stealers.len();
let start_victim = self.rng.gen_range(0, len);
for offset in 0..len {
match self.other_stealers[(start_victim + offset) % len].steal() {
Stolen::Data(task) => {
if cfg!(feature = "threadstats") {self.stats.steals += 1;}
return Some(task);
}
Stolen::Empty | Stolen::Abort => {
if cfg!(feature = "threadstats") {self.stats.steal_fails += 1;}
continue;
}
}
}
None
}
fn handle_fork(&self,
algo: Algorithm<Arg, Ret>,
join: ResultReceiver<Ret>,
args: Vec<Arg>,
joinarg: Option<Ret>) -> Option<Task<Arg,Ret>>
{
let len: usize = args.len();
if len == 0 {
self.handle_fork_zero(algo, join, joinarg);
None
} else {
match algo.style {
AlgoStyle::Reduce(reducestyle) => {
let (vector, mut ptr_iter) = create_result_vec::<Ret>(len);
let mut sub_join = Box::new(JoinBarrier {
ret_counter: AtomicUsize::new(len),
joinfun: reducestyle,
joinarg: joinarg,
joinfunarg: vector,
parent: join,
});
let mut args_iter = args.into_iter();
let first_task = Task {
algo: algo.clone(),
arg: args_iter.next().unwrap(),
join: ResultReceiver::Join(ptr_iter.next().unwrap(), unsafe{Box::from_raw(&mut *sub_join)}),
};
loop {
match (args_iter.next(), ptr_iter.next()) {
(Some(arg), Some(ptr)) => {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: ResultReceiver::Join(ptr, unsafe{Box::from_raw(&mut *sub_join)}),
};
self.deque.push(forked_task);
},
_ => break,
}
}
mem::forget(sub_join); // Don't drop here, last task will take care of that in handle_done
Some(first_task)
},
AlgoStyle::Search => {
for arg in args.into_iter() {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: join.clone(),
};
self.deque.push(forked_task);
}
None
}
}
}
}
fn handle_fork_zero(&self, algo: Algorithm<Arg, Ret>, join: ResultReceiver<Ret>, joinarg: Option<Ret>) {
match algo.style {
AlgoStyle::Reduce(ref reducestyle) => {
let joinres = match *reducestyle {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&Vec::new()[..]),
ReduceStyle::Arg(ref joinfun) => {
let arg = joinarg.unwrap();
(joinfun)(&arg, &Vec::new()[..])
}
};
self.handle_done(join, joinres);
},
_ => (),
}
}
fn handle_done(&self, join: ResultReceiver<Ret>, value: Ret) {
match join {
ResultReceiver::Join(ptr, joinbarrier) => {
unsafe { write(*ptr, value); } // Write without dropping; the slot holds uninitialized memory
if joinbarrier.ret_counter.fetch_sub(1, Ordering::SeqCst) == 1 {
let joinres = match joinbarrier.joinfun {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&joinbarrier.joinfunarg),
ReduceStyle::Arg(ref joinfun) => {
let joinarg = match joinbarrier.joinarg.as_ref() {
None => panic!("Algorithm has ReduceStyle::Arg, but no extra arg passed"),
Some(arg) => arg,
};
(joinfun)(joinarg, &joinbarrier.joinfunarg)
},
};
self.handle_done(joinbarrier.parent, joinres);
} else {
mem::forget(joinbarrier) // Don't drop if we are not last task
}
}
ResultReceiver::Channel(channel) => {
channel.lock().unwrap().send(value).unwrap();
}
}
}
}
#[cfg(feature = "threadstats")]
impl<Arg: Send, Ret: Send + Sync> Drop for WorkerThread<Arg, Ret> {
fn drop(&mut self) {
println!("Worker[{}] (t: {}, steals: {}, failed: {}, sleep: {}, first: {})",
self.id,
self.stats.exec_tasks,
self.stats.steals,
self.stats.steal_fails,
self.stats.sleep_us,
self.stats.first_after);
}
}
struct ThreadStats {
pub steals: usize,
pub steal_fails: usize,
pub exec_tasks: usize,
pub sleep_us: usize,
pub first_after: usize,
}
fn create_result_vec<Ret>(n: usize) -> (Vec<Ret>, PtrIter<Ret>) {
let mut rets: Vec<Ret> = Vec::with_capacity(n);
unsafe {
rets.set_len(n); // Claim the full length up front; the slots stay uninitialized until written
let ptr_0: *mut Ret = rets.get_unchecked_mut(0);
let ptr_iter = PtrIter {
ptr_0: ptr_0,
offset: 0,
};
(rets, ptr_iter)
}
}
struct PtrIter<Ret> {
ptr_0: *mut Ret,
offset: isize,
}
impl<Ret> Iterator for PtrIter<Ret> {
type Item = Unique<Ret>;
fn next(&mut self) -> Option<Self::Item> {
|
let ptr = unsafe { Unique::new(self.ptr_0.offset(self.offset)) };
self.offset += 1;
Some(ptr)
}
|
identifier_body
|
|
workerthread.rs
|
{Task,JoinBarrier,TaskResult,ResultReceiver,AlgoStyle,ReduceStyle,Algorithm};
use ::poolsupervisor::SupervisorMsg;
static STEAL_TRIES_UNTIL_BACKOFF: u32 = 30;
static BACKOFF_INC_US: u32 = 10;
pub struct WorkerThread<Arg: Send, Ret: Send + Sync> {
id: usize,
started: bool,
supervisor_port: Receiver<()>,
supervisor_channel: Sender<SupervisorMsg<Arg, Ret>>,
deque: Worker<Task<Arg, Ret>>,
stealer: Stealer<Task<Arg, Ret>>,
other_stealers: Vec<Stealer<Task<Arg, Ret>>>,
rng: XorShiftRng,
sleepers: Arc<AtomicUsize>,
threadcount: usize,
stats: ThreadStats,
}
impl<'a, Arg: Send + 'a, Ret: Send + Sync + 'a> WorkerThread<Arg,Ret> {
pub fn new(id: usize,
port: Receiver<()>,
channel: Sender<SupervisorMsg<Arg,Ret>>,
supervisor_queue: Stealer<Task<Arg, Ret>>,
sleepers: Arc<AtomicUsize>) -> WorkerThread<Arg,Ret> {
let (worker, stealer) = deque::new();
WorkerThread {
id: id,
started: false,
supervisor_port: port,
supervisor_channel: channel,
deque: worker,
stealer: stealer,
other_stealers: vec![supervisor_queue],
rng: weak_rng(),
sleepers: sleepers,
threadcount: 1, // Myself
stats: ThreadStats{exec_tasks: 0, steals: 0, steal_fails: 0, sleep_us: 0, first_after: 1},
}
}
pub fn get_stealer(&self) -> Stealer<Task<Arg,Ret>> {
assert!(!self.started);
self.stealer.clone()
}
pub fn add_other_stealer(&mut self, stealer: Stealer<Task<Arg,Ret>>) {
assert!(!self.started);
self.other_stealers.push(stealer);
self.threadcount += 1;
}
pub fn spawn(mut self) -> thread_scoped::JoinGuard<'a, ()> {
assert!(!self.started);
self.started = true;
unsafe {
thread_scoped::scoped(move|| {
self.main_loop();
})
}
}
fn main_loop(mut self) {
loop {
match self.supervisor_port.recv() {
Err(_) => break, // PoolSupervisor has been dropped, let's quit.
Ok(_) => { // Supervisor instructs us to start working
loop {
self.process_queue();
match self.steal() {
Some(task) => self.execute_task(task),
None => break, // Give up for now
}
}
}
}
if self.supervisor_channel.send(SupervisorMsg::OutOfWork(self.id)).is_err() {
break; // Supervisor shut down, so we also shut down
}
}
}
fn process_queue(&mut self) {
while let Some(task) = self.deque.pop() {
self.execute_task(task);
}
}
fn execute_task(&mut self, task: Task<Arg, Ret>) {
let mut next_task: Option<Task<Arg,Ret>> = Some(task);
while let Some(task) = next_task {
if cfg!(feature = "threadstats") {self.stats.exec_tasks += 1;}
let fun = task.algo.fun;
match (fun)(task.arg) {
TaskResult::Done(ret) => {
self.handle_done(task.join, ret);
next_task = None;
},
TaskResult::Fork(args, joinarg) => {
next_task = self.handle_fork(task.algo, task.join, args, joinarg);
}
}
}
}
fn steal(&mut self) -> Option<Task<Arg,Ret>> {
if self.other_stealers.is_empty() {
None // No one to steal from
} else {
let mut backoff_sleep: u32 = BACKOFF_INC_US;
for try in 0.. {
match self.try_steal() {
Some(task) => {
if cfg!(feature = "threadstats") && self.stats.first_after == 1 {
self.stats.first_after = self.stats.sleep_us;
}
return Some(task);
}
None => if try > STEAL_TRIES_UNTIL_BACKOFF {
self.sleepers.fetch_add(1, Ordering::SeqCst); // Check number here and set special state if last worker
if cfg!(feature = "threadstats") {self.stats.sleep_us += backoff_sleep as usize;}
unsafe { usleep(backoff_sleep); }
backoff_sleep += BACKOFF_INC_US;
if self.threadcount == self.sleepers.load(Ordering::SeqCst) {
break; // Give up
} else {
if self.threadcount == self.sleepers.fetch_sub(1, Ordering::SeqCst) {
self.sleepers.fetch_add(1, Ordering::SeqCst);
break; // Also give up
}
}
},
}
}
None
|
/// Try to steal tasks from the other workers.
/// Starts at a random worker and tries every worker until a task is stolen or
/// every worker has been tried once.
fn try_steal(&mut self) -> Option<Task<Arg,Ret>> {
let len = self.other_stealers.len();
let start_victim = self.rng.gen_range(0, len);
for offset in 0..len {
match self.other_stealers[(start_victim + offset) % len].steal() {
Stolen::Data(task) => {
if cfg!(feature = "threadstats") {self.stats.steals += 1;}
return Some(task);
}
Stolen::Empty | Stolen::Abort => {
if cfg!(feature = "threadstats") {self.stats.steal_fails += 1;}
continue;
}
}
}
None
}
fn handle_fork(&self,
algo: Algorithm<Arg, Ret>,
join: ResultReceiver<Ret>,
args: Vec<Arg>,
joinarg: Option<Ret>) -> Option<Task<Arg,Ret>>
{
let len: usize = args.len();
if len == 0 {
self.handle_fork_zero(algo, join, joinarg);
None
} else {
match algo.style {
AlgoStyle::Reduce(reducestyle) => {
let (vector, mut ptr_iter) = create_result_vec::<Ret>(len);
let mut sub_join = Box::new(JoinBarrier {
ret_counter: AtomicUsize::new(len),
joinfun: reducestyle,
joinarg: joinarg,
joinfunarg: vector,
parent: join,
});
let mut args_iter = args.into_iter();
let first_task = Task {
algo: algo.clone(),
arg: args_iter.next().unwrap(),
join: ResultReceiver::Join(ptr_iter.next().unwrap(), unsafe{Box::from_raw(&mut *sub_join)}),
};
loop {
match (args_iter.next(), ptr_iter.next()) {
(Some(arg), Some(ptr)) => {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: ResultReceiver::Join(ptr, unsafe{Box::from_raw(&mut *sub_join)}),
};
self.deque.push(forked_task);
},
_ => break,
}
}
mem::forget(sub_join); // Don't drop here, last task will take care of that in handle_done
Some(first_task)
},
AlgoStyle::Search => {
for arg in args.into_iter() {
let forked_task = Task {
algo: algo.clone(),
arg: arg,
join: join.clone(),
};
self.deque.push(forked_task);
}
None
}
}
}
}
fn handle_fork_zero(&self, algo: Algorithm<Arg, Ret>, join: ResultReceiver<Ret>, joinarg: Option<Ret>) {
match algo.style {
AlgoStyle::Reduce(ref reducestyle) => {
let joinres = match *reducestyle {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&Vec::new()[..]),
ReduceStyle::Arg(ref joinfun) => {
let arg = joinarg.unwrap();
(joinfun)(&arg, &Vec::new()[..])
}
};
self.handle_done(join, joinres);
},
_ => (),
}
}
fn handle_done(&self, join: ResultReceiver<Ret>, value: Ret) {
match join {
ResultReceiver::Join(ptr, joinbarrier) => {
unsafe { write(*ptr, value); } // Write without dropping; the slot holds uninitialized memory
if joinbarrier.ret_counter.fetch_sub(1, Ordering::SeqCst) == 1 {
let joinres = match joinbarrier.joinfun {
ReduceStyle::NoArg(ref joinfun) => (joinfun)(&joinbarrier.joinfunarg),
ReduceStyle::Arg(ref joinfun) => {
let joinarg = match joinbarrier.joinarg.as_ref() {
None => panic!("Algorithm has Reduce
|
}
}
|
random_line_split
|
server.go
|
: Primary ping view number " + strconv.Itoa(int(args.Viewnum)) + "--> Current viewnum is " + strconv.Itoa(int(vs.currentView.Viewnum)))
// fmt.Println("CAODAN ERHAO *(*(*(*(*(*(*(*(")
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
// Do nothing.
testLog("ACKED = False && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
// fmt.Println("TEST #4: ", vs.currentView)
} else {
// If another server pings
// put it into the idle list
if !idleContains(vs.idle, args.Me) {
testLog("ACKED = False && OTHERS -> " + args.Me + " added to idle")
vs.idle = append(vs.idle, args.Me)
}
testLog("ACKED = False && OTHERS -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
}
} else {
// If acked is true
if args.Me == vs.currentView.Primary { // If primary pings
// If the ping number equals the current view, do nothing
// If the ping number is less than the current view but more than 0,
// also do nothing
if args.Viewnum == 0 {
// If ping number is 0 (Primary restarts)
testLog("ACKED = True && PRIMARY -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Primary RESTARTS!!!")
testLog("ACKED = True && PRIMARY (View before changed) " + convertView(vs.currentView))
if vs.currentView.Backup != "" {
// If has backup
// promote backup and put the old primary into idle
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = args.Me
// Change ack
vs.acked = false
// Increase view number
vs.increaseViewNum()
// fmt.Println("TEST #5: ", vs.currentView)
testLog("ACKED = True to False && PRIMARY (View after changed) " + convertView(vs.currentView))
} else {
testLog("ACKED = True && PRIMARY is STUCK " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
// If no backup
// STUCK!!
}
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
if args.Viewnum == 0 {
// If the ping number is 0 (restart)
// Set backup to "", put old backup to idle, set ack = false
testLog("ACKED = True && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Backup RESTARTS!!!")
vs.currentView.Backup = ""
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && BACKUP -> Current view is " + convertView(vs.currentView))
// fmt.Println("TEST #6: ", vs.currentView)
}
} else {
// If another server pings
testLog("ACKED = True && OTHERS -> " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
if vs.currentView.Backup == "" {
// If no backup
// put ping server into backup and set acked = false
// fmt.Println("Before backup added: ", vs.currentView)
// fmt.Println("Add backup: ", args.Me)
if idleContains(vs.idle, args.Me) {
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
} else {
vs.currentView.Backup = args.Me
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && OTHERS -> View after change " + convertView(vs.currentView))
// fmt.Println("After backup added: ", vs.currentView)
} else {
// If already has backup
// put it into idle
// fmt.Println("TEST #600: put", args.Me, " in idle")
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
}
}
}
}
// Set up the return view
vs.pingKeeper[args.Me] = time.Now()
reply.View = vs.currentView
testLog("Returning view" + convertView(vs.currentView))
vs.viewMu.Unlock()
return nil
}
//
// server Get() RPC handler.
//
func (vs *ViewServer) Get(args *GetArgs, reply *GetReply) error {
vs.viewMu.Lock()
reply.View = vs.currentView
vs.viewMu.Unlock()
return nil
}
//
// tick() is called once per PingInterval; it should notice
// if servers have died or recovered, and change the view
// accordingly.
func (vs *ViewServer) tick() {
vs.viewMu.Lock()
// Primary != ""
// See if any server died
for k, v := range vs.pingKeeper {
server := k
difference := time.Since(v)
if difference > PingInterval * DeadPings {
switch server {
case vs.currentView.Primary:
testLog("TICKED PRIMARY DIED " + convertView(vs.currentView))
// Primary died
// Check if acked is true
// fmt.Println("Primary: ", vs.currentView.Primary, " died")
if vs.acked {
// Check if backup is available
if vs.currentView.Backup != "" {
// fmt.Println("Put backup: ", vs.currentView.Backup, " in")
// Turn backup into primary
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = ""
// Turn idle into backup
if len(vs.idle) > 0 {
// fmt.Println("TEST #150: ", vs.currentView)
vs.currentView.Backup = vs.idle[0]
// fmt.Println("TEST #151: ", vs.currentView)
vs.idle = vs.idle[1:]
}
vs.acked = false
vs.pingKeeper[k] = time.Now()
vs.increaseViewNum()
testLog("ACKED = TRUE && PRIMARY DIED -> New view is " + convertView(vs.currentView))
}
// fmt.Println("TEST #1: ", vs.currentView)
} else {
// crash!!!!
}
case vs.currentView.Backup:
// Backup died
// Check if acked is true
// fmt.Println("Backup: ", vs.currentView.Backup, " died")
testLog("TICKED BACKUP DIED " + convertView(vs.currentView))
if vs.acked {
// fmt.Println("TEST #180: ", vs.currentView)
vs.currentView.Backup = ""
if len(vs.idle) > 0 {
vs.currentView.Backup = vs.idle[0]
// fmt.Println("TEST #2: ", vs.currentView)
vs.idle = vs.idle[1:]
}
vs.acked = false
vs.increaseViewNum()
testLog("ACKED = TRUE && BACKUP DIED -> New view is " + convertView(vs.currentView))
} else {
// crash!!!!
}
default:
// Idle died
// Delete from idle
for i, idleServer := range vs.idle {
if server == idleServer {
vs.idle = append(vs.idle[0:i], vs.idle[i+1:]...)
}
}
}
}
}
vs.viewMu.Unlock()
}
//
// tell the server to shut itself down.
// for testing.
// please don't change this function.
//
func (vs *ViewServer) Kill() {
vs.dead = true
vs.l.Close()
}
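// Liveness rule used by tick() above: a server is considered dead once
// time.Since(lastPing) > DeadPings * PingInterval. With the usual
// viewservice settings (assumed here) of PingInterval = 100ms and
// DeadPings = 5, that is a 500ms grace period -- five missed pings in
// a row.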
func StartServer(me string) *ViewServer
|
{
vs := new(ViewServer)
vs.me = me
// Your vs.* initializations here.
vs.idle = make([]string, 0)
vs.currentView = View{0, "", ""}
vs.acked = false
vs.pingKeeper = make(map[string]time.Time)
// tell net/rpc about our RPC server and handlers.
rpcs := rpc.NewServer()
rpcs.Register(vs)
// prepare to receive connections from clients.
// change "unix" to "tcp" to use over a network.
os.Remove(vs.me) // only needed for "unix"
l, e := net.Listen("unix", vs.me)
if e != nil {
log.Fatal("listen error: ", e)
}
|
identifier_body
|
|
server.go
|
view# " + strconv.Itoa(int(view.Viewnum)) + ", primary: " + view.Primary + ", backup: " + view.Backup + ". "
}
//
// server Ping RPC handler.
//
func (vs *ViewServer) Ping(args *PingArgs, reply *PingReply) error {
vs.viewMu.Lock()
if vs.getViewNum() == 0 {
// The service has no established view yet (view number 0)
// If the pinging server also reports view number 0
if args.Viewnum == 0 {
// put the ping server into primary
vs.currentView.Primary = args.Me
vs.acked = false
vs.increaseViewNum()
} else {
// If ping view number is greater than 0
// ignore this case
}
} else {
// The service already has an established view
if vs.acked == false {
// If acked is false
if args.Me == vs.currentView.Primary {
// If primary pings
// If the ping number equals the current view number
if args.Viewnum == vs.getViewNum() {
// set acked = true
vs.acked = true
if vs.currentView.Backup == "" && len(vs.idle) > 0 {
testLog("ACKED = False && PRIMARY -> Move Idle " + vs.idle[0] + "to backup")
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
}
testLog("ACKED = False && PRIMARY -> Current view: " + convertView(vs.currentView) + " is acked by primary" + args.Me)
// fmt.Println(vs.currentView, " acked by ", vs.currentView.Primary)
} else {
// If the ping number is less than the current view (including the case when the primary restarts)
// ignore this case
testLog("ACKED = False && PRIMARY -> WRONG CASE: Primary ping view number " + strconv.Itoa(int(args.Viewnum)) + "--> Current viewnum is " + strconv.Itoa(int(vs.currentView.Viewnum)))
// fmt.Println("CAODAN ERHAO *(*(*(*(*(*(*(*(")
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
// Do nothing.
testLog("ACKED = False && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
// fmt.Println("TEST #4: ", vs.currentView)
} else {
// If another server pings
// put it into the idle list
if !idleContains(vs.idle, args.Me) {
testLog("ACKED = False && OTHERS -> " + args.Me + " added to idle")
vs.idle = append(vs.idle, args.Me)
}
testLog("ACKED = False && OTHERS -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
}
} else {
// If acked is true
if args.Me == vs.currentView.Primary { // If primary pings
// If the ping number equals the current view, do nothing
// If the ping number is less than the current view but more than 0,
// also do nothing
if args.Viewnum == 0 {
// If ping number is 0 (Primary restarts)
testLog("ACKED = True && PRIMARY -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Primary RESTARTS!!!")
testLog("ACKED = True && PRIMARY (View before changed) " + convertView(vs.currentView))
if vs.currentView.Backup != "" {
// If has backup
// promote backup and put the old primary into idle
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = args.Me
// Change ack
vs.acked = false
// Increase view number
vs.increaseViewNum()
// fmt.Println("TEST #5: ", vs.currentView)
testLog("ACKED = True to False && PRIMARY (View after changed) " + convertView(vs.currentView))
} else {
testLog("ACKED = True && PRIMARY is STUCK " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
// If no backup
// STUCK!!
}
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
if args.Viewnum == 0 {
// If the ping number is 0 (restart)
// Set backup to "", put old backup to idle, set ack = false
testLog("ACKED = True && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Backup RESTARTS!!!")
vs.currentView.Backup = ""
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && BACKUP -> Current view is " + convertView(vs.currentView))
// fmt.Println("TEST #6: ", vs.currentView)
}
} else {
// If another server pings
testLog("ACKED = True && OTHERS -> " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
if vs.currentView.Backup == "" {
// If no backup
// put ping server into backup and set acked = false
// fmt.Println("Before backup added: ", vs.currentView)
// fmt.Println("Add backup: ", args.Me)
if idleContains(vs.idle, args.Me) {
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
} else {
vs.currentView.Backup = args.Me
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && OTHERS -> View after change " + convertView(vs.currentView))
// fmt.Println("After backup added: ", vs.currentView)
} else {
// If already has backup
// put it into idle
// fmt.Println("TEST #600: put", args.Me, " in idle")
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
}
}
}
}
// Set up the return view
vs.pingKeeper[args.Me] = time.Now()
reply.View = vs.currentView
testLog("Returning view" + convertView(vs.currentView))
vs.viewMu.Unlock()
return nil
}
//
// server Get() RPC handler.
//
func (vs *ViewServer) Get(args *GetArgs, reply *GetReply) error {
vs.viewMu.Lock()
reply.View = vs.currentView
vs.viewMu.Unlock()
return nil
}
//
// tick() is called once per PingInterval; it should notice
// if servers have died or recovered, and change the view
// accordingly.
func (vs *ViewServer) tick() {
vs.viewMu.Lock()
// Primary != ""
// See if any server died
for k, v := range vs.pingKeeper {
server := k
difference := time.Since(v)
if difference > PingInterval * DeadPings {
switch server {
case vs.currentView.Primary:
testLog("TICKED PRIMARY DIED " + convertView(vs.currentView))
// Primary died
// Check if acked is true
// fmt.Println("Primary: ", vs.currentView.Primary, " died")
if vs.acked {
// Check if backup is available
|
if vs.currentView.Backup != "" {
// fmt.Println("Put backup: ", vs.currentView.Backup, " in")
// Turn backup into primary
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = ""
// Turn idle into backup
if len(vs.idle) > 0 {
// fmt.Println("TEST #150: ", vs.currentView)
vs.currentView.Backup = vs.idle[0]
// fmt.Println("TEST #151: ", vs.currentView)
vs.idle = vs.idle[1:]
}
vs.acked = false
vs.pingKeeper[k] = time.Now()
vs.increaseViewNum()
testLog("ACKED = TRUE && PRIMARY DIED -> New view is " + convertView(vs.currentView))
}
// fmt.Println("TEST #1: ", vs.currentView)
} else {
// crash!!!!
}
case vs.currentView.Backup:
// Backup died
// Check if acked is true
// fmt.Println("Backup: ", vs.currentView.Backup, " died")
testLog("TICKED BACKUP D
|
random_line_split
|
|
server.go
|
view# " + strconv.Itoa(int(view.Viewnum)) + ", primary: " + view.Primary + ", backup: " + view.Backup + ". "
}
//
// server Ping RPC handler.
//
func (vs *ViewServer) Ping(args *PingArgs, reply *PingReply) error {
vs.viewMu.Lock()
if vs.getViewNum() == 0 {
// The service has no established view yet (view number 0)
// If the pinging server also reports view number 0
if args.Viewnum == 0 {
// put the ping server into primary
vs.currentView.Primary = args.Me
vs.acked = false
vs.increaseViewNum()
} else {
// If ping view number is greater than 0
// ignore this case
}
} else {
// The service already has an established view
if vs.acked == false {
// If acked is false
if args.Me == vs.currentView.Primary {
// If primary pings
// If the ping number equals the current view number
if args.Viewnum == vs.getViewNum() {
// set acked = true
vs.acked = true
if vs.currentView.Backup == "" && len(vs.idle) > 0 {
testLog("ACKED = False && PRIMARY -> Move Idle " + vs.idle[0] + "to backup")
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
}
testLog("ACKED = False && PRIMARY -> Current view: " + convertView(vs.currentView) + " is acked by primary" + args.Me)
// fmt.Println(vs.currentView, " acked by ", vs.currentView.Primary)
} else {
// If the ping number is less than the current view (including the case when the primary restarts)
// ignore this case
testLog("ACKED = False && PRIMARY -> WRONG CASE: Primary ping view number " + strconv.Itoa(int(args.Viewnum)) + "--> Current viewnum is " + strconv.Itoa(int(vs.currentView.Viewnum)))
// fmt.Println("CAODAN ERHAO *(*(*(*(*(*(*(*(")
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
// Do nothing.
testLog("ACKED = False && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
// fmt.Println("TEST #4: ", vs.currentView)
} else {
// If another server pings
// put it into the idle list
if !idleContains(vs.idle, args.Me) {
testLog("ACKED = False && OTHERS -> " + args.Me + " added to idle")
vs.idle = append(vs.idle, args.Me)
}
testLog("ACKED = False && OTHERS -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
}
} else {
// If acked is true
if args.Me == vs.currentView.Primary { // If primary pings
// If the ping number equals the current view, do nothing
// If the ping number is less than the current view but more than 0,
// also do nothing
if args.Viewnum == 0 {
// If ping number is 0 (Primary restarts)
testLog("ACKED = True && PRIMARY -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Primary RESTARTS!!!")
testLog("ACKED = True && PRIMARY (View before changed) " + convertView(vs.currentView))
if vs.currentView.Backup != "" {
// If has backup
// promote backup and put the old primary into idle
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = args.Me
// Change ack
vs.acked = false
// Increase view number
vs.increaseViewNum()
// fmt.Println("TEST #5: ", vs.currentView)
testLog("ACKED = True to False && PRIMARY (View after changed) " + convertView(vs.currentView))
} else {
testLog("ACKED = True && PRIMARY is STUCK " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
// If no backup
// STUCK!!
}
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
if args.Viewnum == 0 {
// If the ping number is 0 (restart)
// Set backup to "", put old backup to idle, set ack = false
testLog("ACKED = True && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Backup RESTARTS!!!")
vs.currentView.Backup = ""
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && BACKUP -> Current view is " + convertView(vs.currentView))
// fmt.Println("TEST #6: ", vs.currentView)
}
} else {
// If another server pings
testLog("ACKED = True && OTHERS -> " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
if vs.currentView.Backup == ""
|
else {
// If already has backup
// put it into idle
// fmt.Println("TEST #600: put", args.Me, " in idle")
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
}
}
}
}
// Set up the return view
vs.pingKeeper[args.Me] = time.Now()
reply.View = vs.currentView
testLog("Returning view" + convertView(vs.currentView))
vs.viewMu.Unlock()
return nil
}
//
// server Get() RPC handler.
//
func (vs *ViewServer) Get(args *GetArgs, reply *GetReply) error {
vs.viewMu.Lock()
reply.View = vs.currentView
vs.viewMu.Unlock()
return nil
}
//
// tick() is called once per PingInterval; it should notice
// if servers have died or recovered, and change the view
// accordingly.
func (vs *ViewServer) tick() {
vs.viewMu.Lock()
// Primary != ""
// See if any server died
for k, v := range vs.pingKeeper {
server := k
difference := time.Since(v)
if difference > PingInterval * DeadPings {
switch server {
case vs.currentView.Primary:
testLog("TICKED PRIMARY DIED " + convertView(vs.currentView))
// Primary died
// Check if acked is true
// fmt.Println("Primary: ", vs.currentView.Primary, " died")
if vs.acked {
// Check if backup is available
if vs.currentView.Backup != "" {
// fmt.Println("Put backup: ", vs.currentView.Backup, " in")
// Turn backup into primary
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = ""
// Turn idle into backup
if len(vs.idle) > 0 {
// fmt.Println("TEST #150: ", vs.currentView)
vs.currentView.Backup = vs.idle[0]
// fmt.Println("TEST #151: ", vs.currentView)
vs.idle = vs.idle[1:]
}
vs.acked = false
vs.pingKeeper[k] = time.Now()
vs.increaseViewNum()
testLog("ACKED = TRUE && PRIMARY DIED -> New view is " + convertView(vs.currentView))
}
// fmt.Println("TEST #1: ", vs.currentView)
} else {
// crash!!!!
}
case vs.currentView.Backup:
// Backup died
// Check if acked is true
// fmt.Println("Backup: ", vs.currentView.Backup, " died")
testLog("TICKED BACK
|
{
// If no backup
// put ping server into backup and set acked = false
// fmt.Println("Before backup added: ", vs.currentView)
// fmt.Println("Add backup: ", args.Me)
if idleContains(vs.idle, args.Me) {
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
} else {
vs.currentView.Backup = args.Me
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && OTHERS -> View after change " + convertView(vs.currentView))
// fmt.Println("After backup added: ", vs.currentView)
}
|
conditional_block
|
server.go
|
(message string) {
// Log the put operation into log file.
f, err := os.OpenFile("TestLog.txt", os.O_APPEND|os.O_RDWR|os.O_CREATE , 0777)
if err != nil {
panic(err)
}
defer f.Close()
if _, err = f.WriteString(message + "\n"); err != nil {
panic(err)
}
}
func (vs *ViewServer) increaseViewNum() {
testLog("IncreaseViewNum called *** New view, Primary: " + vs.currentView.Primary + ", Backup is " + vs.currentView.Backup)
vs.mu.Lock()
vs.currentView.Viewnum++
vs.mu.Unlock()
}
func (vs *ViewServer) getViewNum() (result uint) {
vs.mu.Lock()
result = vs.currentView.Viewnum
vs.mu.Unlock()
return
}
func convertView(view View) string {
return "view# " + strconv.Itoa(int(view.Viewnum)) + ", primary: " + view.Primary + ", backup: " + view.Backup + ". "
}
//
// server Ping RPC handler.
//
func (vs *ViewServer) Ping(args *PingArgs, reply *PingReply) error {
vs.viewMu.Lock()
if vs.getViewNum() == 0 {
// The service has no established view yet (view number 0)
// If the pinging server also reports view number 0
if args.Viewnum == 0 {
// put the ping server into primary
vs.currentView.Primary = args.Me
vs.acked = false
vs.increaseViewNum()
} else {
// If ping view number is greater than 0
// ignore this case
}
} else {
// The service already has an established view
if vs.acked == false {
// If acked is false
if args.Me == vs.currentView.Primary {
// If primary pings
// If the ping number equals the current view number
if args.Viewnum == vs.getViewNum() {
// set acked = true
vs.acked = true
if vs.currentView.Backup == "" && len(vs.idle) > 0 {
testLog("ACKED = False && PRIMARY -> Move Idle " + vs.idle[0] + "to backup")
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
}
testLog("ACKED = False && PRIMARY -> Current view: " + convertView(vs.currentView) + " is acked by primary" + args.Me)
// fmt.Println(vs.currentView, " acked by ", vs.currentView.Primary)
} else {
// If the ping number is less than the current view (including the case when the primary restarts)
// ignore this case
testLog("ACKED = False && PRIMARY -> WRONG CASE: Primary ping view number " + strconv.Itoa(int(args.Viewnum)) + "--> Current viewnum is " + strconv.Itoa(int(vs.currentView.Viewnum)))
// fmt.Println("CAODAN ERHAO *(*(*(*(*(*(*(*(")
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
// Do nothing.
testLog("ACKED = False && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
// fmt.Println("TEST #4: ", vs.currentView)
} else {
// If another server pings
// put it into the idle list
if !idleContains(vs.idle, args.Me) {
testLog("ACKED = False && OTHERS -> " + args.Me + " added to idle")
vs.idle = append(vs.idle, args.Me)
}
testLog("ACKED = False && OTHERS -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)))
}
} else {
// If acked is true
if args.Me == vs.currentView.Primary { // If primary pings
// If the ping number equals the current view, do nothing
// If the ping number is less than the current view but more than 0,
// also do nothing
if args.Viewnum == 0 {
// If ping number is 0 (Primary restarts)
testLog("ACKED = True && PRIMARY -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Primary RESTARTS!!!")
testLog("ACKED = True && PRIMARY (View before changed) " + convertView(vs.currentView))
if vs.currentView.Backup != "" {
// If has backup
// promote backup and put the old primary into idle
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = args.Me
// Change ack
vs.acked = false
// Increase view number
vs.increaseViewNum()
// fmt.Println("TEST #5: ", vs.currentView)
testLog("ACKED = True to False && PRIMARY (View after changed) " + convertView(vs.currentView))
} else {
testLog("ACKED = True && PRIMARY is STUCK " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
// If no backup
// STUCK!!
}
}
} else if args.Me == vs.currentView.Backup {
// If backup pings
if args.Viewnum == 0 {
// If the ping number is 0 (restart)
// Set backup to "", put old backup to idle, set ack = false
testLog("ACKED = True && BACKUP -> " + args.Me + " pinged view " + strconv.Itoa(int(args.Viewnum)) + " Backup RESTARTS!!!")
vs.currentView.Backup = ""
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && BACKUP -> Current view is " + convertView(vs.currentView))
// fmt.Println("TEST #6: ", vs.currentView)
}
} else {
// If another server pings
testLog("ACKED = True && OTHERS -> " + args.Me + " pinged " + strconv.Itoa(int(args.Viewnum)))
if vs.currentView.Backup == "" {
// If no backup
// put ping server into backup and set acked = false
// fmt.Println("Before backup added: ", vs.currentView)
// fmt.Println("Add backup: ", args.Me)
if idleContains(vs.idle, args.Me) {
vs.currentView.Backup = vs.idle[0]
vs.idle = vs.idle[1:]
} else {
vs.currentView.Backup = args.Me
}
vs.acked = false
// Increase view number
vs.increaseViewNum()
testLog("ACKED = True to False && OTHERS -> View after change " + convertView(vs.currentView))
// fmt.Println("After backup added: ", vs.currentView)
} else {
// If already has backup
// put it into idle
// fmt.Println("TEST #600: put", args.Me, " in idle")
if !idleContains(vs.idle, args.Me) {
vs.idle = append(vs.idle, args.Me)
}
}
}
}
}
// Set up the return view
vs.pingKeeper[args.Me] = time.Now()
reply.View = vs.currentView
testLog("Returning view" + convertView(vs.currentView))
vs.viewMu.Unlock()
return nil
}
//
// server Get() RPC handler.
//
func (vs *ViewServer) Get(args *GetArgs, reply *GetReply) error {
vs.viewMu.Lock()
reply.View = vs.currentView
vs.viewMu.Unlock()
return nil
}
//
// tick() is called once per PingInterval; it should notice
// if servers have died or recovered, and change the view
// accordingly.
func (vs *ViewServer) tick() {
vs.viewMu.Lock()
// Primary != ""
// See if any server died
for k, v := range vs.pingKeeper {
server := k
difference := time.Since(v)
if difference > PingInterval * DeadPings {
switch server {
case vs.currentView.Primary:
testLog("TICKED PRIMARY DIED " + convertView(vs.currentView))
// Primary died
// Check if acked is true
// fmt.Println("Primary: ", vs.currentView.Primary, " died")
if vs.acked {
// Check if backup is available
if vs.currentView.Backup != "" {
// fmt.Println("Put backup: ", vs.currentView.Backup, " in")
// Turn backup into primary
vs.currentView.Primary = vs.currentView.Backup
vs.currentView.Backup = ""
// Turn idle into backup
if len(vs.idle) > 0 {
// fmt.Println
|
testLog
|
identifier_name
|
|
listener.go
|
, batches them and then publishes them to a NATS
// subject.
type Listener struct {
c *config.Config
nc *nats.Conn
stats *stats.Stats
probes probes.Probes
batch *batch.Batch
wg sync.WaitGroup
stop chan struct{}
mu sync.Mutex // only used for HTTP listener
}
// Stop shuts down a running listener. It should be called exactly
// once for every Listener instance.
func (l *Listener) Stop() {
l.probes.SetReady(false)
l.probes.SetAlive(false)
close(l.stop)
l.wg.Wait()
l.nc.Close()
l.probes.Close()
}
func newListener(c *config.Config) (*Listener, error) {
l := &Listener{
c: c,
stop: make(chan struct{}),
stats: stats.New(
statReceived,
statSent,
statReadErrors,
statFailedNATSPublish,
),
probes: probes.Listen(c.ProbePort),
batch: batch.New(int(c.BatchMaxSize.Bytes())),
}
nc, err := nats.Connect(l.c.NATSAddress, nats.MaxReconnects(-1), nats.Name(l.c.Name))
if err != nil {
return nil, err
}
l.nc = nc
return l, nil
}
func (l *Listener) setupUDP(configBufSize int) (*net.UDPConn, error) {
serverAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", l.c.Port))
if err != nil {
return nil, fmt.Errorf("failed to create UDP socket: %v", err)
}
sc, err := net.ListenUDP("udp", serverAddr)
if err != nil {
return nil, err
}
bufSize := roundUpToPageSize(configBufSize)
if bufSize != configBufSize {
log.Printf("rounding up receive buffer to nearest page size (now %d bytes)", bufSize)
}
if err := sc.SetReadBuffer(bufSize); err != nil {
return nil, err
}
log.Printf("listener bound to UDP socket: %v\n", sc.LocalAddr().String())
return sc, nil
}
func roundUpToPageSize(n int) int {
pageSize := os.Getpagesize()
if n <= 0 {
return pageSize
}
return (n + pageSize - 1) / pageSize * pageSize
}
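// Worked example (illustrative, assuming a typical 4096-byte page
// size): (n + pageSize - 1) / pageSize * pageSize rounds n up to the
// next multiple of pageSize using integer division, so:
//
//	roundUpToPageSize(1)    == 4096
//	roundUpToPageSize(4096) == 4096
//	roundUpToPageSize(4097) == 8192
//	roundUpToPageSize(0)    == 4096 (non-positive falls back to one page)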
func (l *Listener) listenUDP(sc *net.UDPConn) {
defer func() {
sc.Close()
l.wg.Done()
}()
l.probes.SetReady(true)
for {
// Read deadline is used so that the stop channel can be
// periodically checked.
sc.SetReadDeadline(time.Now().Add(time.Second))
bytesRead, err := l.batch.ReadOnceFrom(sc)
if err != nil && !isTimeout(err) {
l.stats.Inc(statReadErrors)
}
if bytesRead > 0 {
if l.c.Debug {
log.Printf("listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.batch.EnsureNewline()
}
l.maybeSendBatch()
select {
case <-l.stop:
return
default:
}
}
}
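// Standalone sketch of the shutdown pattern used by listenUDP above
// (illustrative only; pollLoop and its parameters are not from the
// original source): a one-second read deadline bounds how long the
// blocking read can delay noticing that the stop channel was closed.
func pollLoop(conn net.Conn, stop <-chan struct{}, buf []byte) {
	for {
		conn.SetReadDeadline(time.Now().Add(time.Second))
		if _, err := conn.Read(buf); err != nil {
			if ne, ok := err.(net.Error); !ok || !ne.Timeout() {
				return // real error rather than an expired deadline
			}
		}
		select {
		case <-stop:
			return
		default:
		}
	}
}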
func (l *Listener) setupHTTP() *http.Server {
l.wg.Add(1)
go l.oldBatchSender()
mux := http.NewServeMux()
mux.HandleFunc("/write", l.handleHTTPWrite)
return &http.Server{
Addr: fmt.Sprintf(":%d", l.c.Port),
Handler: mux,
}
}
// oldBatchSender is a goroutine which sends the batch once it reaches
// the configured maximum age. It is only used with the HTTP listener
// because the UDP listener does batch age handling in-line.
func (l *Listener) oldBatchSender() {
defer l.wg.Done()
for {
l.mu.Lock()
waitTime := l.c.BatchMaxAge.Duration - l.batch.Age()
l.mu.Unlock()
select {
case <-time.After(waitTime):
l.mu.Lock()
if l.batch.Age() >= l.c.BatchMaxAge.Duration {
l.sendBatch()
}
l.mu.Unlock()
case <-l.stop:
return
}
}
}
func (l *Listener) handleHTTPWrite(w http.ResponseWriter, r *http.Request) {
bytesRead, err := l.readHTTPBody(r)
if bytesRead > 0 {
if l.c.Debug {
log.Printf("HTTP listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.mu.Lock()
l.maybeSendBatch()
l.mu.Unlock()
}
if err != nil {
l.stats.Inc(statReadErrors)
}
w.WriteHeader(http.StatusNoContent)
}
func (l *Listener) readHTTPBody(r *http.Request) (int64, error) {
precision := r.URL.Query().Get("precision")
if precision == "" || precision == "ns" {
// Fast-path when timestamps are already in nanoseconds - no
// need for conversion.
return l.readHTTPBodyNanos(r)
}
// Non-nanosecond precision specified. Read lines individually and
// convert timestamps to nanoseconds.
count, err := l.readHTTPBodyWithPrecision(r, precision)
return int64(count), err
}
func (l *Listener) readHTTPBodyNanos(r *http.Request) (int64, error) {
l.mu.Lock()
defer l.mu.Unlock()
return l.batch.ReadFrom(r.Body)
}
func (l *Listener) readHTTPBodyWithPrecision(r *http.Request, precision string) (int, error) {
scanner := bufio.NewScanner(r.Body)
// scanLines is like bufio.ScanLines but the returned lines
// include the trailing newline. Leaving the newline on the line
// is useful for incoming lines that don't contain a timestamp and
// therefore should pass through unchanged.
scanner.Split(scanLines)
bytesRead := 0
for scanner.Scan() {
line := scanner.Bytes()
bytesRead += len(line)
if len(line) <= 1 {
continue
}
newLine := applyTimestampPrecision(line, precision)
l.mu.Lock()
l.batch.Append(newLine)
l.mu.Unlock()
}
return bytesRead, scanner.Err()
}
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
// We have a full newline-terminated line.
return i + 1, data[0 : i+1], nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), data, nil
}
// Request more data.
return 0, nil, nil
}
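// Usage sketch (illustrative): unlike bufio.ScanLines, the scanLines
// split function above keeps the trailing '\n' on each token, so the
// batch can append lines without re-adding newlines:
//
//	scanner := bufio.NewScanner(strings.NewReader("a 1\nb 2"))
//	scanner.Split(scanLines)
//	for scanner.Scan() {
//		fmt.Printf("%q\n", scanner.Bytes()) // "a 1\n" then "b 2"
//	}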
func applyTimestampPrecision(line []byte, precision string) []byte {
ts, offset := influx.ExtractTimestamp(line)
if offset == -1 {
return line
}
newTs, err := influx.SafeCalcTime(ts, precision)
if err != nil {
return line
}
newLine := make([]byte, offset, offset+influx.MaxTsLen+1)
copy(newLine, line[:offset])
newLine = strconv.AppendInt(newLine, newTs, 10)
return append(newLine, '\n')
}
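// Worked example (illustrative; the exact behaviour of
// influx.ExtractTimestamp and influx.SafeCalcTime is assumed here, not
// shown in this excerpt): with precision "s", a line such as
//
//	cpu,host=h0 usage=0.5 1500000000
//
// is rewritten with its timestamp converted to nanoseconds:
//
//	cpu,host=h0 usage=0.5 1500000000000000000
//
// Lines with no recognisable timestamp (offset == -1) pass through
// unchanged.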
func (l *Listener) listenHTTP(server *http.Server) {
defer l.wg.Done()
go func() {
l.probes.SetReady(true)
err := server.ListenAndServe()
if err == nil || err == http.ErrServerClosed {
return
}
log.Fatal(err)
}()
// Close the server if the stop channel is closed.
<-l.stop
server.Close()
}
func (l *Listener) maybeSendBatch() {
if l.shouldSend() {
l.sendBatch()
}
}
func (l *Listener) shouldSend() bool {
if l.batch.Writes() >= l.c.BatchMaxCount {
return true
}
if l.batch.Age() >= l.c.BatchMaxAge.Duration {
return true
}
// If the batch size is within a (maximum) UDP datagram of the
// configured target batch size, then force a send to avoid
// growing the batch unnecessarily (allocations hurt performance).
if int(l.c.BatchMaxSize.Bytes())-l.batch.Size() <= maxUDPDatagramSize {
return true
}
return false
}
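// Worked example of the headroom check (illustrative numbers): with
// BatchMaxSize = 1 MiB (1048576 bytes) and a current batch size of
// 1000000 bytes, the remaining headroom is 48576 bytes. That is below
// maxUDPDatagramSize (65536), so shouldSend returns true even though
// neither the write-count nor the age threshold has been reached.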
func (l *Listener) sendBatch()
|
{
if l.batch.Size() < 1 {
return // Nothing to do
}
l.stats.Inc(statSent)
// The goal is for the batch size to never be bigger than what
// NATS will accept but there is a small chance that a series of
// large incoming chunks could cause the batch to grow beyond the
// intended limit. For these cases, use the batchsplitter just in
// case. batchsplitter has very low overhead when no splitting is
// required.
splitter := batchsplitter.New(l.batch.Bytes(), config.MaxNATSMsgSize)
for splitter.Next() {
if err := l.nc.Publish(l.c.NATSSubject[0], splitter.Chunk()); err != nil {
l.stats.Inc(statFailedNATSPublish)
l.handleNatsError(err)
}
}
|
identifier_body
|
|
listener.go
|
2/probes"
"github.com/jumptrading/influx-spout/v2/stats"
nats "github.com/nats-io/nats.go"
)
const (
// Listener stats counters
statReceived = "received"
statSent = "sent"
statReadErrors = "read_errors"
statFailedNATSPublish = "failed_nats_publish"
// The maximum possible UDP read size.
maxUDPDatagramSize = 65536
)
// StartListener initialises a listener, starts its statistician
// goroutine and runs its main loop in the background.
//
// The listener reads incoming UDP packets, batches them up and sends
// them onwards to a NATS subject.
func StartListener(c *config.Config) (_ *Listener, err error) {
listener, err := newListener(c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
listener.Stop()
}
}()
sc, err := listener.setupUDP(int(c.ReadBufferSize.Bytes()))
if err != nil {
return nil, err
}
listener.wg.Add(2)
go listener.startStatistician()
go listener.listenUDP(sc)
log.Printf("UDP listener publishing to [%s] at %s", c.NATSSubject[0], c.NATSAddress)
return listener, nil
}
// StartHTTPListener initialises a listener configured to accept lines
// from HTTP request bodies instead of via UDP. It starts the listener
// and its statistician in the background.
func StartHTTPListener(c *config.Config) (*Listener, error) {
listener, err := newListener(c)
if err != nil {
return nil, err
}
server := listener.setupHTTP()
listener.wg.Add(2)
go listener.startStatistician()
go listener.listenHTTP(server)
log.Printf("HTTP listener publishing to [%s] at %s", c.NATSSubject[0], c.NATSAddress)
return listener, nil
}
// Listener accepts measurements in InfluxDB Line Protocol format via
// UDP or HTTP, batches them and then publishes them to a NATS
// subject.
type Listener struct {
c *config.Config
nc *nats.Conn
stats *stats.Stats
probes probes.Probes
batch *batch.Batch
wg sync.WaitGroup
stop chan struct{}
mu sync.Mutex // only used for HTTP listener
}
// Stop shuts down a running listener. It should be called exactly
// once for every Listener instance.
func (l *Listener) Stop() {
l.probes.SetReady(false)
l.probes.SetAlive(false)
close(l.stop)
l.wg.Wait()
l.nc.Close()
l.probes.Close()
}
func newListener(c *config.Config) (*Listener, error) {
l := &Listener{
c: c,
stop: make(chan struct{}),
stats: stats.New(
statReceived,
statSent,
statReadErrors,
statFailedNATSPublish,
),
probes: probes.Listen(c.ProbePort),
batch: batch.New(int(c.BatchMaxSize.Bytes())),
}
nc, err := nats.Connect(l.c.NATSAddress, nats.MaxReconnects(-1), nats.Name(l.c.Name))
if err != nil {
return nil, err
}
l.nc = nc
return l, nil
}
func (l *Listener) setupUDP(configBufSize int) (*net.UDPConn, error) {
serverAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", l.c.Port))
if err != nil {
return nil, fmt.Errorf("failed to create UDP socket: %v", err)
}
sc, err := net.ListenUDP("udp", serverAddr)
if err != nil {
return nil, err
}
bufSize := roundUpToPageSize(configBufSize)
if bufSize != configBufSize {
log.Printf("rounding up receive buffer to nearest page size (now %d bytes)", bufSize)
}
if err := sc.SetReadBuffer(bufSize); err != nil {
return nil, err
}
log.Printf("listener bound to UDP socket: %v\n", sc.LocalAddr().String())
return sc, nil
}
func roundUpToPageSize(n int) int {
pageSize := os.Getpagesize()
if n <= 0 {
return pageSize
}
return (n + pageSize - 1) / pageSize * pageSize
}
func (l *Listener) listenUDP(sc *net.UDPConn) {
defer func() {
sc.Close()
l.wg.Done()
}()
l.probes.SetReady(true)
for
|
return
default:
}
}
}
func (l *Listener) setupHTTP() *http.Server {
l.wg.Add(1)
go l.oldBatchSender()
mux := http.NewServeMux()
mux.HandleFunc("/write", l.handleHTTPWrite)
return &http.Server{
Addr: fmt.Sprintf(":%d", l.c.Port),
Handler: mux,
}
}
// oldBatchSender is a goroutine which sends the batch once it reaches
// the configured maximum age. It is only used with the HTTP listener
// because the UDP listener does batch age handling in-line.
func (l *Listener) oldBatchSender() {
defer l.wg.Done()
for {
l.mu.Lock()
waitTime := l.c.BatchMaxAge.Duration - l.batch.Age()
l.mu.Unlock()
select {
case <-time.After(waitTime):
l.mu.Lock()
if l.batch.Age() >= l.c.BatchMaxAge.Duration {
l.sendBatch()
}
l.mu.Unlock()
case <-l.stop:
return
}
}
}
func (l *Listener) handleHTTPWrite(w http.ResponseWriter, r *http.Request) {
bytesRead, err := l.readHTTPBody(r)
if bytesRead > 0 {
if l.c.Debug {
log.Printf("HTTP listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.mu.Lock()
l.maybeSendBatch()
l.mu.Unlock()
}
if err != nil {
l.stats.Inc(statReadErrors)
}
w.WriteHeader(http.StatusNoContent)
}
func (l *Listener) readHTTPBody(r *http.Request) (int64, error) {
precision := r.URL.Query().Get("precision")
if precision == "" || precision == "ns" {
// Fast-path when timestamps are already in nanoseconds - no
// need for conversion.
return l.readHTTPBodyNanos(r)
}
// Non-nanosecond precision specified. Read lines individually and
// convert timestamps to nanoseconds.
count, err := l.readHTTPBodyWithPrecision(r, precision)
return int64(count), err
}
func (l *Listener) readHTTPBodyNanos(r *http.Request) (int64, error) {
l.mu.Lock()
defer l.mu.Unlock()
return l.batch.ReadFrom(r.Body)
}
func (l *Listener) readHTTPBodyWithPrecision(r *http.Request, precision string) (int, error) {
scanner := bufio.NewScanner(r.Body)
// scanLines is like bufio.ScanLines but the returned lines
// include the trailing newline. Leaving the newline on the line
// is useful for incoming lines that don't contain a timestamp and
// therefore should pass through unchanged.
scanner.Split(scanLines)
bytesRead := 0
for scanner.Scan() {
line := scanner.Bytes()
bytesRead += len(line)
if len(line) <= 1 {
continue
}
newLine := applyTimestampPrecision(line, precision)
l.mu.Lock()
l.batch.Append(newLine)
l.mu.Unlock()
}
return bytesRead, scanner.Err()
}
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
// We have a full newline-terminated line.
return i + 1, data[0 : i+1], nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), data, nil
}
// Request more data.
return 0, nil, nil
}
func applyTimestampPrecision(line []byte, precision string) []byte {
ts, offset := influx.ExtractTimestamp(line)
if offset == -1 {
return line
}
newTs, err := influx.SafeCalcTime(ts, precision)
if err != nil {
return line
}
newLine := make([]byte, offset, offset+influx.MaxTsLen+1)
copy(newLine, line[:offset])
newLine = strconv.AppendInt(newLine, newTs, 10
|
{
// Read deadline is used so that the stop channel can be
// periodically checked.
sc.SetReadDeadline(time.Now().Add(time.Second))
bytesRead, err := l.batch.ReadOnceFrom(sc)
if err != nil && !isTimeout(err) {
l.stats.Inc(statReadErrors)
}
if bytesRead > 0 {
if l.c.Debug {
log.Printf("listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.batch.EnsureNewline()
}
l.maybeSendBatch()
select {
case <-l.stop:
|
conditional_block
|
listener.go
|
2/probes"
"github.com/jumptrading/influx-spout/v2/stats"
nats "github.com/nats-io/nats.go"
)
const (
// Listener stats counters
statReceived = "received"
statSent = "sent"
statReadErrors = "read_errors"
statFailedNATSPublish = "failed_nats_publish"
// The maximum possible UDP read size.
maxUDPDatagramSize = 65536
)
// StartListener initialises a listener, starts its statistician
// goroutine and runs its main loop in the background.
//
// The listener reads incoming UDP packets, batches them up and sends
// them onwards to a NATS subject.
func StartListener(c *config.Config) (_ *Listener, err error) {
listener, err := newListener(c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
listener.Stop()
}
}()
sc, err := listener.setupUDP(int(c.ReadBufferSize.Bytes()))
if err != nil {
return nil, err
}
listener.wg.Add(2)
go listener.startStatistician()
go listener.listenUDP(sc)
log.Printf("UDP listener publishing to [%s] at %s", c.NATSSubject[0], c.NATSAddress)
return listener, nil
}
// StartHTTPListener initialises a listener configured to accept lines
// from HTTP request bodies instead of via UDP. It starts the listener
// and its statistician in the background.
func StartHTTPListener(c *config.Config) (*Listener, error) {
listener, err := newListener(c)
if err != nil {
return nil, err
}
server := listener.setupHTTP()
listener.wg.Add(2)
go listener.startStatistician()
go listener.listenHTTP(server)
log.Printf("HTTP listener publishing to [%s] at %s", c.NATSSubject[0], c.NATSAddress)
return listener, nil
}
// Listener accepts measurements in InfluxDB Line Protocol format via
// UDP or HTTP, batches them and then publishes them to a NATS
// subject.
type Listener struct {
c *config.Config
nc *nats.Conn
stats *stats.Stats
probes probes.Probes
batch *batch.Batch
wg sync.WaitGroup
stop chan struct{}
mu sync.Mutex // only used for HTTP listener
}
// Stop shuts down a running listener. It should be called exactly
// once for every Listener instance.
func (l *Listener) Stop() {
l.probes.SetReady(false)
l.probes.SetAlive(false)
close(l.stop)
l.wg.Wait()
l.nc.Close()
l.probes.Close()
}
func newListener(c *config.Config) (*Listener, error) {
l := &Listener{
c: c,
stop: make(chan struct{}),
stats: stats.New(
statReceived,
statSent,
statReadErrors,
statFailedNATSPublish,
),
probes: probes.Listen(c.ProbePort),
batch: batch.New(int(c.BatchMaxSize.Bytes())),
}
nc, err := nats.Connect(l.c.NATSAddress, nats.MaxReconnects(-1), nats.Name(l.c.Name))
if err != nil {
return nil, err
}
l.nc = nc
return l, nil
}
func (l *Listener) setupUDP(configBufSize int) (*net.UDPConn, error) {
serverAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", l.c.Port))
if err != nil {
return nil, fmt.Errorf("failed to create UDP socket: %v", err)
}
sc, err := net.ListenUDP("udp", serverAddr)
if err != nil {
return nil, err
}
bufSize := roundUpToPageSize(configBufSize)
if bufSize != configBufSize {
log.Printf("rounding up receive buffer to nearest page size (now %d bytes)", bufSize)
}
if err := sc.SetReadBuffer(bufSize); err != nil {
return nil, err
}
log.Printf("listener bound to UDP socket: %v\n", sc.LocalAddr().String())
return sc, nil
}
func roundUpToPageSize(n int) int {
pageSize := os.Getpagesize()
if n <= 0 {
return pageSize
}
return (n + pageSize - 1) / pageSize * pageSize
}
func (l *Listener) listenUDP(sc *net.UDPConn) {
defer func() {
sc.Close()
l.wg.Done()
}()
l.probes.SetReady(true)
for {
// Read deadline is used so that the stop channel can be
// periodically checked.
sc.SetReadDeadline(time.Now().Add(time.Second))
bytesRead, err := l.batch.ReadOnceFrom(sc)
if err != nil && !isTimeout(err) {
l.stats.Inc(statReadErrors)
}
if bytesRead > 0 {
if l.c.Debug {
log.Printf("listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.batch.EnsureNewline()
}
l.maybeSendBatch()
select {
case <-l.stop:
return
default:
}
}
}
func (l *Listener) setupHTTP() *http.Server {
l.wg.Add(1)
go l.oldBatchSender()
mux := http.NewServeMux()
mux.HandleFunc("/write", l.handleHTTPWrite)
return &http.Server{
Addr: fmt.Sprintf(":%d", l.c.Port),
Handler: mux,
}
}
// oldBatchSender is a goroutine which sends the batch once it reaches
// the configured maximum age. It is only used with the HTTP listener
// because the UDP listener does batch age handling in-line.
func (l *Listener) oldBatchSender() {
defer l.wg.Done()
for {
l.mu.Lock()
waitTime := l.c.BatchMaxAge.Duration - l.batch.Age()
l.mu.Unlock()
select {
case <-time.After(waitTime):
l.mu.Lock()
if l.batch.Age() >= l.c.BatchMaxAge.Duration {
l.sendBatch()
}
l.mu.Unlock()
case <-l.stop:
return
}
}
}
func (l *Listener) handleHTTPWrite(w http.ResponseWriter, r *http.Request) {
bytesRead, err := l.readHTTPBody(r)
if bytesRead > 0 {
if l.c.Debug {
log.Printf("HTTP listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.mu.Lock()
l.maybeSendBatch()
l.mu.Unlock()
}
if err != nil {
l.stats.Inc(statReadErrors)
}
w.WriteHeader(http.StatusNoContent)
}
func (l *Listener) readHTTPBody(r *http.Request) (int64, error) {
precision := r.URL.Query().Get("precision")
if precision == "" || precision == "ns" {
// Fast-path when timestamps are already in nanoseconds - no
// need for conversion.
return l.readHTTPBodyNanos(r)
}
// Non-nanosecond precision specified. Read lines individually and
// convert timestamps to nanoseconds.
count, err := l.readHTTPBodyWithPrecision(r, precision)
return int64(count), err
}
func (l *Listener)
|
(r *http.Request) (int64, error) {
l.mu.Lock()
defer l.mu.Unlock()
return l.batch.ReadFrom(r.Body)
}
func (l *Listener) readHTTPBodyWithPrecision(r *http.Request, precision string) (int, error) {
scanner := bufio.NewScanner(r.Body)
// scanLines is like bufio.ScanLines but the returned lines
// include the trailing newline. Leaving the newline on the line
// is useful for incoming lines that don't contain a timestamp and
// therefore should pass through unchanged.
scanner.Split(scanLines)
bytesRead := 0
for scanner.Scan() {
line := scanner.Bytes()
bytesRead += len(line)
if len(line) <= 1 {
continue
}
newLine := applyTimestampPrecision(line, precision)
l.mu.Lock()
l.batch.Append(newLine)
l.mu.Unlock()
}
return bytesRead, scanner.Err()
}
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
// We have a full newline-terminated line.
return i + 1, data[0 : i+1], nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), data, nil
}
// Request more data.
return 0, nil, nil
}
func applyTimestampPrecision(line []byte, precision string) []byte {
ts, offset := influx.ExtractTimestamp(line)
if offset == -1 {
return line
}
newTs, err := influx.SafeCalcTime(ts, precision)
if err != nil {
return line
}
newLine := make([]byte, offset, offset+influx.MaxTsLen+1)
copy(newLine, line[:offset])
newLine = strconv.AppendInt(newLine, newTs, 10
|
readHTTPBodyNanos
|
identifier_name
|
listener.go
|
2/probes"
"github.com/jumptrading/influx-spout/v2/stats"
nats "github.com/nats-io/nats.go"
)
const (
// Listener stats counters
statReceived = "received"
statSent = "sent"
statReadErrors = "read_errors"
statFailedNATSPublish = "failed_nats_publish"
// The maximum possible UDP read size.
maxUDPDatagramSize = 65536
)
// StartListener initialises a listener, starts its statistician
// goroutine and runs its main loop in the background.
//
// The listener reads incoming UDP packets, batches them up and sends
// them onwards to a NATS subject.
func StartListener(c *config.Config) (_ *Listener, err error) {
listener, err := newListener(c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
listener.Stop()
}
}()
sc, err := listener.setupUDP(int(c.ReadBufferSize.Bytes()))
if err != nil {
return nil, err
}
listener.wg.Add(2)
go listener.startStatistician()
go listener.listenUDP(sc)
log.Printf("UDP listener publishing to [%s] at %s", c.NATSSubject[0], c.NATSAddress)
return listener, nil
}
// StartHTTPListener initialises a listener configured to accept lines
// from HTTP request bodies instead of via UDP. It starts the listener
// and its statistician in the background.
func StartHTTPListener(c *config.Config) (*Listener, error) {
listener, err := newListener(c)
if err != nil {
return nil, err
}
server := listener.setupHTTP()
listener.wg.Add(2)
go listener.startStatistician()
go listener.listenHTTP(server)
log.Printf("HTTP listener publishing to [%s] at %s", c.NATSSubject[0], c.NATSAddress)
return listener, nil
}
// Listener accepts measurements in InfluxDB Line Protocol format via
// UDP or HTTP, batches them and then publishes them to a NATS
// subject.
type Listener struct {
c *config.Config
nc *nats.Conn
stats *stats.Stats
probes probes.Probes
batch *batch.Batch
wg sync.WaitGroup
stop chan struct{}
mu sync.Mutex // only used for HTTP listener
}
// Stop shuts down a running listener. It should be called exactly
// once for every Listener instance.
func (l *Listener) Stop() {
l.probes.SetReady(false)
l.probes.SetAlive(false)
close(l.stop)
l.wg.Wait()
l.nc.Close()
l.probes.Close()
}
func newListener(c *config.Config) (*Listener, error) {
l := &Listener{
c: c,
stop: make(chan struct{}),
stats: stats.New(
statReceived,
statSent,
statReadErrors,
statFailedNATSPublish,
),
probes: probes.Listen(c.ProbePort),
batch: batch.New(int(c.BatchMaxSize.Bytes())),
}
nc, err := nats.Connect(l.c.NATSAddress, nats.MaxReconnects(-1), nats.Name(l.c.Name))
if err != nil {
return nil, err
}
l.nc = nc
return l, nil
}
func (l *Listener) setupUDP(configBufSize int) (*net.UDPConn, error) {
serverAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", l.c.Port))
if err != nil {
return nil, fmt.Errorf("failed to create UDP socket: %v", err)
}
sc, err := net.ListenUDP("udp", serverAddr)
if err != nil {
return nil, err
}
bufSize := roundUpToPageSize(configBufSize)
if bufSize != configBufSize {
log.Printf("rounding up receive buffer to nearest page size (now %d bytes)", bufSize)
}
if err := sc.SetReadBuffer(bufSize); err != nil {
return nil, err
}
log.Printf("listener bound to UDP socket: %v\n", sc.LocalAddr().String())
return sc, nil
}
func roundUpToPageSize(n int) int {
pageSize := os.Getpagesize()
if n <= 0 {
return pageSize
}
return (n + pageSize - 1) / pageSize * pageSize
}
func (l *Listener) listenUDP(sc *net.UDPConn) {
defer func() {
sc.Close()
l.wg.Done()
}()
l.probes.SetReady(true)
for {
// Read deadline is used so that the stop channel can be
// periodically checked.
sc.SetReadDeadline(time.Now().Add(time.Second))
bytesRead, err := l.batch.ReadOnceFrom(sc)
if err != nil && !isTimeout(err) {
l.stats.Inc(statReadErrors)
}
if bytesRead > 0 {
if l.c.Debug {
log.Printf("listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.batch.EnsureNewline()
}
l.maybeSendBatch()
select {
case <-l.stop:
return
default:
}
}
}
func (l *Listener) setupHTTP() *http.Server {
l.wg.Add(1)
go l.oldBatchSender()
mux := http.NewServeMux()
mux.HandleFunc("/write", l.handleHTTPWrite)
return &http.Server{
Addr: fmt.Sprintf(":%d", l.c.Port),
Handler: mux,
}
}
// oldBatchSender is a goroutine which sends the batch once it reaches
// the configured maximum age. It is only used with the HTTP listener
// because the UDP listener does batch age handling in-line.
func (l *Listener) oldBatchSender() {
defer l.wg.Done()
for {
l.mu.Lock()
waitTime := l.c.BatchMaxAge.Duration - l.batch.Age()
l.mu.Unlock()
select {
case <-time.After(waitTime):
l.mu.Lock()
if l.batch.Age() >= l.c.BatchMaxAge.Duration {
l.sendBatch()
}
l.mu.Unlock()
case <-l.stop:
return
}
}
}
func (l *Listener) handleHTTPWrite(w http.ResponseWriter, r *http.Request) {
bytesRead, err := l.readHTTPBody(r)
if bytesRead > 0 {
if l.c.Debug {
log.Printf("HTTP listener read %d bytes", bytesRead)
}
l.stats.Inc(statReceived)
l.mu.Lock()
l.maybeSendBatch()
l.mu.Unlock()
}
if err != nil {
l.stats.Inc(statReadErrors)
}
w.WriteHeader(http.StatusNoContent)
}
func (l *Listener) readHTTPBody(r *http.Request) (int64, error) {
precision := r.URL.Query().Get("precision")
if precision == "" || precision == "ns" {
// Fast-path when timestamps are already in nanoseconds - no
// need for conversion.
return l.readHTTPBodyNanos(r)
}
// Non-nanosecond precision specified. Read lines individually and
// convert timestamps to nanoseconds.
count, err := l.readHTTPBodyWithPrecision(r, precision)
return int64(count), err
}
func (l *Listener) readHTTPBodyNanos(r *http.Request) (int64, error) {
l.mu.Lock()
defer l.mu.Unlock()
return l.batch.ReadFrom(r.Body)
}
func (l *Listener) readHTTPBodyWithPrecision(r *http.Request, precision string) (int, error) {
scanner := bufio.NewScanner(r.Body)
// scanLines is like bufio.ScanLines but the returned lines
// include the trailing newline. Leaving the newline on the line
// is useful for incoming lines that don't contain a timestamp and
// therefore should pass through unchanged.
scanner.Split(scanLines)
bytesRead := 0
for scanner.Scan() {
line := scanner.Bytes()
bytesRead += len(line)
if len(line) <= 1 {
|
continue
}
newLine := applyTimestampPrecision(line, precision)
l.mu.Lock()
l.batch.Append(newLine)
l.mu.Unlock()
}
return bytesRead, scanner.Err()
}
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
// We have a full newline-terminated line.
return i + 1, data[0 : i+1], nil
}
// If we're at EOF, we have a final, non-terminated line. Return it.
if atEOF {
return len(data), data, nil
}
// Request more data.
return 0, nil, nil
}
func applyTimestampPrecision(line []byte, precision string) []byte {
ts, offset := influx.ExtractTimestamp(line)
if offset == -1 {
return line
}
newTs, err := influx.SafeCalcTime(ts, precision)
if err != nil {
return line
}
newLine := make([]byte, offset, offset+influx.MaxTsLen+1)
copy(newLine, line[:offset])
newLine = strconv.AppendInt(newLine, newTs, 10)
|
random_line_split
|
|
messages.go
|
unmarshal(out interface{}, packet []byte, expectedType uint8) error {
if len(packet) == 0 {
return ParseError{expectedType}
}
if packet[0] != expectedType {
return UnexpectedMessageError{expectedType, packet[0]}
}
packet = packet[1:]
v := reflect.ValueOf(out).Elem()
structType := v.Type()
var ok bool
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
if len(packet) < 1 {
return ParseError{expectedType}
}
field.SetBool(packet[0] != 0)
packet = packet[1:]
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
if len(packet) < t.Len() {
return ParseError{expectedType}
}
for j := 0; j < t.Len(); j++ {
field.Index(j).Set(reflect.ValueOf(packet[j]))
}
packet = packet[t.Len():]
case reflect.Uint32:
var u32 uint32
if u32, packet, ok = parseUint32(packet); !ok {
return ParseError{expectedType}
}
field.SetUint(uint64(u32))
case reflect.String:
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.SetString(string(s))
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if structType.Field(i).Tag.Get("ssh") == "rest" {
field.Set(reflect.ValueOf(packet))
packet = nil
} else {
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(s))
}
case reflect.String:
var nl []string
if nl, packet, ok = parseNameList(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(nl))
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
if n, packet, ok = parseInt(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(n))
} else {
panic("pointer to unknown type")
}
default:
panic("unknown type")
}
}
if len(packet) != 0 {
return ParseError{expectedType}
}
return nil
}
// marshal serializes the message in msg, using the given message type.
func marshal(msgType uint8, msg interface{}) []byte {
var out []byte
out = append(out, msgType)
v := reflect.ValueOf(msg)
structType := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
var v uint8
if field.Bool() {
v = 1
}
out = append(out, v)
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
for j := 0; j < t.Len(); j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.Uint32:
u32 := uint32(field.Uint())
out = append(out, byte(u32>>24))
out = append(out, byte(u32>>16))
out = append(out, byte(u32>>8))
out = append(out, byte(u32))
case reflect.String:
s := field.String()
out = append(out, byte(len(s)>>24))
out = append(out, byte(len(s)>>16))
out = append(out, byte(len(s)>>8))
out = append(out, byte(len(s)))
out = append(out, s...)
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
length := field.Len()
if structType.Field(i).Tag.Get("ssh") != "rest" {
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
}
for j := 0; j < length; j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.String:
var length int
for j := 0; j < field.Len(); j++ {
if j != 0 {
length++ /* comma */
}
length += len(field.Index(j).String())
}
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
for j := 0; j < field.Len(); j++ {
if j != 0 {
out = append(out, ',')
}
out = append(out, field.Index(j).String()...)
}
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
nValue := reflect.ValueOf(&n)
nValue.Elem().Set(field)
needed := intLength(n)
oldLength := len(out)
if cap(out)-len(out) < needed {
newOut := make([]byte, len(out), 2*(len(out)+needed))
copy(newOut, out)
out = newOut
}
out = out[:oldLength+needed]
marshalInt(out[oldLength:], n)
} else {
panic("pointer to unknown type")
}
}
}
return out
}
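// Round-trip sketch (illustrative; msgChannelSuccess stands in for the
// real message-type constant, which is defined elsewhere in this
// package):
//
//	req := channelRequestSuccessMsg{PeersId: 42}
//	packet := marshal(msgChannelSuccess, req)
//	var got channelRequestSuccessMsg
//	if err := unmarshal(&got, packet, msgChannelSuccess); err != nil {
//		// handle ParseError or UnexpectedMessageError
//	}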
var bigOne = big.NewInt(1)
func parseString(in []byte) (out, rest []byte, ok bool) {
if len(in) < 4 {
return
}
length := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
if uint32(len(in)) < 4+length {
return
}
out = in[4 : 4+length]
rest = in[4+length:]
ok = true
return
}
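// Wire-format example (illustrative): an SSH string is a big-endian
// uint32 length prefix followed by that many bytes, so
//
//	parseString([]byte{0, 0, 0, 3, 'f', 'o', 'o', 'X'})
//
// yields out == "foo", rest == "X", ok == true. Input shorter than the
// declared length leaves ok == false.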
var (
comma = []byte{','}
emptyNameList = []string{}
)
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
if len(contents) == 0 {
out = emptyNameList
return
}
parts := bytes.Split(contents, comma)
out = make([]string, len(parts))
for i, part := range parts {
out[i] = string(part)
}
return
}
func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
out = new(big.Int)
if len(contents) > 0 && contents[0]&0x80 == 0x80 {
// This is a negative number
notBytes := make([]byte, len(contents))
for i := range notBytes {
notBytes[i] = ^contents[i]
}
out.SetBytes(notBytes)
out.Add(out, bigOne)
out.Neg(out)
} else {
// Positive number
out.SetBytes(contents)
}
ok = true
return
}
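// Worked example (illustrative): mpints are two's complement, so a
// leading byte with the high bit set marks a negative value. For
// contents {0xff}: notBytes is {0x00}, SetBytes gives 0, adding bigOne
// gives 1, and Neg yields -1. Contents {0x01, 0x00} have the high bit
// clear and decode directly as the positive value 256.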
func parseUint32(in []byte) (out uint32, rest []byte, ok bool) {
if len(in) < 4 {
return
}
out = uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
rest = in[4:]
ok = true
return
}
func nameListLength(namelist []string) int {
length := 4 /* uint32 length prefix */
for i, name := range namelist {
if i != 0 {
length++ /* comma */
}
length += len(name)
}
return length
}
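// Worked example (illustrative): for namelist = []string{"aes128-ctr",
// "aes256-ctr"} the encoded length is 4 (uint32 prefix) + 10 + 1
// (comma) + 10 = 25 bytes.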
func intLength(n *big.Int) int {
length := 4 /* length bytes */
if n.Sign() < 0 {
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bitLen := nMinus1.BitLen()
if bitLen%8 == 0 {
// The number will need 0xff padding
length++
}
length += (bitLen + 7) / 8
} else if n.Sign() == 0
|
{
// Zero is encoded as a zero-length string
}
|
conditional_block
|
|
messages.go
|
rest"`
}
// See RFC 4254, section 5.4.
type channelRequestSuccessMsg struct {
PeersId uint32
}
// See RFC 4254, section 5.4.
type channelRequestFailureMsg struct {
PeersId uint32
}
// See RFC 4254, section 5.3
type channelCloseMsg struct {
PeersId uint32
}
// See RFC 4254, section 5.3
type channelEOFMsg struct {
PeersId uint32
}
// See RFC 4254, section 4
type globalRequestMsg struct {
Type string
WantReply bool
}
// See RFC 4254, section 5.2
type windowAdjustMsg struct {
PeersId uint32
AdditionalBytes uint32
}
// See RFC 4252, section 7
type userAuthPubKeyOkMsg struct {
Algo string
PubKey string
}
// unmarshal parses the SSH wire data in packet into out using reflection.
// expectedType is the expected SSH message type. It either returns nil on
// success, or a ParseError or UnexpectedMessageError on error.
func unmarshal(out interface{}, packet []byte, expectedType uint8) error {
if len(packet) == 0 {
return ParseError{expectedType}
}
if packet[0] != expectedType {
return UnexpectedMessageError{expectedType, packet[0]}
}
packet = packet[1:]
v := reflect.ValueOf(out).Elem()
structType := v.Type()
var ok bool
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
if len(packet) < 1 {
return ParseError{expectedType}
}
field.SetBool(packet[0] != 0)
packet = packet[1:]
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
if len(packet) < t.Len() {
return ParseError{expectedType}
}
for j := 0; j < t.Len(); j++ {
field.Index(j).Set(reflect.ValueOf(packet[j]))
}
packet = packet[t.Len():]
case reflect.Uint32:
var u32 uint32
if u32, packet, ok = parseUint32(packet); !ok {
return ParseError{expectedType}
}
field.SetUint(uint64(u32))
case reflect.String:
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.SetString(string(s))
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if structType.Field(i).Tag.Get("ssh") == "rest" {
field.Set(reflect.ValueOf(packet))
packet = nil
} else {
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(s))
}
case reflect.String:
var nl []string
if nl, packet, ok = parseNameList(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(nl))
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
if n, packet, ok = parseInt(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(n))
} else {
panic("pointer to unknown type")
}
default:
panic("unknown type")
}
}
if len(packet) != 0 {
return ParseError{expectedType}
}
return nil
}
// marshal serializes the message in msg, using the given message type.
func marshal(msgType uint8, msg interface{}) []byte {
var out []byte
out = append(out, msgType)
v := reflect.ValueOf(msg)
structType := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
var v uint8
if field.Bool() {
v = 1
}
out = append(out, v)
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
for j := 0; j < t.Len(); j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.Uint32:
u32 := uint32(field.Uint())
out = append(out, byte(u32>>24))
out = append(out, byte(u32>>16))
out = append(out, byte(u32>>8))
out = append(out, byte(u32))
case reflect.String:
s := field.String()
out = append(out, byte(len(s)>>24))
out = append(out, byte(len(s)>>16))
out = append(out, byte(len(s)>>8))
out = append(out, byte(len(s)))
out = append(out, s...)
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
length := field.Len()
if structType.Field(i).Tag.Get("ssh") != "rest" {
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
}
for j := 0; j < length; j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.String:
var length int
for j := 0; j < field.Len(); j++ {
if j != 0 {
length++ /* comma */
}
length += len(field.Index(j).String())
}
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
for j := 0; j < field.Len(); j++ {
if j != 0 {
out = append(out, ',')
}
out = append(out, field.Index(j).String()...)
}
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
nValue := reflect.ValueOf(&n)
nValue.Elem().Set(field)
needed := intLength(n)
oldLength := len(out)
if cap(out)-len(out) < needed {
newOut := make([]byte, len(out), 2*(len(out)+needed))
copy(newOut, out)
out = newOut
}
out = out[:oldLength+needed]
marshalInt(out[oldLength:], n)
} else {
panic("pointer to unknown type")
}
}
}
return out
}
var bigOne = big.NewInt(1)
func parseString(in []byte) (out, rest []byte, ok bool) {
if len(in) < 4 {
return
}
length := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
if uint32(len(in)) < 4+length {
return
}
out = in[4 : 4+length]
rest = in[4+length:]
ok = true
return
}
var (
comma = []byte{','}
emptyNameList = []string{}
)
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
if len(contents) == 0 {
out = emptyNameList
return
}
parts := bytes.Split(contents, comma)
out = make([]string, len(parts))
for i, part := range parts {
out[i] = string(part)
}
return
}
func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
out = new(big.Int)
if len(contents) > 0 && contents[0]&0x80 == 0x80 {
// This is a negative number
notBytes := make([]byte, len(contents))
for i := range notBytes {
notBytes[i] = ^contents[i]
}
out.SetBytes(notBytes)
out.Add(out, bigOne)
out.Neg(out)
} else {
// Positive number
out.SetBytes(contents)
}
ok = true
return
}
func
|
parseUint32
|
identifier_name
|
|
messages.go
|
252, section 5.1
type userAuthFailureMsg struct {
Methods []string
PartialSuccess bool
}
// See RFC 4254, section 5.1.
type channelOpenMsg struct {
ChanType string
PeersId uint32
PeersWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
type channelOpenConfirmMsg struct {
PeersId uint32
MyId uint32
MyWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
type channelOpenFailureMsg struct {
PeersId uint32
Reason uint32
Message string
Language string
}
type channelRequestMsg struct {
PeersId uint32
Request string
WantReply bool
RequestSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.4.
type channelRequestSuccessMsg struct {
PeersId uint32
}
// See RFC 4254, section 5.4.
type channelRequestFailureMsg struct {
PeersId uint32
}
// See RFC 4254, section 5.3
type channelCloseMsg struct {
PeersId uint32
}
// See RFC 4254, section 5.3
type channelEOFMsg struct {
PeersId uint32
}
// See RFC 4254, section 4
type globalRequestMsg struct {
Type string
WantReply bool
}
// See RFC 4254, section 5.2
type windowAdjustMsg struct {
PeersId uint32
AdditionalBytes uint32
}
// See RFC 4252, section 7
type userAuthPubKeyOkMsg struct {
Algo string
PubKey string
}
// unmarshal parses the SSH wire data in packet into out using reflection.
// expectedType is the expected SSH message type. It either returns nil on
// success, or a ParseError or UnexpectedMessageError on error.
func unmarshal(out interface{}, packet []byte, expectedType uint8) error {
if len(packet) == 0 {
return ParseError{expectedType}
}
if packet[0] != expectedType {
return UnexpectedMessageError{expectedType, packet[0]}
}
packet = packet[1:]
v := reflect.ValueOf(out).Elem()
structType := v.Type()
var ok bool
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
if len(packet) < 1 {
return ParseError{expectedType}
}
field.SetBool(packet[0] != 0)
packet = packet[1:]
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
if len(packet) < t.Len() {
return ParseError{expectedType}
}
for j := 0; j < t.Len(); j++ {
field.Index(j).Set(reflect.ValueOf(packet[j]))
}
packet = packet[t.Len():]
case reflect.Uint32:
var u32 uint32
if u32, packet, ok = parseUint32(packet); !ok {
return ParseError{expectedType}
}
field.SetUint(uint64(u32))
case reflect.String:
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.SetString(string(s))
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if structType.Field(i).Tag.Get("ssh") == "rest" {
field.Set(reflect.ValueOf(packet))
packet = nil
} else {
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(s))
}
case reflect.String:
var nl []string
if nl, packet, ok = parseNameList(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(nl))
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
if n, packet, ok = parseInt(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(n))
} else {
panic("pointer to unknown type")
}
default:
panic("unknown type")
}
}
if len(packet) != 0 {
return ParseError{expectedType}
}
return nil
}
// marshal serializes the message in msg, using the given message type.
func marshal(msgType uint8, msg interface{}) []byte {
var out []byte
out = append(out, msgType)
v := reflect.ValueOf(msg)
structType := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
var v uint8
if field.Bool() {
v = 1
}
out = append(out, v)
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
for j := 0; j < t.Len(); j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.Uint32:
u32 := uint32(field.Uint())
out = append(out, byte(u32>>24))
out = append(out, byte(u32>>16))
out = append(out, byte(u32>>8))
out = append(out, byte(u32))
case reflect.String:
s := field.String()
out = append(out, byte(len(s)>>24))
out = append(out, byte(len(s)>>16))
out = append(out, byte(len(s)>>8))
out = append(out, byte(len(s)))
out = append(out, s...)
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
length := field.Len()
if structType.Field(i).Tag.Get("ssh") != "rest" {
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
}
for j := 0; j < length; j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.String:
var length int
for j := 0; j < field.Len(); j++ {
if j != 0 {
length++ /* comma */
}
length += len(field.Index(j).String())
}
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
for j := 0; j < field.Len(); j++ {
if j != 0 {
out = append(out, ',')
}
out = append(out, field.Index(j).String()...)
}
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
nValue := reflect.ValueOf(&n)
nValue.Elem().Set(field)
needed := intLength(n)
oldLength := len(out)
if cap(out)-len(out) < needed {
newOut := make([]byte, len(out), 2*(len(out)+needed))
copy(newOut, out)
out = newOut
}
out = out[:oldLength+needed]
marshalInt(out[oldLength:], n)
} else {
panic("pointer to unknown type")
}
}
}
|
return out
}
var bigOne = big.NewInt(1)
func parseString(in []byte) (out, rest []byte, ok bool) {
if len(in) < 4 {
return
}
length := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
if uint32(len(in)) < 4+length {
return
}
out = in[4 : 4+length]
rest = in[4+length:]
ok = true
return
}
var (
comma = []byte{','}
emptyNameList = []string{}
)
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
contents
|
random_line_split
|
|
messages.go
|
[0]}
}
packet = packet[1:]
v := reflect.ValueOf(out).Elem()
structType := v.Type()
var ok bool
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
if len(packet) < 1 {
return ParseError{expectedType}
}
field.SetBool(packet[0] != 0)
packet = packet[1:]
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
if len(packet) < t.Len() {
return ParseError{expectedType}
}
for j := 0; j < t.Len(); j++ {
field.Index(j).Set(reflect.ValueOf(packet[j]))
}
packet = packet[t.Len():]
case reflect.Uint32:
var u32 uint32
if u32, packet, ok = parseUint32(packet); !ok {
return ParseError{expectedType}
}
field.SetUint(uint64(u32))
case reflect.String:
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.SetString(string(s))
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if structType.Field(i).Tag.Get("ssh") == "rest" {
field.Set(reflect.ValueOf(packet))
packet = nil
} else {
var s []byte
if s, packet, ok = parseString(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(s))
}
case reflect.String:
var nl []string
if nl, packet, ok = parseNameList(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(nl))
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
if n, packet, ok = parseInt(packet); !ok {
return ParseError{expectedType}
}
field.Set(reflect.ValueOf(n))
} else {
panic("pointer to unknown type")
}
default:
panic("unknown type")
}
}
if len(packet) != 0 {
return ParseError{expectedType}
}
return nil
}
// marshal serializes the message in msg, using the given message type.
func marshal(msgType uint8, msg interface{}) []byte {
var out []byte
out = append(out, msgType)
v := reflect.ValueOf(msg)
structType := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
var v uint8
if field.Bool() {
v = 1
}
out = append(out, v)
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic("array of non-uint8")
}
for j := 0; j < t.Len(); j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.Uint32:
u32 := uint32(field.Uint())
out = append(out, byte(u32>>24))
out = append(out, byte(u32>>16))
out = append(out, byte(u32>>8))
out = append(out, byte(u32))
case reflect.String:
s := field.String()
out = append(out, byte(len(s)>>24))
out = append(out, byte(len(s)>>16))
out = append(out, byte(len(s)>>8))
out = append(out, byte(len(s)))
out = append(out, s...)
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
length := field.Len()
if structType.Field(i).Tag.Get("ssh") != "rest" {
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
}
for j := 0; j < length; j++ {
out = append(out, byte(field.Index(j).Uint()))
}
case reflect.String:
var length int
for j := 0; j < field.Len(); j++ {
if j != 0 {
length++ /* comma */
}
length += len(field.Index(j).String())
}
out = append(out, byte(length>>24))
out = append(out, byte(length>>16))
out = append(out, byte(length>>8))
out = append(out, byte(length))
for j := 0; j < field.Len(); j++ {
if j != 0 {
out = append(out, ',')
}
out = append(out, field.Index(j).String()...)
}
default:
panic("slice of unknown type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
nValue := reflect.ValueOf(&n)
nValue.Elem().Set(field)
needed := intLength(n)
oldLength := len(out)
if cap(out)-len(out) < needed {
newOut := make([]byte, len(out), 2*(len(out)+needed))
copy(newOut, out)
out = newOut
}
out = out[:oldLength+needed]
marshalInt(out[oldLength:], n)
} else {
panic("pointer to unknown type")
}
}
}
return out
}
var bigOne = big.NewInt(1)
func parseString(in []byte) (out, rest []byte, ok bool) {
if len(in) < 4 {
return
}
length := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
if uint32(len(in)) < 4+length {
return
}
out = in[4 : 4+length]
rest = in[4+length:]
ok = true
return
}
var (
comma = []byte{','}
emptyNameList = []string{}
)
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
if len(contents) == 0 {
out = emptyNameList
return
}
parts := bytes.Split(contents, comma)
out = make([]string, len(parts))
for i, part := range parts {
out[i] = string(part)
}
return
}
func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
out = new(big.Int)
if len(contents) > 0 && contents[0]&0x80 == 0x80 {
// This is a negative number
notBytes := make([]byte, len(contents))
for i := range notBytes {
notBytes[i] = ^contents[i]
}
out.SetBytes(notBytes)
out.Add(out, bigOne)
out.Neg(out)
} else {
// Positive number
out.SetBytes(contents)
}
ok = true
return
}
func parseUint32(in []byte) (out uint32, rest []byte, ok bool) {
if len(in) < 4 {
return
}
out = uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
rest = in[4:]
ok = true
return
}
func nameListLength(namelist []string) int {
length := 4 /* uint32 length prefix */
for i, name := range namelist {
if i != 0 {
length++ /* comma */
}
length += len(name)
}
return length
}
func intLength(n *big.Int) int
|
{
length := 4 /* length bytes */
if n.Sign() < 0 {
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bitLen := nMinus1.BitLen()
if bitLen%8 == 0 {
// The number will need 0xff padding
length++
}
length += (bitLen + 7) / 8
} else if n.Sign() == 0 {
// Zero is encoded as a zero-length string
} else {
bitLen := n.BitLen()
if bitLen%8 == 0 {
// The number will need 0x00 padding
length++
}
length += (bitLen + 7) / 8
|
identifier_body
|
|
siadir.go
|
UnknownPath is an error when a siadir cannot be found with the given path
ErrUnknownPath = errors.New("no siadir known with that path")
// ErrUnknownThread is an error when a siadir is trying to be closed by a
// thread that is not in the threadMap
ErrUnknownThread = errors.New("thread should not be calling Close(), does not have control of the siadir")
)
type (
// SiaDir contains the metadata information about a renter directory
SiaDir struct {
metadata Metadata
// siaPath is the path to the siadir on the sia network
siaPath modules.SiaPath
// rootDir is the path to the root directory on disk
rootDir string
// Utility fields
deleted bool
deps modules.Dependencies
mu sync.Mutex
wal *writeaheadlog.WAL
}
// Metadata is the metadata that is saved to disk as a .siadir file
Metadata struct {
// For each field in the metadata there is an aggregate value and a
// siadir specific value. If a field has the aggregate prefix it means
// that the value takes into account all the siafiles and siadirs in the
// sub tree. The definition of aggregate and siadir specific values is
// otherwise the same.
//
// Health is the health of the most in need siafile that is not stuck
//
// LastHealthCheckTime is the oldest LastHealthCheckTime of any of the
// siafiles in the siadir and is the last time the health was calculated
// by the health loop
//
// MinRedundancy is the minimum redundancy of any of the siafiles in the
// siadir
//
// ModTime is the last time any of the siafiles in the siadir was
// updated
//
// NumFiles is the total number of siafiles in a siadir
//
// NumStuckChunks is the sum of all the Stuck Chunks of any of the
// siafiles in the siadir
//
// NumSubDirs is the number of sub-siadirs in a siadir
//
// Size is the total amount of data stored in the siafiles of the siadir
//
// StuckHealth is the health of the most in need siafile in the siadir,
// stuck or not stuck
// The following fields are aggregate values of the siadir. These values are
// the totals of the siadir and any sub siadirs, or are calculated based on
// all the values in the subtree
AggregateHealth float64 `json:"aggregatehealth"`
AggregateLastHealthCheckTime time.Time `json:"aggregatelasthealthchecktime"`
AggregateMinRedundancy float64 `json:"aggregateminredundancy"`
AggregateModTime time.Time `json:"aggregatemodtime"`
AggregateNumFiles uint64 `json:"aggregatenumfiles"`
AggregateNumStuckChunks uint64 `json:"aggregatenumstuckchunks"`
AggregateNumSubDirs uint64 `json:"aggregatenumsubdirs"`
AggregateSize uint64 `json:"aggregatesize"`
AggregateStuckHealth float64 `json:"aggregatestuckhealth"`
// The following fields are information specific to the siadir that is not
// an aggregate of the entire sub directory tree
Health float64 `json:"health"`
LastHealthCheckTime time.Time `json:"lasthealthchecktime"`
MinRedundancy float64 `json:"minredundancy"`
ModTime time.Time `json:"modtime"`
NumFiles uint64 `json:"numfiles"`
NumStuckChunks uint64 `json:"numstuckchunks"`
NumSubDirs uint64 `json:"numsubdirs"`
Size uint64 `json:"size"`
StuckHealth float64 `json:"stuckhealth"`
}
)
// DirReader is a helper type that allows reading a raw .siadir from disk while
// keeping the file in memory locked.
type DirReader struct {
f *os.File
sd *SiaDir
}
// Close closes the underlying file.
func (sdr *DirReader) Close() error {
sdr.sd.mu.Unlock()
return sdr.f.Close()
}
// Read calls Read on the underlying file.
func (sdr *DirReader) Read(b []byte) (int, error) {
return sdr.f.Read(b)
}
// Stat returns the FileInfo of the underlying file.
func (sdr *DirReader) Stat() (os.FileInfo, error) {
return sdr.f.Stat()
}
// New creates a new directory in the renter directory and makes sure there is a
// metadata file in the directory and creates one as needed. This method will
// also make sure that all the parent directories are created and have metadata
// files as well and will return the SiaDir containing the information for the
// directory that matches the siaPath provided
func New(siaPath modules.SiaPath, rootDir string, wal *writeaheadlog.WAL) (*SiaDir, error) {
// Create path to directory and ensure path contains all metadata
updates, err := createDirMetadataAll(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create metadata for directory
md, update, err := createDirMetadata(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create SiaDir
sd := &SiaDir{
metadata: md,
deps: modules.ProdDependencies,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
return sd, managedCreateAndApplyTransaction(wal, append(updates, update)...)
}
// createDirMetadata makes sure there is a metadata file in the directory and
// creates one as needed
func createDirMetadata(siaPath modules.SiaPath, rootDir string) (Metadata, writeaheadlog.Update, error) {
// Check if metadata file exists
_, err := os.Stat(siaPath.SiaDirMetadataSysPath(rootDir))
if err == nil || !os.IsNotExist(err) {
return Metadata{}, writeaheadlog.Update{}, err
}
// Initialize metadata, set Health and StuckHealth to DefaultDirHealth so
// empty directories won't be viewed as being the most in need. Initialize
// ModTimes.
md := Metadata{
AggregateHealth: DefaultDirHealth,
AggregateModTime: time.Now(),
AggregateStuckHealth: DefaultDirHealth,
Health: DefaultDirHealth,
ModTime: time.Now(),
StuckHealth: DefaultDirHealth,
}
path := siaPath.SiaDirMetadataSysPath(rootDir)
update, err := createMetadataUpdate(path, md)
return md, update, err
}
// loadSiaDirMetadata loads the directory metadata from disk.
func loadSiaDirMetadata(path string, deps modules.Dependencies) (md Metadata, err error) {
// Open the file.
file, err := deps.Open(path)
if err != nil {
return Metadata{}, err
}
defer file.Close()
// Read the file
bytes, err := ioutil.ReadAll(file)
if err != nil {
return Metadata{}, err
}
// Parse the json object.
err = json.Unmarshal(bytes, &md)
return
}
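// Illustrative sketch of the on-disk .siadir document parsed by
// loadSiaDirMetadata (values are invented; the keys follow the json
// tags on Metadata above):
//
//	{
//	  "aggregatehealth": 0,
//	  "aggregatemodtime": "2019-01-01T00:00:00Z",
//	  "aggregatenumfiles": 3,
//	  "health": 0,
//	  "modtime": "2019-01-01T00:00:00Z",
//	  "numfiles": 3,
//	  "numsubdirs": 1,
//	  "size": 4194304,
//	  "stuckhealth": 0
//	}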
// LoadSiaDir loads the directory metadata from disk
func LoadSiaDir(rootDir string, siaPath modules.SiaPath, deps modules.Dependencies, wal *writeaheadlog.WAL) (sd *SiaDir, err error) {
sd = &SiaDir{
deps: deps,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
sd.metadata, err = loadSiaDirMetadata(siaPath.SiaDirMetadataSysPath(rootDir), modules.ProdDependencies)
return sd, err
}
// delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) delete() error
|
// Delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) Delete() error {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.delete()
}
// Deleted returns the deleted field of the siaDir
func (sd *SiaDir) Deleted() bool {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.deleted
}
// DirReader creates a io.ReadCloser that can be used to read the raw SiaDir
// from disk.
func (sd *SiaDir) DirReader() (*DirReader, error)
|
{
update := sd.createDeleteUpdate()
err := sd.createAndApplyTransaction(update)
sd.deleted = true
return err
}
|
identifier_body
|
siadir.go
|
UnknownPath is an error when a siadir cannot be found with the given path
ErrUnknownPath = errors.New("no siadir known with that path")
// ErrUnknownThread is an error when a siadir is trying to be closed by a
// thread that is not in the threadMap
ErrUnknownThread = errors.New("thread should not be calling Close(), does not have control of the siadir")
)
type (
// SiaDir contains the metadata information about a renter directory
SiaDir struct {
metadata Metadata
// siaPath is the path to the siadir on the sia network
siaPath modules.SiaPath
// rootDir is the path to the root directory on disk
rootDir string
// Utility fields
deleted bool
deps modules.Dependencies
mu sync.Mutex
wal *writeaheadlog.WAL
}
// Metadata is the metadata that is saved to disk as a .siadir file
Metadata struct {
// For each field in the metadata there is an aggregate value and a
// siadir specific value. If a field has the aggregate prefix it means
// that the value takes into account all the siafiles and siadirs in the
// sub tree. The definition of aggregate and siadir specific values is
// otherwise the same.
//
// Health is the health of the most in need siafile that is not stuck
//
// LastHealthCheckTime is the oldest LastHealthCheckTime of any of the
// siafiles in the siadir and is the last time the health was calculated
// by the health loop
//
// MinRedundancy is the minimum redundancy of any of the siafiles in the
// siadir
//
// ModTime is the last time any of the siafiles in the siadir was
// updated
//
// NumFiles is the total number of siafiles in a siadir
//
// NumStuckChunks is the sum of all the Stuck Chunks of any of the
// siafiles in the siadir
//
// NumSubDirs is the number of sub-siadirs in a siadir
//
// Size is the total amount of data stored in the siafiles of the siadir
//
// StuckHealth is the health of the most in need siafile in the siadir,
// stuck or not stuck
// The following fields are aggregate values of the siadir. These values are
// the totals of the siadir and any sub siadirs, or are calculated based on
// all the values in the subtree
AggregateHealth float64 `json:"aggregatehealth"`
AggregateLastHealthCheckTime time.Time `json:"aggregatelasthealthchecktime"`
AggregateMinRedundancy float64 `json:"aggregateminredundancy"`
AggregateModTime time.Time `json:"aggregatemodtime"`
AggregateNumFiles uint64 `json:"aggregatenumfiles"`
AggregateNumStuckChunks uint64 `json:"aggregatenumstuckchunks"`
AggregateNumSubDirs uint64 `json:"aggregatenumsubdirs"`
AggregateSize uint64 `json:"aggregatesize"`
AggregateStuckHealth float64 `json:"aggregatestuckhealth"`
// The following fields are information specific to the siadir that is not
// an aggregate of the entire sub directory tree
Health float64 `json:"health"`
LastHealthCheckTime time.Time `json:"lasthealthchecktime"`
MinRedundancy float64 `json:"minredundancy"`
ModTime time.Time `json:"modtime"`
NumFiles uint64 `json:"numfiles"`
NumStuckChunks uint64 `json:"numstuckchunks"`
NumSubDirs uint64 `json:"numsubdirs"`
Size uint64 `json:"size"`
StuckHealth float64 `json:"stuckhealth"`
}
)
// DirReader is a helper type that allows reading a raw .siadir from disk while
// keeping the file in memory locked.
type DirReader struct {
f *os.File
sd *SiaDir
}
// Close closes the underlying file.
func (sdr *DirReader) Close() error {
sdr.sd.mu.Unlock()
return sdr.f.Close()
}
// Read calls Read on the underlying file.
func (sdr *DirReader)
|
(b []byte) (int, error) {
return sdr.f.Read(b)
}
// Stat returns the FileInfo of the underlying file.
func (sdr *DirReader) Stat() (os.FileInfo, error) {
return sdr.f.Stat()
}
// New creates a new directory in the renter directory and makes sure there is a
// metadata file in the directory and creates one as needed. This method will
// also make sure that all the parent directories are created and have metadata
// files as well and will return the SiaDir containing the information for the
// directory that matches the siaPath provided
func New(siaPath modules.SiaPath, rootDir string, wal *writeaheadlog.WAL) (*SiaDir, error) {
// Create path to directory and ensure path contains all metadata
updates, err := createDirMetadataAll(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create metadata for directory
md, update, err := createDirMetadata(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create SiaDir
sd := &SiaDir{
metadata: md,
deps: modules.ProdDependencies,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
return sd, managedCreateAndApplyTransaction(wal, append(updates, update)...)
}
// createDirMetadata makes sure there is a metadata file in the directory and
// creates one as needed
func createDirMetadata(siaPath modules.SiaPath, rootDir string) (Metadata, writeaheadlog.Update, error) {
// Check if metadata file exists
_, err := os.Stat(siaPath.SiaDirMetadataSysPath(rootDir))
if err == nil || !os.IsNotExist(err) {
return Metadata{}, writeaheadlog.Update{}, err
}
// Initialize metadata, set Health and StuckHealth to DefaultDirHealth so
// empty directories won't be viewed as being the most in need. Initialize
// ModTimes.
md := Metadata{
AggregateHealth: DefaultDirHealth,
AggregateModTime: time.Now(),
AggregateStuckHealth: DefaultDirHealth,
Health: DefaultDirHealth,
ModTime: time.Now(),
StuckHealth: DefaultDirHealth,
}
path := siaPath.SiaDirMetadataSysPath(rootDir)
update, err := createMetadataUpdate(path, md)
return md, update, err
}
// loadSiaDirMetadata loads the directory metadata from disk.
func loadSiaDirMetadata(path string, deps modules.Dependencies) (md Metadata, err error) {
// Open the file.
file, err := deps.Open(path)
if err != nil {
return Metadata{}, err
}
defer file.Close()
// Read the file
bytes, err := ioutil.ReadAll(file)
if err != nil {
return Metadata{}, err
}
// Parse the json object.
err = json.Unmarshal(bytes, &md)
return
}
// LoadSiaDir loads the directory metadata from disk
func LoadSiaDir(rootDir string, siaPath modules.SiaPath, deps modules.Dependencies, wal *writeaheadlog.WAL) (sd *SiaDir, err error) {
sd = &SiaDir{
deps: deps,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
sd.metadata, err = loadSiaDirMetadata(siaPath.SiaDirMetadataSysPath(rootDir), modules.ProdDependencies)
return sd, err
}
// delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) delete() error {
update := sd.createDeleteUpdate()
err := sd.createAndApplyTransaction(update)
sd.deleted = true
return err
}
// Delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) Delete() error {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.delete()
}
// Deleted returns the deleted field of the siaDir
func (sd *SiaDir) Deleted() bool {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.deleted
}
// DirReader creates a io.ReadCloser that can be used to read the raw SiaDir
// from disk.
func (sd *SiaDir) DirReader() (*DirReader, error) {
|
Read
|
identifier_name
|
siadir.go
|
that path")
// ErrUnknownThread is an error when a siadir is trying to be closed by a
// thread that is not in the threadMap
ErrUnknownThread = errors.New("thread should not be calling Close(), does not have control of the siadir")
)
type (
// SiaDir contains the metadata information about a renter directory
SiaDir struct {
metadata Metadata
// siaPath is the path to the siadir on the sia network
siaPath modules.SiaPath
// rootDir is the path to the root directory on disk
rootDir string
// Utility fields
deleted bool
deps modules.Dependencies
mu sync.Mutex
wal *writeaheadlog.WAL
}
// Metadata is the metadata that is saved to disk as a .siadir file
Metadata struct {
// For each field in the metadata there is an aggregate value and a
// siadir specific value. If a field has the aggregate prefix it means
// that the value takes into account all the siafiles and siadirs in the
// sub tree. The definition of aggregate and siadir specific values is
// otherwise the same.
//
// Health is the health of the most in need siafile that is not stuck
//
// LastHealthCheckTime is the oldest LastHealthCheckTime of any of the
// siafiles in the siadir and is the last time the health was calculated
// by the health loop
//
// MinRedundancy is the minimum redundancy of any of the siafiles in the
// siadir
//
// ModTime is the last time any of the siafiles in the siadir was
// updated
//
// NumFiles is the total number of siafiles in a siadir
//
// NumStuckChunks is the sum of all the Stuck Chunks of any of the
// siafiles in the siadir
//
// NumSubDirs is the number of sub-siadirs in a siadir
//
// Size is the total amount of data stored in the siafiles of the siadir
//
// StuckHealth is the health of the most in need siafile in the siadir,
// stuck or not stuck
// The following fields are aggregate values of the siadir. These values are
// the totals of the siadir and any sub siadirs, or are calculated based on
// all the values in the subtree
AggregateHealth float64 `json:"aggregatehealth"`
AggregateLastHealthCheckTime time.Time `json:"aggregatelasthealthchecktime"`
AggregateMinRedundancy float64 `json:"aggregateminredundancy"`
AggregateModTime time.Time `json:"aggregatemodtime"`
AggregateNumFiles uint64 `json:"aggregatenumfiles"`
AggregateNumStuckChunks uint64 `json:"aggregatenumstuckchunks"`
AggregateNumSubDirs uint64 `json:"aggregatenumsubdirs"`
AggregateSize uint64 `json:"aggregatesize"`
AggregateStuckHealth float64 `json:"aggregatestuckhealth"`
// The following fields are information specific to the siadir that is not
// an aggregate of the entire sub directory tree
Health float64 `json:"health"`
LastHealthCheckTime time.Time `json:"lasthealthchecktime"`
MinRedundancy float64 `json:"minredundancy"`
ModTime time.Time `json:"modtime"`
NumFiles uint64 `json:"numfiles"`
NumStuckChunks uint64 `json:"numstuckchunks"`
NumSubDirs uint64 `json:"numsubdirs"`
Size uint64 `json:"size"`
StuckHealth float64 `json:"stuckhealth"`
}
)
// DirReader is a helper type that allows reading a raw .siadir from disk while
// keeping the file in memory locked.
type DirReader struct {
f *os.File
sd *SiaDir
}
// Close closes the underlying file.
func (sdr *DirReader) Close() error {
sdr.sd.mu.Unlock()
return sdr.f.Close()
}
// Read calls Read on the underlying file.
func (sdr *DirReader) Read(b []byte) (int, error) {
return sdr.f.Read(b)
}
// Stat returns the FileInfo of the underlying file.
func (sdr *DirReader) Stat() (os.FileInfo, error) {
return sdr.f.Stat()
}
// New creates a new directory in the renter directory and makes sure there is a
// metadata file in the directory and creates one as needed. This method will
// also make sure that all the parent directories are created and have metadata
// files as well and will return the SiaDir containing the information for the
// directory that matches the siaPath provided
func New(siaPath modules.SiaPath, rootDir string, wal *writeaheadlog.WAL) (*SiaDir, error) {
// Create path to directory and ensure path contains all metadata
updates, err := createDirMetadataAll(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create metadata for directory
md, update, err := createDirMetadata(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create SiaDir
sd := &SiaDir{
metadata: md,
deps: modules.ProdDependencies,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
return sd, managedCreateAndApplyTransaction(wal, append(updates, update)...)
}
// createDirMetadata makes sure there is a metadata file in the directory and
// creates one as needed
func createDirMetadata(siaPath modules.SiaPath, rootDir string) (Metadata, writeaheadlog.Update, error) {
// Check if metadata file exists
_, err := os.Stat(siaPath.SiaDirMetadataSysPath(rootDir))
if err == nil || !os.IsNotExist(err) {
return Metadata{}, writeaheadlog.Update{}, err
}
// Initialize metadata, set Health and StuckHealth to DefaultDirHealth so
// empty directories won't be viewed as being the most in need. Initialize
// ModTimes.
md := Metadata{
AggregateHealth: DefaultDirHealth,
AggregateModTime: time.Now(),
AggregateStuckHealth: DefaultDirHealth,
Health: DefaultDirHealth,
ModTime: time.Now(),
StuckHealth: DefaultDirHealth,
}
path := siaPath.SiaDirMetadataSysPath(rootDir)
update, err := createMetadataUpdate(path, md)
return md, update, err
}
// loadSiaDirMetadata loads the directory metadata from disk.
func loadSiaDirMetadata(path string, deps modules.Dependencies) (md Metadata, err error) {
// Open the file.
file, err := deps.Open(path)
if err != nil {
return Metadata{}, err
}
defer file.Close()
// Read the file
bytes, err := ioutil.ReadAll(file)
if err != nil {
return Metadata{}, err
}
// Parse the json object.
err = json.Unmarshal(bytes, &md)
return
}
// LoadSiaDir loads the directory metadata from disk
func LoadSiaDir(rootDir string, siaPath modules.SiaPath, deps modules.Dependencies, wal *writeaheadlog.WAL) (sd *SiaDir, err error) {
sd = &SiaDir{
deps: deps,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
sd.metadata, err = loadSiaDirMetadata(siaPath.SiaDirMetadataSysPath(rootDir), modules.ProdDependencies)
return sd, err
}
// delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) delete() error {
update := sd.createDeleteUpdate()
err := sd.createAndApplyTransaction(update)
sd.deleted = true
return err
}
// Delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) Delete() error {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.delete()
}
// Deleted returns the deleted field of the siaDir
func (sd *SiaDir) Deleted() bool {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.deleted
}
// DirReader creates a io.ReadCloser that can be used to read the raw SiaDir
// from disk.
func (sd *SiaDir) DirReader() (*DirReader, error) {
sd.mu.Lock()
if sd.deleted
|
{
sd.mu.Unlock()
return nil, errors.New("can't copy deleted SiaDir")
}
|
conditional_block
|
|
siadir.go
|
var (
// ErrUnknownPath is an error when a siadir cannot be found with the given path
ErrUnknownPath = errors.New("no siadir known with that path")
// ErrUnknownThread is an error when a siadir is trying to be closed by a
// thread that is not in the threadMap
ErrUnknownThread = errors.New("thread should not be calling Close(), does not have control of the siadir")
)
type (
// SiaDir contains the metadata information about a renter directory
SiaDir struct {
metadata Metadata
// siaPath is the path to the siadir on the sia network
siaPath modules.SiaPath
// rootDir is the path to the root directory on disk
rootDir string
// Utility fields
deleted bool
deps modules.Dependencies
mu sync.Mutex
wal *writeaheadlog.WAL
}
// Metadata is the metadata that is saved to disk as a .siadir file
Metadata struct {
// For each field in the metadata there is an aggregate value and a
// siadir specific value. If a field has the aggregate prefix it means
// that the value takes into account all the siafiles and siadirs in the
// sub tree. The definition of aggregate and siadir specific values is
// otherwise the same.
//
// Health is the health of the most in need siafile that is not stuck
//
// LastHealthCheckTime is the oldest LastHealthCheckTime of any of the
// siafiles in the siadir and is the last time the health was calculated
// by the health loop
//
// MinRedundancy is the minimum redundancy of any of the siafiles in the
// siadir
//
// ModTime is the last time any of the siafiles in the siadir was
// updated
//
// NumFiles is the total number of siafiles in a siadir
//
// NumStuckChunks is the sum of all the Stuck Chunks of any of the
// siafiles in the siadir
//
// NumSubDirs is the number of sub-siadirs in a siadir
//
// Size is the total amount of data stored in the siafiles of the siadir
//
// StuckHealth is the health of the most in need siafile in the siadir,
// stuck or not stuck
// The following fields are aggregate values of the siadir. These values are
// the totals of the siadir and any sub siadirs, or are calculated based on
// all the values in the subtree
AggregateHealth float64 `json:"aggregatehealth"`
AggregateLastHealthCheckTime time.Time `json:"aggregatelasthealthchecktime"`
AggregateMinRedundancy float64 `json:"aggregateminredundancy"`
AggregateModTime time.Time `json:"aggregatemodtime"`
AggregateNumFiles uint64 `json:"aggregatenumfiles"`
AggregateNumStuckChunks uint64 `json:"aggregatenumstuckchunks"`
AggregateNumSubDirs uint64 `json:"aggregatenumsubdirs"`
AggregateSize uint64 `json:"aggregatesize"`
AggregateStuckHealth float64 `json:"aggregatestuckhealth"`
// The following fields are information specific to the siadir that is not
// an aggregate of the entire sub directory tree
Health float64 `json:"health"`
LastHealthCheckTime time.Time `json:"lasthealthchecktime"`
MinRedundancy float64 `json:"minredundancy"`
ModTime time.Time `json:"modtime"`
NumFiles uint64 `json:"numfiles"`
NumStuckChunks uint64 `json:"numstuckchunks"`
NumSubDirs uint64 `json:"numsubdirs"`
Size uint64 `json:"size"`
StuckHealth float64 `json:"stuckhealth"`
}
)
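// Illustration (added; not part of the original file): each aggregate field is
// the corresponding siadir-specific field folded over the whole subtree. A
// hedged sketch of that relationship for NumFiles, using a hypothetical slice
// of child metadata rather than the real update machinery:
func exampleAggregateNumFiles(dir Metadata, children []Metadata) uint64 {
	total := dir.NumFiles // files directly in this siadir
	for _, child := range children {
		total += child.AggregateNumFiles // each child contributes its subtree total
	}
	return total
}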
// DirReader is a helper type that allows reading a raw .siadir from disk while
// keeping the file in memory locked.
type DirReader struct {
f *os.File
sd *SiaDir
}
// Close closes the underlying file.
func (sdr *DirReader) Close() error {
sdr.sd.mu.Unlock()
return sdr.f.Close()
}
// Read calls Read on the underlying file.
func (sdr *DirReader) Read(b []byte) (int, error) {
return sdr.f.Read(b)
}
// Stat returns the FileInfo of the underlying file.
func (sdr *DirReader) Stat() (os.FileInfo, error) {
return sdr.f.Stat()
}
// New creates a new directory in the renter directory and makes sure there is a
// metadata file in the directory and creates one as needed. This method will
// also make sure that all the parent directories are created and have metadata
// files as well and will return the SiaDir containing the information for the
// directory that matches the siaPath provided
func New(siaPath modules.SiaPath, rootDir string, wal *writeaheadlog.WAL) (*SiaDir, error) {
// Create path to directory and ensure path contains all metadata
updates, err := createDirMetadataAll(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create metadata for directory
md, update, err := createDirMetadata(siaPath, rootDir)
if err != nil {
return nil, err
}
// Create SiaDir
sd := &SiaDir{
metadata: md,
deps: modules.ProdDependencies,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
return sd, managedCreateAndApplyTransaction(wal, append(updates, update)...)
}
// createDirMetadata makes sure there is a metadata file in the directory and
// creates one as needed
func createDirMetadata(siaPath modules.SiaPath, rootDir string) (Metadata, writeaheadlog.Update, error) {
// Check if metadata file exists
_, err := os.Stat(siaPath.SiaDirMetadataSysPath(rootDir))
if err == nil || !os.IsNotExist(err) {
return Metadata{}, writeaheadlog.Update{}, err
}
// Initialize metadata, set Health and StuckHealth to DefaultDirHealth so
// empty directories won't be viewed as being the most in need. Initialize
// ModTimes.
md := Metadata{
AggregateHealth: DefaultDirHealth,
AggregateModTime: time.Now(),
AggregateStuckHealth: DefaultDirHealth,
Health: DefaultDirHealth,
ModTime: time.Now(),
StuckHealth: DefaultDirHealth,
}
path := siaPath.SiaDirMetadataSysPath(rootDir)
update, err := createMetadataUpdate(path, md)
return md, update, err
}
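// Note (added for clarity; not in the original source): the os.Stat check in
// createDirMetadata above returns early with a nil error when the metadata
// file already exists, and propagates any Stat failure other than "not
// exist". A minimal standalone sketch of the same idiom:
func metadataFileMissing(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return false, nil // file already exists; nothing to create
	}
	if !os.IsNotExist(err) {
		return false, err // unexpected Stat failure
	}
	return true, nil // file is missing; safe to create it
}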
// loadSiaDirMetadata loads the directory metadata from disk.
func loadSiaDirMetadata(path string, deps modules.Dependencies) (md Metadata, err error) {
// Open the file.
file, err := deps.Open(path)
if err != nil {
return Metadata{}, err
}
defer file.Close()
// Read the file
bytes, err := ioutil.ReadAll(file)
if err != nil {
return Metadata{}, err
}
// Parse the json object.
err = json.Unmarshal(bytes, &md)
return
}
// LoadSiaDir loads the directory metadata from disk
func LoadSiaDir(rootDir string, siaPath modules.SiaPath, deps modules.Dependencies, wal *writeaheadlog.WAL) (sd *SiaDir, err error) {
sd = &SiaDir{
deps: deps,
siaPath: siaPath,
rootDir: rootDir,
wal: wal,
}
sd.metadata, err = loadSiaDirMetadata(siaPath.SiaDirMetadataSysPath(rootDir), deps)
return sd, err
}
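// Usage sketch (assumed; not from the original source): create a siadir with
// New and read its metadata back with LoadSiaDir. The arguments here are
// placeholders supplied by the caller.
func exampleNewAndLoad(rootDir string, siaPath modules.SiaPath, wal *writeaheadlog.WAL) (*SiaDir, error) {
	if _, err := New(siaPath, rootDir, wal); err != nil {
		return nil, err
	}
	// Reload the same directory's metadata from disk.
	return LoadSiaDir(rootDir, siaPath, modules.ProdDependencies, wal)
}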
// delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) delete() error {
update := sd.createDeleteUpdate()
err := sd.createAndApplyTransaction(update)
sd.deleted = true
return err
}
// Delete removes the directory from disk and marks it as deleted. Once the directory is
// deleted, attempting to access the directory will return an error.
func (sd *SiaDir) Delete() error {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.delete()
}
// Deleted returns the deleted field of the siaDir
func (sd *SiaDir) Deleted() bool {
sd.mu.Lock()
defer sd.mu.Unlock()
return sd.deleted
}
// DirReader creates an io.ReadCloser that can be used to read the raw SiaDir
// from disk.
func (sd *SiaDir) DirReader() (*DirReader, error) {
sd.mu.Lock()
if sd.deleted {
sd.mu.Unlock()
return nil, errors.New("can't copy deleted SiaDir")
}
// Open file.
path := sd.siaPath.SiaDirMetadataSysPath(sd.rootDir)
f, err := os.Open(path)
if err != nil {
sd.mu.Unlock()
return nil, err
}
// Keep the SiaDir locked; DirReader.Close releases the lock together with the
// file handle. (Assumed completion; the source is truncated after the os.Open
// error check.)
return &DirReader{
f:  f,
sd: sd,
}, nil
}
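// Usage sketch (assumed): stream the raw .siadir file to any io.Writer. Note
// that DirReader returns with the SiaDir still locked, so Close must always
// be called to release both the lock and the file handle.
func exampleDumpSiaDir(sd *SiaDir, w io.Writer) error {
	r, err := sd.DirReader()
	if err != nil {
		return err
	}
	defer r.Close() // unlocks the SiaDir and closes the file
	_, err = io.Copy(w, r)
	return err
}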
fpdf.go

var (
SpeedGradientColors = [][]int{
/*
{0xDD, 0x2F, 0x00}, // DD2F00
{0xE1, 0x00, 0x06}, // E10006
{0xE5, 0x00, 0x3F}, // E5003F
*/
{0x00, 0xBF, 0xA9}, // 00BFA9
{0x00, 0xC2, 0x66}, // 00C266
{0x00, 0xC5, 0x21}, // 00C521
{0x25, 0xC9, 0x00}, // 25C900
{0x6F, 0xCC, 0x00}, // 6FCC00
{0xBB, 0xD0, 0x00}, // BBD000
{0xD3, 0x9D, 0x00}, // D39D00
{0xD7, 0x53, 0x00}, // D75300
{0xDA, 0x06, 0x00}, // DA0600
{0xDE, 0x00, 0x48}, // DE0048
{0xE1, 0x00, 0x99}, // E10099
{0xDB, 0x00, 0xE5}, // DB00E5
}
DeltaGradientColors = [][]int{
{0xF5, 0x00, 0x2B}, // F5002B
{0xA8, 0x00, 0x1C}, // A8001C
{0x7C, 0x00, 0x0E}, // 7C000E
{0x70, 0x70, 0x70}, // 707070
{0x00, 0x6C, 0x03}, // 006C03
{0x00, 0x98, 0x07}, // 009807
{0x00, 0xE5, 0x0B}, // 00E50B
}
)
// }}}
// {{{ groundspeedToRGB, groundspeedDeltaToRGB
func groundspeedToRGB(speed float64) []int {
if speed >= SpeedGradientMax { return SpeedGradientColors[len(SpeedGradientColors)-1] }
if speed <= SpeedGradientMin { return SpeedGradientColors[0] }
f := (speed-SpeedGradientMin) / (SpeedGradientMax-SpeedGradientMin)
i := int (f * float64(len(SpeedGradientColors) - 2))
return SpeedGradientColors[i+1]
}
func groundspeedDeltaToRGB(delta float64) []int {
f := delta / 4.0 // How many 4-knot increments this delta is
f += 3.0 // [0,1,2] are braking, [3] is nochange, [4,5,6] are accelerating
i := int(f)
if i<0 { i = 0 }
if i>6 { i = 6 }
rgbw := DeltaGradientColors[i]
fAbs := math.Abs(delta/4.0)
widthPercent := int (fAbs * 0.33 * 100)
if widthPercent < 10 { widthPercent = 10 }
rgbw = append(rgbw, widthPercent)
return rgbw
}
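// Worked example (illustrative): groundspeedToRGB clamps out-of-range speeds
// to the first (underflow) and last (overflow) colors and buckets everything
// in between linearly; groundspeedDeltaToRGB maps a speed change over 5s into
// one of the seven DeltaGradientColors (index 3 meaning "no change") and
// appends a fourth element, a line-width percentage.
func exampleGradients() {
	slow := groundspeedToRGB(SpeedGradientMin - 1) // first color (underflow)
	fast := groundspeedToRGB(SpeedGradientMax + 1) // last color (overflow)
	braking := groundspeedDeltaToRGB(-6.0)         // index 1: braking by 4-8 knots
	steady := groundspeedDeltaToRGB(0.0)           // index 3: no change
	_, _, _, _ = slow, fast, braking, steady
}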
// }}}
// {{{ altitudeToY, distNMToX
func altitudeToY(alt float64) float64 {
distY := (alt/ApproachHeightFeet) * ApproachBoxHeight
y := ApproachBoxHeight - distY // In PDF, the Y scale goes down the page
return y + ApproachBoxOffsetY
}
func distNMToX(distNM float64) float64 {
distX := (distNM/ApproachWidthNM) * ApproachBoxWidth // How many X units away from SFO
x := ApproachBoxWidth - distX // SFO is on the right of the box
return x + ApproachBoxOffsetX
}
// }}}
// {{{ DrawSpeedGradientKey, DrawDeltaGradientKey
func DrawSpeedGradientKey(pdf *gofpdf.Fpdf) {
width,height := 8,4
// Allow for the underflow & overflow colors at either end of the gradient
speedPerBox := (SpeedGradientMax-SpeedGradientMin) / float64(len(SpeedGradientColors)-2)
for i,rgb := range SpeedGradientColors {
x,y := ApproachBoxOffsetX, ApproachBoxHeight-float64((i-1)*height)
pdf.SetFillColor(rgb[0], rgb[1], rgb[2])
pdf.Rect(x+2.0, y, float64(width), float64(height), "F")
min := SpeedGradientMin + float64(i)*speedPerBox
pdf.MoveTo(x+float64(width)+2.0, y)
text := fmt.Sprintf(">=%.0f knots GS", min)
if i==0 { text = fmt.Sprintf("<%.0f knots GS", min) }
pdf.Cell(30, float64(height), text)
}
}
func DrawDeltaGradientKey(pdf *gofpdf.Fpdf) {
width,height := 8,4
labels := []string{
"braking: by >8 knots within 5s",
"braking: by 4-8 knots within 5s",
"braking: by 0-4 knots within 5s",
"no change",
"accelerating: by 0-4 knots within 5s",
"accelerating: by 4-8 knots within 5s",
"accelerating: by >8 knots within 5s",
}
for i,rgb := range DeltaGradientColors {
x,y := ApproachBoxOffsetX, ApproachBoxHeight-float64((i-1)*height)
pdf.SetFillColor(rgb[0], rgb[1], rgb[2])
pdf.Rect(x+2.0, y, float64(width), float64(height), "F")
pdf.MoveTo(x+float64(width)+2.0, y)
pdf.Cell(30, float64(height), labels[i])
}
}
// }}}
// {{{ DrawTitle
func DrawTitle(pdf *gofpdf.Fpdf, title string) {
pdf.MoveTo(10, ApproachBoxHeight + ApproachBoxOffsetY + 10)
pdf.Cell(40, 10, title)
}
// }}}
// {{{ DrawApproachFrame
func DrawApproachFrame(pdf *gofpdf.Fpdf) {
pdf.SetLineWidth(0.05)
pdf.SetDrawColor(0xa0, 0xa0, 0xa0)
pdf.MoveTo(ApproachBoxOffsetX, ApproachBoxOffsetY)
pdf.LineTo(ApproachBoxOffsetX+ApproachBoxWidth, ApproachBoxOffsetY)
pdf.LineTo(ApproachBoxOffsetX+ApproachBoxWidth, ApproachBoxOffsetY+ApproachBoxHeight)
pdf.LineTo(ApproachBoxOffsetX, ApproachBoxOffsetY+ApproachBoxHeight)
pdf.LineTo(ApproachBoxOffsetX, ApproachBoxOffsetY)
pdf.DrawPath("D")
// X axis tickmarks and labels
pdf.SetLineWidth(0.05)
pdf.SetFont("Arial", "", 8)
for _,nm := range []float64{10,20,30,40,50,60,70,80} {
pdf.SetDrawColor(0x00, 0x00, 0x00)
pdf.MoveTo(distNMToX(nm), ApproachBoxHeight+ApproachBoxOffsetY)
pdf.LineTo(distNMToX(nm), ApproachBoxHeight+ApproachBoxOffsetY+1.5)
pdf.SetDrawColor(0xa0, 0xa0, 0xa0)
pdf.MoveTo(distNMToX(nm), ApproachBoxHeight+ApproachBoxOffsetY)
pdf.LineTo(distNMToX(nm), ApproachBoxOffsetY)
pdf.MoveTo(distNMToX(nm)-4, ApproachBoxHeight+ApproachBoxOffsetY+2)
pdf.Cell(30, float64(4), fmt.Sprintf("%.0f NM", nm))
}
pdf.MoveTo(distNMToX(0)-4, ApproachBoxHeight+ApproachBoxOffsetY+2)
pdf.Cell(30, float64(4), "SFO")
pdf.DrawPath("D")
// Y axis gridlines and labels
pdf.SetLineWidth(0.05)
pdf.SetDrawColor(0xa0, 0xa0, 0xa0)
for _,alt := range []float64{5000, 10000, 15000, 20000} {
pdf.MoveTo(ApproachBoxOffsetX, altitudeToY(alt))
pdf.LineTo(ApproachBoxOffsetX+ApproachBoxWidth, altitudeToY(alt))
pdf.MoveTo(ApproachBoxOffsetX+ApproachBoxWidth+0.5, altitudeToY(alt)-2)
pdf.Cell(30, float64(4), fmt.Sprintf("%.0fft", alt))
}
pdf.DrawPath("D")
}
// }}}
// {{{ DrawSFOClassB
func DrawSFOClassB(pdf *gofpdf.Fpdf) {
pdf.SetDrawColor(0x00, 0x00, 0x66)
pdf.SetLineWidth(0.45)
pdf.MoveTo(ApproachBoxOffsetX+ApproachBoxWidth, altitudeToY(10000.0))
// Should really parse this all out of the constants in geo/sfo ...
pdf.LineTo(distNMToX(30.0), altitudeToY(10000.0))
pdf.LineTo(distNMToX(30.0), altitudeToY( 8000.0))
pdf.LineTo(distNMToX(25.0), altitudeToY( 8000.0))
pdf.LineTo(distNMToX(25.0), altitudeToY( 6000.0))
pdf.LineTo(distNMToX(20.0), altitudeToY( 6000.0))
pdf.LineTo(distNMToX(20.0), altitudeToY( 4000.0))
pdf.LineTo(distNMToX(15.0), altitudeToY( 4000.0))
pdf.LineTo(distNMToX(15.0), altitudeToY( 3000.0))
pdf.LineTo(distNMToX(10.0), altitudeToY( 3000.0))
pdf.LineTo(distNMToX(10.0), altitudeToY( 1500.0))
pdf.LineTo(distNMToX( 7.0), altitudeToY( 1500.0))
pdf.LineTo(distNMToX( 7.0), altitudeToY( 0.0))
pdf.DrawPath("D")
}
// }}}
// {{{ DrawWaypoints
type WaypointFurniture struct {
Name string
Min,Max float64
}
func DrawWaypoints(pdf *gofpdf.Fpdf) {
pdf.SetDrawColor(0xa0, 0xa0, 0x20)
pdf.SetTextColor(0xa0, 0xa0, 0x20)
pdf.SetFont("Arial", "B", 8)
wpFurn := []WaypointFurniture{
{"EPICK", 10000, 15000},
{"EDDYY", 5850, 6150},
{"SWELS", 4550, 4850},
{"MENLO", 3850, 4150},
// {"SKUNK", 11850, 12150},
// {"BOLDR", 9850, 10150},
}
for _,wp := range wpFurn {
nm := sfo.KLatlongSFO.DistNM(sfo.KFixes[wp.Name])
yOffset := 5.5
if wp.Name == "SWELS" { yOffset = 9 }
pdf.MoveTo(distNMToX(nm)-5.5, ApproachBoxHeight+ApproachBoxOffsetY+yOffset)
pdf.Cell(30, float64(4), wp.Name)
// pdf.Cell(30, float64(4), fmt.Sprintf("EPICK (%.1fNM)", epickNM))
pdf.SetLineWidth(1.3)
pdf.MoveTo(distNMToX(nm), altitudeToY(wp.Min))
pdf.LineTo(distNMToX(nm), altitudeToY(wp.Max))
pdf.SetLineWidth(0.5)
pdf.MoveTo(distNMToX(nm), altitudeToY(-100))
pdf.LineTo(distNMToX(nm), altitudeToY(100))
}
pdf.DrawPath("D")
pdf.SetTextColor(0x00, 0x00, 0x00)
pdf.SetFont("Arial", "", 10)
}
// }}}
// {{{ DrawTrack
func trackpointToApproachXY(tp fdb.Trackpoint) (float64, float64) {
return distNMToX(tp.DistNM(sfo.KLatlongSFO)), altitudeToY(tp.IndicatedAltitude)
}
func DrawTrack(pdf *gofpdf.Fpdf, tInput fdb.Track, colorscheme ColorScheme) {
pdf.SetDrawColor(0xff, 0x00, 0x00)
pdf.SetLineWidth(0.25)
pdf.SetAlpha(0.5, "")
// We don't need trackpoints every 200ms
sampleRate := time.Second * 5
t := tInput.SampleEvery(sampleRate, false)
if len(t) == 0 { return }
for i := range t[1:] {
if t[i].IndicatedAltitude < 100 && t[i+1].IndicatedAltitude < 100 { continue }
x1,y1 := trackpointToApproachXY(t[i])
x2,y2 := trackpointToApproachXY(t[i+1])
// ... or compare against x2/y2 and clip against frame ...
if x1 < ApproachBoxOffsetX { continue }
if y1 < ApproachBoxOffsetY { continue }
rgb := []int{0xFF,0x00,0x00}
switch colorscheme {
case ByGroundspeed: rgb = groundspeedToRGB(t[i].GroundSpeed)
case ByDeltaGroundspeed: rgb = groundspeedDeltaToRGB(t[i+1].GroundSpeed - t[i].GroundSpeed)
}
pdf.SetLineWidth(0.25)
if len(rgb)>3 {
pdf.SetLineWidth(float64(rgb[3]) / 100.0)
}
pdf.SetDrawColor(rgb[0], rgb[1], rgb[2])
pdf.Line(x1,y1,x2,y2)
}
pdf.DrawPath("D")
pdf.SetAlpha(1.0, "")
}
// }}}
// {{{ NewApproachPdf
func NewApproachPdf(colorscheme ColorScheme) *gofpdf.Fpdf {
pdf := gofpdf.New("L", "mm", "Letter", "")
pdf.AddPage()
pdf.SetFont("Arial", "", 10)
DrawApproachFrame(pdf)
DrawSFOClassB(pdf)
DrawWaypoints(pdf)
if colorscheme == ByDeltaGroundspeed {
DrawDeltaGradientKey(pdf)
} else {
DrawSpeedGradientKey(pdf)
}
return pdf
}
// }}}
// {{{ WriteTrack
func WriteTrack(output io.Writer, t fdb.Track) error {
pdf := NewApproachPdf(ByGroundspeed)
// (Assumed completion; the source is truncated here.) Draw the track onto the
// approach chart and write the finished PDF to the supplied writer.
DrawTrack(pdf, t, ByGroundspeed)
return pdf.Output(output)
}
auth.go
// (The opening of this constructor is truncated in the source; only its tail,
// which stores the secret salt, survives.)
[]byte(secretSalt),
}
}
/*******************************************************************************
* Compute a salted hash of the specified clear text password. The hash is suitable
* for storage and later use for validation of input passwords, using the
* companion function PasswordHashIsValid. Thus, the hash is required to be
* cryptographically secure. The 256-bit SHA-2 algorithm, aka "SHA-256",
* is used.
*/
func (authSvc *AuthService) CreatePasswordHash(pswd string) []byte {
var h []byte = authSvc.computeHash(pswd).Sum([]byte{})
return h
}
/*******************************************************************************
* Validate session Id: return true if valid, false otherwise. Thus, a return
* of true indicates that the sessionId is recognized as having been created
* by this server and that it is not expired and is still considered to represent
* an active session.
*/
func (authSvc *AuthService) sessionIdIsValid(sessionId string) bool {
return authSvc.validateSessionId(sessionId)
}
/*******************************************************************************
* Create a new user session. This presumes that the credentials have been verified.
*/
func (authSvc *AuthService) createSession(creds *apitypes.Credentials) *apitypes.SessionToken {
var sessionId string = authSvc.createUniqueSessionId()
var token *apitypes.SessionToken = apitypes.NewSessionToken(sessionId, creds.UserId)
// Cache the new session token, so that this Server can recognize it in future
// exchanges during this session.
authSvc.Sessions[sessionId] = creds
fmt.Println("Created session for session id " + sessionId)
return token
}
/*******************************************************************************
* Remove the specified session Id from the set of authenticated session Ids.
* This effectively logs out the owner of that session.
*/
func (authSvc *AuthService) invalidateSessionId(sessionId string) {
authSvc.Sessions[sessionId] = nil
}
/*******************************************************************************
* Clear all sessions that are cached in the auth service. The effect is that,
* after calling this method, no user is logged in.
*/
func (authSvc *AuthService) clearAllSessions() {
authSvc.Sessions = make(map[string]*apitypes.Credentials)
}
/*******************************************************************************
* Verify that a request belongs to a valid session:
* Obtain the SessionId cookie, if any, and validate it; return nil if no SessionId
* cookie is found or the SessionId is not valid.
*/
func (authSvc *AuthService) authenticateRequestCookie(httpReq *http.Request) *apitypes.SessionToken {
var sessionToken *apitypes.SessionToken = nil
fmt.Println("authenticating request...")
var sessionId = getSessionIdFromCookie(httpReq)
if sessionId != "" {
fmt.Println("obtained session id:", sessionId)
sessionToken = authSvc.identifySession(sessionId) // returns nil if invalid
}
return sessionToken
}
/*******************************************************************************
*
*/
func (authService *AuthService) addSessionIdToResponse(sessionToken *apitypes.SessionToken,
writer http.ResponseWriter) {
// Set cookie containing the session Id.
var cookie = &http.Cookie{
Name: "SessionId",
Value: sessionToken.UniqueSessionId,
//Path:
//Domain:
//Expires:
//RawExpires:
MaxAge: 86400,
Secure: false, //....change to true later.
HttpOnly: true,
//Raw:
//Unparsed:
}
http.SetCookie(writer, cookie)
}
/*******************************************************************************
* Determine if a specified action is allowed on a specified resource.
* All handlers call this function.
* The set of ACLs owned by the resource are used to make the determination.
* At most one field of the actionMask may be true.
*/
func (authService *AuthService) authorized(dbClient DBClient, sessionToken *apitypes.SessionToken,
actionMask []bool, resourceId string) (bool, error) {
/* Rules:
A party can access a resource if the party,
has an ACL entry for the resource; or,
the resource belongs to a repo or realm for which the party has an ACL entry.
In this context, a user is a party if the user is explicitly the party or if
the user belongs to a group that is explicitly the party.
Groups may not belong to other groups.
The user must have the required access mode (CreateIn, Read, Write, Exec, Delete).
No access mode implies any other access mode.
The access modes have the following meanings:
CreateIn - The party can create resources that will be owned by the target resource.
Read - The party can obtain the contents of the target resource.
Write - The party can modify the contents of the target resource.
Exec - The party can compel SafeHarbor to perform the actions specified by
the target resource (e.g., execute a Dockerfile).
Delete - The party can Delete the target resource.
*/
if sessionToken == nil { return false, utilities.ConstructServerError("No session token") }
// Identify the user.
var userId string = sessionToken.AuthenticatedUserid
fmt.Println("userid=", userId)
var user User
var err error
user, err = dbClient.dbGetUserByUserId(userId)
if err != nil { return false, err }
if user == nil {
return false, utilities.ConstructServerError("user object cannot be identified from user id " + userId)
}
// Special case: Allow user all capabilities for their own user object.
if user.getId() == resourceId { return true, nil }
// Verify that at most one field of the actionMask is true.
var nTrue = 0
for _, b := range actionMask {
if b {
if nTrue == 1 {
return false, utilities.ConstructUserError("More than one field in mask may not be true")
}
nTrue++
}
}
// Check if the user or a group that the user belongs to has the permission
// that is specified by the actionMask.
var party Party = user // start with the user.
var resource Resource
resource, err = dbClient.getResource(resourceId)
if err != nil { return false, err }
if resource == nil {
return false, utilities.ConstructUserError("Resource with Id " + resourceId + " not found")
}
var groupIds []string = user.getGroupIds()
var groupIndex = -1
for { // the user, and then each group that the user belongs to...
// See if the party (user or group) has an ACL entry for the resource.
var partyCanAccessResourceDirectly bool
partyCanAccessResourceDirectly, err =
authService.partyHasAccess(dbClient, party, actionMask, resource)
if err != nil { return false, err }
if partyCanAccessResourceDirectly { return true, nil }
// See if any of the party's parent resources have access.
var parentId string = resource.getParentId()
if parentId != "" {
var parent Resource
parent, err = dbClient.getResource(parentId)
if err != nil { return false, err }
var parentHasAccess bool
parentHasAccess, err = authService.partyHasAccess(dbClient, party, actionMask, parent)
if err != nil { return false, err }
if parentHasAccess { return true, nil }
}
groupIndex++
if groupIndex == len(groupIds) { return false, nil }
var err error
party, err = dbClient.getParty(groupIds[groupIndex]) // check next group
if err != nil { return false, err }
}
return false, nil // no access rights found
}
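// Usage sketch (assumed; not from the original source): callers build an
// action mask with exactly one true entry and ask whether the session's user
// may perform that action on the resource. The mask layout (CreateIn, Read,
// Write, Exec, Delete) follows the rules comment above; the Read index used
// here is a hypothetical placeholder.
func exampleMayRead(authService *AuthService, dbClient DBClient,
	sessionToken *apitypes.SessionToken, resourceId string) (bool, error) {
	actionMask := make([]bool, 5) // CreateIn, Read, Write, Exec, Delete
	actionMask[1] = true          // Read (assumed position)
	return authService.authorized(dbClient, sessionToken, actionMask, resourceId)
}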
/*******************************************************************************
* Return the SHA-256 hash of the content of the specified file. Should not be salted
* because the hash is intended to be reproducible by third parties, given the
* original file.
*/
func (authSvc *AuthService) ComputeFileDigest(filepath string) ([]byte, error) {
return utilities.ComputeFileDigest(sha256.New(), filepath)
}
/*******************************************************************************
 * Compute a SHA-256 hash of the specified string. Salt the hash so that the
* hash value cannot be forged or identified via a lookup table.
*/
func (authSvc *AuthService) computeHash(s string) hash.Hash {
var hash hash.Hash = sha256.New()
var bytes []byte = []byte(s)
hash.Write(authSvc.secretSalt)
hash.Write(bytes)
return hash
}
/*******************************************************************************
*
*/
func (authSvc *AuthService) compareHashValues(h1, h2 []byte) bool {
if len(h1) != len(h2) { return false }
for i, b := range h1 { if b != h2[i] { return false } }
return true
}
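// Side note (added; not in the original source): compareHashValues above
// returns on the first mismatching byte, which can leak timing information.
// When comparing secret-derived hashes, the standard library's crypto/subtle
// package offers a constant-time alternative:
func compareHashValuesConstantTime(h1, h2 []byte) bool {
	// ConstantTimeCompare returns 1 only when the slices are equal; it also
	// returns 0 immediately when the lengths differ.
	return subtle.ConstantTimeCompare(h1, h2) == 1
}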
/***************************** Internal Functions ******************************/
/*******************************************************************************
* Return true if the party has the right implied by the actionMask, for
* the specified Resource, based on the ACLEntries that the resource has. Do not
 * attempt to determine if the resource's owning Resource has applicable ACLEntries.
 * At most one element of the actionMask may be true.
*/
func (authSvc *AuthService) partyHasAccess(dbClient DBClient, party Party,
actionMask []bool, resource Resource) (bool, error) {
// Discover which field of the action mask is set.
var action int = -1
for i, entry := range actionMask {
if entry {
if action != -1
|
action = i
}
}
if action == -1 {
|
{ return false, utilities.ConstructUserError("More than one field set in action mask") }
|
conditional_block
|
auth.go
|
...")
var sessionId = getSessionIdFromCookie(httpReq)
if sessionId != "" {
fmt.Println("obtained session id:", sessionId)
sessionToken = authSvc.identifySession(sessionId) // returns nil if invalid
}
return sessionToken
}
/*******************************************************************************
*
*/
func (authService *AuthService) addSessionIdToResponse(sessionToken *apitypes.SessionToken,
writer http.ResponseWriter) {
// Set cookie containing the session Id.
var cookie = &http.Cookie{
Name: "SessionId",
Value: sessionToken.UniqueSessionId,
//Path:
//Domain:
//Expires:
//RawExpires:
MaxAge: 86400,
Secure: false, //....change to true later.
HttpOnly: true,
//Raw:
//Unparsed:
}
http.SetCookie(writer, cookie)
}
/*******************************************************************************
* Determine if a specified action is allowed on a specified resource.
* All handlers call this function.
* The set of ACLs owned by the resource are used to make the determination.
* At most one field of the actionMask may be true.
*/
func (authService *AuthService) authorized(dbClient DBClient, sessionToken *apitypes.SessionToken,
actionMask []bool, resourceId string) (bool, error) {
/* Rules:
A party can access a resource if the party,
has an ACL entry for the resource; or,
the resource belongs to a repo or realm for which the party has an ACL entry.
In this context, a user is a party if the user is explicitly the party or if
the user belongs to a group that is explicitly the party.
Groups may not belong to other groups.
The user must have the required access mode (CreateIn, Read, Write, Exec, Delete).
No access mode implies any other access mode.
The access modes have the following meanings:
CreateIn - The party can create resources that will be owned by the target resource.
Read - The party can obtain the contents of the target resource.
Write - The party can modify the contents of the target resource.
Exec - The party can compel SafeHarbor to perform the actions specified by
the target resource (e.g., execute a Dockerfile).
Delete - The party can Delete the target resource.
*/
if sessionToken == nil { return false, utilities.ConstructServerError("No session token") }
// Identify the user.
var userId string = sessionToken.AuthenticatedUserid
fmt.Println("userid=", userId)
var user User
var err error
user, err = dbClient.dbGetUserByUserId(userId)
if user == nil {
return false, utilities.ConstructServerError("user object cannot be identified from user id " + userId)
}
// Special case: Allow user all capabilities for their own user object.
if user.getId() == resourceId { return true, nil }
// Verify that at most one field of the actionMask is true.
var nTrue = 0
for _, b := range actionMask {
if b {
if nTrue == 1 {
return false, utilities.ConstructUserError("More than one field in mask may not be true")
}
nTrue++
}
}
// Check if the user or a group that the user belongs to has the permission
// that is specified by the actionMask.
var party Party = user // start with the user.
var resource Resource
resource, err = dbClient.getResource(resourceId)
if err != nil { return false, err }
if resource == nil {
return false, utilities.ConstructUserError("Resource with Id " + resourceId + " not found")
}
var groupIds []string = user.getGroupIds()
var groupIndex = -1
for { // the user, and then each group that the user belongs to...
// See if the party (user or group) has an ACL entry for the resource.
var partyCanAccessResourceDirectoy bool
partyCanAccessResourceDirectoy, err =
authService.partyHasAccess(dbClient, party, actionMask, resource)
if err != nil { return false, err }
if partyCanAccessResourceDirectoy { return true, nil }
// See if any of the party's parent resources have access.
var parentId string = resource.getParentId()
if parentId != "" {
var parent Resource
parent, err = dbClient.getResource(parentId)
if err != nil { return false, err }
var parentHasAccess bool
parentHasAccess, err = authService.partyHasAccess(dbClient, party, actionMask, parent)
if err != nil { return false, err }
if parentHasAccess { return true, nil }
}
groupIndex++
if groupIndex == len(groupIds) { return false, nil }
var err error
party, err = dbClient.getParty(groupIds[groupIndex]) // check next group
if err != nil { return false, err }
}
return false, nil // no access rights found
}
/*******************************************************************************
* Return the SHA-256 hash of the content of the specified file. Should not be salted
* because the hash is intended to be reproducible by third parties, given the
* original file.
*/
func (authSvc *AuthService) ComputeFileDigest(filepath string) ([]byte, error) {
return utilities.ComputeFileDigest(sha256.New(), filepath)
}
/*******************************************************************************
* Compute a SHA-256 has of the specified string. Salt the hash so that the
* hash value cannot be forged or identified via a lookup table.
*/
func (authSvc *AuthService) computeHash(s string) hash.Hash {
var hash hash.Hash = sha256.New()
var bytes []byte = []byte(s)
hash.Write(authSvc.secretSalt)
hash.Write(bytes)
return hash
}
/*******************************************************************************
*
*/
func (authSvc *AuthService) compareHashValues(h1, h2 []byte) bool {
if len(h1) != len(h2) { return false }
for i, b := range h1 { if b != h2[i] { return false } }
return true
}
/***************************** Internal Functions ******************************/
/*******************************************************************************
* Return true if the party has the right implied by the actionMask, for
* the specified Resource, based on the ACLEntries that the resource has. Do not
* attempt to determine if the resource''s owning Resource has applicable ACLEntries.
* At most one elemente of the actionMask may be true.
*/
func (authSvc *AuthService) partyHasAccess(dbClient DBClient, party Party,
actionMask []bool, resource Resource) (bool, error) {
// Discover which field of the action mask is set.
var action int = -1
for i, entry := range actionMask {
if entry {
if action != -1 { return false, utilities.ConstructUserError("More than one field set in action mask") }
action = i
}
}
if action == -1 { return false, nil } // no action mask fields were set.
var entries []string = party.getACLEntryIds()
for _, entryId := range entries { // for each of the party's ACL entries...
var entry ACLEntry
var err error
entry, err = dbClient.getACLEntry(entryId)
if err != nil { return false, err }
if entry.getResourceId() == resource.getId() { // if the entry references the resource,
var mask []bool = entry.getPermissionMask()
if mask[action] { return true, nil } // party has access to the resource
}
}
return false, nil
}
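partyHasAccess expects a one-hot action mask. The sketch below shows how a caller might build one; the mode ordering (CreateIn, Read, Write, Exec, Delete) and the helper name are assumptions taken from the comment in authorized, not from this codebase:
package main

import "fmt"

// Assumed ordering of the five access modes within the action mask;
// the real codebase's ordering may differ.
const (
	actionCreateIn = iota
	actionRead
	actionWrite
	actionExec
	actionDelete
	numActions
)

// newActionMask builds the one-hot []bool that partyHasAccess expects:
// exactly one position set.
func newActionMask(action int) []bool {
	mask := make([]bool, numActions)
	mask[action] = true
	return mask
}

func main() {
	fmt.Println(newActionMask(actionRead)) // [false true false false false]
}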
/*******************************************************************************
* Returns the "SessionId" cookie value, or "" if there is none.
* Used by authenticateRequestCookie.
*/
func getSessionIdFromCookie(httpReq *http.Request) string {
assertThat(httpReq != nil, "In getSessionIdFromCookie, httpReq is nil")
assertThat(httpReq.Header != nil, "In getSessionIdFromCookie, httpReq.Header is nil")
var cookie *http.Cookie
var err error
cookie, err = httpReq.Cookie("SessionId")
if err != nil {
fmt.Println("No SessionId cookie found.")
return ""
}
var sessionId = cookie.Value
if len(sessionId) == 0 { return "" }
return sessionId
}
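For reference, the cookie lookup that getSessionIdFromCookie wraps can be exercised without a running server by attaching a cookie to a synthetic request; a minimal sketch:
package main

import (
	"fmt"
	"net/http"
)

// Attach a SessionId cookie to a synthetic request, then read it back
// the same way getSessionIdFromCookie does.
func main() {
	req, err := http.NewRequest("GET", "http://example.test/", nil)
	if err != nil {
		panic(err)
	}
	req.AddCookie(&http.Cookie{Name: "SessionId", Value: "abc:123"})
	cookie, err := req.Cookie("SessionId")
	if err != nil {
		fmt.Println("no SessionId cookie")
		return
	}
	fmt.Println("session id:", cookie.Value) // session id: abc:123
}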
/*******************************************************************************
* Validate the specified session id. If valid, return a apitypes.SessionToken with
* the identity of the session owner.
*/
func (authSvc *AuthService) identifySession(sessionId string) *apitypes.SessionToken {
var credentials *apitypes.Credentials = authSvc.Sessions[sessionId]
if credentials == nil {
fmt.Println("No session found for session id", sessionId)
return nil
}
return apitypes.NewSessionToken(sessionId, credentials.UserId)
}
/*******************************************************************************
* Validate session Id: return true if valid, false otherwise.
* See also createUniqueSessionId.
*/
func (authSvc *AuthService) validateSessionId(sessionId string) bool
|
{
var parts []string = strings.Split(sessionId, ":")
if len(parts) != 2 {
fmt.Println("Ill-formatted sessionId:", sessionId)
return false
}
var uniqueNonRandomValue string = parts[0]
var untrustedHash string = parts[1]
var empty = []byte{}
var actualSaltedHashBytes []byte = authSvc.computeHash(uniqueNonRandomValue).Sum(empty)
return untrustedHash == fmt.Sprintf("%x", actualSaltedHashBytes)
}
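validateSessionId accepts IDs of the form <uniqueValue>:<hex of salted SHA-256>. createUniqueSessionId is referenced above but not shown in this excerpt; a hypothetical sketch of how it could produce that shape, assuming a random nonce as the unique value and the same salt-then-value hashing order as computeHash:
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// makeSessionId is a hypothetical stand-in for createUniqueSessionId:
// it pairs a unique value with the hex of a salted hash of that value,
// so the server can later recompute the hash and verify it issued the ID.
func makeSessionId(secretSalt []byte) (string, error) {
	nonce := make([]byte, 16)
	if _, err := rand.Read(nonce); err != nil {
		return "", err
	}
	unique := fmt.Sprintf("%x", nonce)
	h := sha256.New()
	h.Write(secretSalt) // same salt-then-value order as computeHash
	h.Write([]byte(unique))
	return unique + ":" + fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	id, err := makeSessionId([]byte("example-salt"))
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}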
|
identifier_body
|
|
auth.go
|
[]byte(secretSalt),
}
}
/*******************************************************************************
* Compute a salted hash of the specified clear text password. The hash is suitable
* for storage and later use for validation of input passwords, using the
* companion function PasswordHashIsValid. Thus, the hash is required to be
* cryptographically secure. The 256-bit SHA-2 algorithm, aka "SHA-256",
* is used.
*/
func (authSvc *AuthService) CreatePasswordHash(pswd string) []byte {
var h []byte = authSvc.computeHash(pswd).Sum([]byte{})
return h
}
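The comment above names a companion PasswordHashIsValid, which does not appear in this excerpt. Given CreatePasswordHash, it would presumably recompute the salted hash and compare digests; a sketch under that assumption, reusing computeHash and compareHashValues from this file:
// PasswordHashIsValid (sketch): recompute the salted hash of the
// candidate password and compare it with the hash previously produced
// by CreatePasswordHash.
func (authSvc *AuthService) PasswordHashIsValid(pswd string, storedHash []byte) bool {
	var h []byte = authSvc.computeHash(pswd).Sum([]byte{})
	return authSvc.compareHashValues(h, storedHash)
}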
/*******************************************************************************
* Validate session Id: return true if valid, false otherwise. Thus, a return
* of true indicates that the sessionId is recognized as having been created
* by this server and that it is not expired and is still considered to represent
* an active session.
*/
func (authSvc *AuthService) sessionIdIsValid(sessionId string) bool {
return authSvc.validateSessionId(sessionId)
}
/*******************************************************************************
* Create a new user session. This presumes that the credentials have been verified.
*/
func (authSvc *AuthService) createSession(creds *apitypes.Credentials) *apitypes.SessionToken {
var sessionId string = authSvc.createUniqueSessionId()
var token *apitypes.SessionToken = apitypes.NewSessionToken(sessionId, creds.UserId)
// Cache the new session token, so that this Server can recognize it in future
// exchanges during this session.
authSvc.Sessions[sessionId] = creds
fmt.Println("Created session for session id " + sessionId)
return token
}
/*******************************************************************************
* Remove the specified session Id from the set of authenticated session Ids.
* This effectively logs out the owner of that session.
*/
func (authSvc *AuthService) invalidateSessionId(sessionId string) {
delete(authSvc.Sessions, sessionId) // remove the entry entirely, rather than leaving a nil value in the map
}
/*******************************************************************************
* Clear all sessions that are cached in the auth service. The effect is that,
* after calling this method, no user is logged in.
*/
func (authSvc *AuthService) clearAllSessions() {
authSvc.Sessions = make(map[string]*apitypes.Credentials)
}
/*******************************************************************************
* Verify that a request belongs to a valid session:
* Obtain the SessionId cookie, if any, and validate it; return nil if no SessionId
* cookie is found or the SessionId is not valid.
*/
func (authSvc *AuthService) authenticateRequestCookie(httpReq *http.Request) *apitypes.SessionToken {
var sessionToken *apitypes.SessionToken = nil
fmt.Println("authenticating request...")
var sessionId = getSessionIdFromCookie(httpReq)
if sessionId != "" {
fmt.Println("obtained session id:", sessionId)
sessionToken = authSvc.identifySession(sessionId) // returns nil if invalid
}
return sessionToken
}
/*******************************************************************************
*
*/
func (authService *AuthService) addSessionIdToResponse(sessionToken *apitypes.SessionToken,
writer http.ResponseWriter) {
// Set cookie containing the session Id.
var cookie = &http.Cookie{
Name: "SessionId",
Value: sessionToken.UniqueSessionId,
//Path:
//Domain:
//Expires:
//RawExpires:
MaxAge: 86400,
Secure: false, // TODO: set to true once the server runs behind TLS.
HttpOnly: true,
//Raw:
//Unparsed:
}
http.SetCookie(writer, cookie)
}
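Taken together, a login path would create the session after credentials are verified and then attach the ID with the cookie helper. handleLogin below is hypothetical and elides the credential check itself:
// handleLogin (hypothetical): once credentials have been verified
// elsewhere, create a session and return its ID to the client as the
// SessionId cookie.
func (authSvc *AuthService) handleLogin(writer http.ResponseWriter, creds *apitypes.Credentials) {
	var token *apitypes.SessionToken = authSvc.createSession(creds)
	authSvc.addSessionIdToResponse(token, writer)
}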
/*******************************************************************************
* Determine if a specified action is allowed on a specified resource.
* All handlers call this function.
* The set of ACLs owned by the resource are used to make the determination.
* At most one field of the actionMask may be true.
*/
func (authService *AuthService) authorized(dbClient DBClient, sessionToken *apitypes.SessionToken,
actionMask []bool, resourceId string) (bool, error) {
/* Rules:
A party can access a resource if the party
has an ACL entry for the resource, or
the resource belongs to a repo or realm for which the party has an ACL entry.
In this context, a user is a party if the user is explicitly the party or if
the user belongs to a group that is explicitly the party.
Groups may not belong to other groups.
The user must have the required access mode (CreateIn, Read, Write, Exec, Delete).
No access mode implies any other access mode.
The access modes have the following meanings:
CreateIn - The party can create resources that will be owned by the target resource.
Read - The party can obtain the contents of the target resource.
Write - The party can modify the contents of the target resource.
Exec - The party can compel SafeHarbor to perform the actions specified by
the target resource (e.g., execute a Dockerfile).
Delete - The party can Delete the target resource.
*/
if sessionToken == nil { return false, utilities.ConstructServerError("No session token") }
// Identify the user.
var userId string = sessionToken.AuthenticatedUserid
fmt.Println("userid=", userId)
var user User
var err error
user, err = dbClient.dbGetUserByUserId(userId)
if err != nil { return false, err }
if user == nil {
return false, utilities.ConstructServerError("user object cannot be identified from user id " + userId)
|
if user.getId() == resourceId { return true, nil }
// Verify that at most one field of the actionMask is true.
var nTrue = 0
for _, b := range actionMask {
if b {
if nTrue == 1 {
return false, utilities.ConstructUserError("More than one field in mask may not be true")
}
nTrue++
}
}
// Check if the user or a group that the user belongs to has the permission
// that is specified by the actionMask.
var party Party = user // start with the user.
var resource Resource
resource, err = dbClient.getResource(resourceId)
if err != nil { return false, err }
if resource == nil {
return false, utilities.ConstructUserError("Resource with Id " + resourceId + " not found")
}
var groupIds []string = user.getGroupIds()
var groupIndex = -1
for { // the user, and then each group that the user belongs to...
// See if the party (user or group) has an ACL entry for the resource.
var partyCanAccessResourceDirectly bool
partyCanAccessResourceDirectly, err =
authService.partyHasAccess(dbClient, party, actionMask, resource)
if err != nil { return false, err }
if partyCanAccessResourceDirectly { return true, nil }
// See if any of the party's parent resources have access.
var parentId string = resource.getParentId()
if parentId != "" {
var parent Resource
parent, err = dbClient.getResource(parentId)
if err != nil { return false, err }
var parentHasAccess bool
parentHasAccess, err = authService.partyHasAccess(dbClient, party, actionMask, parent)
if err != nil { return false, err }
if parentHasAccess { return true, nil }
}
groupIndex++
if groupIndex == len(groupIds) { return false, nil }
var err error
party, err = dbClient.getParty(groupIds[groupIndex]) // check next group
if err != nil { return false, err }
}
}
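A typical caller builds a one-hot mask for the desired access mode and consults authorized before acting. requireRead below is a hypothetical guard; the index of the Read mode within the mask is an assumption:
// requireRead (hypothetical): select the Read mode with a one-hot mask
// (index 1 assumed) and refuse the request unless authorized passes.
func requireRead(dbClient DBClient, authService *AuthService,
	sessionToken *apitypes.SessionToken, resourceId string) error {
	var readMask = []bool{false, true, false, false, false}
	allowed, err := authService.authorized(dbClient, sessionToken, readMask, resourceId)
	if err != nil { return err }
	if !allowed {
		return utilities.ConstructUserError("access to " + resourceId + " denied")
	}
	return nil
}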
/*******************************************************************************
* Return the SHA-256 hash of the content of the specified file. Should not be salted
* because the hash is intended to be reproducible by third parties, given the
* original file.
*/
func (authSvc *AuthService) ComputeFileDigest(filepath string) ([]byte, error) {
return utilities.ComputeFileDigest(sha256.New(), filepath)
}
/*******************************************************************************
 * Compute a SHA-256 hash of the specified string. Salt the hash so that the
* hash value cannot be forged or identified via a lookup table.
*/
func (authSvc *AuthService) computeHash(s string) hash.Hash {
var h hash.Hash = sha256.New()
h.Write(authSvc.secretSalt)
h.Write([]byte(s))
return h
}
/*******************************************************************************
*
*/
func (authSvc *AuthService) compareHashValues(h1, h2 []byte) bool {
if len(h1) != len(h2) { return false }
for i, b := range h1 { if b != h2[i] { return false } }
return true
}
/***************************** Internal Functions ******************************/
/*******************************************************************************
* Return true if the party has the right implied by the actionMask, for
* the specified Resource, based on the ACLEntries that the resource has. Do not
 * attempt to determine if the resource's owning Resource has applicable ACLEntries.
 * At most one element of the actionMask may be true.
*/
func (authSvc *AuthService) partyHasAccess(dbClient DBClient, party Party,
actionMask []bool, resource Resource) (bool, error) {
// Discover which field of the action mask is set.
var action int = -1
for i, entry := range actionMask {
if entry {
if action != -1 { return false, utilities.ConstructUserError("More than one field set in action mask") }
action = i
}
}
if action == -1 { return false, nil } // no action mask fields were set.
|
}
// Special case: Allow user all capabilities for their own user object.
|
random_line_split
|
auth.go
|
[]byte(secretSalt),
}
}
/*******************************************************************************
* Compute a salted hash of the specified clear text password. The hash is suitable
* for storage and later use for validation of input passwords, using the
* companion function PasswordHashIsValid. Thus, the hash is required to be
* cryptographically secure. The 256-bit SHA-2 algorithm, aka "SHA-256",
* is used.
*/
func (authSvc *AuthService)
|
(pswd string) []byte {
var h []byte = authSvc.computeHash(pswd).Sum([]byte{})
return h
}
/*******************************************************************************
* Validate session Id: return true if valid, false otherwise. Thus, a return
* of true indicates that the sessionId is recognized as having been created
* by this server and that it is not expired and is still considered to represent
* an active session.
*/
func (authSvc *AuthService) sessionIdIsValid(sessionId string) bool {
return authSvc.validateSessionId(sessionId)
}
/*******************************************************************************
* Create a new user session. This presumes that the credentials have been verified.
*/
func (authSvc *AuthService) createSession(creds *apitypes.Credentials) *apitypes.SessionToken {
var sessionId string = authSvc.createUniqueSessionId()
var token *apitypes.SessionToken = apitypes.NewSessionToken(sessionId, creds.UserId)
// Cache the new session token, so that this Server can recognize it in future
// exchanges during this session.
authSvc.Sessions[sessionId] = creds
fmt.Println("Created session for session id " + sessionId)
return token
}
/*******************************************************************************
* Remove the specified session Id from the set of authenticated session Ids.
* This effectively logs out the owner of that session.
*/
func (authSvc *AuthService) invalidateSessionId(sessionId string) {
delete(authSvc.Sessions, sessionId) // remove the entry entirely, rather than leaving a nil value in the map
}
/*******************************************************************************
* Clear all sessions that are cached in the auth service. The effect is that,
* after calling this method, no user is logged in.
*/
func (authSvc *AuthService) clearAllSessions() {
authSvc.Sessions = make(map[string]*apitypes.Credentials)
}
/*******************************************************************************
* Verify that a request belongs to a valid session:
* Obtain the SessionId cookie, if any, and validate it; return nil if no SessionId
* cookie is found or the SessionId is not valid.
*/
func (authSvc *AuthService) authenticateRequestCookie(httpReq *http.Request) *apitypes.SessionToken {
var sessionToken *apitypes.SessionToken = nil
fmt.Println("authenticating request...")
var sessionId = getSessionIdFromCookie(httpReq)
if sessionId != "" {
fmt.Println("obtained session id:", sessionId)
sessionToken = authSvc.identifySession(sessionId) // returns nil if invalid
}
return sessionToken
}
/*******************************************************************************
*
*/
func (authService *AuthService) addSessionIdToResponse(sessionToken *apitypes.SessionToken,
writer http.ResponseWriter) {
// Set cookie containing the session Id.
var cookie = &http.Cookie{
Name: "SessionId",
Value: sessionToken.UniqueSessionId,
//Path:
//Domain:
//Expires:
//RawExpires:
MaxAge: 86400,
Secure: false, // TODO: set to true once the server runs behind TLS.
HttpOnly: true,
//Raw:
//Unparsed:
}
http.SetCookie(writer, cookie)
}
/*******************************************************************************
* Determine if a specified action is allowed on a specified resource.
* All handlers call this function.
* The set of ACLs owned by the resource are used to make the determination.
* At most one field of the actionMask may be true.
*/
func (authService *AuthService) authorized(dbClient DBClient, sessionToken *apitypes.SessionToken,
actionMask []bool, resourceId string) (bool, error) {
/* Rules:
A party can access a resource if the party
has an ACL entry for the resource, or
the resource belongs to a repo or realm for which the party has an ACL entry.
In this context, a user is a party if the user is explicitly the party or if
the user belongs to a group that is explicitly the party.
Groups may not belong to other groups.
The user must have the required access mode (CreateIn, Read, Write, Exec, Delete).
No access mode implies any other access mode.
The access modes have the following meanings:
CreateIn - The party can create resources that will be owned by the target resource.
Read - The party can obtain the contents of the target resource.
Write - The party can modify the contents of the target resource.
Exec - The party can compel SafeHarbor to perform the actions specified by
the target resource (e.g., execute a Dockerfile).
Delete - The party can Delete the target resource.
*/
if sessionToken == nil { return false, utilities.ConstructServerError("No session token") }
// Identify the user.
var userId string = sessionToken.AuthenticatedUserid
fmt.Println("userid=", userId)
var user User
var err error
user, err = dbClient.dbGetUserByUserId(userId)
if err != nil { return false, err }
if user == nil {
return false, utilities.ConstructServerError("user object cannot be identified from user id " + userId)
}
// Special case: Allow user all capabilities for their own user object.
if user.getId() == resourceId { return true, nil }
// Verify that at most one field of the actionMask is true.
var nTrue = 0
for _, b := range actionMask {
if b {
if nTrue == 1 {
return false, utilities.ConstructUserError("More than one field in mask may not be true")
}
nTrue++
}
}
// Check if the user or a group that the user belongs to has the permission
// that is specified by the actionMask.
var party Party = user // start with the user.
var resource Resource
resource, err = dbClient.getResource(resourceId)
if err != nil { return false, err }
if resource == nil {
return false, utilities.ConstructUserError("Resource with Id " + resourceId + " not found")
}
var groupIds []string = user.getGroupIds()
var groupIndex = -1
for { // the user, and then each group that the user belongs to...
// See if the party (user or group) has an ACL entry for the resource.
var partyCanAccessResourceDirectly bool
partyCanAccessResourceDirectly, err =
authService.partyHasAccess(dbClient, party, actionMask, resource)
if err != nil { return false, err }
if partyCanAccessResourceDirectly { return true, nil }
// See if any of the party's parent resources have access.
var parentId string = resource.getParentId()
if parentId != "" {
var parent Resource
parent, err = dbClient.getResource(parentId)
if err != nil { return false, err }
var parentHasAccess bool
parentHasAccess, err = authService.partyHasAccess(dbClient, party, actionMask, parent)
if err != nil { return false, err }
if parentHasAccess { return true, nil }
}
groupIndex++
if groupIndex == len(groupIds) { return false, nil }
var err error
party, err = dbClient.getParty(groupIds[groupIndex]) // check next group
if err != nil { return false, err }
}
}
/*******************************************************************************
* Return the SHA-256 hash of the content of the specified file. Should not be salted
* because the hash is intended to be reproducible by third parties, given the
* original file.
*/
func (authSvc *AuthService) ComputeFileDigest(filepath string) ([]byte, error) {
return utilities.ComputeFileDigest(sha256.New(), filepath)
}
/*******************************************************************************
 * Compute a SHA-256 hash of the specified string. Salt the hash so that the
* hash value cannot be forged or identified via a lookup table.
*/
func (authSvc *AuthService) computeHash(s string) hash.Hash {
var h hash.Hash = sha256.New()
h.Write(authSvc.secretSalt)
h.Write([]byte(s))
return h
}
/*******************************************************************************
*
*/
func (authSvc *AuthService) compareHashValues(h1, h2 []byte) bool {
if len(h1) != len(h2) { return false }
for i, b := range h1 { if b != h2[i] { return false } }
return true
}
/***************************** Internal Functions ******************************/
/*******************************************************************************
* Return true if the party has the right implied by the actionMask, for
* the specified Resource, based on the ACLEntries that the resource has. Do not
 * attempt to determine if the resource's owning Resource has applicable ACLEntries.
 * At most one element of the actionMask may be true.
*/
func (authSvc *AuthService) partyHasAccess(dbClient DBClient, party Party,
actionMask []bool, resource Resource) (bool, error) {
// Discover which field of the action mask is set.
var action int = -1
for i, entry := range actionMask {
if entry {
if action != -1 { return false, utilities.ConstructUserError("More than one field set in action mask") }
action = i
}
}
if action == -1 { return false, nil } // no action mask fields were set.
|
CreatePasswordHash
|
identifier_name
|
_typecheck.py
|
"AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.util import tf_inspect
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def _type(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list)
and all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence)
and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable)
and isinstance(instance, collections.Sized)
and isinstance(instance, collections.Container)
and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple)
and len(instance) == len(self._types)
and all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping)
and all(isinstance(k, key_type) for k in instance.keys())
and all(isinstance(v, value_type) for v in instance.values()))
class Dict(Mapping):
|
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(s, context) for s in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = tf_inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:],
spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r"
% (a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
A list of one element corresponds to a single return value.
A list of several elements corresponds to several return values.
Note that a function with no explicit return value has an implicit
NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
|
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict)
and super(Dict, self).__instancecheck__(instance))
|
identifier_body
|