repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
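Each row below pairs repository metadata (repo_name, path, language, license, size, score) with a Python source file split across the last three columns: a prefix, a short middle span, and a suffix (a fill-in-the-middle layout). The bare | lines inside each row mark those column boundaries. As a minimal sketch only, assuming the rows are exposed as dicts keyed by the column names above (the reassemble helper and the example values are illustrative, not part of this dump), one row recombines into the original file like this:

# Minimal sketch: recombine one prefix/middle/suffix row into full source text.
# Assumes dict-style rows keyed by the header's column names; the example row
# below is heavily truncated and purely illustrative.
def reassemble(row):
    """Concatenate the three text columns in order: prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

if __name__ == "__main__":
    row = {
        "repo_name": "Samuel789/MediPi",
        "language": "Python",
        "prefix": "# coding: utf-8\nimport sys\n",
        "middle": "class ScalarFloat(float):\n",
        "suffix": "    pass\n",
    }
    print(reassemble(row))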
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/ruamel/yaml/scalarfloat.py | Python | apache-2.0 | 3,378 | 0.00148 |
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
from .compat import no_limit_int # NOQA
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ["ScalarFloat", "ExponentialFloat", "ExponentialCapsFloat"]
class ScalarFloat(float):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
width = kw.pop('width', None) # type: ignore
prec = kw.pop('prec', None) # type: ignore
m_sign = kw.pop('m_sign', None) # type: ignore
m_lead0 = kw.pop('m_lead0', 0) # type: ignore
exp = kw.pop('exp', None) # type: ignore
e_width = kw.pop('e_width', None) # type: ignore
e_sign = kw.pop('e_sign', None) # type: ignore
underscore = kw.pop('underscore', None) # type: ignore
v = float.__new__(cls, *args, **kw) # type: ignore
v._width = width
v._prec = prec
v._m_sign = m_sign
v._m_lead0 = m_lead0
v._exp = exp
v._e_width = e_width
v._e_sign = e_sign
v._underscore = underscore
return v
def __iadd__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self + a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self // a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self * a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self ** a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self - a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def dump(self, out=sys.stdout):
# type: (Any) -> Any
print('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}|{}, w:{}, s:{})'.format(
self, self._width, self._prec, self._m_sign, self._m_lead0, # type: ignore
self._exp, self._e_widt
|
h, self._e_sign), file=out) # type: ignore
class ExponentialFloat(ScalarFloat):
def __new__(cls, value, width=None, underscore
|
=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
class ExponentialCapsFloat(ScalarFloat):
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
|
wandb/client | wandb/vendor/pygments/lexers/rdf.py | Python | mit | 9,398 | 0.00117 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.rdf
~~~~~~~~~~~~~~~~~~~
Lexers for semantic web and RDF query languages and markup.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default
from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \
Whitespace, Name, Literal, Comment, Text
__all__ = ['SparqlLexer', 'TurtleLexer']
class SparqlLexer(RegexLexer):
"""
Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.
.. versionadded:: 2.0
"""
name = 'SPARQL'
aliases = ['sparql']
filenames = ['*.rq', '*.sparql']
mimetypes = ['application/sparql-query']
# character group definitions ::
PN_CHARS_BASE_GRP = (u'a-zA-Z'
u'\u00c0-\u00d6'
u'\u00d8-\u00f6'
u'\u00f8-\u02ff'
u'\u0370-\u037d'
u'\u037f-\u1fff'
u'\u200c-\u200d'
u'\u2070-\u218f'
u'\u2c00-\u2fef'
u'\u3001-\ud7ff'
u'\uf900-\ufdcf'
u'\ufdf0-\ufffd')
PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
PN_CHARS_GRP = (PN_CHARS_U_GRP +
r'\-' +
r'0-9' +
u'\u00b7' +
u'\u0300-\u036f' +
u'\u203f-\u2040')
HEX_GRP = '0-9A-Fa-f'
PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
# terminal productions ::
PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
PN_CHARS = '[' + PN_CHARS_GRP + ']'
HEX = '[' + HEX_GRP + ']'
PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
'.]*' + PN_CHARS + ')?'
PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
PERCENT = '%' + HEX + HEX
PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
'(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
PN_CHARS_GRP + ':]|' + PLX + '))?')
EXPONENT = r'[eE][+-]?\d+'
# Lexer token definitions ::
tokens = {
'root': [
(r'\s+', Text),
# keywords ::
(r'((?i)select|construct|describe|ask|where|filter|group\s+by|minus|'
r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
r'offset|bindings|load|clear|drop|create|add|move|copy|'
r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
r'using\s+named|using|graph|default|named|all|optional|service|'
r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
(r'(a)\b', Keyword),
# IRIs ::
('(' + IRIREF + ')', Name.Label),
# blank nodes ::
('(' + BLANK_NODE_LABEL + ')', Name.Label),
# # variables ::
('[?$]' + VARNAME, Name.Variable),
# prefixed names ::
(r'(' + PN_PREFIX + ')?(\:)(' + PN_LOCAL + ')?',
bygroups(Name.Namespace, Punctuation, Name.Tag)),
# function names ::
(r'((?i)str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
r'count|sum|min|max|avg|sample|group_concat|separator)\b',
Name.Function),
# boolean literals ::
(r'(true|false)', Keyword.Constant),
# double literals ::
(r'[+\-]?(\d+\.\d*' + EXPONENT + '|\.?\d+' + EXPONENT + ')', Number.Float),
# decimal literals ::
(r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
# integer literals ::
(r'[+\-]?\d+', Number.Integer),
# operators ::
(r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
# punctuation characters ::
(r'[(){}.;,:^\[\]]', Punctuation),
# line comments ::
(r'#[^\n]*', Comment),
# strings ::
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String.Escape, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'u' + HEX + '{4}', String.Escape, '#pop'),
(r'U' + HEX + '{8}', String.Escape, '#pop'),
(r'.', String.Escape, '#pop'),
],
'end-of-string': [
(r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
bygroups(Operator, Name.Function), '#pop:2'),
(r'\^\^', Operator, '#pop:2'),
default('#pop:2'),
],
}
class TurtleLexer(RegexLexer):
"""
Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.
.. versionadded:: 2.1
"""
name = 'Turtle'
aliases = ['turtle']
filenames = ['*.ttl']
mimetypes = ['text/turtle', 'application/x-turtle']
flags = re.IGNORECASE
patterns = {
'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range
'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
}
|
# PNAME_NS PN_LOCAL (with
|
simplified character range)
patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns
tokens = {
'root': [
(r'\s+', Whitespace),
# Base / prefix
(r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation)),
(r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
Name.Variable, Whitespace, Punctuation)),
# The shorthand predicate 'a'
(r'(?<=\s)a(?=\s)', Keyword.Type),
# IRIREF
(r'%(IRIREF)s' % patterns, Name.Variable),
# PrefixedName
(r'%(PrefixedName)s' % patterns,
bygroups(Name.Namespace, Name.Tag)),
# Comment
(r'#[^\n]+', Comment),
(r'\b(true|false)\b', Literal),
(r'[+\-]?\d*\.\d+', Number.Float),
(r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
(r'[+\-]?\d+', Number.Integer),
(r'[\[\](){}.;,:^]', Punctuation),
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'singl
|
tudennis/LeetCode---kamyu104-11-24-2015 | Python/remove-duplicates-from-sorted-list-ii.py | Python | mit | 1,452 | 0.004132 |
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a sorted linked list, delete all nodes that have duplicate numbers,
# leaving only distinct numbers from the original list.
#
# For example,
# Given 1->2->3->3->4->4->5, return 1->2->5.
# Given 1->1->1->2->3, return 2->3.
#
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self is None:
return "Nil"
else:
return "{} -> {}".format(self.val, repr(self.next))
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
|
dummy = ListNode(0)
pre, cur = dummy, head
while cur:
if cur.next and cur.next.val == cur.val:
val = cur.val;
while cur and cur.val == val:
cur = cur.next
pre.next = cur
else:
pre.next = cur
pre = cur
cur = cur.next
return dummy.next
if __name__ =
|
= "__main__":
head, head.next, head.next.next = ListNode(1), ListNode(2), ListNode(3)
head.next.next.next, head.next.next.next.next = ListNode(3), ListNode(4)
head.next.next.next.next.next, head.next.next.next.next.next.next = ListNode(4), ListNode(5)
print(Solution().deleteDuplicates(head))
|
v-legoff/pa-poc3 | src/service/default/wiki.py | Python | bsd-3-clause | 10,806 | 0.005552 |
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing the wiki service (WikiService class).
This service is used to convert text to HTML. The conversion
process and its rules are described in the class documentation.
"""
import re
from service import Service
class WikiService(Service):
"""Class describing the wiki service.
This service is made to manipulate text and convert it to HTML format.
The converting rules are highly customizable by inheriting from this
class. Most of the constants used to convert text to HTML
are created in the constructor. Therefore, others may be added
very simply.
Here is a short example that shows how to add a new markup
to the wiki syntax. You can add a new 'wiki.py' file in your
'services' package placed in your bundle, and then paste the following
code into the file:
>>> from ext.aboard.service.default import WikiService
>>> class Wiki(WikiService): # It's important to name that class Wiki
... def __init__(self):
... WikiService.__init__(self)
... self.add_markup("bold", "b", "<strong>{string}</strong>")
You can also change the delimiters for the markup, add new regular
expressions, delete markups, etc. Have a look at the class methods.
"""
name = "wiki"
def __init__(self, start="<", end=">", close="/"):
"""Service constructor."""
Service.__init__(self)
self.markup_delimiter_start = start
self.markup_delimiter_end = end
self.markup_delimiter_close = close
self.expressions = []
self.exceptions = []
# Add the exceptions
self.add_except_expression("@", "@")
self.add_except_markup("pre")
# Add the expressions and markups
self.add_expression("italic", "/(.*?)/", "<em>\\1</em>")
self.add_expression("bold", r"\*(.*?)\*", "<strong>\\1</strong>")
self.add_expression(
"header1",
r"^(\s*)h1\.\s+(.*?)\s*$",
r"\1<h1>\2</h1>",
re.MULTILINE
)
# Test (to delete)
text = """
h1. A test
This is some text with *that* in bold,
But @*this part* h1. should@ not be interpreted at all.
<pre>
This one is a *long*
non /interpreted/ text, somehow.</pre>
and, finally, /this should be in italic/ and *bold*.
Well, @that *one* again@.
"""
def add_expression(self, name, regexp, replacement, options=0):
"""Add a new regular expression.
This method automatically compiles the given regular expression and
adds the result to the self.expressions list.
Expected arguments:
name -- the name of the expression (see below)
regexp -- the regular expression which will be compiled
options [optional] -- the regular expression options.
An expression name should be a unique identifier. It's mostly used
to replace an expression (if a developer decides to change the
rule to create bold text, for instance, he will use this identifier).
"""
name = name.lower()
names = [line[0] for line in self.expressions]
if name in names:
raise ValueError("the identifier {} already exists in the " \
"expression list. Use the 'replace_expression' " \
"method to replace it".format(repr(name)))
compiled = re.compile(regexp, options)
self.expressions.append((name, compiled, replacement))
def replace_expressions(self, name, regexp, replacement, options=0):
"""Replace an existing expression using its identifier.
The expected arguments are the same as the 'add_expression' method.
Instead of simply adding a new expression, though, it first deletes
the expression with that name. This is very useful for defining a new
rule for certain formatting.
"""
name = name.lower()
names = [line[0] for line in self.expressions]
if name not in names:
raise ValueError("the identifier {} doesn't exists in the " \
"expression list. Use the 'add_expression' " \
"method to add it".format(repr(name)))
compiled = re.compile(regexp, options)
exp_pos = names.index(name)
del self.expressions[exp_pos]
self.expressions.insert(exp_pos, (name, compiled, replacement))
def remove_expression(self, name):
"""Remove the expression identified by its name."""
name = name.lower()
names = [line[0] for line in self.expressions]
if name not in names:
raise ValueError("the identifier {} doesn't exist in the " \
"expression list.".format(repr(name)))
exp_pos = names.index(name)
del self.expressions[exp_pos]
def add_except_expression(self, start, end, options=0):
"""Add an expression for a Wiki exception.
Exceptions are not interpreted. If this expression is found, it is
|
deleted and its content (the second group) is copied into a
temporary field and pasted back into the original text, unchanged, at the end of the process.
"""
self.exceptions.append((start, end, options))
def add_markup(self, name, markup, html):
|
"""Add a new markup.
A wiki markup is by default close to a HTML markup. It should
begin with > (<), end with < (>). To close the markup
after the text to select, it uses another > followed
by /, the markup and the < symbol.
These three symbols (markup_delimiter_start, markup_delimiter_end
and markup_delimiter_close) are instance attributes and can be
set in the constructor of a subclass. This allows setting
new markup symbols, brackets for instance.
Note: the 'html' parameter should contain the '{string}'
sub-string to identify a replacement. For instance:
>>> wiki.add_markup("italic", "i", "<em>{string}</em>")
That code will allow text like:
We <i>made</i> it!
To:
We <em>made</em> it!
"""
start = self.markup_delimiter_start
end = self.markup_delimiter_end
close = self.markup_delimiter_close
regexp = start + markup
|
Robpol86/libnl | libnl/nl80211/iw_scan.py | Python | lgpl-2.1 | 29,804 | 0.001711 |
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17.
Copyright (c) 2015 Robert Pooley
Copyright (c) 2007, 2008 Johannes Berg
Copyright (c) 2007 Andy Lutomirski
Copyright (c) 2007 Mike Kershaw
Copyright (c) 2008-2009 Luis R. Rodriguez
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from libnl.attr import nla_policy, NLA_U16, NLA_U32, NLA_U64, NLA_U8
from libnl.misc import c_int8, c_uint8, SIZEOF_S8, SIZEOF_U8
from libnl.nl80211 import nl80211
from libnl.nl80211.iw_util import ampdu_space, get_ht_capability, get_ht_mcs, get_ssid
WLAN_CAPABILITY_ESS = 1 << 0
WLAN_CAPABILITY_IBSS = 1 << 1
WLAN_CAPABILITY_CF_POLLABLE = 1 << 2
WLAN_CAPABILITY_CF_POLL_REQUEST = 1 << 3
WLAN_CAPABILITY_PRIVACY = 1 << 4
WLAN_CAPABILITY_SHORT_PREAMBLE = 1 << 5
WLAN_CAPABILITY_PBCC = 1 << 6
WLAN_CAPABILITY_CHANNEL_AGILITY = 1 << 7
WLAN_CAPABILITY_SPECTRUM_MGMT = 1 << 8
WLAN_CAPABILITY_QOS = 1 << 9
WLAN_CAPABILITY_SHORT_SLOT_TIME = 1 << 10
WLAN_CAPABILITY_APSD = 1 << 11
WLAN_CAPABILITY_RADIO_MEASURE = 1 << 12
WLAN_CAPABILITY_DSSS_OFDM = 1 << 13
WLAN_CAPABILITY_DEL_BACK = 1 << 14
WLAN_CAPABILITY_IMM_BACK = 1 << 15
# DMG (60gHz) 802.11ad
WLAN_CAPABILITY_DMG_TYPE_MASK = 3 << 0
WLAN_CAPABILITY_DMG_TYPE_IBSS = 1 << 0 # Tx by: STA
WLAN_CAPABILITY_DMG_TYPE_PBSS = 2 << 0 # Tx by: PCP
WLAN_CAPABILITY_DMG_TYPE_AP = 3 << 0 # Tx by: AP
WLAN_CAPABILITY_DMG_CBAP_ONLY = 1 << 2
WLAN_CAPABILITY_DMG_CBAP_SOURCE = 1 << 3
WLAN_CAPABILITY_DMG_PRIVACY = 1 << 4
WLAN_CAPABILITY_DMG_ECPAC = 1 << 5
WLAN_CAPABILITY_DMG_SPECTRUM_MGMT = 1 << 8
WLAN_CAPABILITY_DMG_RADIO_MEASURE = 1 << 12
IEEE80211_COUNTRY_EXTENSION_ID = 201
BSS_MEMBERSHIP_SELECTOR_VHT_PHY = 126
BSS_MEMBERSHIP_SELECTOR_HT_PHY = 127
ms_oui = b'\x00\x50\xf2'
ieee80211_oui = b'\x00\x0f\xac'
wfa_oui = b'\x50\x6f\x9a'
country_env_str = lambda e: {'I': 'Indoor only', 'O': 'Outdoor only', ' ': 'Indoor/Outdoor'}.get(e, 'bogus')
wifi_wps_dev_passwd_id = lambda e: {0: 'Default (PIN)', 1: 'User-specified', 2: 'Machine-specified', 3: 'Rekey',
4: 'PushButton', 5: 'Registrar-specified'}.get(e, '??')
ht_secondary_offset = ('no secondary', 'above', '[reserved!]', 'below')
class ieee80211_country_ie_triplet(object):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n60."""
def __init__(self, data):
"""Constructor."""
self.first_channel = c_uint8.from_buffer(data[:SIZEOF_U8]).value
self.reg_extension_id = self.first_channel
data = data[SIZEOF_U8:]
self.num_channels = c_uint8.from_buffer(data[:SIZEOF_U8]).value
self.reg_class = self.num_channels
data = data[SIZEOF_U8:]
self.max_power = c_int8.from_buffer(data[:SIZEOF_S8]).value
self.coverage_class = c_uint8.from_buffer(data[:SIZEOF_U8]).value
self.chans = self.ext = self
def get_supprates(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n227.
Positional arguments:
data -- bytearray data to read.
"""
answer = list()
for i in range(len(data)):
r = data[i] & 0x7f
if r == BSS_MEMBERSHIP_SELECTOR_VHT_PHY and data[i] & 0x80:
value = 'VHT'
elif r == BSS_MEMBERSHIP_SELECTOR_HT_PHY and data[i] & 0x80:
value = 'HT'
else:
value = '{0}.{1}'.format(int(r / 2), int(5 * (r & 1)))
answer.append('{0}{1}'.format(value, '*' if data[i] & 0x80 else ''))
return answer
def get_country(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n267.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = {'Environment': country_env_str(chr(data[2]))}
data = data[3:]
while len(data) >= 3:
triplet = ieee80211_country_ie_triplet(data)
if triplet.ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID:
answers['Extension ID'] = triplet.ext.reg_extension_id
answers['Regulatory Class'] = triplet.ext.reg_class
answers['Coverage class'] = triplet.ext.coverage_class
answers['up to dm'] = triplet.ext.coverage_class * 450
data = data[3:]
continue
if triplet.chans.first_channel <= 14: # 2 GHz.
end_channel = triplet.chans.first_channel + (triplet.chans.num_channels - 1)
else:
end_channel = triplet.chans.first_channel + (4 * (triplet.chans.num_channels - 1))
answers['Channels dBm'] = triplet.chans.max_power
answers['Channels'] = (triplet.chans.first_channel, end_channel)
data = data[3:]
return answers
def get_erp(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n323.
Positional arguments:
data -- bytearray data to read.
Returns:
String.
"""
if data[0] == 0x00:
return '<no flags>'
if data[0] & 0x01:
return 'NonERP_Present'
if data[0] & 0x02:
return 'Use_Protecti
|
on'
if data[0] & 0x04:
return 'Barker_Preamble_Mode'
return ''
def get_cipher(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.
|
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi stream cipher used by the access point (string).
"""
legend = {0: 'Use group cipher suite', 1: 'WEP-40', 2: 'TKIP', 4: 'CCMP', 5: 'WEP-104', }
key = data[3]
if ieee80211_oui == bytes(data[:3]):
legend.update({6: 'AES-128-CMAC', 8: 'GCMP', })
elif ms_oui != bytes(data[:3]):
key = None
return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))
def get_auth(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n393.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi authentication method used by the access point (string).
"""
legend = {1: 'IEEE 802.1X', 2: 'PSK', }
key = data[3]
if ieee80211_oui == bytes(data[:3]):
legend.update({3: 'FT/IEEE 802.1X', 4: 'FT/PSK', 5: 'IEEE 802.1X/SHA-256', 6: 'PSK/SHA-256', 7: 'TDLS/TPK', })
elif ms_oui != bytes(data[:3]):
key = None
return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))
def get_rsn_ie(defcipher, defauth, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n441.
Positional arguments:
defcipher -- default cipher if not in data (string).
defauth -- default authentication suites if not in data (string).
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
answers['version'] = data[0] + (data[1] << 8)
data = data[2:]
if len(data) < 4:
answers['group_cipher'] = answers['pairwise_ciphers'] = defcipher
return answers
answers['group_cipher'] = get_cipher(data)
data = data[4:]
if len(data) < 2:
answers['pairwise_ciphers'] = defcipher
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['pairwise_ciphers'] = ' '.join(get_cipher(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) < 2:
answers['authentication_suites'] = defauth
return answers
count = data[0] | (data[1] <
|
IvarsKarpics/mxcube | gui/bricks/TaskToolBoxBrick.py | Python | lgpl-3.0 | 8,196 | 0.000488 |
#
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
import logging
from gui.utils import QtImport
from gui.BaseComponents import BaseWidget
from gui.widgets.task_toolbox_widget import TaskToolBoxWidget
from HardwareRepository import HardwareRepository as HWR
__credits__ = ["MXCuBE collaboration"]
__license__ = "LGPLv3+"
__category__ = "General"
class TaskToolBoxBrick(BaseWidget):
request_tree_brick = QtImport.pyqtSignal()
def __init__(self, *args):
BaseWidget.__init__(self, *args)
# Internal values -----------------------------------------------------
self.ispyb_logged_in = False
self.tree_brick = None
# Properties ----------------------------------------------------------
self.add_property("useOscStartCbox", "boolean", False)
self.add_property("useCompression", "boolean", False)
#self.add_property("availableTasks", "string", "discrete char helical")
self.add_property("showDiscreetTask", "boolean", True)
self.add_property("showHelicalTask", "boolean", True)
self.add_property("showCharTask", "boolean", True)
self.add_property("showAdvancedTask", "boolean", True)
self.add_property("showStillScanTask", "boolean", False)
self.add_property("showCollectNowButton", "boolean", False)
# Signals -------------------------------------------------------------
self.define_signal("request_tree_brick", ())
# Slots ---------------------------------------------------------------
self.define_slot("logged_in", ())
self.define_slot("set_session", ())
self.define_slot("selection_changed", ())
self.define_slot("user_group_saved", ())
self.define_slot("set_tree_brick", ())
# Graphic elements ----------------------------------------------------
self.task_tool_box_widget = TaskToolBoxWidget(self)
# Layout --------------------------------------------------------------
self.main_layout = QtImport.QVBoxLayout(self)
self.main_layout.addWidget(self.task_tool_box_widget)
self.main_layout.setSpacing(0)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.main_layout)
# SizePolicies --------------------------------------------------------
# self.setSizePolicy(QtImport.QSizePolicy.MinimumExpanding,
# QtImport.QSizePolicy.MinimumExpanding)
# Other ---------------------------------------------------------------
HWR.beamline.sample_view.connect("pointSelected", self.point_selected)
def set_expert_mode(self, expert):
self.task_tool_box_widget.set_expert_mode(expert)
def run(self):
if HWR.beamline.session.session_id:
self.setEnabled(True)
#self.task_tool_box_widget.set_available_tasks(self["availableTasks"])
self.request_tree_brick.emit()
self.task_tool_box_widget.adjust_width(self.width())
def user_group_saved(self, new_user_group):
HWR.beamline.session.set_user_group(str(new_user_group))
self.task_tool_box_widget.update_data_path_model()
path = (
HWR.beamline.session.get_base_image_directory()
+ "/"
+ str(new_user_group)
)
msg = "Image path is: %s" % path
logging.getLogger("GUI").info(msg)
@QtImport.pyqtSlot(BaseWidget)
def set_tree_brick(self, brick):
self.tree_brick = brick
self.tree_brick.compression_state = self["useCompression"] == 1
self.task_tool_box_widget.set_tree_brick(brick)
@QtImport.pyqtSlot(int, str, str, int, str, str, bool)
def set_session(
self,
session_id,
t_prop_code=None,
prop_number=None,
prop_id=None,
start_date=None,
prop_code=None,
is_inhouse=None,
):
"""
Connected to the slot set_session and is called after a
request to get the current session from LIMS (ISPyB) is
made. The signal is normally emitted by the brick that
handles LIMS login, ie ProposalBrick.
The session_id is '' if no session could be retrieved.
"""
if session_id is "":
self.logged_in(True)
@QtImport.pyqtSlot(bool)
def logged_in(self, logged_in):
"""
Handles the signal logged_in from the brick that handles
LIMS (ISPyB) login, ie ProposalBrick. The signal is
emitted when a user was successfully logged in.
"""
logged_in = True
|
self.ispyb_logged_in = logged_in
if HWR.beamline.session is not None:
HWR.beamline.session.set_user_group("")
self.setEnabled(logged_in)
self.task_tool_box_widget.ispyb_logged_in(logged_in)
def property
|
_changed(self, property_name, old_value, new_value):
if property_name == "useOscStartCbox":
self.task_tool_box_widget.use_osc_start_cbox(new_value)
elif property_name == "useCompression":
self.task_tool_box_widget.enable_compression(new_value)
elif property_name == "showCollectNowButton":
self.task_tool_box_widget.collect_now_button.setVisible(new_value)
elif property_name == "showDiscreetTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.discrete_page
)
elif property_name == "showHelicalTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.helical_page
)
elif property_name == "showCharTask":
if not new_value:
self.task_tool_box_widget.hide_task(self.task_tool_box_widget.char_page)
elif property_name == "showAdvancedTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.advanced_page
)
elif property_name == "showStillScanTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.still_scan_page
)
def selection_changed(self, items):
"""
Connected to the signal "selection_changed" of the TreeBrick.
Called when the selection in the tree changes.
"""
self.task_tool_box_widget.selection_changed(items)
def point_selected(self, selected_position):
self.task_tool_box_widget.helical_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.discrete_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.char_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.energy_scan_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.xrf_spectrum_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.discrete_page.refresh_current_item()
self.task_tool_box_widget.helical_page.refresh_current_item()
self.task_tool_box_widget.char_page.refresh_current_item()
self.task_tool_box_widget.energy_scan_page.refresh_current_item()
self.task_tool_box_widget.xrf_spectrum_page.refresh
|
hongquan/saleor | saleor/dashboard/customer/urls.py | Python | bsd-3-clause | 234 | 0 |
from django.conf.ur
|
ls import patterns, url
from . import views
urlpatterns = patter
|
ns(
'',
url(r'^$', views.customer_list, name='customers'),
url(r'^(?P<pk>[0-9]+)/$', views.customer_details, name='customer-details')
)
|
ypzhang/jusha | book/scripts/d2d_kernel/cumemcpy_to_direct_offset1.py | Python | lgpl-3.0 | 5,058 | 0.028865 |
#!/usr/bin/python
import numpy as np
#a = np.linspace(0.,10.,100)
#b = np.sqrt(a)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv
def import_text(filename, separator):
for line in csv.reader(open(filename), delimiter=separator,
skipinitialspace=True):
if line:
yield line
def to_num(s):
try:
return int(s)
except ValueError:
return float(s)
def to_float(s):
try:
return float(s)
except ValueError:
return int(s)
def column(matrix, i):
return [row[i] for row in matrix]
def bandwidth(timings, sizes):
result = []
for i in range(0, len(timings)):
result.append((2*to_float(sizes[i]))/(to_float(timings[i])*1000000000.0))
return result
#read data
table = []
for data in import_text('./0_cudamemcpy_offset1.dat', ' '):
table.append(data)
#print column(table, 0)[1:]
size = column(table, 1)[1:]
size_string = column(table, 0)[1:]
#print size_string
# data
char_t = column(table, 2)[1:]
#short_t = column(table, 3)[1:]
#float_t = column(table, 4)[1:]
#double_t = column(table, 5)[1:]
#float3_t = column(table, 6)[1:]
#float4_t = column(table, 7)[1:]
char_bw = bandwidth(char_t, size)
#short_bw = bandwidth(short_t, size)
#float_bw = bandwidth(float_t, size)
#double_bw = bandwidth(double_t, size)
#float3_bw = bandwidth(float3_t, size)
#float4_bw = bandwidth(float4_t, size)
# read other table
di_table = []
for di_data in import_text('./1_direct_offset1.dat', ' '):
di_table.append(di_data)
#print column(table, 0)[1:]
#size_string = column(table, 0)[1:]
#print size_string
# data
di_char_t = column(di_table, 2)[1:]
di_short_t = column(di_table, 3)[1:]
di_float_t = column(di_table, 4)[1:]
di_double_t = column(di_table, 5)[1:]
di_float3_t = column(di_table, 6)[1:]
di_float4_t = column(di_table, 7)[1:]
di_char_bw = bandwidth(di_char_t, size)
di_short_bw = bandwidth(di_short_t, size)
di_float_bw = bandwidth(di_float_t, size)
di_double_bw = bandwidth(di_double_t, size)
di_float3_bw = bandwidth(di_float3_t, size)
di_float4_bw = bandwidth(di_float4_t, size)
size_np = np.array(size)
# normalize the size
for i in range(0, len(size)):
size_np[i] = i+1
# size_np[len(size)-1-i] = to_num(to_num(size_np[len(size)-1-i])/to_num(size_np[0])) #to_float(size[i])/to_float(size[0])
#print to_float(size[11])
#print to_float(float4_t[11])
#print (to_float(2*sizes[i])/(to_float(timings[i])*1000000000.0))
#print c
|
har_bw
#print float_bw
#print float
# start drawing
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("cuMemcpy v.s. d2d_direct_kernel (address not aligned)");
ax.set_xlabel(table[0][0])
ax.set_ylabel('Bandwidth (GB/sec)')
#print len(size_st
|
ring)
#print len(char_bw)
fig.add_subplot(ax)
#ax.set_ylim([180,260])
print size_np
print size_string
#ax.set_xticklabels(size_np, range(len(size_np)))
ax.set_xticklabels(size_string)
#fig.xticks(size_np, size_string)
#ax.set_xticks(size_np, ('128K', '256K', '512K', '1M', '2M', '4M', '8M', '16M', '32M', '64M'))
#ax.set_autoscaley_on(False)
ax.plot(size_np, char_bw, linestyle = '-', color = 'blue', marker='o', linewidth = 1, label='cudaMemcpy')
#ax.plot(size, short_bw, linestyle = '-', color = 'red', linewidth = 1, label='cudaMemcpy_short')
#ax.plot(size, float_bw, linestyle = '-', color = 'c', linewidth = 1, label='cudaMemcpy_float')
#ax.plot(size, double_bw, linestyle = '-', color = 'm', linewidth = 1, label='cudaMemcpy_double')
#ax.plot(size, float3_bw, linestyle = '-', color = 'k', linewidth = 1, label='cudaMemcpy_float3')
#ax.plot(size, float4_bw, linestyle = '-', color = 'y', linewidth = 1, label='cudaMemcpy_float4')
ax.plot(size_np, di_char_bw, linestyle = ':', color = 'blue', marker='o', linewidth = 2, label='d2d_direct_char')
ax.plot(size_np, di_short_bw, linestyle = ':', color = 'red', marker='s', linewidth = 2, label='d2d_direct_short')
ax.plot(size_np, di_float_bw, linestyle = ':', color = 'c', marker='p', linewidth = 2, label='d2d_direct_float')
ax.plot(size_np, di_double_bw, linestyle = ':', color = 'm', marker='*', linewidth = 2, label='d2d_direct_double')
ax.plot(size_np, di_float3_bw, linestyle = ':', color = 'k', marker='h', linewidth = 2, label='d2d_direct_float3')
ax.plot(size_np, di_float4_bw, linestyle = ':', color = 'y', marker='x', linewidth = 2, label='d2d_direct_float4')
size_num=range(len(size))
#print size_num
print size_string
box = ax.get_position()
ax.set_position([box.x0, box.y0+box.height*0.1, box.width, box.height*0.9])
#ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol = 6, fancybox = True, shadow = True, prop={'size':9}, )
#ax.legend(loc='upper center', ncol = 3, fancybox = True, shadow = True, prop={'size':9}, )
#ax.legend(loc='upper left', ncol = 1, fancybox = True, shadow = True, prop={'size':9}, )
ax.legend(loc='upper center', ncol = 4, bbox_to_anchor=(0.5,-0.1), fancybox = True, shadow = True, prop={'size':9}, )
plt.show()
fig.savefig('cudaMemcpy_vs_d2d_offset1.pdf')
|
MHendricks/Motionbuilder-Remote | iphelper.py | Python | mit | 3,612 | 0.027409 |
# A GUI interface allowing the binary illiterate to figure out the IP address the Arduino has been assigned.
import os
import re
from PySide.QtCore import QFile, QMetaObject, QSignalMapper, Slot, QRegExp
from PySide.QtGui import QDialog, QPushButton, QRegExpValidator
from PySide.QtUiTools import QUiLoader
class IPHelper(QDialog):
def __init__(self, parent=None):
super(IPHelper, self).__init__(parent)
f = QFile(os.path.join(os.path.split(__file__)[0], 'iphelper.ui'))
loadUi(f, self)
f.close()
self.ipAddress = None
# create validators
validator = QRegExpValidator(QRegExp('\d{,3}'))
self.uiFirstTetTXT.setValidator(validator)
self.uiSecondTetTXT.setValidator(validator)
self.uiThirdTetTXT.setValidator(validator)
self.uiFourthTetTXT.setValidator(validator)
# build a map of the buttons
self.buttons = [None]*16
self.signalMapper = QSignalMapper(self)
self.signalMapper.mapped.connect(self.tetMap)
for button in self.findChildren(QPushButton):
match = re.findall(r'^uiTrellis(\d{,2})BTN$', button.objectName())
if match:
i = int(match[0])
self.buttons[i] = button
if i >= 12:
self.signalMapper.setMapping(button, i)
button.clicked.connect(self.signalMapper.map)
self.tetMap(12)
@Slot()
def accept(self):
self.ipAddress = '{}.{}.{}.{}'.format(self.uiFirstTetTXT.text(), self.uiSecondTetTXT.text(), self.uiThirdTetTXT.text(), self.uiFourthTetTXT.text())
super(IPHelper, self).accept()
@Slot(int)
def tetMap(self, index):
button = self.buttons[index]
if not button.isChecked():
return
for i in range(12, 16):
b = self.buttons[i]
if b != button:
b.setChecked(False)
# update the buttons to match the current value of the text
for edit in (self.uiFirstTetTXT, self.ui
|
SecondTetTXT, self.uiThirdTetTXT, self.uiFourthTetTXT):
edit.setProperty('active', False)
if index == 12:
val = int(self.uiFourthTetTXT.text())
self.uiFourthTetTXT.setProperty('active', True)
elif index == 13:
val = int(self.uiThirdTetTXT.text())
self.uiThirdTetTXT.setProperty('active', True)
elif index == 14:
val = int(self.uiSecondTetTXT.text())
self.uiSecondTetTXT.setProperty('active', True)
|
elif index == 15:
val = int(self.uiFirstTetTXT.text())
self.uiFirstTetTXT.setProperty('active', True)
for i in range(8):
b = self.buttons[i]
b.blockSignals(True)
b.setChecked(2**i & val)
b.blockSignals(False)
# force a refresh of the styleSheet
self.setStyleSheet(self.styleSheet())
@Slot()
def buttonPressed(self):
total = 0
for i in range(8):
if self.buttons[i].isChecked():
total += 2**i
total = unicode(total)
if self.uiTrellis12BTN.isChecked():
self.uiFourthTetTXT.setText(total)
elif self.uiTrellis13BTN.isChecked():
self.uiThirdTetTXT.setText(total)
elif self.uiTrellis14BTN.isChecked():
self.uiSecondTetTXT.setText(total)
elif self.uiTrellis15BTN.isChecked():
self.uiFirstTetTXT.setText(total)
# Code to load a ui file like using PyQt4
# https://www.mail-archive.com/pyside@lists.openbossa.org/msg01401.html
class MyQUiLoader(QUiLoader):
def __init__(self, baseinstance):
super(MyQUiLoader, self).__init__()
self.baseinstance = baseinstance
def createWidget(self, className, parent=None, name=""):
widget = super(MyQUiLoader, self).createWidget(className, parent, name)
if parent is None:
return self.baseinstance
else:
setattr(self.baseinstance, name, widget)
return widget
def loadUi(uifile, baseinstance=None):
loader = MyQUiLoader(baseinstance)
ui = loader.load(uifile)
QMetaObject.connectSlotsByName(ui)
return ui
|
Pakoach/Sick-Beard | sickbeard/databases/mainDB.py | Python | gpl-3.0 | 33,825 | 0.005026 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import os.path
import datetime
from sickbeard import db, common, helpers, logger
from sickbeard.providers.generic import GenericProvider
from sickbeard import encodingKludge as ek
from sickbeard.name_parser.parser import NameParser, InvalidNameException
MAX_DB_VERSION = 19
class MainSanityCheck(db.DBSanityCheck):
def check(self):
self.fix_duplicate_shows()
self.fix_duplicate_episodes()
self.fix_orphan_episodes()
def fix_duplicate_shows(self):
sqlResults = self.connection.select("SELECT show_id, tvdb_id, COUNT(tvdb_id) as count FROM tv_shows GROUP BY tvdb_id HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate show detected! tvdb_id: " + str(cur_duplicate["tvdb_id"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select("SELECT show_id, tvdb_id FROM tv_shows WHERE tvdb_id = ? LIMIT ?",
[cur_duplicate["tvdb_id"], int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(u"Deleting duplicate show with tvdb_id: " + str(cur_dupe_id["tvdb_id"]) + u" show_id: " + str(cur_dupe_id["show_id"]))
|
self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id["show_id"]])
else:
logger.log(u"No duplicate show, check passed")
def fix_duplicate_episodes(self):
sqlResults = self.connection.select("SELECT showid, season, episode, COUNT(showid) as count FROM tv_episodes GROUP BY showid
|
, season, episode HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate episode detected! showid: " + str(cur_duplicate["showid"]) + u" season: " + str(cur_duplicate["season"]) + u" episode: " + str(cur_duplicate["episode"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select("SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? and episode = ? ORDER BY episode_id DESC LIMIT ?",
[cur_duplicate["showid"], cur_duplicate["season"], cur_duplicate["episode"], int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(u"Deleting duplicate episode with episode_id: " + str(cur_dupe_id["episode_id"]))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_dupe_id["episode_id"]])
else:
logger.log(u"No duplicate episode, check passed")
def fix_orphan_episodes(self):
sqlResults = self.connection.select("SELECT episode_id, showid, tv_shows.tvdb_id FROM tv_episodes LEFT JOIN tv_shows ON tv_episodes.showid=tv_shows.tvdb_id WHERE tv_shows.tvdb_id is NULL")
for cur_orphan in sqlResults:
logger.log(u"Orphan episode detected! episode_id: " + str(cur_orphan["episode_id"]) + " showid: " + str(cur_orphan["showid"]), logger.DEBUG)
logger.log(u"Deleting orphan episode with episode_id: " + str(cur_orphan["episode_id"]))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_orphan["episode_id"]])
else:
logger.log(u"No orphan episode, check passed")
def backupDatabase(version):
helpers.backupVersionedFile(db.dbFilename(), version)
# ======================
# = Main DB Migrations =
# ======================
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema (db.SchemaUpgrade):
def test(self):
return self.hasTable("tv_shows")
def execute(self):
queries = [
"CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, seasonfolders NUMERIC, paused NUMERIC, startyear NUMERIC);",
"CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, tvdbid NUMERIC, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT);",
"CREATE TABLE info (last_backlog NUMERIC, last_tvdb NUMERIC);",
"CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider NUMERIC);",
"CREATE TABLE episode_links (episode_id INTEGER, link TEXT);",
"CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC);",
"CREATE TABLE processed_files (episode_id INTEGER, filename TEXT, md5 TEXT);",
"CREATE TABLE frenchtorrentdb_history (date TEXT, link TEXT);"
]
for query in queries:
self.connection.action(query)
class AddTvrId (InitialSchema):
def test(self):
return self.hasColumn("tv_shows", "tvr_id")
def execute(self):
self.addColumn("tv_shows", "tvr_id")
class AddTvrName (AddTvrId):
def test(self):
return self.hasColumn("tv_shows", "tvr_name")
def execute(self):
self.addColumn("tv_shows", "tvr_name", "TEXT", "")
class AddImdbId (InitialSchema):
def test(self):
return self.hasColumn("tv_shows", "imdb_id")
def execute(self):
self.addColumn("tv_shows", "imdb_id", "TEXT", "")
class AddAirdateIndex (AddTvrName):
def test(self):
return self.hasTable("idx_tv_episodes_showid_airdate")
def execute(self):
self.connection.action("CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid,airdate);")
class NumericProviders (AddAirdateIndex):
def test(self):
return self.connection.tableInfo("history")['provider']['type'] == 'TEXT'
histMap = {-1: 'unknown',
1: 'newzbin',
2: 'tvbinz',
3: 'nzbs',
4: 'eztv',
5: 'nzbmatrix',
6: 'tvnzb',
7: 'ezrss',
8: 'thepiratebay',
9: 'kat'}
def execute(self):
self.connection.action("ALTER TABLE history RENAME TO history_old;")
self.connection.action("CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT);")
for x in self.histMap.keys():
self.upgradeHistory(x, self.histMap[x])
def upgradeHistory(self, number, name):
oldHistory = self.connection.action("SELECT * FROM history_old").fetchall()
for curResult in oldHistory:
sql = "INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)"
provider = 'unknown'
try:
provider = self.histMap[int(curResult["provider"])]
except ValueError:
provider = curResult["provider"]
args = [curResult["action"], curResult["date"], curResult["showid"], curResult["season"], curResult["episode"], curResult["quality"], curResult["resource"], pro
|
shashankrajput/seq2seq | seq2seq/test/attention_test.py | Python | apache-2.0 | 3,532 | 0.005663 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for attention functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.decoders.attention import AttentionLayerDot
from seq2seq.decoders.attention import AttentionLayerBahdanau
class AttentionLayerTest(tf.test.TestCase):
"""
Tests the AttentionLayer mod
|
ule.
"""
def setUp(self):
super(AttentionLayerTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_siz
|
e = 8
self.attention_dim = 128
self.input_dim = 16
self.seq_len = 10
self.state_dim = 32
def _create_layer(self):
"""Creates the attention layer. Should be implemented by child classes"""
raise NotImplementedError
def _test_layer(self):
"""Tests Attention layer with a given score type"""
inputs_pl = tf.placeholder(tf.float32, (None, None, self.input_dim))
inputs_length_pl = tf.placeholder(tf.int32, [None])
state_pl = tf.placeholder(tf.float32, (None, self.state_dim))
attention_fn = self._create_layer()
scores, context = attention_fn(
query=state_pl,
keys=inputs_pl,
values=inputs_pl,
values_length=inputs_length_pl)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
feed_dict[inputs_pl] = np.random.randn(self.batch_size, self.seq_len,
self.input_dim)
feed_dict[state_pl] = np.random.randn(self.batch_size, self.state_dim)
feed_dict[inputs_length_pl] = np.arange(self.batch_size) + 1
scores_, context_ = sess.run([scores, context], feed_dict)
np.testing.assert_array_equal(scores_.shape,
[self.batch_size, self.seq_len])
np.testing.assert_array_equal(context_.shape,
[self.batch_size, self.input_dim])
for idx, batch in enumerate(scores_, 1):
# All scores that are padded should be zero
np.testing.assert_array_equal(batch[idx:], np.zeros_like(batch[idx:]))
# Scores should sum to 1
scores_sum = np.sum(scores_, axis=1)
np.testing.assert_array_almost_equal(scores_sum, np.ones([self.batch_size]))
class AttentionLayerDotTest(AttentionLayerTest):
"""Tests the AttentionLayerDot class"""
def _create_layer(self):
return AttentionLayerDot(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
class AttentionLayerBahdanauTest(AttentionLayerTest):
"""Tests the AttentionLayerBahdanau class"""
def _create_layer(self):
return AttentionLayerBahdanau(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
if __name__ == "__main__":
tf.test.main()
|
mattgd/UnitConverter | units/__init__.py | Python | mit | 1,001 | 0 |
# -*- coding: utf-8 -*-
from converters.circle import circle
from converters.currency import currency
from converters.electric import electric
from converters.force import force
from converters.pressure import pressure
from converters.speed import speed
from converters.temperature import temperature
class UnitsManager(object):
'''
Class responsible for managing the unit converters
of this application.
'''
_units = [
circle,
currency,
electric,
force,
pressure,
speed,
temperature,
]
def __iter__(self):
return (x for x in self._units)
def regis
|
ter(self, converter):
"""
Met
|
hod that receives a new converter and adds it to
this manager.
Useful to add custom new methods without needing to edit
the core of this application.
"""
if converter is not None and callable(converter):
self._units.append(converter)
UNITS = UnitsManager()
|
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/testing/simple_mock.py | Python | bsd-3-clause | 3,810 | 0.013386 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A very very simple mock object harness."""
from types import ModuleType
DONT_CARE = ''
class MockFunctionCall(object):
def __init__(self, name):
self.name = name
self.args = tuple()
self.return_value = None
self.when_called_handlers = []
def WithArgs(self, *args):
self.args = args
return self
def WillReturn(self, value):
self.return_value = value
return self
def WhenCalled(self, handler):
self.when_called_handlers.append(handler)
def VerifyEquals(self, got):
if self.name != got.name:
raise Exception('Self %s, got %s' % (repr(self), repr(got)))
if len(self.args) != len(got.args):
raise Exception('Self %s, got %s' % (repr(self), repr(got)))
for i in range(len(self.args)):
self_a = self.args[i]
got_a = got.args[i]
if self_a == DONT_CARE:
continue
if self_a != got_a:
raise Exception('Self %s, got %s' % (repr(self), repr(got)))
def __repr__(self):
def arg_to_text(a):
if a == DONT_CARE:
return '_'
return repr(a)
args_text = ', '.join([arg_to_text(a) for a in self.args])
if self.return_value in (None, DONT_CARE):
return '%s(%s)' % (self.name, args_text)
return '%s(%s)->%s' % (self.name, args_text, repr(self.return_value))
class MockTrace(object):
def __init__(self):
self.expected_calls = []
self.next_call_index = 0
class MockObject(object):
def __init__(self, parent_mock=None):
if parent_mock:
self._trace = parent_mock._trace # pylint: disable=protected-access
else:
self._trace = MockTrace()
def __setattr__(self, name, value):
if (not hasattr(self, '_trace') or
hasattr(value, 'is_hook')):
object.__setattr__(self, name, value)
return
assert isinstance(value, MockObject)
object.__setattr__(self, name, value)
def SetAttribute(self, name, value):
setattr(self, name, value)
def ExpectCall(self, func_name, *args):
assert self._trace.next_call_index == 0
if not hasattr(self, func_name):
self._install_hook(func_name)
call = MockFunctionCall(func_name)
self._trace.expected_calls.append(ca
|
ll)
call.WithArgs(*args)
return call
def _install_hook(self, func_name):
def handler(*args, **_):
got_call = MockFunctionCall(
func_name).WithArgs(*args).WillReturn(DONT_CARE)
if self._trace.next_call_index >= len(self._trace.expected_calls):
raise Exception(
'Call to %s was not expected, at end of programmed trace.' %
repr(got_call))
expected_call = self._trace.expected_
|
calls[
self._trace.next_call_index]
expected_call.VerifyEquals(got_call)
self._trace.next_call_index += 1
for h in expected_call.when_called_handlers:
h(*args)
return expected_call.return_value
handler.is_hook = True
setattr(self, func_name, handler)
class MockTimer(object):
""" A mock timer to fake out the timing for a module.
Args:
module: module to fake out the time
"""
def __init__(self, module=None):
self._elapsed_time = 0
self._module = module
self._actual_time = None
if module:
assert isinstance(module, ModuleType)
self._actual_time = module.time
self._module.time = self
def sleep(self, time):
self._elapsed_time += time
def time(self):
return self._elapsed_time
def SetTime(self, time):
self._elapsed_time = time
def __del__(self):
self.Restore()
def Restore(self):
if self._module:
self._module.time = self._actual_time
self._module = None
self._actual_time = None
|
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/corpus/reader/chunked.py | Python | gpl-2.0 | 8,206 | 0.000731 |
# Natural Language Toolkit: Chunked Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A reader for corpora that contain chunked (and optionally tagged)
documents.
"""
import os.path, codecs
import nltk
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk import compat
from nltk.tree import Tree
from nltk.tokenize import *
from nltk.chunk import tagstr2tree
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class ChunkedCorpusReader(CorpusReader):
"""
Reader for chunked (and optionally tagged) corpora. Paragraphs
are split using a block reader. They are then tokenized into
sentences using a sentence tokenizer. Finally, these sentences
are parsed into chunk trees using a string-to-chunktree conversion
function. Each of these steps can be performed using a default
function or a custom function. By default, paragraphs are split
on blank lines; sentences are listed one per line; and sentences
are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
"""
def __init__(self, root, fileids, extension='',
str2chunktree=tagstr2tree,
sent_tokenizer=RegexpTokenizer('\n', gaps=True),
para_block_reader=read_blankline_block,
encoding='utf8'):
"""
:param root: The root directory for this corpus.
:param fileids: A list or regexp specifying the fileids in this corpus.
"""
CorpusReader.__init__(self, root, fileids, encoding)
self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader)
"""Arguments for corpus views generated by this corpus: a tuple
(str2chunktree, sent_tokenizer, para_block_tokenizer)"""
def raw(self, fileids=None):
"""
:return: the given file(s) as a single string.
:rtype: str
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def words(self, fileids=None):
"""
:return: the given file(s) as a list of words
and punctuation symbols.
:rtype: list(str)
"""
return concat([ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences or utterances, each encoded as a list of word
strings.
:rtype: list(list(str))
"""
return concat([ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of word strings.
:rtype: list(list(list(str)))
"""
return concat([ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_words(self, fileids=None):
"""
:return: the given file(s) as a list of tagged
words and punctuation symbols, encoded as tuples
``(word,tag)``.
:rtype: list(tuple(str,str))
"""
        return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences, each encoded as a list of ``(word,tag)`` tuples.
:rtype: list(list(tuple(str,str)))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of ``(word,tag)`` tuples.
:rtype: list(list(list(tuple(str,str))))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_words(self, fileids=None):
"""
:return: the given file(s) as a list of tagged
words and chunks. Words are encoded as ``(word, tag)``
tuples (if the corpus has tags) or word strings (if the
corpus has no tags). Chunks are encoded as depth-one
trees over ``(word,tag)`` tuples or word strings.
:rtype: list(tuple(str,str) and Tree)
"""
return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences, each encoded as a shallow Tree. The leaves
of these trees are encoded as ``(word, tag)`` tuples (if
the corpus has tags) or word strings (if the corpus has no
tags).
:rtype: list(Tree)
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as a shallow Tree. The leaves of these
trees are encoded as ``(word, tag)`` tuples (if the corpus
has tags) or word strings (if the corpus has no tags).
:rtype: list(list(Tree))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def _read_block(self, stream):
return [tagstr2tree(t) for t in read_blankline_block(stream)]
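# --- Illustrative usage sketch (added for clarity; not part of the original
# NLTK module). The corpus root and fileid pattern below are hypothetical; the
# default readers expect blank-line-separated paragraphs with one tagged,
# chunked sentence per line.
def _demo_chunked_corpus_reader(root='/path/to/corpus', fileids=r'.*\.chunk'):
    reader = ChunkedCorpusReader(root, fileids)
    print(reader.words()[:10])        # flat list of word tokens
    print(reader.tagged_sents()[0])   # one sentence as (word, tag) tuples
    print(reader.chunked_sents()[0])  # one sentence as a shallow chunk Tree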
class ChunkedCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding, tagged, group_by_sent,
group_by_para, chunked, str2chunktree, sent_tokenizer,
para_block_reader):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
self._tagged = tagged
self._group_by_sent = group_by_sent
self._group_by_para = group_by_para
self._chunked = chunked
self._str2chunktree = str2chunktree
self._sent_tokenizer = sent_tokenizer
self._para_block_reader = para_block_reader
def read_block(self, stream):
block = []
for para_str in self._para_block_reader(stream):
para = []
for sent_str in self._sent_tokenizer.tokenize(para_str):
sent = self._str2chunktree(sent_str)
# If requested, throw away the tags.
if not self._tagged:
sent = self._untag(sent)
# If requested, throw away the chunks.
if not self._chunked:
sent = sent.leaves()
# Add the sentence to `para`.
if self._group_by_sent:
para.append(sent)
else:
para.extend(sent)
# Add the paragraph to `block`.
if self._group_by_para:
block.append(para)
else:
block.extend(para)
# Return the block
return block
def _untag(self, tree):
for i, child in enumerate(tree):
if isinstance(child, Tree):
self._untag(child)
elif isinstance(child, tuple):
tree[i] = child[0]
else:
                raise ValueError('expected child to be Tree or tuple')
|
consbio/parserutils
|
parserutils/dates.py
|
Python
|
bsd-3-clause
| 989 | 0 |
import datetime
from dateutil import parser
from .numbers import is_number
from .strings import STRING_TYPES
DATE_TYPES = (datetime.date, datetime.datetime)
def parse_dates(d, default='today'):
""" Parses one or more dates from d """
if default == 'today':
default = datetime.datetime.today()
if d is None:
return default
elif isinstance(d, DATE_TYPES):
return d
elif is_number(d):
# Treat as milliseconds since 1970
d = d if isinstance(d, float) else float(d)
return datetime.datetime.utcfromtimestamp(d)
elif not isinstance(d, STRING_TYPES):
if hasattr(d, '__iter__'):
return [parse_dates(s, default) for s in d]
else:
            return default
elif len(d) == 0:
# Behaves like dateutil.parser < version 2.5
return default
else:
try:
return parser.parse(d)
except (AttributeError, ValueError):
return default
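# --- Illustrative usage (added for clarity; not part of the original module),
# covering the branches above: date objects pass through, numbers go straight
# to datetime.utcfromtimestamp, strings are parsed with dateutil, and
# unparsable input falls back to the default.
if __name__ == '__main__':
    print(parse_dates(datetime.datetime(2020, 1, 1)))  # returned unchanged
    print(parse_dates(0))                               # 1970-01-01 00:00:00
    print(parse_dates('2020-01-01'))                    # dateutil string parse
    print(parse_dates(['2020-01-01', 'not a date']))    # bad entry -> default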
|
opendaylight/faas
|
demos/env_mininet/lsw1Demo.py
|
Python
|
epl-1.0
| 6,912 | 0.012297 |
#!/usr/bin/python
import argparse
import requests,json
from requests.auth import HTTPBasicAuth
from subprocess import call
import time
import sys
import os
from vas_config_sw1 import *
DEFAULT_PORT='8181'
USERNAME='admin'
PASSWORD='admin'
OPER_OVSDB_TOPO='/restconf/operational/network-topology:network-topology/topology/ovsdb:1'
def get(host, port, uri):
url = 'http://' + host + ":" + port + uri
#print url
r = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))
jsondata=json.loads(r.text)
return jsondata
def put(host, port, uri, data, debug=False):
'''Perform a PUT rest operation, using the URL and data provided'''
url='http://'+host+":"+port+uri
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
if debug == True:
print "PUT %s" % url
print json.dumps(data, indent=4, sort_keys=True)
r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
if debug == True:
print r.text
r.raise_for_status()
def post(host, port, uri, data, debug=False):
'''Perform a POST rest operation, using the URL and data provided'''
url='http://'+host+":"+port+uri
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
if debug == True:
print "POST %s" % url
print json.dumps(data, indent=4, sort_keys=True)
    r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
if debug == True:
print r.text
r.raise_for_status()
# Main definition - constants
# =======================
# MENUS FUNCTIONS
# =======================
# Main menu
# =======================
# MAIN PROGRAM
# =======================
# Main Program
NODE_ID_OVSDB = ''
SUBNET_2_LSW = {"10.0.35.1":"vswitch-1", "10.0.36.1":"vswitch-1"}
PORTIDX_OF_LSW = {"vswitch-1":1, "vswitch-2":1}
def rpc_create_logic_switch_uri():
return "/restconf/operations/fabric-service:create-logical-switch"
def rpc_create_logic_switch_data(name):
return {
"input" : {
"fabric-id": "fabric:1",
"name":name
}
}
def rpc_create_logic_router_uri():
return "/restconf/operations/fabric-service:create-logical-router"
def rpc_create_logic_router_data(name):
return {
"input" : {
"fabric-id": "fabric:1",
"name":name
}
}
def rpc_create_logic_port_uri():
return "/restconf/operations/fabric-service:create-logical-port"
def rpc_create_logic_port_data(deviceName, portName):
return {
"input" : {
"fabric-id": "fabric:1",
"name":portName,
"logical-device":deviceName
}
}
def rpc_register_endpoint_uri():
return "/restconf/operations/fabric-endpoint:register-endpoint"
BRIDGE_REF_P="/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='%s']"
TP_REF_P="/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='%s']/network-topology:termination-point[network-topology:tp-id='%s']"
def rpc_register_endpoint_data(host, nodeid):
mac = host["mac"]
ip = host["ip"].split("/")[0]
gw = host["gw"]
lsw = SUBNET_2_LSW[gw]
lport = lsw + "-p-" + str(PORTIDX_OF_LSW[lsw])
PORTIDX_OF_LSW[lsw] += 1
#physical location
bridge = host["switch"]
port = host["switch"] + "-eth" + str(host["ofport"])
noderef = BRIDGE_REF_P % (nodeid)
tpref = TP_REF_P % (nodeid, port)
return {
"input" : {
"fabric-id":"fabric:1",
"mac-address":mac,
"ip-address":ip,
"gateway":gw,
"logical-location" : {
"node-id": lsw,
"tp-id": lport
},
"location" : {
"node-ref": noderef,
"tp-ref": tpref,
"access-type":"vlan",
"access-segment":"111"
}
}
}
def rpc_create_gateway_uri():
return "/restconf/operations/fabric-service:create-gateway"
def rpc_create_gateway_data(ipaddr, network, switchName):
return {
"input" : {
"fabric-id": "fabric:1",
"ip-address":ipaddr,
"network":network,
"logical-router":"vrouter-1",
"logical-switch":switchName
}
}
def pause():
print "press Enter key to continue..."
raw_input()
if __name__ == "__main__":
# Launch main menu
# Some sensible defaults
controller = os.environ.get('ODL')
if controller == None:
sys.exit("No controller set.")
print "get ovsdb node-id"
ovsdb_topo = get(controller, DEFAULT_PORT,OPER_OVSDB_TOPO)["topology"]
for topo_item in ovsdb_topo:
if topo_item["node"] is not None:
for ovsdb_node in topo_item["node"]:
#if ovsdb_node.has_key("ovsdb:ovs-version"):
if ovsdb_node.has_key("ovsdb:bridge-name") and ovsdb_node["ovsdb:bridge-name"] == "sw1":
#uuid_ovsdb = ovsdb_node["node-id"][13:]
#NODE_ID_OVSDB = ovsdb_node["node-id"]
node_sw1 = ovsdb_node["node-id"]
print "sw1=", node_sw1
if ovsdb_node.has_key("ovsdb:bridge-name") and ovsdb_node["ovsdb:bridge-name"] == "sw2":
node_sw2 = ovsdb_node["node-id"]
print "sw2=", node_sw2
print "create_logic_switch ..."
pause()
post(controller, DEFAULT_PORT, rpc_create_logic_switch_uri(), rpc_create_logic_switch_data("vswitch-1"), True)
print "create_logic_port ..."
pause()
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-1"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-2"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-3"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-4"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-5"), True)
print "registering endpoints ..."
pause()
for host in hosts:
if host["switch"] == "sw1":
post(controller, DEFAULT_PORT, rpc_register_endpoint_uri(), rpc_register_endpoint_data(host, node_sw1), True)
if host["switch"] == "sw2":
post(controller, DEFAULT_PORT, rpc_register_endpoint_uri(), rpc_register_endpoint_data(host, node_sw2), True)
|
mjwestcott/PyPokertools
|
tests/test_isomorph.py
|
Python
|
mit
| 533 | 0.003752 |
from examples.isomorph import (
get_all_canonicals,
get_canonical,
get_translation_dict,
)
from pokertools import cards_from_str as flop
def test_isomorph():
assert len(get_all_canonicals()) == 1755
    assert get_canonical(flop('6s 8d 7c')) == flop('6c 7d 8h')
assert get_translation_dict(flop('6s 8d 7c')) == {'c': 'd', 'd': 'h', 'h': 's', 's': 'c'}
    assert get_canonical(flop('Qs Qd 4d')) == flop('4c Qc Qd')
assert get_translation_dict(flop('Qs Qd 4d')) == {'c': 'h', 'd': 'c', 'h': 's', 's': 'd'}
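# Added note (not in the original test): get_canonical maps a flop to a single
# representative of its suit-isomorphism class, so the 22100 possible flops
# collapse to the 1755 canonical ones asserted above, and get_translation_dict
# returns the suit relabelling that performs that mapping.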
|
jistr/rejviz
|
rejviz/tests/test_utils.py
|
Python
|
apache-2.0
| 1,998 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import rejviz.tests.utils as tutils
from rejviz import utils
class UtilsTest(tutils.TestCase):
def test_parse_keyvals(self):
expected = {'a': 'b', 'c': 'd'}
self.assertEqual(expected, utils.parse_keyvals("a=b,c=d"))
self.assertEqual(expected, utils.parse_keyvals("a:b/c:d", '/', ':'))
def test_extract_domain_or_image_args(self):
args1 = ['--something', '-d', 'domain', 'somethingelse']
args2 = ['-b', '--something', '-a', 'image', 'somethingelse']
args3 = ['-b', '-c', '--something']
self.assertEqual(['-d', 'domain'],
utils.extract_domain_or_image_args(args1))
self.assertEqual(['-a', 'image'],
utils.extract_domain_or_image_args(args2))
self.assertRaises(ValueError,
utils.extract_domain_or_image_args, args3)
def test_extract_image_args_from_disks(self):
args1 = ['--disk', '/path/to/image,opt1=val1,opt2=val2']
args2 = ['--disk', 'opt1=val1,path=/path/to/image,opt2=val2']
args3 = ['-b', '-c', '--something']
self.assertEqual(['-a', '/path/to/image'],
utils.extract_image_args_from_disks(args1))
self.assertEqual(['-a', '/path/to/image'],
utils.extract_image_args_from_disks(args2))
self.assertRaises(ValueError,
utils.extract_domain_or_image_args, args3)
|
jnewland/home-assistant
|
homeassistant/components/homematicip_cloud/weather.py
|
Python
|
apache-2.0
| 2,991 | 0 |
"""Support for HomematicIP Cloud weather devices."""
import logging
from homematicip.aio.device import (
AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
from homematicip.aio.home import AsyncHome
from homeassistant.components.weather import WeatherEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the HomematicIP Cloud weather sensor."""
pass
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
async_add_entities) -> None:
"""Set up the HomematicIP weather sensor from a config entry."""
home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
devices = []
for device in home.devices:
if isinstance(device, AsyncWeatherSensorPro):
devices.append(HomematicipWeatherSensorPro(home, device))
elif isinstance(device, (AsyncWeatherSensor, AsyncWeatherSensorPlus)):
devices.append(HomematicipWeatherSensor(home, device))
if devices:
async_add_entities(devices)
class HomematicipWeatherSensor(HomematicipGenericDevice, WeatherEntity):
"""representation of a Ho
|
mematicIP Cloud weather sensor plus & basic."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the weather sensor."""
super().__init__(home, device)
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._device.label
@property
    def temperature(self) -> float:
"""Return the platform temperature."""
return self._device.actualTemperature
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self) -> int:
"""Return the humidity."""
return self._device.humidity
@property
def wind_speed(self) -> float:
"""Return the wind speed."""
return self._device.windSpeed
@property
def attribution(self) -> str:
"""Return the attribution."""
return "Powered by Homematic IP"
@property
def condition(self) -> str:
"""Return the current condition."""
if hasattr(self._device, "raining") and self._device.raining:
return 'rainy'
if self._device.storm:
return 'windy'
if self._device.sunshine:
return 'sunny'
return ''
class HomematicipWeatherSensorPro(HomematicipWeatherSensor):
"""representation of a HomematicIP weather sensor pro."""
@property
def wind_bearing(self) -> float:
"""Return the wind bearing."""
return self._device.windDirection
|
SanketDG/coala-bears
|
tests/yml/RAMLLintBearTest.py
|
Python
|
agpl-3.0
| 602 | 0 |
from bears.yml.RAMLLintBear import RAMLLintBear
from tests.LocalBearTestHelper import verify_local_bear
good_file = """
#%RAML 0.8
title: World Music API
baseUri: http://example.api.com/{version}
version: v1
"""
bad_file = """#%RAML 0.8
title: Failing RAML
version: 1
baseUri: http://example.com
/resource:
description: hello
post:
"""
RAMLLintBearTest = verify_local_bear(RAMLLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,),
tempfile_kwargs={"suffix": ".raml"})
|
LuoZijun/uOffice
|
temp/ooxmlx/samples/01_basic/parse.py
|
Python
|
gpl-3.0
| 697 | 0.001435 |
import sys
import six
import logging
import ooxml
from ooxml import parse, serialize, importer
logging.basicConfig(filename='ooxml.log', level=logging.INFO)
if len(sys.argv) > 1:
file_name = sys.argv[1]
dfile = ooxml.read_from_file(file_name)
six.print_("\n-[HTML]-----------------------------\n")
six.print_(serialize.serialize(dfile.document))
six.print_("\n-[CSS STYLE]------------------------\n")
six.print_(serialize.serialize_styles(dfile.document))
six.print_("\n-[USED STYLES]----------------------\n")
six.print_(dfile.document.used_styles)
six.print_("\n-[USED FONT SIZES]------------------\n")
six.print_(dfile.document.used_font_size)
|
Sybrand/digital-panda
|
panda-tray/download.py
|
Python
|
mit
| 12,881 | 0.000854 |
from bucket.local import LocalProvider
import config
import statestore
import logging
import os
import threading
import traceback
import messages
from send2trash import send2trash
from worker import BaseWorker
class Download(BaseWorker):
def __init__(self, objectStore, outputQueue):
BaseWorker.__init__(self)
self.objectStore = objectStore
self.outputQueue = outputQueue
self.localStore = LocalProvider()
c = config.Config()
self.localSyncPath = c.get_home_folder()
self.tempDownloadFolder = c.get_temporary_folder()
self.state = statestore.StateStore(c.username)
self.lock = threading.Lock()
self.running = True
self.trashFolder = c.get_trash_folder()
def stop(self):
logging.info('Download::stop')
self.objectStore.stop()
self.running = False
def _get_working_message(self):
return messages.Status('Looking for files to download')
def perform(self):
# get the current directory
#logging.debug('Download::perform')
self.outputQueue.put(self._get_working_message())
files = self.objectStore.list_dir(None)
for f in files:
if not self.running:
break
#logging.debug('f.path = %r' % f.path)
if f.isFolder:
if f.name == self.trashFolder:
# we don't download the trash folder
continue
else:
skipChildren = self.download_folder(f)
# if we deleted a bunch of stuff - it might
# mean our files list is out of wack
# so lets rather just break out - and restart
# next time round
if skipChildren:
logging.info('break')
break
else:
self.download_file(f)
self.outputQueue.put(messages.Status('Local files up to date'))
def download_file(self, f):
localPath = self.get_local_path(f.path)
if not os.path.exists(localPath):
self._set_hadWorkToDo(True)
#logging.debug('does not exist: %s' % localPath)
if self.already_synced_file(f.path):
# if we've already downloaded this file,
# it means we have to delete it remotely!
logging.info('delete remote version of %s' % localPath)
self.delete_remote_file(f.path)
else:
# lets get the file
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name
# exists, delete it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path, f.hash, localMD)
self.outputQueue.put(self._get_working_message())
else:
# the file already exists - do we overwrite it?
syncInfo = self.state.getObjectSyncInfo(f.path)
if syncInfo:
localMD = self.localStore.get_last_modified_date(localPath)
if syncInfo.dateModified != localMD:
# the dates differ! we need to calculate the hash!
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash != f.hash:
# hmm - ok, if the online one, has the same hash
# as I synced, then it means the local file
# has changed!
if syncInfo.hash == f.hash:
# online and synced have the same version!
# that means the local one has changed
# so we're not downloading anything
# the upload process should handle this
pass
else:
logging.warn('TODO: the files differ - which '
'one do I use?')
else:
# all good - the files are the same
# we can update our local sync info
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# dates are the same, so we can assume the hash
# hasn't changed
if syncInfo.hash != f.hash:
# if the sync info is the same as the local file
# then it must mean the remote file has changed!
get_file_info = self.localStore.get_file_info
localFileInfo = get_file_info(localPath)
if localFileInfo.hash == syncInfo.hash:
self.replace_file(f, localPath)
else:
logging.info('remote hash: %r' % f.hash)
logging.info('local hash: %r' % localFileInfo.hash)
logging.info('sync hash: %r' % syncInfo.hash)
logging.warn('sync hash differs from local hash!')
else:
# sync hash is same as remote hash, and the file date
# hasn't changed. we assume this to mean, there have
# been no changes
pass
else:
# TODO: we need to do something here!
# the file exists locally, and remotely - but we don't have any
# record of having downloaded it
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash == f.hash:
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
                                              localFileInfo.hash,
                                              localMD)
else:
# we don't have any history of this file - and the hash
# from local differs from remote! WHAT DO WE DO!
logging.error('TODO: HASH differs! Which is which????: %r'
% f.path)
pass
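    # Summary of download_file's decision logic above (comment added for
    # clarity; not in the original source):
    #   * missing locally, never synced       -> download the remote object
    #   * missing locally, previously synced  -> deleted here, so delete remotely
    #   * local hash == synced hash != remote -> remote changed, replace_file()
    #   * local hash != synced hash == remote -> local edit, leave for upload
    #   * no sync record but hashes match     -> just record it as synced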
def replace_file(self, f, localPath):
self._set_hadWorkToDo(True)
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name exists, remove it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
send2trash(localPath)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
f.hash,
localMD)
self.outputQueue.put(self._get_working_message())
def get_tmp_filename(self):
return os.path.join(self.tempDownloadFolder, 'tmpfile')
def download_folder(self, folder):
if not self.running:
# return true, to indicate that children can be skipped
return True
# does the folder exist locally?
#logging.debug('download_folder(%s)' % folder.path)
localPath = self.get_local_path(folder.path)
downloadFolderContents = True
skipChildren = False
if not os.path.exists(localPath):
            self._set_hadWorkToDo(True)
|
nccgroup/lapith
|
controller/viewer_controller.py
|
Python
|
agpl-3.0
| 22,502 | 0.004 |
# Nessus results viewing tools
#
# Developed by Felix Ingram, f.ingram@gmail.com, @lllamaboy
# http://www.github.com/nccgroup/lapith
#
# Released under AGPL. See LICENSE for more information
import wx
import os
from model.Nessus import NessusFile, NessusTreeItem, MergedNessusReport, NessusReport, NessusItem
import difflib
from drop_target import MyFileDropTarget
from view import (
ViewerView,
SaveDialog,
ID_Load_Files,
ID_Merge_Files,
ID_Generate_CSV,
ID_Generate_VulnXML,
ID_Generate_RST,
ID_About,
)
from wx.lib.wordwrap import wordwrap
import csv
from xml.sax.saxutils import escape
from datetime import datetime
from jinja2 import Template
SEVERITY = {0:"Other", 1:"Low", 2:"Med", 3:"High", 4:"Critical"}
OUTPUT_TEMPLATE=Template("""\
{{item.name}}
{{hosts_count}} hosts with this issue
{% for host in hosts %}
{{host}}{% endfor %}
---------------------------------------------
{% for host in identical_hosts %}
{{host}}{% endfor %}
{{ initial_output }}
""")
RST_TEMPLATE=Template("""\
{%- for vuln in vulns %}{% if not vuln.name.startswith("PORT:") %}{{ vuln.name }}
{% for a in vuln.name %}={% endfor %}
.. affectedhosts::{% for host in merged_scans.hosts_with_pid(vuln.pid) %}{% for item in host.items_for_pid(vuln.pid) %}
{{ host.address }}, {{ item.info_dict.port }}/{{ item.info_dict.protocol }}
{%- endfor %}{%- endfor %}
:severity:`{{ vuln.item.info_dict["severity_text"] }}`
:cvss:`{{ vuln.item.info_dict["cvss_base_score"] }}`
:cvss:`{{ vuln.item.info_dict["cvss_vector"] }}`
Description
-----------
{{ "\n".join(vuln.iss
|
ue.initial_output.splitlines()[7:])|replace("Plugin Output:", "Plugin Output::\n") }}
{% endif %}
Recommendation
--------------
References
----------
{% if vuln.item.info_dict["cve"] %}
CVE:
{% for cve in vuln.item.info_dict["cve"] %}
{{ cve }}: `http://web.nvd.nist.gov/view/vuln/detail?vulnId={{ cve }}`
{%- endfor %}
{%- endif %}
{% if vuln.item.info_dict["bid"] %}
BID:
{% for bid in vuln.item.info_dict["bid"] %}
{{ bid }}: `http://www.securityfocus.com/bid/{{ bid }}`
{%- endfor %}
{%- endif %}
{% if vuln.item.info_dict["xref"] %}
Other References:
{% for xref in vuln.item.info_dict["xref"] %}
{{ xref }}
{%- endfor %}
{%- endif %}
{% if vuln.item.info_dict["see_also"] %}
See also:
{% for xref in vuln.item.info_dict["see_also"] %}
{{ xref }}
{%- endfor %}
{%- endif %}
{% endfor %}
""")
VULNXML_TEMPLATE=Template("""<?xml version="1.0"?>
<Results Date="{{ timestamp|e }}" Tool="Lapith">
<Hosts>{% for host in hosts %}
<Host dnsname="{{ host.dns_name|e }}" ipv6="" ipv4="{{ host.address|e }}">
<Vulns>
{% for vuln in host.items %}<Vuln TestPhase="" id="{{ vuln.pid|e }}">
<Data Type="afh:TCP Ports" encoding="">{{ vuln.info_dict.port }}/{{ vuln.info_dict.protocol }}</Data>
</Vuln>
{% endfor %}</Vulns>
</Host>
{% endfor %}</Hosts>
<Vulns>
{% for vuln in vulns %}
<Vuln group="" id="{{ vuln.pid|e }}">
<Title>{{ vuln.name|e }}</Title>
<Description encoding="">
{{ "\n".join(vuln.issue.initial_output.splitlines()[7:])|replace("Plugin Output:", "Plugin Output::\n") | e}}
------------------------
{{ vuln.diffs|e }}
</Description>
<Recommendation encoding=""></Recommendation>
<References/>
<Category/>
<Patches/>
<CVSS>
<OverallScore>{% if vuln.item.info_dict["cvss_base_score"] %}{{ vuln.item.info_dict["cvss_base_score"]|e }}{% else %}{{ vuln.severity|e }}{% endif %}</OverallScore>
<Vector>{{ vuln.item.info_dict["cvss_vector"]|replace("CVSS2#", "")|e }}</Vector>
</CVSS>
<Severity>{{ vuln.severity|e }}</Severity>
</Vuln>
{% endfor %}
</Vulns>
<Groups/>
</Results>
""")
ID_Save_Results = wx.NewId()
class ViewerController:
def __init__(self):
# def initView(self):
self.view = ViewerView()
## Instance vars
self.files = []
self.tests = []
self.tree_hooks = {}
self._search_text = ""
## Flags
self._in_search = False
## Dialog paths
self._save_path = os.getcwd()
self._open_path = os.getcwd()
self.create_tree()
drop_target = MyFileDropTarget(self.view.tree,
{
"nessus": self.drop_action,
},
self.view.display.write
)
self.view.tree.SetDropTarget(drop_target)
self.bind_events()
self.view.Layout()
self.view.Show()
#self.view.search.SetFocus()
def drop_action(self, file_):
self.files.append(NessusFile(file_))
self.create_scan_trees()
def on_do_search(self, event):
text = self.view.search.GetValue()
self.search(text)
def search(self, text):
self._in_search = True
self._search_text = text
for host in self.files:
pass
#hook = self.hooks[host.name][FILES]
#if self.view.tree.IsExpanded(hook): ## Only need to do it for expanded
#files = host.get_full_files(search=text)
#self.view.tree.DeleteChildren(hook)
#for f in files:
#item = self.view.tree.AppendItem(hook, f.name, 0)
#self.view.tree.SetPyData(item, f)
#self.view.tree.SortChildren(hook)
self.view.search.SetFocus()
self._in_search = False
def add_output_page(self, title, text, font="Courier New"):
display = self.view.CreateTextCtrl(font=font)
display.SetValue(text)
self.delete_page_with_title(title)
self.view.notebook.AddPage(display, title)
return self.view.notebook.GetPageIndex(display)
def load_files(self, event):
wildcard = "Nessus files (*.nessus)|*.nessus|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(
self.view, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
paths = dlg.GetPaths()
if paths:
for path in paths:
self.files.append(NessusFile(path))
self._open_path = paths[0].rsplit(os.sep, 1)[0]
dlg.Destroy()
self.create_scan_trees()
def delete_page_with_title(self, title):
notebook = self.view.notebook
page_count = notebook.GetPageCount()
for i in xrange(page_count):
if notebook.GetPageText(i) == title:
notebook.DeletePage(i)
def create_tree(self):
self.view.tree.DeleteAllItems()
self.view.tree.AddRoot("Scans")
self.create_scan_trees()
self.view.tree.Expand(self.view.tree.GetRootItem())
def create_scan_trees(self):
scans = self.view.tree.GetRootItem()
self.view.tree.DeleteChildren(scans)
for file_ in self.files:
self.create_scan_tree(file_, scans)
self.view.tree.Expand(scans)
def sorted_tree_items(self, report, items):
list_ = list(set([NessusTreeItem(report, i) for i in items]))
list_.sort()
return list_
def create_scan_tree(self, file_, hosts):
reports = file_.get_all_reports()
scans_hook = self.view.tree.GetRootItem()
file_hook = self.view.tree.AppendItem(scans_hook, file_.short_name, 0)
for report in reports:
scan = self.view.tree.AppendItem(file_hook, report.reportname, 0)
self.view.tree.SetPyData(scan, report)
info = self.view.tree.AppendItem(scan, "Info", 0)
self.view.tree.SetPyData(info
|
maralla/completor.vim
|
pythonx/completers/lsp/action.py
|
Python
|
mit
| 3,344 | 0.000299 |
# -*- coding: utf-8 -*-
import re
import logging
from completor.utils import check_subseq
from .utils import parse_uri
word_pat = re.compile(r'([\d\w]+)', re.U)
word_ends = re.compile(r'[\d\w]+$', re.U)
logger = logging.getLogger("completor")
# [
# [{
# u'range': {
# u'start': {u'line': 273, u'character': 5},
# u'end': {u'line': 273, u'character': 12}
# },
# u'uri': u'file:///home/linuxbrew/.linuxbrew/Cellar/go/1.12.4/libexec/src/fmt/print.go' # noqa
# }]
# ]
def gen_jump_list(ft, name, data):
res = []
if not data:
return res
items = data[0]
if items is None:
return res
for item in items:
uri = parse_uri(item['uri'])
if ft == 'go':
uri = uri.replace('%21', '!')
start = item['range']['start']
res.append({
'filename': uri,
'lnum': start['line'] + 1,
'col': start['character'] + 1,
'name': name,
})
return res
# [
# [
# {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 8, u'character': 0},
# u'end': {u'line': 9, u'character': 0}
# }
# }, {
# u'newText': u'',
#             u'range': {
# u'start': {u'line': 9, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'\tfmt.Println()\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
#                 u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'}\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }
# ]
# ]
def format_text(data):
if not data:
return
for item in data[0]:
pass
def get_completion_word(item, insert_text):
if insert_text != b'label':
try:
return item['textEdit']['newText'], \
item['textEdit']['range']['start']['character']
except KeyError:
pass
label = item['label'].strip()
match = word_pat.match(label)
return match.groups()[0] if match else '', -1
hiddenLines = ["on pkg.go.dev"]
escapes = re.compile(r'''\\([\\\x60*{}[\]()#+\-.!_>~|"$%&'\/:;<=?@^])''',
re.UNICODE)
escape_types = ['go', 'json']
def _shouldHidden(line):
for item in hiddenLines:
if item in line:
return True
return False
def gen_hover_doc(ft, value):
if ft not in escape_types:
return value
lines = []
for l in value.split("\n"):
if _shouldHidden(l):
continue
lines.append(escapes.sub(r"\1", l).replace(' ', ' '))
return "\n".join(lines)
def filter_items(items, input_data):
target = ''
match = word_ends.search(input_data)
if match:
target = match.group()
if not target:
return items
filtered = []
for item in items:
score = check_subseq(target, item[1])
if score is None:
continue
filtered.append((item, score))
filtered.sort(key=lambda x: x[1])
return [e for e, _ in filtered]
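# --- Illustrative sketch (added for clarity; not part of the original plugin):
# filter_items keeps only candidates whose match text contains the word being
# typed as a subsequence, ordered by the check_subseq score. The sample items
# below are hypothetical (data, label) pairs.
def _filter_items_sketch():
    items = [({'label': 'print'}, 'print'), ({'label': 'parse'}, 'parse')]
    # 'pri' is a subsequence of 'print' but not of 'parse'
    return filter_items(items, 'self.pri')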
|
vasily-v-ryabov/ui-automation-course
|
1_Lyalyushkin/objc_constants.py
|
Python
|
bsd-3-clause
| 588 | 0 |
from ctypes import POINTER
from ctypes import c_long
from ctypes import c_uint32
from ctypes import c_void_p
CFIndex = c_long
CFStringEncoding = c_uint32
CFString = c_void_p
CFArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFAllocatorRef = c_void_p
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFDictionaryRef = POINTER(CFDictionary)
CFErrorRef = POINTER(CFError)
CFTypeRef = POINTER(CFType)
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
kCGWindowListOptionAll = 0
kCGWindowListOptionOnScreenOnly = (1 << 0)
kCGNullWindowID = 0
|
AzamYahya/shogun
|
examples/undocumented/python_modular/multiclass_chaidtree_modular.py
|
Python
|
gpl-3.0
| 1,100 | 0.033636 |
#!/usr/bin/env python
from numpy import array, dtype, int32
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_multiclass.dat'
# set both input attributes as continuous i.e. 2
feattypes = array([2, 2],dtype=int32)
parameter_list = [[traindat,testdat,label_traindat,feattypes]]
def multiclass_chaidtree_modular(train=traindat,test=testdat,labels=label_traindat,ft=feattypes):
try:
from modshogun import RealFeatures, MulticlassLabels, CSVFile, CHAIDTree
	except ImportError:
print("Could not import Shogun modules")
return
# wrap features and labels into Shogun objects
feats_train=RealFeatures(CSVFile(train))
feats_test=RealFeatures(CSVFile(test))
train_labels=MulticlassLabels(CSVFile(labels))
	# CHAID Tree formation with nominal dependent variable
c=CHAIDTree(0,feattypes,10)
c.set_labels(train_labels)
c.train(feats_train)
# Classify test data
output=c.apply_multiclass(feats_test).get_labels()
return c,output
if __name__=='__main__':
print('CHAIDTree')
multiclass_chaidtree_modular(*parameter_list[0])
|
knuu/competitive-programming
|
atcoder/agc/agc026_b.py
|
Python
|
mit
| 517 | 0 |
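# Added note (not in the original solution): after the quick impossibility
# (A < B or C + D < B) and sufficiency (C >= B - 1) checks, the loop below
# repeatedly reduces `now` modulo B and, while the remainder is at most C,
# tops it up by D; revisiting a remainder means the process cycles forever
# ("Yes"), while a remainder above C that cannot be topped up means "No".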
for _ in range(int(input())):
A, B, C, D = map(int, input().split())
if A < B or C + D < B:
print("No")
continue
elif C >= B - 1:
print("Yes")
continue
ret = []
s_set = set()
now = A
while True:
now %= B
if now in s_set:
print("Yes", ret)
break
else:
s_set.add(now)
if now <= C:
now += D
ret.append(now)
else:
print("No", ret)
            break
|
edibledinos/pwnypack
|
tests/test_packing.py
|
Python
|
mit
| 4,642 | 0.004093 |
import pytest
import pwny
target_little_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.little)
target_big_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.big)
def test_pack():
assert pwny.pack('I', 0x41424344) == b'DCBA'
def test_pack_format_with_endian():
assert pwny.pack('>I', 0x41424344) == b'ABCD'
def test_pack_explicit_endian():
assert pwny.pack('I', 0x41424344, endian=pwny.Target.Endian.big) == b'ABCD'
def test_pack_explicit_target():
assert pwny.pack('I', 0x41424344, target=target_big_endian) == b'ABCD'
@pytest.mark.xfail(raises=NotImplementedError)
def test_pack_invalid_endian():
pwny.pack('I', 1, endian='invalid')
def test_unpack():
assert pwny.unpack('I', b'DCBA') == (0x41424344,)
def test_unpack_format_with_endian():
assert pwny.unpack('>I', b'ABCD') == (0x41424344,)
def test_unpack_explicit_endian():
assert pwny.unpack('I', b'ABCD', endian=pwny.Target.Endian.big) == (0x41424344,)
def test_unpack_explicit_target():
assert pwny.unpack('I', b'ABCD', target=target_big_endian) == (0x41424344,)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unpack_invalid_endian():
pwny.unpack('I', 'AAAA', endian='invalid')
def test_pack_size():
# This tests both pack_size in general as well as not padding the byte.
assert pwny.pack_size('bq') == 9
short_signed_data = [
[8, -0x7f, b'\x81'],
[16, -0x7fff, b'\x80\x01'],
[32, -0x7fffffff, b'\x80\x00\x00\x01'],
[64, -0x7fffffffffffffff, b'\x80\x00\x00\x00\x00\x00\x00\x01'],
]
short_unsigned_data = [
[8, 0x61, b'a'],
[16, 0x6162, b'ab'],
[32, 0x61626364, b'abcd'],
[64, 0x6162636465666768, b'abcdefgh'],
]
def test_short_form_pack():
for width, num, bytestr in short_signed_data:
f = 'p%d' % width
yield check_short_form_pack, f, num, bytestr[::-1]
yield check_short_form_pack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_pack_endian, f, num, bytestr, pwny.Target.Endian.big
for width, num, bytestr in short_unsigned_data:
f = 'P%d' % width
yield check_short_form_pack, f, num, bytestr[::-1]
yield check_short_form_pack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_pack_endian, f, num, bytestr, pwny.Target.Endian.big
def test_short_form_unpack():
for width, num, bytestr in short_signed_data:
f = 'u%d' % width
yield check_short_form_unpack, f, num, bytestr[::-1]
yield check_short_form_unpack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_unpack_endian, f, num, bytestr, pwny.Target.Endian.big
for width, num, bytestr in short_unsigned_data:
f = 'U%d' % width
yield check_short_form_unpack, f, num, bytestr[::-1]
yield check_short_form_unpack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_unpack_endian, f, num, bytestr, pwny.Target.Endian.big
def test_pointer_pack():
yield check_short_form_pack, 'p', -66052, b'\xfc\xfd\xfe\xff'
yield check_short_form_pack_endian, 'p', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_pack_endian, 'p', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
yield check_short_form_pack, 'P', 4294901244, b'\xfc\xfd\xfe\xff'
yield check_short_form_pack_endian, 'P', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_pack_endian, 'P', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
def test_pointer_unpack():
yield check_short_form_unpack, 'u', -66052, b'\xfc\xfd\xfe\xff'
yield check_short_form_unpack_endian, 'u', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_unpack_endian, 'u', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
yield check_short_form_unpack, 'U', 4294901244, b'\xfc\xfd\xfe\xff'
    yield check_short_form_unpack_endian, 'U', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
    yield check_short_form_unpack_endian, 'U', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
def check_short_form_pack(f, num, bytestr):
assert getattr(pwny, f)(num) == bytestr
def check_short_form_pack_endian(f, num, bytestr, endian):
assert getattr(pwny, f)(num, endian=endian) == bytestr
def check_short_form_unpack(f, num, bytestr):
assert getattr(pwny, f)(bytestr) == num
def check_short_form_unpack_endian(f, num, bytestr, endian):
assert getattr(pwny, f)(bytestr, endian=endian) == num
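# --- Quick reference (added for clarity; not part of the test module), based
# on the helpers exercised above: pN/PN pack signed/unsigned N-bit integers and
# uN/UN unpack them, honouring the chosen endianness (the tests assume a
# little-endian default).
def _short_form_sketch():
    assert pwny.P32(0x61626364) == b'dcba'  # little-endian byte order
    assert pwny.P32(0x61626364, endian=pwny.Target.Endian.big) == b'abcd'
    assert pwny.U32(b'abcd', endian=pwny.Target.Endian.big) == 0x61626364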
|
jgmize/kuma
|
kuma/wiki/urls.py
|
Python
|
mpl-2.0
| 5,894 | 0.00017 |
from django.conf.urls import include, url
from django.views.generic import TemplateView
from kuma.attachments.feeds import AttachmentsFeed
from kuma.attachments.views import edit_attachment
from . import feeds, views
from .constants import DOCUMENT_PATH_RE
# These patterns inherit (?P<document_path>[^\$]+).
document_patterns = [
url(r'^$',
views.document.document,
name='wiki.document'),
url(r'^\$revision/(?P<revision_id>\d+)$',
views.revision.revision,
name='wiki.revision'),
url(r'^\$history$',
views.list.revisions,
name='wiki.document_revisions'),
url(r'^\$edit$',
views.edit.edit,
name='wiki.edit'),
url(r'^\$files$',
edit_attachment,
name='attachments.edit_attachment'),
url(r'^\$edit/(?P<revision_id>\d+)$',
views.edit.edit,
name='wiki.new_revision_based_on'),
url(r'^\$compare$',
views.revision.compare,
name='wiki.compare_revisions'),
url(r'^\$children$',
views.document.children,
name='wiki.children'),
url(r'^\$translate$',
views.translate.translate,
name='wiki.translate'),
url(r'^\$locales$',
views.translate.select_locale,
name='wiki.select_locale'),
url(r'^\$json$',
views.document.as_json,
name='wiki.json_slug'),
url(r'^\$styles$',
views.document.styles,
name='wiki.styles'),
url(r'^\$toc$',
views.document.toc,
name='wiki.toc'),
url(r'^\$move$',
views.document.move,
name='wiki.move'),
url(r'^\$quick-review$',
views.revision.quick_review,
name='wiki.quick_review'),
url(r'^\$samples/(?P<sample_name>.+)/files/(?P<attachment_id>\d+)/(?P<filename>.+)$',
views.code.raw_code_sample_file,
name='wiki.raw_code_sample_file'),
url(r'^\$samples/(?P<sample_name>.+)$',
views.code.code_sample,
name='wiki.code_sample'),
url(r'^\$revert/(?P<revision_id>\d+)$',
views.delete.revert_document,
name='wiki.revert_document'),
url(r'^\$repair_breadcrumbs$',
views.document.repair_breadcrumbs,
name='wiki.repair_breadcrumbs'),
url(r'^\$delete$',
views.delete.delete_document,
name='wiki.delete_document'),
url(r'^\$restore$',
views.delete.restore_document,
name='wiki.restore_document'),
url(r'^\$purge$',
views.delete.purge_document,
name='wiki.purge_document'),
# Un/Subscribe to document edit notifications.
url(r'^\$subscribe$',
views.document.subscribe,
name='wiki.subscribe'),
# Un/Subscribe to document tree edit notifications.
url(r'^\$subscribe_to_tree$',
views.document.subscribe_to_tree,
name='wiki.subscribe_to_tree'),
]
urlpatterns = [
url(r'^/ckeditor_config.js$',
views.misc.ckeditor_config,
name='wiki.ckeditor_config'),
# internals
url(r'^.json$',
views.document.as_json,
name='wiki.json'),
url(r'^/preview-wiki-content$',
views.revision.preview,
name='wiki.preview'),
url(r'^/move-requested$',
TemplateView.as_view(template_name='wiki/move_requested.html'),
name='wiki.move_requested'),
url(r'^/get-documents$',
views.misc.autosuggest_documents,
        name='wiki.autosuggest_documents'),
url(r'^/load/$',
views.misc.load_documents,
        name='wiki.load_documents'),
# Special pages
url(r'^/templates$',
views.list.templates,
name='wiki.list_templates'),
url(r'^/tags$',
views.list.tags,
name='wiki.list_tags'),
url(r'^/tag/(?P<tag>.+)$',
views.list.documents,
name='wiki.tag'),
url(r'^/new$',
views.create.create,
name='wiki.create'),
url(r'^/all$',
views.list.documents,
name='wiki.all_documents'),
url(r'^/with-errors$',
views.list.with_errors,
name='wiki.errors'),
url(r'^/without-parent$',
views.list.without_parent,
name='wiki.without_parent'),
url(r'^/top-level$',
views.list.top_level,
name='wiki.top_level'),
url(r'^/needs-review/(?P<tag>[^/]+)$',
views.list.needs_review,
name='wiki.list_review_tag'),
url(r'^/needs-review/?',
views.list.needs_review,
name='wiki.list_review'),
url(r'^/localization-tag/(?P<tag>[^/]+)$',
views.list.with_localization_tag,
name='wiki.list_with_localization_tag'),
url(r'^/localization-tag/?',
views.list.with_localization_tag,
name='wiki.list_with_localization_tags'),
# Akismet Revision
url(r'^/submit_akismet_spam$',
views.akismet_revision.submit_akismet_spam,
name='wiki.submit_akismet_spam'),
# Feeds
url(r'^/feeds/(?P<format>[^/]+)/all/?',
feeds.DocumentsRecentFeed(),
name="wiki.feeds.recent_documents"),
url(r'^/feeds/(?P<format>[^/]+)/l10n-updates/?',
feeds.DocumentsUpdatedTranslationParentFeed(),
name="wiki.feeds.l10n_updates"),
url(r'^/feeds/(?P<format>[^/]+)/tag/(?P<tag>[^/]+)',
feeds.DocumentsRecentFeed(),
name="wiki.feeds.recent_documents"),
url(r'^/feeds/(?P<format>[^/]+)/needs-review/(?P<tag>[^/]+)',
feeds.DocumentsReviewFeed(),
name="wiki.feeds.list_review_tag"),
url(r'^/feeds/(?P<format>[^/]+)/needs-review/?',
feeds.DocumentsReviewFeed(),
name="wiki.feeds.list_review"),
url(r'^/feeds/(?P<format>[^/]+)/revisions/?',
feeds.RevisionsFeed(),
name="wiki.feeds.recent_revisions"),
url(r'^/feeds/(?P<format>[^/]+)/files/?',
AttachmentsFeed(),
name="attachments.feeds.recent_files"),
url(r'^/(?P<document_path>%s)' % DOCUMENT_PATH_RE.pattern,
include(document_patterns)),
]
|
knutfrode/opendrift
|
examples/example_grid_time.py
|
Python
|
gpl-2.0
| 1,294 | 0.001546 |
#!/usr/bin/env python
from datetime import timedelta
import numpy as np
from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift
o = OceanDrift(loglevel=0) # Set loglevel to 0 for debug information
reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
# Landmask (Basemap)
reader_basemap = reader_basemap_landmask.Reader(
llcrnrlon=4.0, llcrnrlat=59.9,
urcrnrlon=5.5, urcrnrlat=61.2,
resolution='h', projection='merc')
o.add_reader([reader_basemap, reader_norkyst])
# Seeding some particles
lons = np.linspace(4.4, 4.6, 10)
lats = np.linspace(60.0, 60.1, 10)
lons, lats = np.meshgrid(lons, lats)
lons = lons.ravel()
lats = lats.ravel()
# Seed oil elements on a grid at regular time interval
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for i in range(num_steps+1):
o.seed_elements(lons, lats, radius=0, number=100,
time=start_time + i*time_step)
# Running model (until end of driver data)
o.run(steps=66*4, time_step=900)
# Print and plot results
print(o)
o.animation()
|
kmee/bank-statement-reconcile
|
__unported__/account_invoice_reference/__openerp__.py
|
Python
|
agpl-3.0
| 6,213 | 0.000161 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Invoices Reference',
'version': '1.0',
'author': 'Camptocamp',
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'category',
'complexity': "easy",
'depends': ['account',
],
'description': """
Invoices Reference
==================
Aims to simplify the "references" fields on the invoices.
We observed difficulties for the users to file the references (name,
origin, free reference) and above all, to understand which field will be
copied in the reference field of the move and move lines.
The approach here is to state simple rules with one concern: consistency.
The reference of the move lines must be the number of the document at their very
origin (number of a sales order, of an external document like a supplier
invoice, ...). The goal is for the accountant to be able to trace to the
source document from a ledger).
The description of a line should always be... well, a description. Not a number
or a cryptic reference.
It particularly fits with other modules of the bank-statement-reconcile series
as account_advanced_reconcile_transaction_ref.
Fields
------
Enumerating the information we need in an invoice, we find that the
mandatory fields are:
* Invoice Number
* Description
* Internal Reference ("our reference")
* External Reference ("customer or supplier reference")
* Optionally, a technical transaction reference (credit card payment gateways,
SEPA, ...)
Now, on the move lines:
* Name
* Reference
* Optionally, a technical transaction reference (added by the module
`base_transaction_id`)
Let's see how the information will be organized with this module.
Customers Invoices / Refunds
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------+-----------------+------------------------------+
| Information | Invoice field | Instead of (in base modules) |
+=================+=================+==============================+
| Invoice number | Invoice number | Invoice number |
+-----------------+-----------------+------------------------------+
| Description | Name | -- |
+-----------------+-----------------+------------------------------+
| Internal Ref | Origin | Origin |
+-----------------+-----------------+------------------------------+
| External Ref | Reference | Name |
+-----------------+-----------------+------------------------------+
Information propagated to the move lines:
+-----------------+------------------------------------+
| Move line field | Invoice field |
+=================+====================================+
| Description | Name |
+-----------------+------------------------------------+
| Reference | Origin, or Invoice number if empty |
+-----------------+------------------------------------+
Supplier Invoices / Refunds
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Supplier invoices have an additional field `supplier_invoice_number`
that we consider as redundant with the reference field. This field is kept
and even set as mandatory, while the reference field is hidden.
+-----------------+-----------------+------------------------------+
| Information | Invoice field | Instead of (in base modules) |
+=================+=================+==============================+
| Invoice number | Invoice number | Invoice number |
+-----------------+-----------------+------------------------------+
| Description | Name | -- |
+-----------------+-----------------+------------------------------+
| Internal Ref | Origin | Origin |
+-----------------+-----------------+------------------------------+
| External Ref | Supplier number | Supplier number |
+-----------------+-----------------+------------------------------+
The reference field is hidden when the reference type is "free reference",
because it is already filed in the Supplier invoice number.
Information propagated to the move lines:
+-----------------+---------------------------------------------+
| Move line field | Invoice field |
+=================+=============================================+
| Description | Name |
+-----------------+---------------------------------------------+
| Reference | Supplier number, or Invoice number if empty |
+-----------------+---------------------------------------------+
""",
'website': 'http://www.camptocamp.com',
'data': ['account_invoice_view.xml',
],
'test': ['test/out_invoice_with_origin.yml',
'test/out_invoice_without_origin.yml',
'test/in_invoice_with_supplier_number.yml',
'test/in_invoice_without_supplier_number.yml',
'test/out_refund_with_origin.yml',
'test/out_refund_without_origin.yml',
'test/in_refund_with_supplier_number.yml',
'test/in_refund_without_supplier_number.yml',
],
'installable': False,
'auto_install': False,
}
|
keishi/chromium
|
ui/gl/generate_bindings.py
|
Python
|
bsd-3-clause
| 57,794 | 0.014759 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for GL/GLES extension wrangler."""
import os
import collections
import re
import sys
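# Added note (not in the original file): each GL_FUNCTIONS entry describes one
# wrapped entry point -- its return type, the candidate GL/extension names, the
# C argument list as a string, and optional extras such as 'logging_code'; the
# generator code (truncated in this excerpt) emits bindings from these records.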
GL_FUNCTIONS = [
{ 'return_type': 'void',
'names': ['glActiveTexture'],
'arguments': 'GLenum texture', },
{ 'return_type': 'void',
'names': ['glAttachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glBeginQuery'],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'names': ['glBeginQueryARB', 'glBeginQueryEXT'],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'names': ['glBindAttribLocation'],
'arguments': 'GLuint program, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindBuffer'],
'arguments': 'GLenum target, GLuint buffer', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocation'],
'arguments': 'GLuint program, GLuint colorNumber, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocationIndexed'],
'arguments':
'GLuint program, GLuint colorNumber, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFramebufferEXT', 'glBindFramebuffer'],
'arguments': 'GLenum target, GLuint framebuffer', },
{ 'return_type': 'void',
'names': ['glBindRenderbufferEXT', 'glBindRenderbuffer'],
  'arguments': 'GLenum target, GLuint renderbuffer', },
{ 'return_type': 'void',
'names': ['glBindTexture'],
'arguments': 'GLenum target, GLuint texture', },
{ 'return_type': 'void',
'names': ['glBlendColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glBlendEquation'],
'arguments': ' GLenum mode ', },
{ 'return_type': 'void',
'names': ['glBlendEquationSeparate'],
'arguments': 'GLenum modeRGB, GLenum modeAlpha', },
{ 'return_type': 'void',
'names': ['glBlendFunc'],
'arguments': 'GLenum sfactor, GLenum dfactor', },
{ 'return_type': 'void',
'names': ['glBlendFuncSeparate'],
'arguments':
'GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferEXT', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferANGLE', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBufferData'],
'arguments': 'GLenum target, GLsizei size, const void* data, GLenum usage', },
{ 'return_type': 'void',
'names': ['glBufferSubData'],
'arguments': 'GLenum target, GLint offset, GLsizei size, const void* data', },
{ 'return_type': 'GLenum',
'names': ['glCheckFramebufferStatusEXT',
'glCheckFramebufferStatus'],
'arguments': 'GLenum target',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringEnum(result));
""", },
{ 'return_type': 'void',
'names': ['glClear'],
'arguments': 'GLbitfield mask', },
{ 'return_type': 'void',
'names': ['glClearColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glClearDepth'],
'arguments': 'GLclampd depth', },
{ 'return_type': 'void',
'names': ['glClearDepthf'],
'arguments': 'GLclampf depth', },
{ 'return_type': 'void',
'names': ['glClearStencil'],
'arguments': 'GLint s', },
{ 'return_type': 'void',
'names': ['glColorMask'],
'arguments':
'GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha', },
{ 'return_type': 'void',
'names': ['glCompileShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glCompressedTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLsizei width, '
'GLsizei height, GLint border, GLsizei imageSize, const void* data', },
{ 'return_type': 'void',
'names': ['glCompressedTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, '
'const void* data', },
{ 'return_type': 'void',
'names': ['glCopyTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, '
'GLsizei width, GLsizei height, GLint border', },
{ 'return_type': 'void',
'names': ['glCopyTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, '
'GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'GLuint',
'names': ['glCreateProgram'],
'arguments': 'void', },
{ 'return_type': 'GLuint',
'names': ['glCreateShader'],
'arguments': 'GLenum type', },
{ 'return_type': 'void',
'names': ['glCullFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDeleteBuffersARB', 'glDeleteBuffers'],
'arguments': 'GLsizei n, const GLuint* buffers', },
{ 'return_type': 'void',
'names': ['glDeleteFramebuffersEXT', 'glDeleteFramebuffers'],
'arguments': 'GLsizei n, const GLuint* framebuffers', },
{ 'return_type': 'void',
'names': ['glDeleteProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glDeleteQueries'],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteQueriesARB', 'glDeleteQueriesEXT'],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteRenderbuffersEXT', 'glDeleteRenderbuffers'],
'arguments': 'GLsizei n, const GLuint* renderbuffers', },
{ 'return_type': 'void',
'names': ['glDeleteShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glDeleteTextures'],
'arguments': 'GLsizei n, const GLuint* textures', },
{ 'return_type': 'void',
'names': ['glDepthFunc'],
'arguments': 'GLenum func', },
{ 'return_type': 'void',
'names': ['glDepthMask'],
'arguments': 'GLboolean flag', },
{ 'return_type': 'void',
'names': ['glDepthRange'],
'arguments': 'GLclampd zNear, GLclampd zFar', },
{ 'return_type': 'void',
'names': ['glDepthRangef'],
'arguments': 'GLclampf zNear, GLclampf zFar', },
{ 'return_type': 'void',
'names': ['glDetachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glDisable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glDisableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'names': ['glDrawArrays'],
'arguments': 'GLenum mode, GLint first, GLsizei count', },
{ 'return_type': 'void',
'names': ['glDrawBuffer'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDrawBuffersARB'],
'arguments': 'GLsizei n, const GLenum* bufs', },
{ 'return_type': 'void',
'names': ['glDrawElements'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetTexture2DOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetRenderbufferStorageOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEnable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glEnableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'names': ['glEndQuery'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glEndQueryARB', 'glEndQueryEXT'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glFinish'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFlush'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFramebufferRenderbufferEXT', 'glFramebufferRenderb
|
DT9/programming-problems
|
2017/microsoft17/1.py
|
Python
|
apache-2.0
| 262 | 0.007634 |
import sys
import numpy as np
|
def check_symmetric(a, tol=1e-8):
return np.allclose(a, a.T, atol=tol)
for line in sys.stdin:
a = np.matrix(line)
f = check_symmetric(a)
if not f:
print("Not s
|
ymmetric")
else:
print("Symmetric")
|
skirpichev/omg
|
diofant/tests/printing/test_conventions.py
|
Python
|
bsd-3-clause
| 3,727 | 0.000537 |
from diofant import (Derivative, Function, Integral, bell, besselj, cos, exp,
legendre, oo, symbols)
from diofant.printing.conventions import requires_partial, split_super_sub
__all__ = ()
def test_super_sub():
assert split_super_sub('beta_13_2') == ('beta', [], ['13', '2'])
assert split_super_sub('beta_132_20') == ('beta', [], ['132', '20'])
assert spl
|
it_super_sub('beta_13') == ('beta', [], ['13'])
assert split_super_sub('x_a_b') == ('
|
x', [], ['a', 'b'])
assert split_super_sub('x_1_2_3') == ('x', [], ['1', '2', '3'])
assert split_super_sub('x_a_b1') == ('x', [], ['a', 'b1'])
assert split_super_sub('x_a_1') == ('x', [], ['a', '1'])
assert split_super_sub('x_1_a') == ('x', [], ['1', 'a'])
assert split_super_sub('x_1^aa') == ('x', ['aa'], ['1'])
assert split_super_sub('x_1__aa') == ('x', ['aa'], ['1'])
assert split_super_sub('x_11^a') == ('x', ['a'], ['11'])
assert split_super_sub('x_11__a') == ('x', ['a'], ['11'])
assert split_super_sub('x_a_b_c_d') == ('x', [], ['a', 'b', 'c', 'd'])
assert split_super_sub('x_a_b^c^d') == ('x', ['c', 'd'], ['a', 'b'])
assert split_super_sub('x_a_b__c__d') == ('x', ['c', 'd'], ['a', 'b'])
assert split_super_sub('x_a^b_c^d') == ('x', ['b', 'd'], ['a', 'c'])
assert split_super_sub('x_a__b_c__d') == ('x', ['b', 'd'], ['a', 'c'])
assert split_super_sub('x^a^b_c_d') == ('x', ['a', 'b'], ['c', 'd'])
assert split_super_sub('x__a__b_c_d') == ('x', ['a', 'b'], ['c', 'd'])
assert split_super_sub('x^a^b^c^d') == ('x', ['a', 'b', 'c', 'd'], [])
assert split_super_sub('x__a__b__c__d') == ('x', ['a', 'b', 'c', 'd'], [])
assert split_super_sub('alpha_11') == ('alpha', [], ['11'])
assert split_super_sub('alpha_11_11') == ('alpha', [], ['11', '11'])
assert split_super_sub('') == ('', [], [])
def test_requires_partial():
x, y, z, t, nu = symbols('x y z t nu')
n = symbols('n', integer=True)
f = x * y
assert requires_partial(Derivative(f, x)) is True
assert requires_partial(Derivative(f, y)) is True
# integrating out one of the variables
assert requires_partial(Derivative(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False
# bessel function with smooth parameter
f = besselj(nu, x)
assert requires_partial(Derivative(f, x)) is True
assert requires_partial(Derivative(f, nu)) is True
# bessel function with integer parameter
f = besselj(n, x)
assert requires_partial(Derivative(f, x)) is False
# this is not really valid (differentiating with respect to an integer)
# but there's no reason to use the partial derivative symbol there. make
# sure we don't throw an exception here, though
assert requires_partial(Derivative(f, n)) is False
# bell polynomial
f = bell(n, x)
assert requires_partial(Derivative(f, x)) is False
# again, invalid
assert requires_partial(Derivative(f, n)) is False
# legendre polynomial
f = legendre(0, x)
assert requires_partial(Derivative(f, x)) is False
f = legendre(n, x)
assert requires_partial(Derivative(f, x)) is False
# again, invalid
assert requires_partial(Derivative(f, n)) is False
f = x ** n
assert requires_partial(Derivative(f, x)) is False
assert requires_partial(Derivative(Integral((x*y) ** n * exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False
# parametric equation
f = (exp(t), cos(t))
g = sum(f)
assert requires_partial(Derivative(g, t)) is False
# function of unspecified variables
f = symbols('f', cls=Function)
assert requires_partial(Derivative(f, x)) is False
assert requires_partial(Derivative(f, x, y)) is True
|
jucimarjr/IPC_2017-1
|
lista06/lista06_lista04_questao14.py
|
Python
|
apache-2.0
| 1,317 | 0.006897 |
#---------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Gabriel de Queiroz Sousa 1715310044
# Lucas Gabriel Silveira Duarte 1715310053
# Matheus de Oliveira Marques 1515310514
# Rodrigo Duarte de Souza 1115140049
#
# Leet is a way of writing the Latin alphabet using other symbols in place of the letters,
# such as numbers, for example. The word leet itself
|
 admits many variations, such as l33t or 1337.
# The use of leet reflects a subculture tied to the world of computer games and the internet,
# being widely used to confuse beginners and
|
 to assert oneself as part of a group. Research
# the main ways of translating the letters. Then write a program that asks for a text
# and converts it to leet-speak spelling.
#---------------------------------------------------------------------------
leet = (('a', '4'), ('l', '1'), ('e', '3'), ('s', '5'), ('g', '6'), ('r', '12'), ('t', '7'), ('q', '9'))
sring = input("Informe palavra = ")
nova = sring
print("Inicialmente: ", sring)
for antigo, novo in leet:
nova = nova.replace(antigo, novo)
print("Finalmente = ", nova)
|
Patola/Cura
|
cura/Settings/CuraContainerStack.py
|
Python
|
lgpl-3.0
| 18,426 | 0.011397 |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, cast, List, Optional
from PyQt5.QtCore import pyqtProperty, pyqtSignal, QObject
from UM.Application import Application
from UM.Decorators import override
from UM.FlameProfiler import pyqtSlot
from UM.Logger import Logger
from UM.Settings.ContainerStack import ContainerStack, InvalidContainerStackError
from UM.Settings.InstanceContainer import InstanceContainer
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Settings.Interfaces import ContainerInterface, DefinitionContainerInterface
from cura.Settings import cura_empty_instance_containers
from . import Exceptions
## Base class for Cura related stacks that want to enforce certain containers are available.
#
# This class makes sure that the stack has the following containers set: user changes, quality
# changes, quality, material, variant, definition changes and finally definition. Initially,
# these will be equal to the empty instance container.
#
# The container types are determined based on the following criteria:
# - user: An InstanceContainer with the metadata entry "type" set to "user".
# - quality changes: An InstanceContainer with the metadata entry "type" set to "quality_changes".
# - quality: An InstanceContainer with the metadata entry "type" set to "quality".
# - material: An InstanceContainer with the metadata entry "type" set to "material".
# - variant: An InstanceContainer with the metadata entry "type" set to "variant".
# - definition changes: An InstanceContainer with the metadata entry "type" set to "definition_changes".
# - definition: A DefinitionContainer.
#
# Internally, this class ensures the mentioned containers are always there and kept in a specific order.
# This also means that operations on the stack that modifies the container ordering is prohibited and
# will raise an exception.
class CuraContainerStack(ContainerStack):
def __init__(self, container_id: str) -> None:
super().__init__(container_id)
self._empty_instance_container = cura_empty_instance_containers.empty_container #type: InstanceContainer
self._empty_quality_changes = cura_empty_instance_containers.empty_quality_changes_container #type: InstanceContainer
self._empty_quality = cura_empty_instance_containers.empty_quality_container #type: InstanceContainer
self._empty_material = cura_empty_instance_containers.empty_material_container #type: InstanceContainer
self._empty_variant = cura_empty_instance_containers.empty_variant_container #type: InstanceContainer
self._containers = [self._empty_instance_container for i in range(len(_ContainerIndexes.IndexTypeMap))] #type: List[ContainerInterface]
self._containers[_ContainerIndexes.QualityChanges] = self._empty_quality_changes
self._containers[_ContainerIndexes.Quality] = self._empty_quality
self._containers[_ContainerIndexes.Material] = self._empty_material
self._containers[_ContainerIndexes.Variant] = self._empty_variant
self.containersChanged.connect(self._onContainersChanged)
import cura.CuraApplication #Here to prevent circular imports.
self.setMetaDataEntry("setting_version", cura.CuraApplication.CuraApplication.SettingVersion)
# This is emitted whenever the containersChanged signal from the ContainerStack base class is emitted.
pyqtContainersChanged = pyqtSignal()
## Set the user changes container.
#
# \param new_user_changes The new user changes container. It is expected to have a "type" metadata entry with the value "user".
def setUserChanges(self, new_user_changes: InstanceContainer) -> None:
self.replaceContainer(_ContainerIndexes.UserChanges, new_user_changes)
## Get the user changes container.
#
# \return The user changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setUserChanges, notify = pyqtContainersChanged)
def userChanges(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.UserChanges])
## Set the quality changes container.
#
# \param new_quality_changes The new quality changes container. It is expected to have a "type" metadata entry with the value "quality_changes".
def setQualityChanges(self, new_quality_changes: InstanceContainer, postpone_emit = False) -> None:
self.replaceContainer(_ContainerIndexes.QualityChanges, new_quality_changes, postpone_emit = postpone_emit)
## Get the quality changes container.
#
# \return The quality changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setQualityChanges, notify = pyqtContainersChanged)
def qualityChanges(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.QualityChanges])
## Set the quality container.
#
# \param new_quality The new quality container. It is expected to have a "type" metadata entry with the value "quality".
def setQuality(self, new_quality: InstanceContainer, postpone_emit: bool = False) -> None:
self.replaceContainer(_ContainerIndexes.Quality, new_quality, postpone_emit = postpone_emit)
## Get the quality container.
#
# \return The quality container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setQuality, notify = pyqtContainersChanged)
def quality(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.Quality])
## Set the material container.
#
# \param new_material The new material container. It is expected to have a "type" metadata entry with the value "material".
def setMaterial(self, new_material: InstanceContainer, postpone_emit: bool = False) -> None:
self.replaceContainer(_ContainerIndexes.Material, new_material, postpone_emit = postpone_emit)
## Get the material container.
#
# \return The material container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setMaterial, notify =
|
pyqtContainersChanged)
def material(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.Material])
## Set the variant container.
#
# \param new_variant The new variant container. It is expected to have a "type" metadata entry with the value "variant".
def setVariant
|
(self, new_variant: InstanceContainer) -> None:
self.replaceContainer(_ContainerIndexes.Variant, new_variant)
## Get the variant container.
#
# \return The variant container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setVariant, notify = pyqtContainersChanged)
def variant(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.Variant])
## Set the definition changes container.
#
# \param new_definition_changes The new definition changes container. It is expected to have a "type" metadata entry with the value "definition_changes".
def setDefinitionChanges(self, new_definition_changes: InstanceContainer) -> None:
self.replaceContainer(_ContainerIndexes.DefinitionChanges, new_definition_changes)
## Get the definition changes container.
#
# \return The definition changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setDefinitionChanges, notify = pyqtContainersChanged)
def definitionChanges(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.DefinitionChanges])
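# Illustrative sketch (not part of Cura): the fixed slot ordering documented in
# the CuraContainerStack class comment above can be mirrored with a plain list
# of the "type" metadata values, one entry per slot.
_EXPECTED_CONTAINER_TYPES = [
    "user", "quality_changes", "quality", "material",
    "variant", "definition_changes", "definition",
]
def _slot_for(container_metadata):
    # Assumed helper (illustration only): picks the slot index for a container
    # from its "type" metadata entry, mirroring the criteria listed above.
    return _EXPECTED_CONTAINER_TYPES.index(container_metadata.get("type", "definition"))
# _slot_for({"type": "material"}) -> 3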
|
haylesr/angr
|
tests/test_path_groups.py
|
Python
|
bsd-2-clause
| 4,184 | 0.00478 |
import nose
import angr
import logging
l = logging.getLogger("angr_tests.path_groups")
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
addresses_fauxware = {
'armel': 0x8524,
'armhf': 0x104c9, # addr+1 to force thumb
#'i386': 0x8048524, # commenting out because of the freaking stack check
'mips': 0x400710,
'mipsel': 0x4006d0,
'ppc': 0x1000054c,
'ppc64': 0x10000698,
'x86_64': 0x400664
}
def run_fauxware(arch, threads):
p = angr.Project(location + '/' + arch + '/fauxware', load_options={'auto_load_libs': False})
pg = p.factory.path_group(threads=threads)
nose.tools.assert_equal(len(pg.active), 1)
nose.tools.assert_equal(pg.active[0].length, 0)
# step until the backdoor split occurs
pg2 = pg.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune())
nose.tools.assert_equal(len(pg2.active), 2)
nose.tools.assert_true(any("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
nose.tools.assert_false(all("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
# separate out the backdoor and normal paths
pg3 = pg2.stash(lambda path: "SOSNEAKY" in path.state.posix.dumps(0), to_stash="backdoor").stash_all(to_stash="auth")
nose.tools.assert_equal(len(pg3.active), 0)
nose.tools.assert_equal(len(pg3.backdoor), 1)
nose.tools.assert_equal(len(pg3.auth), 1)
# step the backdoor path until it returns to main
pg4 = pg3.step(until=lambda lpg: lpg.backdoor[0].jumpkinds[-1] == 'Ijk_Ret', stash='backdoor')
main_addr = pg4.backdoor[0].addr
nose.tools.assert_equal(len(pg4.active), 0)
nose.tools.assert_equal(len(pg4.backdoor), 1)
nose.tools.assert_equal(len(pg4.auth), 1)
# now step the real path until the real authentication paths return to the same place
pg5 = pg4.explore(find=main_addr, num_find=2, stash='auth').unstash_all(from_stash='found', to_stash='auth')
|
nose.tools.assert_equal(len(
|
pg5.active), 0)
nose.tools.assert_equal(len(pg5.backdoor), 1)
nose.tools.assert_equal(len(pg5.auth), 2)
# now unstash everything
pg6 = pg5.unstash_all(from_stash='backdoor').unstash_all(from_stash='auth')
nose.tools.assert_equal(len(pg6.active), 3)
nose.tools.assert_equal(len(pg6.backdoor), 0)
nose.tools.assert_equal(len(pg6.auth), 0)
nose.tools.assert_equal(len(set(pg6.mp_active.addr.mp_items)), 1)
# now merge them!
pg7 = pg6.merge()
nose.tools.assert_equal(len(pg7.active), 1)
nose.tools.assert_equal(len(pg7.backdoor), 0)
nose.tools.assert_equal(len(pg7.auth), 0)
#import ipdb; ipdb.set_trace()
#print pg2.mp_active.addr.mp_map(hex).mp_items
# test selecting paths to step
pg_a = p.factory.path_group(immutable=True)
pg_b = pg_a.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
pg_c = pg_b.step(selector_func=lambda p: p is pg_b.active[0], step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
nose.tools.assert_is(pg_b.active[1], pg_c.active[0])
nose.tools.assert_is_not(pg_b.active[0], pg_c.active[1])
total_active = len(pg_c.active)
# test special stashes
nose.tools.assert_equals(len(pg_c.stashed), 0)
pg_d = pg_c.stash(filter_func=lambda p: p is pg_c.active[1], to_stash='asdf')
nose.tools.assert_equals(len(pg_d.stashed), 0)
nose.tools.assert_equals(len(pg_d.asdf), 1)
nose.tools.assert_equals(len(pg_d.active), total_active-1)
pg_e = pg_d.stash(from_stash=pg_d.ALL, to_stash='fdsa')
nose.tools.assert_equals(len(pg_e.asdf), 0)
nose.tools.assert_equals(len(pg_e.active), 0)
nose.tools.assert_equals(len(pg_e.fdsa), total_active)
pg_f = pg_e.stash(from_stash=pg_e.ALL, to_stash=pg_e.DROP)
nose.tools.assert_true(all(len(s) == 0 for s in pg_f.stashes.values()))
def test_fauxware():
for arch in addresses_fauxware:
yield run_fauxware, arch, None
yield run_fauxware, arch, 2
if __name__ == "__main__":
for func, march, threads in test_fauxware():
print 'testing ' + march
func(march, threads)
|
munhyunsu/UsedMarketAnalysis
|
ruliweb_analyzer/db_5th_group.py
|
Python
|
gpl-3.0
| 1,617 | 0.009895 |
#!/usr/bin/env python3
import os # makedirs
import sys # argv, exit
import csv # Dic
|
tReader
def cutoffdict(cdict):
rdict = dict()
for key in cdict.keys():
candi = cdict[key]
top = max(candi, key = candi.get)
if candi[top] > (sum(candi.values())*0.5):
rdict[key] = top
return rdict
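# Quick illustration (made-up data): a prefix is kept only when a single city
# holds a strict majority of its observations.
# cutoffdict({'10.0.0': {'Seoul': 6, 'Busan': 2},   # Seoul has 6 of 8 -> kept
#             '10.0.1': {'Seoul': 3, 'Busan': 3}})  # no majority -> dropped
# returns {'10.0.0': 'Seoul'}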
def groupbyprefix(src_path):
des_path = src_path.split('/')[-1]
src_file = open(src_path, 'r')
src_csv = csv.DictReader(src_file)
des_file
|
= open('./dbdays/' + des_path, 'w')
des_csv = csv.DictWriter(des_file, fieldnames = [
'ipprefix', 'district', 'city'])
des_csv.writeheader()
cdict = dict()
for row in src_csv:
cprefix = row['ipprefix']
ccity = row['district'] +' ' + row['city']
        city_counts = cdict.setdefault(cprefix, dict())
        city_counts[ccity] = city_counts.get(ccity, 0) + 1
wdict = cutoffdict(cdict)
for prefix in wdict.keys():
district = wdict[prefix].split(' ')[0]
city = wdict[prefix].split(' ')[1]
des_csv.writerow({'ipprefix': prefix,
'district': district,
'city': city})
def main(argv):
if len(argv) < 2:
print('We need 1 arguments')
print('.py [SRC]')
sys.exit()
src_path = argv[1]
os.makedirs('./dbdays', exist_ok = True)
sit = os.scandir(src_path)
for entry in sit:
if not entry.name.startswith('.') and entry.is_file():
cip = entry.path
groupbyprefix(cip)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
joachimmetz/dfvfs
|
dfvfs/path/ewf_path_spec.py
|
Python
|
apache-2.0
| 777 | 0.005148 |
# -*- coding: utf-8 -*-
"""The EWF image path specification implementation."""
from dfvfs.lib import defin
|
itions
from dfvfs.path import factory
from dfvfs.path import path_spec
class EWFPathSpec(path_spec.PathSpec):
"""EWF image path specification."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_EWF
def __init__(self, parent=None, **kwargs):
"""Initializ
|
es a path specification.
Note that the EWF file path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
"""
if not parent:
raise ValueError('Missing parent value.')
super(EWFPathSpec, self).__init__(parent=parent, **kwargs)
factory.Factory.RegisterPathSpec(EWFPathSpec)
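# Usage sketch (hypothetical parent spec): an EWF path spec always wraps another
# path specification, so constructing one without a parent raises ValueError.
# EWFPathSpec()                 # -> ValueError('Missing parent value.')
# EWFPathSpec(parent=os_spec)   # -> ok, for any other PathSpec instance os_spec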
|
CacaoMovil/guia-de-cacao-django
|
cacao_app/configuracion/forms.py
|
Python
|
bsd-3-clause
| 762 | 0.001316 |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from django import forms
from django.utils.translation import ugettext_lazy as _
from envelope.forms import ContactForm
class ContactForm(ContactForm):
template_name = "envelope/contact_email.txt"
html_template_name = "envelope/contact_email.html"
phone = forms.CharField(label='Teléfono', required=False)
country = forms.CharField(label
|
='País', required=False)
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['email'].required = False
ContactForm.base_fields = OrderedDict(
(k, ContactForm.base_fields[k])
for k in [
'sender', 'subject', 'ema
|
il', 'phone', 'country', 'message',
]
)
|
HMSBeagle1831/rapidscience
|
rlp/bookmarks/templatetags/bookmarks.py
|
Python
|
mit
| 990 | 0.00202 |
from django import template
register = template.Library()
@register.assignment_tag(takes_context=True)
def has_bookmark_permission(context, action):
"""Checks if the current user can bookmark the action item.
Returns a boolean.
Syntax::
{% has_bookmark_permis
|
sion action %}
"""
request = context['request']
if not request.user.is_authenticated():
return False
has_permission = True
if action.target.approval_required and not request.user.can_access_all_projects:
has_permission = False
if not has_permission:
return False
return True
@register.assignment_tag(takes_context=True)
def get_exis
|
ting_bookmark(context, action):
request = context['request']
if not request.user.is_authenticated():
return None
existing_bookmark = request.user.bookmark_set.filter(
object_pk=action.action_object.pk, content_type=action.action_object_content_type
).first()
return existing_bookmark
|
Reagankm/KnockKnock
|
venv/lib/python3.4/site-packages/nltk/cluster/util.py
|
Python
|
gpl-2.0
| 9,689 | 0.001858 |
# Natural Language Toolkit: Clusterer Utilities
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import copy
from sys import stdout
from math import sqrt
try:
import numpy
except ImportError:
pass
from nltk.cluster.api import ClusterI
from nltk.compat import python_2_unicode_compatible
class VectorSpaceClusterer(ClusterI):
"""
Abstract clusterer which takes tokens and maps them into a vector space.
Optionally performs singular value decomposition to reduce the
dimensionality.
"""
def __init__(self, normalise=False, svd_dimensions=None):
"""
:param normalise: should vectors be normalised to length 1
:type normalise: boolean
:param svd_dimensions: number of dimensions to use in reducing vector
dimensionsionality with SVD
:type svd_dimensions: int
"""
self._Tt = None
self._should_normalise = normalise
self._svd_dimensions = svd_dimensions
def cluster(self, vectors, assign_clusters=False, trace=False):
assert len(vectors) > 0
# normalise the vectors
if self._should_normalise:
vectors = list(map(self._normalise, vectors))
# use SVD to reduce the dimensionality
if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
[u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
S = d[:self._svd_dimensions] * \
numpy.identity(self._svd_dimensions, numpy.float64)
T = u[:,:self._svd_dimensions]
Dt = vt[:self._svd_dimensions,:]
vectors = numpy.transpose(numpy.dot(S, Dt))
self._Tt = numpy.transpose(T)
# call abstract method to cluster the vectors
self.cluster_vectorspace(vectors, trace)
# assign the vectors to clusters
if assign_clusters:
print(self._Tt, vectors)
return [self.classify(vector) for vector in vectors]
def cluster_vectorspace(self, vectors, trace):
"""
Finds the clusters using the given set of vectors.
"""
raise NotImplementedError()
def classify(self, vector):
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
cluster = self.classify_vectorspace(vector)
return self.cluster_name(cluster)
def classify_vectorspace(self, vector):
"""
Returns the index of the appropriate cluster for the vector.
"""
raise NotImplementedError()
def likelihood(self, vector, label):
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
return self.likelihood_vectorspace(vector, label)
def likelihood_vectorspace(self, vector, cluster):
"""
Returns the likelihood of the vector belonging to the cluster.
"""
predicted = self.classify_vectorspace(vector)
return (1.0 if cluster == predicted else 0.0)
def vector(self, vector):
"""
Returns the vector after normalisation and dimensionality reduction
"""
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
return vector
def _normalise(self, vector):
"""
Normalises the vector to unit length.
"""
return vector / sqrt(numpy.dot(vector, vector))
def euclidean_distance(u, v):
"""
Returns the euclidean distance between vectors u and v. This is equivalent
to the length of the vector (u - v).
"""
diff = u - v
return sqrt(numpy.dot(diff, diff))
def cosine_distance(u, v):
"""
Returns 1 minus the cosine of the angle between vectors v and u. This is equal to
1 - (u.v / |u||v|).
"""
return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
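# Small worked example (assuming numpy is available, as imported above): for the
# orthogonal unit vectors u = (1, 0) and v = (0, 1), euclidean_distance(u, v)
# is sqrt(2) ~= 1.414 and cosine_distance(u, v) is 1.0.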
class _DendrogramNode(object):
""" Tree node of a dendrogram. """
def __init__(self, value, *children):
self._value = value
self._children = children
def leaves(self, values=True):
if self._children:
leaves = []
for child in self._children:
leaves.extend(child.leaves(values))
return leaves
elif values:
return [self._value]
else:
return [self]
def groups(self, n):
queue = [(self._value, self)]
while len(queue) < n:
priority, node = queue.pop()
if not node._children:
                queue.append((priority, node))
break
for child in node._children:
if child._children:
queue.append((child._value, child))
else:
queue.append((0, child))
# makes the earliest merges at the start, latest at the end
queue.sort()
groups = []
for priority, node in queue:
groups.append(node.leaves())
return groups
@python_2_unicode_compatible
class Dendrogram(object):
"""
Represents a dendrogram, a tree with a specified branching order. This
must be initialised with the leaf items, then iteratively call merge for
each branch. This class constructs a tree representing the order of calls
to the merge function.
"""
def __init__(self, items=[]):
"""
:param items: the items at the leaves of the dendrogram
:type items: sequence of (any)
"""
self._items = [_DendrogramNode(item) for item in items]
self._original_items = copy.copy(self._items)
self._merge = 1
def merge(self, *indices):
"""
Merges nodes at given indices in the dendrogram. The nodes will be
combined which then replaces the first node specified. All other nodes
involved in the merge will be removed.
:param indices: indices of the items to merge (at least two)
:type indices: seq of int
"""
assert len(indices) >= 2
node = _DendrogramNode(self._merge, *[self._items[i] for i in indices])
self._merge += 1
self._items[indices[0]] = node
for i in indices[1:]:
del self._items[i]
def groups(sel
|
f, n):
"""
Finds the n-groups of items (leaves) reachable from a cut at depth n.
:param n: number of groups
:type n: int
"""
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
ro
|
ot = self._items[0]
return root.groups(n)
def show(self, leaf_labels=[]):
"""
Print the dendrogram in ASCII art to standard out.
:param leaf_labels: an optional list of strings to use for labeling the leaves
:type leaf_labels: list
"""
# ASCII rendering characters
JOIN, HLINK, VLINK = '+', '-', '|'
# find the root (or create one)
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = self._original_items
if leaf_labels:
last_row = leaf_labels
else:
last_row = ["%s" % leaf._value for leaf in leaves]
# find the bottom row and the best cell width
width = max(map(len, last_row)) + 1
        lhalf = width // 2
rhalf = width - lhalf - 1
# display functions
def format(centre, left=' ', right=' '):
return '%s%s%s' % (lhalf*left, centre, right*rhalf)
def display(str):
stdout.write(str)
# for each merge, top down
queue = [(root._value, root)]
verticals = [ format(' ') for leaf in leaves ]
while queue:
|
pmaupin/playtag
|
doc/make.py
|
Python
|
mit
| 745 | 0.005369 |
#!/usr/bin/env python
# REQUIRES both rst2pdf and wikir project from google code.
import sys
import subprocess
sys.path.insert(0, '../../rson/py2x')
from rson import loads
from simplejson import dumps
subprocess.call('../../rst2pdf/bin/rst2pdf manual.txt -e preprocess -e dotted_toc -o
|
manual.pdf'.split())
lines = iter(open('manual.txt', 'rb').read().splitlines())
badstuff = 'page:: space:: footer:: ##Page## contents::'.split()
result = []
for line in lines:
for check in badstuf
|
f:
if check in line:
break
else:
result.append(line)
result.append('')
result = '\n'.join(result)
from wikir import publish_string
result = publish_string(result)
f = open('manual.wiki', 'wb')
f.write(result)
f.close()
|
etutionbd/openfire-restapi
|
ofrestapi/system.py
|
Python
|
gpl-3.0
| 1,675 | 0.001194 |
# -*- coding: utf-8 -*-
from requests import (get, post, delete)
from .base import Base
class System(Base):
def __init__(self, host, secret, endpoint='/plugins/restapi/v1/system/properties'):
"""
:param host: Scheme://Host/ for API requests
:param secret: Shared secret key for API requests
:param endpoint: Endpoint for API requests
"""
super(System, self).__init__(host, secret, endpoint)
def get_props(self):
"""
Retrieve all system properties
"""
return self._submit_request(get, self.endpoint)
def get_prop(self, key):
"""
Retrieve system property
:param key: The name of system property
"""
endpoint = '/'.join([self.endpoint, key])
return self._submit_request(get, endpoint)
def update_prop(self, key, value):
"""
Create or update a system property
:param key: The name of system property
:param value: The value of system property
"""
payload = {
'@key': key,
'@value': value,
}
return self._submit_request(post, self.endpoint, json=payload)
def delete_prop(self, key):
"""
Delete a system property
:param key: The name of system property
"""
endpoint = '/'.join([self.endpoint, key])
return self._submit_request(delete, endpoint)
def get_co
|
ncurrent_sessions(self):
"""
Retrieve concurrent sessions
"""
endpoint = '/'.join([self.endpoint.rpartition('/')[0], 'statistics', 's
|
essions'])
return self._submit_request(get, endpoint)
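# Usage sketch (hypothetical host, shared secret and property names):
# api = System('https://xmpp.example.org:9090', 's3cr3t')
# api.update_prop('xmpp.domain', 'example.org')
# api.get_prop('xmpp.domain')
# api.delete_prop('xmpp.domain')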
|
chayapan/django-sentry
|
tests/sentry/web/helpers/tests.py
|
Python
|
bsd-3-clause
| 2,132 | 0.002345 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.contrib.auth.models import User
from sentry.constants import MEMBER_USER
from sentry.models import Project
from sentry.web.helpers import get_project_list
from tests.base import TestCase
class GetProjectListTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="admin", email="admin@localhost")
self.project = Project.objects.get()
assert self.project.public is True
self.project2 = Project.objects.create(name='Test', slug='test', owner=self.user, pu
|
blic=False)
@mock.patch('sentry.models.Team.objects.get_for_user', mock.Mock(return_value={}))
def test_includes_public_projects_without_access(self):
project_list = get_project_list(self.user)
self.assertEquals(len(project_list), 1)
self.assertIn(self.project.id, project_list)
@mock.patch('sentry.models.Team.objects.get_for_user', mock.Mock(return_value={}))
def test_does_exclude_public_projects_without_access(self):
|
project_list = get_project_list(self.user, MEMBER_USER)
self.assertEquals(len(project_list), 0)
@mock.patch('sentry.models.Team.objects.get_for_user')
def test_does_include_private_projects_without_access(self, get_for_user):
get_for_user.return_value = {self.project2.team.id: self.project2.team}
project_list = get_project_list(self.user)
get_for_user.assert_called_once_with(self.user, None)
self.assertEquals(len(project_list), 2)
self.assertIn(self.project.id, project_list)
self.assertIn(self.project2.id, project_list)
@mock.patch('sentry.models.Team.objects.get_for_user')
def test_does_exclude_public_projects_but_include_private_with_access(self, get_for_user):
get_for_user.return_value = {self.project2.team.id: self.project2.team}
project_list = get_project_list(self.user, MEMBER_USER)
get_for_user.assert_called_once_with(self.user, MEMBER_USER)
self.assertEquals(len(project_list), 1)
self.assertIn(self.project2.id, project_list)
|
schef/schef.github.io
|
source/11/mc-11-05-sk-mt.py
|
Python
|
mit
| 3,429 | 0.022164 |
#!/usr/bin/python
# Written by Stjepan Horvat
# ( zvanstefan@gmail.com )
# by the exercises from David Lucas Burge - Perfect Pitch Ear Training Supercourse
# Thanks to Wojciech M. Zabolotny ( wzab@ise.pw.edu.pl ) for snd-virmidi example
# ( wzab@ise.pw.edu.pl )
import random
import time
import sys
import re
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 10-17:")
print ("Aural chord analisys. First you have to unlock the sound by ear. And then you have to indentify. It's a very powerful tehnique to stabilize perfect pitch.")
#from c to c'' white tones
#c major scale
notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
noteC = [ 36, 48, 60, 72, 84, 96 ]
def playNote(noteOne, noteTwo, noteThree):
fout.write((chr(0x90)+chr(noteOne)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteOne)+chr(127)).encode('utf-8'))
fout.flush()
fout.write((chr(0x90)+chr(noteTwo)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteTwo)+chr(127)).encode('utf-8'))
fout.flush()
fout.write((chr(0x90)+chr(noteThree)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteThree)+chr(127)).encode('utf-8'))
fout.flush()
def nameNote(note):
if note in noteC:
return("C")
elif note-2 in noteC:
return("D")
elif note-4 in noteC:
return("E")
elif note-5 in noteC:
return("F")
elif note-7 in noteC:
return("G")
elif note-9 in noteC:
return("A")
elif note-11 in noteC:
return("H")
def name2Note(name):
if name == "c":
return(60)
if name == "d":
return(62)
if name == "e":
return(64)
if name == "f":
return(65)
if name == "g":
return(67)
if name == "a":
return(69)
if name == "h":
return(71)
usage = "Usage: 1-repeat, <note> <note> \"c d e\", ?-usage."
round = 1
a = re.compile("^[a-h] [a-h] [a-h]$")
try:
print(usage)
while True:
noteOne = random.choice(notes)
while True:
while True:
noteTwo = random.choice(notes)
if nameNote(noteOne) != nameNote(noteTwo):
break
            while True:
                noteThree = random.choice(notes)
                if nameNote(noteThree) != nameNote(noteTwo):
                    break
if nameNote(noteOne) != nameNote(noteThree):
break
match = False
while not match:
done = False
playNote(noteOne, noteTwo, noteThree)
while not done:
n = input("? ")
if n == "1":
playNote(noteOne, noteTwo, noteThree)
if n == "?":
print(usage)
if n == "help":
print(nameNote(noteOne).lower(
|
), nameNote(noteTwo).lower(), nameNote(noteThree).lower())
elif a.match(n):
|
splitNote = n.split()
if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower() and splitNote[2] == nameNote(noteThree).lower():
round += 1
print("Correct. Next round. " + str(round) + ".:")
done = True
match = True
else:
playNote(name2Note(splitNote[0]), name2Note(splitNote[1]), name2Note(splitNote[2]))
except KeyboardInterrupt:
pass
|
emmdim/guifiAnalyzer
|
plot/plotsServices.py
|
Python
|
gpl-3.0
| 4,467 | 0.004477 |
#!/usr/bin/env python
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
#import matplotlib.cbook as cbook
#from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from statsmodels.distributions.empirical_distribution import ECDF
from collections import Counter
from ..guifiwrapper.guifiwrapper import *
#root = 3671
#root = 2444
root = 17711
g = CNMLWrapper(root)
import os
basedir = os.path.join(os.getcwd(), 'figs')
baseservicesdir = os.path.join(basedir, 'services')
for d in [basedir, baseservicesdir]:
if not os.path.exists(d):
os.makedirs(d)
user = ['meteo', 'radio', 'web', 'VPS', 'tv', 'wol', 'Proxy', 'mail', 'irc',
'teamspeak', 'ftp', 'asterisk', 'apt-cache', 'AP', 'IM', 'p2p',
'VPN', 'Streaming', 'games', 'cam']
mgmt = ['iperf', 'LDAP', 'DNS', 'SNPgraphs', 'NTP', 'AirControl']
# Extract user services and frequencies
#userServices = [s.type for s in g.services.values() if s.type in user]
#totalServices = len(userServices)
#userServices = Counter(userServices).items()
#userServicesNumber = len(userServices)
#userTypes = [typ for (typ,values) in userServices]
#userValues = [float(value)/float(totalServices) for (typ,value) in userServices]
# Extract mgmt services and frequencies
services = [s.type for s in g.services.values() if s.type in user]
totalServices = len(services)
services = Counter(services).items()
from operator import itemgetter
services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
#ax = fig.add_subplot(121)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title('User Services Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=45, fontsize=13)
services1 = [s.type for s in g.services.values() if s.type in mgmt]
totalServices1 = len(services1)
services1 = Counter(services1).items()
services1.sort(key=itemgetter(1), reverse=True)
servicesNumber1 = len(services1)
types1 = [typ for (typ, value1) in services1]
values1 = [float(value) / float(totalServices1) for (typ, value) in services1]
if False:
# Disable analytical mgmt frequency image
ind1 = np.arange(servicesNumber1)
ax1 = fig.add_subplot(122)
rects = ax1.bar(ind1, value
|
s1, width, color='black')
ax1.set_xlim(-width, len(ind1) + width)
ax1.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
# ax1.set_ylabel('Frequency')
#ax1.set_xlabel('Service Type')
ax1.set_title('Management Services Frequency')
xTickMarks1 = [str(i) for i in types1]
ax1.set_xticks(ind1 + width)
xtickNames1 = ax1.set_xticklabels(xTickM
|
arks1)
plt.setp(xtickNames1, rotation=0, fontsize=13)
plt.show()
figfile = os.path.join(baseservicesdir, str(root) + "services_frequency")
fig.savefig(figfile, format='png', dpi=fig.dpi)
# Other categories
for s in g.services.values():
if s.type in mgmt:
s.type = "Management"
elif s.type != "Proxy":
s.type = "Other services"
services = [s.type for s in g.services.values()]
totalServices = len(services)
services = Counter(services).items()
sercices = services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title(' Service Categories Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=0, fontsize=12)
plt.show()
figfile = os.path.join(
baseservicesdir,
str(root) +
"services_frequency_categories")
fig.savefig(figfile, format='png', dpi=fig.dpi)
|
arashzamani/lstm_nlg_ver1
|
language_parser/SemanticVector.py
|
Python
|
gpl-3.0
| 679 | 0.002946 |
# -*- coding: utf-8 -*-
import gensim, loggi
|
ng
class SemanticVector:
model = ''
def __init__(self, structure):
self.structure = structure
def model_word2vec(self, min_count=15, window=15, size=100):
print 'preparing sentences list'
sentences = self.structure.prepare_list_of_words_in_sentences()
print 'start modeling'
self.model = gensim.models.Word2Vec(sentences, size=size, window=window, min_count=min_count, workers=4, sample=0.001, sg=0)
return self.model
def save_model(se
|
lf, name):
self.model.save(name)
def load_model(self, name):
self.model = gensim.models.Word2Vec.load(name)
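# Minimal usage sketch (toy corpus; any object exposing
# prepare_list_of_words_in_sentences() would do):
#     class ToySentences(object):
#         def prepare_list_of_words_in_sentences(self):
#             return [['the', 'cat', 'sat'], ['the', 'dog', 'sat']] * 50
#     sv = SemanticVector(ToySentences())
#     sv.model_word2vec(min_count=1, window=2, size=10)
#     neighbours = sv.model.most_similar('cat')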
|
jinzekid/codehub
|
数据结构与算法/heap_sort/类定义操作定义堆结果以及排序.py
|
Python
|
gpl-3.0
| 2,192 | 0.008205 |
# Class-based heap structure and heap sort
class DLinkHeap(object):
def __init__(self, list=None, N = 0):
self.dList = list
self.lengthSize = N
    # Insert a value into the heap
    def insert_heap(self, data):
        self.dList.append(data)
        self.lengthSize += 1
        self.sift_up(self.lengthSize)
    # Build the initial heap, sifting each element up as it is added
    def init_heap(self):
        n = self.lengthSize
        for i in range(n):
            self.sift_up(i + 1)
    # Swap two elements of the list
def swap(self, a, b):
tmp = self.dList[a]
self.dList[a] = self.dList[b]
self.dList[b] = tmp
    # Sift a node down towards the leaves
def sift_down(self, size):
n = size
t = 0
tmp_pos = 0
        # Note: Python's / always yields a float, hence the int() below
while t < int(n/2):
if self.dList[t] > self.dList[2*t+1]:
tmp_pos = 2*t+1
else:
tmp_pos = t
if 2*t+2 < n:
if self.dList[tmp_pos] > self.dList[2*t+2]:
tmp_pos = 2*t+2
if t != tmp_pos:
self.swap(tmp_pos, t)
t = tmp_pos
else:
break
    # Sift a node up towards the root (parent of index i is (i - 1) // 2)
    def sift_up(self, size):
        n = size
        i = n - 1
        flag = 0
        while i > 0 and flag == 0:
            parent_i = int((i - 1) / 2)
            if self.dList[i] < self.dList[parent_i]:
                self.swap(i, parent_i)
                i = parent_i
            else:
                flag = 1
    # Heap sort: repeatedly move the current minimum to the back of the range
def heap_sort(self):
n = self.lengthSize
while n > 0:
|
self.swap(0, n-1)
n -= 1
self.sift_down(n)
    # Print the first `size` elements of the heap
def print_heap(self, size):
for idx in range(size):
print(self.dList[idx], end=" ")
print()
if __name__ == "__main__":
k = 0
    # read n, the number of values
n = int(input())
    # read the list of values
input_L = list(map(int, input().split()))
L = input_L
dLinkHeap = DLinkHeap(L, n)
dLinkHeap.init_heap()
dLinkHeap.print_heap(n)
print("-----after
|
sort-----")
dLinkHeap.heap_sort()
dLinkHeap.print_heap(n)
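# Example session (hypothetical input): entering 5 and then "3 1 4 1 5" prints
# the min-heap layout first and afterwards the values in descending order
# (5 4 3 1 1), since heap_sort repeatedly swaps the current minimum to the back
# of the shrinking range.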
|
avanzosc/odoo-addons
|
slide_channel_technology/models/__init__.py
|
Python
|
agpl-3.0
| 115 | 0 |
from . import slide_chan
|
nel_technology_category
from . import slide_channel_technolog
|
y
from . import slide_channel
|
SoftFx/TTWebClient-Python
|
TTWebClientSample/public_currencies.py
|
Python
|
mit
| 796 | 0.002513 |
#!/usr/bin/python3
__author__ = 'ivan.shynkarenka'
import argparse
from TTWebClient.TickTraderWebClient import Tick
|
TraderWebClient
def main():
parser = argparse.ArgumentParser(description='TickTrader Web API sample')
parser.add_argument('web_api_address', help='TickTrader Web API address')
args = parser.parse_args()
# Create instance of the TickTrader Web API client
client = TickTraderWebClient(args.web_api_address)
# Public currencies
currencies = client.get_public_all_currencies()
for c in currencies:
print('Currency: {0}'.format(c['Name']))
currency = client.get_
|
public_currency(currencies[0]['Name'])
print("{0} currency precision: {1}".format(currency[0]['Name'], currency[0]['Precision']))
if __name__ == '__main__':
main()
|
ramprasathdgl/TangoWithDjango
|
TangoWithDjango/rango/admin.py
|
Python
|
gpl-3.0
| 287 | 0.017422 |
from django.c
|
ontrib import admin
# Register your models here.
from django.contrib import admin
from rango.models import Category, Page
class PageAdmin(admin.ModelAdmin):
list_display = ('title', 'category', 'url')
admin.site.regis
|
ter(Category)
admin.site.register(Page,PageAdmin)
|
elivre/arfe
|
e2014/SCRIPTS/055-rede2014_rede_gephi_com_ipca_csv.py
|
Python
|
mit
| 3,896 | 0.014117 |
#!/usr/bin/env python
# coding: utf-8
# # rede_gephi_com_ipca_csv
# In[6]:
ano_eleicao = '2014'
rede =f'rede{ano_eleicao}'
csv_dir = f'/home/neilor/{rede}'
# In[7]:
dbschema = f'rede{ano_eleicao}'
table_edges = f"{dbschema}.gephi_edges_com_ipca_2018"
table_nodes = f"{dbschema}.gephi_nodes_com_ipca_2018"
table_receitas = f"{dbschema}.receitas_com_ipca_2018"
table_candidaturas = f"{dbschema}.candidaturas_com_ipca_2018"
table_municipios = f"{dbschema}.municipios_{ano_eleicao}"
# In[8]:
import sys
sys.path.append('../')
import mod_tse as mtse
# In[9]:
import os
home = os.environ["HOME"]
local_dir = f'{home}/temp'
# In[10]:
mtse.execute_query(f"update {table_municipios} set rede= 'N';")
# ## BRAZIL NETWORK
# In[11]:
def salva_rede_brasil(csv_dir,rede):
rede_dir_BR = f'{csv_dir}/{rede}_Brasil'
os.makedirs(rede_dir_BR)
edges_csv_query=f"""copy
(
select * from {table_edges}
)
TO '{rede_dir_BR}/{rede}_Brasil_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.exec
|
ute_query(edges_csv_query)
nodes_csv_query=f"""copy
(
select * from {table_nodes}
)
TO '{rede_dir_BR}/{rede}_Brasil_nodes.
|
csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_csv_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas}
)
TO '{rede_dir_BR}/{rede}_Brasil_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas}
)
TO '{rede_dir_BR}/{rede}_Brasil_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# ## NETWORKS BY STATE
# In[12]:
def salva_rede_csv_uf(csv_dir,rede,sg_uf):
rede_dir_uf = f'{csv_dir}/{rede}_{sg_uf}'
os.makedirs(rede_dir_uf)
edges_query=f"""copy
(
select * from {table_edges} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_query)
nodes_query=f"""copy
(
select * from {table_nodes} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas} where sg_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas} where receptor_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# In[13]:
import pandas as pd
import shutil
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.makedirs(csv_dir)
salva_rede_brasil(csv_dir,rede)
df_uf = mtse.pandas_query(f'select sg_uf from {table_candidaturas} group by sg_uf order by sg_uf')
for index, row in df_uf.iterrows():
sg_uf = row['sg_uf']
salva_rede_csv_uf(csv_dir,rede,sg_uf)
# In[14]:
import datetime
print(datetime.datetime.now())
# In[ ]:
|
craigds/django-mpathy
|
tests/test_db_consistency.py
|
Python
|
bsd-3-clause
| 2,474 | 0.000404 |
import pytest
from django.db import connection, IntegrityError
from .models import MyTree
def flush_constraints():
# the default db setup is to have constraints DEFERRED.
# So IntegrityErrors only happen when the transaction commits.
# Django's testcase thing does eventually flush the constraints but to
# actually test it *within* a testcase we have to flush it manually.
connection.cursor().execute("SET CONSTRAINTS ALL IMMEDIATE")
def test_node_creation_simple(db):
MyTree.objects.create(label='root1')
MyTree.objects.create(label='root2')
def test_node_creation_with_no_label(db):
# You need a label
with pytest.raises(ValueError):
|
MyTree.objects.create(label='')
with pytest.raises(ValueError):
MyTree.objects.create(label=None)
with pytest.raises(ValueError):
MyTree.objects.create()
def test_root_node_already_exists(db):
MyTree.objects.create(label='root1')
with pytest.raises(IntegrityError):
MyTree.objects.creat
|
e(label='root1')
def test_same_label_but_different_parent(db):
root1 = MyTree.objects.create(label='root1')
MyTree.objects.create(label='root1', parent=root1)
def test_same_label_as_sibling(db):
root1 = MyTree.objects.create(label='root1')
MyTree.objects.create(label='child', parent=root1)
with pytest.raises(IntegrityError):
MyTree.objects.create(label='child', parent=root1)
def test_parent_is_self_errors(db):
root1 = MyTree.objects.create(label='root1')
root1.parent = root1
with pytest.raises(IntegrityError):
root1.save()
flush_constraints()
def test_parent_is_remote_ancestor_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
with pytest.raises(IntegrityError):
# To test this integrity error, have to update table without calling save()
# (because save() changes `ltree` to match `parent_id`)
MyTree.objects.filter(pk=desc3.pk).update(parent=root1)
flush_constraints()
def test_parent_is_descendant_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
child2.parent = desc3
with pytest.raises(IntegrityError):
child2.save()
flush_constraints()
|
alogg/dolfin
|
test/unit/nls/python/PETScSNESSolver.py
|
Python
|
gpl-3.0
| 2,986 | 0.005023 |
"""Unit test for the SNES nonlinear solver"""
# Copyright (C) 2012 Patrick E. Farrell
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2012-10-17
# Last changed: 2012-10-26
"""Solve the Yamabe PDE which arises in the differential geometry of general
relativity. http://arxiv.org/abs/1107.0360.
The Yamabe equation is highly nonlinear and supports many solutions. However,
only one of these is of physical relevance -- the positive solution.
This unit test demonstrates the capability of the SNES solver to accept bounds
on the resulting solution. The plain Newton method converges to an unphysical
negative solution, while the SNES solution with {sign: nonnegative} converges
to the physical positive solution.
"""
from d
|
olfin import *
import unittest
try:
parameters["linear_algebra_backend"] = "PETSc"
except Run
|
timeError:
import sys; sys.exit(0)
parameters["form_compiler"]["quadrature_degree"] = 5
mesh = Mesh("doughnut.xml.gz")
V = FunctionSpace(mesh, "CG", 1)
bcs = [DirichletBC(V, 1.0, "on_boundary")]
u = Function(V)
v = TestFunction(V)
u.interpolate(Constant(-1000.0))
r = sqrt(triangle.x[0]**2 + triangle.x[1]**2)
rho = 1.0/r**3
F = (8*inner(grad(u), grad(v))*dx +
rho * inner(u**5, v)*dx +
(-1.0/8.0)*inner(u, v)*dx)
newton_solver_parameters = {"nonlinear_solver": "newton",
"linear_solver": "lu",
"newton_solver": {"maximum_iterations": 100,
"report": False}}
snes_solver_parameters = {"nonlinear_solver": "snes",
"linear_solver": "lu",
"snes_solver": {"maximum_iterations": 100,
"sign": "nonnegative",
"report": False}}
class SNESSolverTester(unittest.TestCase):
def test_snes_solver(self):
solve(F == 0, u, bcs, solver_parameters=snes_solver_parameters)
self.assertTrue(u.vector().min() >= 0)
def test_newton_solver(self):
solve(F == 0, u, bcs, solver_parameters=newton_solver_parameters)
self.assertTrue(u.vector().min() < 0)
if __name__ == "__main__":
# Turn off DOLFIN output
set_log_active(False)
print ""
print "Testing DOLFIN nls/PETScSNESSolver interface"
print "--------------------------------------------"
unittest.main()
|
quittle/bazel_toolbox
|
assert/scripts/assert_equal.py
|
Python
|
apache-2.0
| 4,276 | 0.005847 |
# Copyright (c) 2016-2017 Dustin Doloff
# Licensed under Apache License v2.0
import argparse
import difflib
import hashlib
import os
import subprocess
import zipfile
# Resets color formatting
COLOR_END = '\33[0m'
# Modifies characters or color
COLOR_BOLD = '\33[1m'
COLOR_DISABLED = '\33[02m' # Mostly just means darker
# Sets the text color
COLOR_GREEN = '\33[32m'
COLOR_YELLOW = '\33[33m'
COLOR_RED = '\33[31m'
def parse_args():
parser = argparse.ArgumentParser(description='Asserts files are the same')
parser.add_argument('--stamp', type=argparse.FileType('w+'), required=True,
help='Stamp file to record action completed')
parser.add_argument('--files', type=str, nargs='+', required=True)
return parser.parse_args()
def bytes_to_str(bytes):
return bytes.decode('utf-8', 'backslashreplace')
def color_diff(text_a, text_b):
"""
Compares two pieces of text and returns a tuple
The first value is a colorized diff of the texts.
The second value is a boolean, True if there was a diff, False if there wasn't.
"""
sequence_matcher = difflib.SequenceMatcher(None, text_a, text_b)
colorized_diff = ''
diff = False
for opcode, a0, a1, b0, b1 in sequence_matcher.get_opcodes():
if opcode == 'equal':
colorized_diff += bytes_to_str(sequence_matcher.a[a0:a1])
elif opcode == 'insert':
colorized_diff += COLOR_BOLD + COLOR_GREEN + bytes_to_str(sequence_matcher.b[b0:b1]) + COLOR_END
diff = True
elif opcode == 'delete':
colorized_diff += COLOR_BOLD + COLOR_RED + bytes_to_str(sequence_matcher.a[a0:a1]) + COLOR_END
diff = True
elif opcode == 'replace':
colorized_diff += (COLOR_BOLD + COLOR_YELL
|
OW + bytes_to_str(sequence_mat
|
cher.a[a0:a1]) +
COLOR_DISABLED + bytes_to_str(sequence_matcher.b[b0:b1]) + COLOR_END)
diff = True
else:
raise RuntimeError('unexpected opcode ' + opcode)
return colorized_diff, diff
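# Small illustration (hypothetical byte strings): returns the ANSI-coloured diff
# plus a flag that is True whenever the two inputs differ, e.g.
# colorized, changed = color_diff(b'hello world', b'hello there')  # changed == True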
def hash_file(file):
"""
Computes the SHA-256 hash of the file
file - The file to hash
"""
hasher = hashlib.sha256()
with open(file, 'rb') as f:
for block in iter(lambda: f.read(1024), b''):
hasher.update(block)
return hasher.digest()
def summarize(file):
"""
Summarizes a file via it's metadata to provide structured text for diffing
"""
summary = None
if zipfile.is_zipfile(file):
with zipfile.ZipFile(file) as zf:
summary = ''
for info in zf.infolist():
summary += 'Entry: ('
summary += ', '.join(s + ': ' + repr(getattr(info, s)) for s in info.__slots__)
summary += ') ' + os.linesep
assert summary is not None, 'Unable to summarize %s' % file
return summary
def main():
args = parse_args()
files = args.files
assert len(files) >= 2, 'There must be at least two files to compare'
files_hashes = set()
max_file_size = 0
for file in files:
files_hashes.add(hash_file(file))
max_file_size = max(max_file_size, os.stat(file).st_size)
# Check hashes first
if len(files_hashes) != 1:
for i in range(len(files) - 1):
file_a = files[i]
file_b = files[i + 1]
file_a_contents = None
file_b_contents = None
if max_file_size > 1024 * 1024:
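                # Files above 1 MiB are compared via their metadata summaries to keep the
                # colorized diff output manageable.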
file_a_contents = summarize(file_a)
file_b_contents = summarize(file_b)
else:
with open(file_a, 'rb') as a:
file_a_contents = a.read()
with open(file_b, 'rb') as b:
file_b_contents = b.read()
diff, problem = color_diff(file_a_contents, file_b_contents)
assert not problem, 'File {a} does not match {b}:{newline}{diff}'.format(
a = file_a,
b = file_b,
newline = os.linesep,
diff = diff)
assert False, 'File hashes don\'t match.'
with args.stamp as stamp_file:
stamp_file.write(str(args))
if __name__ == '__main__':
main()
|
depp/sggl
|
sggl.py
|
Python
|
bsd-2-clause
| 232 | 0 |
#!/usr/bin/env python3
# Copyright 2015 Dietrich Epp.
# This file is part of SGGL. SGGL is licensed under the terms of the
# 2-clause BSD license. For more information, see LICENSE.txt.
import glgen.__main__
glgen.__main__.main()
|
wemanuel/smry
|
smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/instance_groups/managed/set_target_pools.py
|
Python
|
apache-2.0
| 3,208 | 0.004364 |
# Copyright 2015 Google Inc. All Rights Reserved.
"""Command for setting target pools of instance group manager."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class SetTargetPools(base_classes.BaseAsyncMutator):
"""Set instances target pools of instance group manager."""
@staticmethod
def Args(parser):
parser.add_argument('instance_group_manager',
help='Instance group manager name.')
mutually_exclusive_group = parser.add_mutually_exclusive_group()
mutually_exclusive_group.add_argument(
'--clear-target-pools',
action='store_true',
help='Do not add instances to any Compute Engine Target Pools.')
mutually_exclusive_group.add_argument(
'--target-pools',
type=arg_parsers.ArgList(min_length=1),
action=arg_parsers.FloatingListValuesCatcher(),
metavar='TARGET_POOL',
help=('Compute Engine Target Pools to add the instances to. '
              'Target Pools must be specified by name or by URL. Example: '
'--target-pool target-pool-1,target-pool-2'))
utils.AddZoneFlag(
parser,
resource_type='instance group manager',
operation_type='set target pools')
@property
def method(self):
return 'SetTargetPools'
@property
def service(self):
return self.compute.instanceGroupManagers
@property
def resource_type(self):
return 'instanceGroupManagers'
def _ValidateArgs(self, args):
if not args.clear_target_pools and args.target_pools is None:
raise exceptions.InvalidArgumentException(
'--target-pools', 'not passed but --clear-target-pools not present '
'either.')
def CreateRequests(self, args):
self._ValidateArgs(args)
ref = self.CreateZonalReference(args.instance_group_manager, args.zone)
region = utils.ZoneNameToRegionName(ref.zone)
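    # --clear-target-pools maps to an empty pool list; otherwise each named pool is
    # resolved to a regional target pool reference before building the request.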
if args.clear_target_pools:
pool_refs = []
else:
pool_refs = self.CreateRegionalReferences(
args.target_pools, region, resource_type='targetPools')
pools = [pool_ref.SelfLink() for pool_ref in pool_refs]
request = (
self.messages.ComputeInstanceGroupManagersSetTargetPoolsRequest(
instanceGroupManager=ref.Name(),
instanceGroupManagersSetTargetPoolsRequest=(
                    self.messages.InstanceGroupManagersSetTargetPoolsRequest(
targetPools=pools,
)
),
project=self.project,
zone=ref.zone,)
)
return [request]
SetTargetPools.detailed_help = {
'brief': 'Set instance template for instance group manager.',
'DESCRIPTION': """
*{command}* sets the target pools for an existing instance group
manager.
The new target pools won't apply to existing instances in the group
unless they are recreated using the recreate-instances command. But any
new instances created in the managed instance group will be added to all
of the provided target pools for load balancing purposes.
""",
}
|
vivek8943/tracker_project
|
tracker_project/tracker/migrations/0001_initial.py
|
Python
|
mit
| 1,059 | 0.002833 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Incident',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=150)),
('description', models.TextField(max_length=1000)),
                ('severity', models.CharField(default='ME', max_length=2, choices=[('UR', 'Urgent'), ('HI', 'High'), ('ME', 'Medium'), ('LO', 'Low'), ('IN', 'Info')])),
('closed', models.BooleanField(default=False)),
('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
]
|
gltn/stdm
|
stdm/ui/wizard/profile_tenure_view.py
|
Python
|
gpl-2.0
| 67,557 | 0.000148 |
"""
/***************************************************************************
Name : ProfileTenureView
Description : A widget for rendering a profile's social tenure
relationship.
Date : 9/October/2016
copyright : John Kahiu
email : gkahiu at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import math
from qgis.PyQt.QtCore import (
pyqtSignal,
QFile,
QIODevice,
QLineF,
QPointF,
QRect,
QRectF,
QSize,
QSizeF,
Qt
)
from qgis.PyQt.QtGui import (
QBrush,
QColor,
QCursor,
QFont,
QFontMetrics,
QIcon,
QImage,
QLinearGradient,
QKeyEvent,
QPainter,
QPainterPath,
QPen,
QPolygonF,
QTextLayout
)
from qgis.PyQt.QtWidgets import (
QApplication,
QComboBox,
QDialog,
QGraphicsItem,
QGraphicsLineItem,
QGraphicsScene,
QGraphicsTextItem,
QGraphicsView,
QGridLayout,
QLabel,
QMessageBox,
QSizePolicy,
QSpacerItem,
QToolButton,
QWidget
)
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.image_export_settings import ImageExportSettings
class Arrow(QGraphicsLineItem):
"""
Renders an arrow object (with line and arrow head) from one item to
another. The arrow head size can be customized by specifying the angle
and width of the arrow base.
"""
def __init__(self, start_item, end_item, base_width=None,
tip_angle=None, fill_arrow_head=False,
parent_item=None):
"""
Class constructor
        :param start_item: Arrow start item.
        :type start_item: BaseTenureItem
        :param end_item: Arrow end item.
        :type end_item: BaseTenureItem
:param base_width: Width (in pixels) of the arrow base. If not
specified, it defaults to 9.0.
:type base_width: float
:param tip_angle: Angle (in radians) between the two line components
at the tip of the arrow. If not specified, it defaults to
math.radians(50.0).
Minimum math.radians(10.0)
Maximum math.radians(<90.0)
:type tip_angle: float
:param fill_arrow_head: True to close and fill the arrow head with
the specified pen and brush settings. Defaults to False.
:type fill_arrow_head: bool
:param parent_item: Parent item.
:type parent_item: QGraphicsItem
:param scene: Scene object.
:type scene: QGraphicsScene
"""
super(Arrow, self).__init__(parent_item)
        self._start_item = start_item
        self._end_item = end_item
self.base_width = base_width
if self.base_width is None:
self.base_width = 9.0
self._angle = tip_angle
if tip_angle is None:
self._angle = math.radians(50.0)
self.fill_arrow_head = fill_arrow_head
self.setPen(
QPen(
Qt.black,
1,
Qt.SolidLine,
Qt.RoundCap,
Qt.MiterJoin
)
)
self.brush = QBrush(Qt.black)
self._arrow_head_points = []
@property
def start_item(self):
"""
:return: Returns the start item for the arrow.
:rtype: BaseTenureItem
"""
return self._start_item
@property
def end_item(self):
"""
:return: Returns the end item for the arrow.
:rtype: BaseTenureItem
"""
return self._end_item
@property
def start_point(self):
"""
:return: Returns the arrow start point.
:rtype: QPointF
"""
return self._start_item.pos()
@property
def end_point(self):
"""
:return: Returns the arrow end point.
:rtype: QPointF
"""
return self._end_item.pos()
def boundingRect(self):
extra = (self.base_width + self.pen().widthF()) / 2.0
p1 = self.line().p1()
p2 = self.line().p2()
rect = QRectF(
p1, QSizeF(p2.x() - p1.x(), p2.y() - p1.y())
).normalized().adjusted(-extra, -extra, extra, extra)
return rect
def arrow_head_polygon(self):
"""
:return: Returns the arrow head as a QPolygonF object.
:rtype: QPolygonF
"""
return QPolygonF(self._arrow_head_points)
def shape(self):
path = super(Arrow, self).shape()
path.addPolygon(self.arrow_head_polygon())
return path
@property
def angle(self):
"""
:return: Returns the value of the angle at the tip in radians.
:rtype: float
"""
return self._angle
@angle.setter
def angle(self, angle):
"""
Sets the value of the angle to be greater than or equal to
math.radians(10.0) and less than math.radians(90).
:param angle: Angle at the tip of the arrow in radians.
:type angle: float
"""
min_angle = math.radians(10.0)
max_angle = math.radians(90)
if angle < min_angle:
self._angle = min_angle
elif angle > max_angle:
self._angle = max_angle
else:
self._angle = angle
self.update()
@property
def arrow_points(self):
"""
:return: Returns a collection of points used to draw the arrow head.
:rtype: list(QPointF)
"""
return self._arrow_head_points
def update_position(self):
"""
Updates the position of the line and arrowhead when the positions of
the start and end items change.
"""
line = QLineF(
self.mapFromScene(self.start_item.center()),
self.mapFromScene(self.end_item.center())
)
self.setLine(line)
def _intersection_point(self, item, reference_line):
# Computes the intersection point between the item's line segments
# with the reference line.
intersect_point = QPointF()
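        # Test the reference line against each edge segment of the item; a
        # BoundedIntersection means the crossing lies within both segments.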
for l in item.line_segments():
intersect_type = l.intersect(reference_line, intersect_point)
if intersect_type == QLineF.BoundedIntersection:
return intersect_point
return None
def paint(self, painter, option, widget):
"""
Draw the arrow item.
"""
if self._start_item.collidesWithItem(self._end_item):
return
painter.setPen(self.pen())
center_line = QLineF(self.start_item.center(), self.end_item.center())
# Get intersection points
start_intersection_point = self._intersection_point(
self._start_item,
center_line
)
end_intersection_point = self._intersection_point(
self._end_item,
center_line
)
# Do not draw if there are no intersection points
if start_intersection_point is None or end_intersection_point is None:
return
arrow_line = QLineF(start_intersection_point, end_intersection_point)
self.setLine(arrow_line)
arrow_length = arrow_line.length()
# Setup computation parameters
cnt_factor = (self.base_width / 2.0) / (
math.tan(self._angle / 2.0) * arrow_length
)
cnt_point_delta = (self.base_width / 2.0) / arrow_length
# Get arrow base along the line
arrow_base_x = end_intersection_point.x() - (arrow_line.dx() * cnt
|
hackliff/domobot
|
kinect/pySVM/test/plotLinearSVC.py
|
Python
|
apache-2.0
| 1,628 | 0.0043 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-SVC (Support Vector Classification)
=========================================================
The classification application of the SVM is used below. The
`Iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_
dataset has been used for this example
The decision boundaries, are shown with all the points in the training-set.
"""
print __doc__
import sys
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
#iris = datasets.load_iris()
#X = iris.data[:, :2] # we only take the first two features.
#Y = iris.target
XTmp, Y = datasets.load_svmlight_file("../SVMData.txt")
X = XTmp.toarray()
h = .02 # step size in the mesh
clf = svm.SVC(C=1.0, kernel='linear')
# we create an instance of SVM Classifier and fit the data.
clf.fit(X, Y)
# Plot the decision boundary. For that, we will asign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
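# Evaluate the trained classifier on every grid point to obtain the decision regions.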
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
|
ulif/pulp
|
server/test/unit/server/db/test_manage.py
|
Python
|
gpl-2.0
| 40,647 | 0.003174 |
"""
Test the pulp.server.db.manage module.
"""
from argparse import Namespace
from cStringIO import StringIO
import os
from mock import call, inPy3k, MagicMock, patch
from mongoengine.queryset import DoesNotExist
from ... import base
from pulp.common.compat import all, json
from pulp.server.db import manage
from pulp.server.db.migrate import models
from pulp.server.db.model import MigrationTracker
import pulp.plugins.types.database as types_db
import migration_packages.a
import migration_packages.b
import migration_packages.duplicate_versions
import migration_packages.platform
import migration_packages.raise_exception
import migration_packages.version_gap
import migration_packages.version_zero
import migration_packages.z
# This is used for mocking
_test_type_json = '''{"types": [{
"id" : "test_type_id",
"display_name" : "Test Type",
"description" : "Test Type",
"unit_key" : ["attribute_1", "attribute_2", "attribute_3"],
"search_indexes" : ["attribute_1", "attribute_3"]
}]}'''
# This is used to mock the entry_point system for discovering migration packages.
def iter_entry_points(name):
class FakeEntryPoint(object):
def __init__(self, migration_package):
self._migration_package = migration_package
def load(self):
return self._migration_package
test_packages = [
migration_packages.a,
migration_packages.duplicate_versions,
migration_packages.raise_exception,
migration_packages.version_gap,
migration_packages.version_zero,
migration_packages.z,
]
if name == models.MIGRATIONS_ENTRY_POINT:
return [FakeEntryPoint(package) for package in test_packages]
return []
# Mock 1.0.0 has a built in mock_open, and one day when we upgrade to 1.0.0 we can use that. In the
# meantime, I've included the example for mock_open as listed in the Mock 0.8 docs, slightly
# modified to allow read_data to just be a str.
# http://www.voidspace.org.uk/python/mock/0.8/examples.html?highlight=open#mocking-open
if inPy3k:
file_spec = [
'_CHUNK_SIZE', '__enter__', '__eq__', '__exit__',
'__format__', '__ge__', '__gt__', '__hash__', '__iter__', '__le__',
'__lt__', '__ne__', '__next__', '__repr__', '__str__',
'_checkClosed', '_checkReadable', '_checkSeekable',
'_checkWritable', 'buffer', 'close', 'closed', 'detach',
'encoding', 'errors', 'fileno', 'flush', 'isatty',
'line_buffering', 'mode', 'name',
'newlines', 'peek', 'raw', 'read', 'read1', 'readable',
'readinto', 'readline', 'readlines', 'seek', 'seekable', 'tell',
'truncate', 'writable', 'write', 'writelines']
else:
file_spec = file
def mock_open(mock=None, read_data=None):
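    # Builds a MagicMock that mimics the builtin open(): the returned handle supports
    # the context-manager protocol and reads from a StringIO seeded with read_data.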
if mock is None:
mock = MagicMock(spec=file_spec)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
fake_file = StringIO(read_data)
if read_data is None:
if hasattr(handle, '__enter__'):
handle.__enter__.return_value = handle
else:
if hasattr(handle, '__enter__'):
handle.__enter__.return_value = fake_file
handle.read = fake_file.read
mock.return_value = handle
return mock
class MigrationTest(base.PulpServerTests):
def clean(self):
super(MigrationTest, self).clean()
# Make sure each test doesn't have any lingering MigrationTrackers
MigrationTracker.objects().delete()
class TestMigrateDatabase(MigrationTest):
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pulp.server.db.migrate.models.get_migration_packages', auto_spec=True)
def test_migration_removed(self, mock_get_packages, mock_getLogger):
"""
ensure that if a migration raises the MigrationRemovedError, it bubbles up.
"""
mock_package = MagicMock()
mock_package.current_version = 6
mock_package.latest_available_version = 7
mock_package.name = 'foo'
mock_migration = MagicMock()
mock_migration.version = 7
mock_package.unapplied_migrations = [mock_migration]
e = models.MigrationRemovedError('0006', '1.2.0', '1.1.0', 'foo')
mock_package.apply_migration.side_effect = e
mock_get_packages.return_value = [mock_package]
options = MagicMock()
options.dry_run = False
with self.assertRaises(models.MigrationRemovedError) as assertion:
manage.migrate_database(options)
self.assertTrue(assertion.exception is e)
class TestManageDB(MigrationTest):
def clean(self):
super(self.__class__, self).clean()
types_db.clean()
@patch.object(manage, 'PluginManager')
@patch.object(manage, 'model')
def test_ensure_database_indexes(self, mock_model, mock_plugin_manager):
"""
Make sure that the ensure_indexes method is called for all
the appropriate platform models
"""
test_model = MagicMock()
mock_plugin_manager.return_value.unit_models.items.return_value = [('test-unit',
test_model)]
manage.ensure_database_indexes()
test_model.ensure_indexes.assert_called_once_with()
@patch.object(manage, 'PluginManager')
@patch.object(manage, 'model')
def test_ensure_database_indexes_throws_exception(self, mock_model, mock_plugin_manager):
"""
Make sure that the ensure_indexes method is called for all
the appropriate platform models
"""
test_model = MagicMock()
test_model.unit_key_fields = ('1', '2', '3')
unit_key_index = {'fields': test_model.unit_key_fields, 'unique': True}
test_model._meta.__getitem__.side_effect = [[unit_key_index]]
mock_plugin_manager.return_value.unit_models.items.return_value = [('test-unit',
test_model)]
with self.assertRaises(ValueError) as context:
manage.ensure_database_indexes()
        self.assertEqual(context.exception.message, "Content unit type 'test-unit' explicitly "
                                                    "defines an index for its unit key. This is "
                                                    "not allowed because the platform handles it "
                                                    "for you.")
@patch.object(manage, 'ensure_database_indexes')
@patch('logging.config.fileConfig')
    @patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.manage.factory')
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pulp.server.db.manage.RoleManager.ensure_super_user_role')
@patch('pulp.server.db.manage.managers.UserManager.ensure_admin')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db"])
@patch.object(models.MigrationPackage, 'apply_migration')
def test_admin_is_ensured(self, apply_migration, ensure_admin, ensure_super_user_role,
getLogger, factory, initialize, fileConfig, ensure_db_indexes):
"""
pulp-manage-db is responsible for making sure the admin user and role are in place. This
test makes sure the manager methods that do that are called.
"""
logger = MagicMock()
getLogger.return_value = logger
code = manage.main()
self.assertEqual(code, os.EX_OK)
# Make sure all the right logging happens
expected_messages = ('Ensuring the admin role and user are in place.',
'Admin role and user are in place.')
info_messages = ''.join([mock_call[1][0] for mock_call in logger.info.mock_calls])
for msg in expected_messages:
self.assertTrue(msg in info_messages)
# Make sure the admin user and role creation methods were called. We'll leave it up to other
# tests to make sure they work.
ensure_admin.as
|
jie/microgate
|
test_server.py
|
Python
|
mit
| 1,151 | 0.001738 |
from flask import Flask
from flask import request
from flask import jsonify
from flask import abort
import time
app = Flask(__name__)
@app.route('/api/1', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/1/<path:path>', methods=['GET', 'POST'])
def api1(path):
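    # Simulates a slow upstream endpoint by sleeping 20 seconds before responding.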
time.sleep(20)
return jsonify({
'userinfo': {
'username': 'zhouyang',
'pk': 10,
'birthday': '2010101'
}
})
@app.route('/api/2', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/2/<path:path>', methods=['GET', 'POST'])
def api2(path):
return abort(400, 'you did a bad request')
@app.route('/api/3', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/3/<path:path>', methods=['GET', 'POST'])
def api3(path):
userId = request.args.get('userId')
return jsonify({
'userinfo': {
'userId': userId
}
})
@app.route('/usercenter/userinfo', methods=['GET', 'POST'])
def api4():
return jsonify({
        'userinfo': {
'username': 'zhouyang'
}
})
if __name__ == '__main__':
app.run(port=1330, host='0.0.0.0')
|
liulion/mayavi
|
mayavi/tools/animator.py
|
Python
|
bsd-3-clause
| 7,087 | 0.000564 |
"""
Simple utility code for animations.
"""
# Author: Prabhu Ramachandran <prabhu at aerodotiitbdotacdotin>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
import types
from functools import wraps
try:
from decorator import decorator
HAS_DECORATOR = True
except ImportError:
HAS_DECORATOR = False
from pyface.timer.api import Timer
from traits.api import HasTraits, Button, Instance, Range
from traitsui.api import View, Group, Item
###############################################################################
# `Animator` class.
###############################################################################
class Animator(HasTraits):
""" Convenience class to manage a timer and present a convenient
UI. This is based on the code in `tvtk.tools.visual`.
Here is a simple example of using this class::
>>> from mayavi import mlab
>>> def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> anim = anim()
>>> t = Animator(500, anim.next)
>>> t.edit_traits()
This makes it very easy to animate your visualizations and control
it from a simple UI.
**Notes**
If you want to modify the data plotted by an `mlab` function call,
please refer to the section on: :ref:`mlab-animating-data`
"""
########################################
# Traits.
start = Button('Start Animation')
stop = Button('Stop Animation')
delay = Range(10, 100000, 500,
desc='frequency with which timer is called')
# The internal timer we manage.
timer = Instance(Timer)
######################################################################
# User interface view
    traits_view = View(Group(Item('start'),
                             Item('stop'),
                             show_labels=False),
Item('_'),
Item(name='delay'),
title='Animation Controller',
buttons=['OK'])
######################################################################
# Initialize object
def __init__(self, millisec, callable, *args, **kwargs):
"""Constructor.
**Parameters**
:millisec: int specifying the delay in milliseconds
between calls to the callable.
:callable: callable function to call after the specified
delay.
:\*args: optional arguments to be passed to the callable.
:\*\*kwargs: optional keyword arguments to be passed to the callable.
"""
HasTraits.__init__(self)
self.delay = millisec
self.ui = None
self.timer = Timer(millisec, callable, *args, **kwargs)
######################################################################
# `Animator` protocol.
######################################################################
def show(self):
"""Show the animator UI.
"""
self.ui = self.edit_traits()
def close(self):
"""Close the animator UI.
"""
if self.ui is not None:
self.ui.dispose()
######################################################################
# Non-public methods, Event handlers
def _start_fired(self):
self.timer.Start(self.delay)
def _stop_fired(self):
self.timer.Stop()
def _delay_changed(self, value):
t = self.timer
if t is None:
return
if t.IsRunning():
t.Stop()
t.Start(value)
###############################################################################
# Decorators.
def animate(func=None, delay=500, ui=True):
""" A convenient decorator to animate a generator that performs an
animation. The `delay` parameter specifies the delay (in
milliseconds) between calls to the decorated function. If `ui` is
True, then a simple UI for the animator is also popped up. The
decorated function will return the `Animator` instance used and a
user may call its `Stop` method to stop the animation.
If an ordinary function is decorated a `TypeError` will be raised.
**Parameters**
:delay: int specifying the time interval in milliseconds between
calls to the function.
:ui: bool specifying if a UI controlling the animation is to be
provided.
**Returns**
The decorated function returns an `Animator` instance.
**Examples**
Here is the example provided in the Animator class documentation::
>>> from mayavi import mlab
>>> @mlab.animate
... def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> a = anim() # Starts the animation.
For more specialized use you can pass arguments to the decorator::
>>> from mayavi import mlab
>>> @mlab.animate(delay=500, ui=False)
... def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> a = anim() # Starts the animation without a UI.
**Notes**
If you want to modify the data plotted by an `mlab` function call,
please refer to the section on: :ref:`mlab-animating-data`.
"""
class Wrapper(object):
# The wrapper which calls the decorated function.
def __init__(self, function):
self.func = function
self.ui = ui
self.delay = delay
def __call__(self, *args, **kw):
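            # Accept either a generator object directly or a generator function;
            # the wrapped callable must produce a generator for the animation loop.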
if isinstance(self.func, types.GeneratorType):
f = self.func
else:
f = self.func(*args, **kw)
if isinstance(f, types.GeneratorType):
a = Animator(self.delay, f.next)
if self.ui:
a.show()
return a
else:
msg = 'The function "%s" must be a generator '\
'(use yield)!' % (self.func.__name__)
raise TypeError(msg)
def decorator_call(self, func, *args, **kw):
return self(*args, **kw)
def _wrapper(function):
# Needed to create the Wrapper in the right scope.
if HAS_DECORATOR:
# The decorator calls a callable with (func, *args, **kw) signature
return decorator(Wrapper(function).decorator_call, function)
else:
return wraps(function)(Wrapper(function))
if func is None:
return _wrapper
else:
return _wrapper(func)
|
ilbay/PyMangaDownloader
|
Ui_NewMangaDialog.py
|
Python
|
gpl-2.0
| 2,334 | 0.003428 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'NewMangaDialog.ui'
#
# Created: Wed Jul 24 19:06:21 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_NewMangaDialog(object):
def setupUi(self, NewMangaDialog):
NewMangaDialog.setObjectName(_fromUtf8("NewMangaDialog"))
NewMangaDialog.resize(231, 78)
self.gridLayout = QtGui.QGridLayout(NewMangaDialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.splitter = QtGui.QSplitter(NewMangaDialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.label = QtGui.QLabel(self.splitter)
self.label.setObjectName(_fromUtf8("label"))
self.mangaLineEdit = QtGui.QLineEdit(self.splitter)
self.mangaLineEdit.setObjectName(_fromUtf8("mangaLineEdit"))
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(NewMangaDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.retranslateUi(NewMangaDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewMangaDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewMangaDialog.reject)
QtCore.QMetaObject.connectSlotsByName(NewMangaDialog)
def retranslateUi(self, NewMangaDialog):
NewMangaDialog.setWindowTitle(_translate("NewMangaDialog", "Dialog", None))
self.label.setText(_translate("NewMangaDialog", "Manga:", None))
|
faarwa/EngSocP5
|
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Platform/darwin.py
|
Python
|
gpl-3.0
| 1,758 | 0.001706 |
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"),
|
to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py 5023 2010/06/14 22:05:46 scons"
import posix
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
env['ENV']['PATH'] = env['ENV']['PATH'] + ':/sw/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
26fe/jsonstat.py
|
jsonstat/downloader.py
|
Python
|
lgpl-3.0
| 3,966 | 0.001261 |
# -*- coding: utf-8 -*-
# This file is part of https://github.com/26fe/jsonstat.py
# Copyright (C) 2016-2021 gf <gf@26fe.com>
# See LICENSE file
# stdlib
import time
import os
import hashlib
# packages
import requests
# jsonstat
from jsonstat.exceptions import JsonStatException
class Downloader:
"""Helper class to download json stat files.
It has a very simple cache mechanism
"""
def __init__(self, cache_dir="./data", time_to_live=None):
"""initialize downloader
:param cache_dir: directory where to store downloaded files, if cache_dir is None files are not stored
:param time_to_live: how many seconds to store file on disk, None is infinity, 0 for not to store
"""
if cache_dir is not None:
self.__cache_dir = os.path.abspath(cache_dir)
else:
self.__cache_dir = None
self.__time_to_live = time_to_live
self.__session = requests.session()
def cache_dir(self):
return self.__cache_dir
def download(self, url, filename=None, time_to_live=None):
"""Download url from internet
|
.
Store the downloaded content into <cache_dir>/file.
If <cache_dir>/file exists, it returns content from disk
:param url: page to be downloaded
        :param filename: filename where to store the content of url, None if we do not want to store it
:param time_to_live: how many seconds to store file on disk,
None use default time_to_live,
0 don't use cached version if any
:returns: the content of url (str type)
"""
pathname = self.__build_pathname(filename, url)
# note: html must be a str type not byte type
if time_to_live == 0 or not self.__is_cached(pathname):
response = self.__session.get(url)
response.raise_for_status()
html = response.text
self.__write_page_to_cache(pathname, html)
else:
html = self.__read_page_from_file(pathname)
return html
def __build_pathname(self, filename, url):
if self.__cache_dir is None:
return None
if filename is None:
filename = hashlib.md5(url.encode('utf-8')).hexdigest()
pathname = os.path.join(self.__cache_dir, filename)
return pathname
def __is_cached(self, pathname):
"""check if pathname exists
:param pathname:
:returns: True if the file can be retrieved from the disk (cache)
"""
if pathname is None:
return False
if not os.path.exists(pathname):
return False
if self.__time_to_live is None:
return True
cur = time.time()
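        # A cached copy is considered fresh while its age (now - mtime) is below
        # time_to_live seconds.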
mtime = os.stat(pathname).st_mtime
# print("last modified: %s" % time.ctime(mtime))
return cur - mtime < self.__time_to_live
def __write_page_to_cache(self, pathname, content):
"""write content to pathname
:param pathname:
:param content:
"""
if pathname is None:
return
# create cache directory only the fist time it is needed
if not os.path.exists(self.__cache_dir):
os.makedirs(self.__cache_dir)
if not os.path.isdir(self.__cache_dir):
msg = "cache_dir '{}' is not a directory".format(self.__cache_dir)
raise JsonStatException(msg)
# note:
# in python 3 file must be open without b (binary) option to write string
# otherwise the following error will be generated
# TypeError: a bytes-like object is required, not 'str'
with open(pathname, 'w') as f:
f.write(content)
@staticmethod
def __read_page_from_file(pathname):
"""it reads content from pathname
:param pathname:
"""
with open(pathname, 'r') as f:
content = f.read()
return content
|
steelart/ask-navalny
|
django-backend/config/config.py
|
Python
|
mit
| 148 | 0 |
# Use default debug configuration or local configuration
try:
    from .config_local import *
except ImportError:
from .config_default import *
|
zhanglongqi/pymodslave
|
ModSlaveSettingsRTU.py
|
Python
|
gpl-2.0
| 2,993 | 0.009021 |
#-------------------------------------------------------------------------------
# Name: ModSlaveSettingsRTU
# Purpose:
#
# Author: ElBar
#
# Created: 17/04/2012
# Copyright: (c) ElBar 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from PyQt4 import QtGui,QtCore
from Ui_settingsModbusRTU import Ui_SettingsModbusRTU
import Utils
#add logging capability
import logging
#-------------------------------------------------------------------------------
class ModSlaveSettingsRTUWindow(QtGui.QDialog):
""" Class wrapper for RTU settings ui """
def __init__(self):
super(ModSlaveSettingsRTUWindow,self).__init__()
#init value
self.rtu_port = 1
self.baud_rate = 9600
self.byte_size = 8
self.parity = 'None'
self.stop_bits = '1'
self._logger = logging.getLogger("modbus_tk")
self.setupUI()
def setupUI(self):
#create window from ui
self.ui=Ui_SettingsModbusRTU()
self.ui.setupUi(self)
#set init values
self._set_values()
#signals-slots
self.accepted.connect(self._OK_pressed)
self.rejected.connect(self._cancel_pressed)
def _set_values(self):
"""set param values to ui"""
self._logger.info("Set param values to UI")
self.ui.cmbPort.setEditText(str(self.rtu_port))
self.ui.cmbBaud.setCurrentIndex(self.ui.cmbBaud.findText(str(self.baud_rate)))
self.ui.cmbDataBits.setCurrentIndex(self.ui.cmbDataBits.findText(str(self.byte_size)))
self.ui.cmbParity.setCurrentIndex(self.ui.cmbParity.findText(self.parity))
self.ui.cmbStopBits.setCurrentIndex(self.ui.cmbStopBits.findText(str(self.stop_bits)))
def _get_values(self):
"""get param values from ui"""
self._logger.info("Get param values from UI")
self.rtu_port = int(self.ui.cmbPort.currentText())
self.baud_rate = self.ui.cmbBaud.currentText()
self.byte_size = self.ui.cmbDataBits.currentText()
self.parity = self.ui.cmbParity.currentText()
self.stop_bits = self.ui.cmbStopBits.currentText()
def _OK_pressed(self):
"""new values are accepted"""
port = str(self.ui.cmbPort.currentText())
if (port.isdigit() and int(port) >= 1 and int(port) <= 16):#port must be an integer
self._get_values()
else:
self.rtu_port = 1
self._set_values()
self._get_values()
self._logger.error("Port must be an integer between 1 and 16")
Utils.errorMessageBox("Port must be an integer between 1 and 16")
def _cancel_pressed(self):
"""new values are rejected"""
self._set_values()
def showEvent(self,QShowEvent):
"""set values for controls"""
self._set_values()
#-------------------------------------------------------------------------------
|
sfloresk/NCA-Container-Builder
|
NCABase/app/sijax_handlers/single_access_handler.py
|
Python
|
apache-2.0
| 13,867 | 0.00786 |
"""
Helper for views.py
"""
from base_handler import base_handler
import traceback
import app.model
from flask import g, render_template
class single_access_handler(base_handler):
def __init__(self):
"""
Manages all the operations that are involved with a single port association with EPGs
(for virtual port channel association the vpc_access_handler is used)
:return:
"""
try:
            self.cobra_apic_object = single_access_handler.init_connections()
self.exception = None
except Exception as e:
self.exception = e
print traceback.print_exc()
def get_create_single_access_networks(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem',
|
'" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_create_single_access_network select with the networks within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_create_single_access_group'])
item_list = []
if network_ap is not None:
networks = self.cobra_apic_object.get_epg_by_ap(str(network_ap.dn))
for network in networks:
# Creates a dynamic object
network_do = type('network_do', (object,), {})
network_do.key = str(network.dn)
network_do.text = network.name
item_list.append(network_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_create_single_access_network", html_response)
except Exception as e:
print traceback.print_exc()
obj_response.script("create_notification('Can not retrieve networks', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def get_create_single_access_ports(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_create_single_access_port select with the available ports within the selected leaf
try:
ports = self.cobra_apic_object.get_available_ports(form_values['sel_create_single_access_leaf'])
item_list = []
for i in range(0, len(ports[0])):
# Creates a dynamic object
port_do = type('port_do', (object,), {})
port_do.key = ports[0][i]
port_do.text = ports[1][i]
item_list.append(port_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_create_single_access_port", html_response)
except Exception as e:
print traceback.print_exc()
obj_response.script("create_notification('Can not retrieve ports', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def create_single_access(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Creates switch profiles, interface profiles, policy groups and static bindings to associate a port
# to an EPG
try:
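            # The selected port DN ends in a bracketed interface id (e.g. "[eth1/1]");
            # it is extracted and "/" replaced with "-" to build policy/profile names.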
port_id = form_values['sel_create_single_access_port'].split('[')[-1][:-1].replace('/','-')
switch_id = form_values['sel_create_single_access_leaf'].split('/')[-1]
if form_values['create_port_access_type'] == 'single_vlan':
network_o = app.model.network.select().where(app.model.network.epg_dn ==
form_values['sel_create_single_access_network'])
if len(network_o) > 0:
self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
form_values['sel_create_single_access_leaf'],
form_values['sel_create_single_access_port'],
network_o[0].encapsulation,
'migration-tool',
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Assigned', '', 'success', 5000)")
else:
obj_response.script(
"create_notification('Network not found in local database', '', 'danger', 0)")
elif form_values['create_port_access_type'] == 'vlan_profile':
network_profilexnetworks = app.model.network_profilexnetwork.select().where(
app.model.network_profilexnetwork.network_profile == int(form_values['sel_profile_create_port_access']))
for network_profile in network_profilexnetworks:
network_o = app.model.network.select().where(app.model.network.id == network_profile.network.id)
if len(network_o) > 0:
self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
form_values['sel_create_single_access_leaf'],
form_values['sel_create_single_access_port'],
network_o[0].encapsulation,
'migration-tool',
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
else:
ex = Exception()
ex.message = 'Some networks where not assigned because they are not in the local database'
raise ex
obj_response.script("create_notification('Assigned', '', 'success', 5000)")
except Exception as e:
print traceback.print_exc()
obj_response.script("create_notification('Can not create single access', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def get_delete_single_access_networks(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_delete_single_access_network select with the network within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_delete_single_a
|
boland1992/SeisSuite
|
seissuite/sort_later/find_holes.py
|
Python
|
gpl-3.0
| 17,030 | 0.021315 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 12:28:32 2015
@author: boland
"""
import sys
sys.path.append('/home/boland/Anaconda/lib/python2.7/site-packages')
import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
import multiprocessing as mp
import pyproj
import os
import itertools
import datetime
import pointshape as ps
from math import sqrt, atan2, radians,degrees, cos, tan, sin, asin
import random
import uuid
shape_path = "/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
N = 130
#enter km spacing between path density points
km_points = 20.0
# reference elipsoid to calculate distance
wgs84 = pyproj.Geod(ellps='WGS84')
nbins = 200
def haversine(coordinates):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2= coordinates[0],coordinates[1],\
coordinates[2],coordinates[3]
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
def haversine2(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
def geodesic(coord1, coord2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=coord1[0], lat1=coord1[1],
lon2=coord2[0], lat2=coord2[1],
npts=npts-2)
return np.array([coord1] + path + [coord2])
def new_geodesic(lon1,lat1,lon2,lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def cluster_points(coord_points, N):
"""
Function that returns k which is an nx2 matrix of lon-lat vector columns
containing the optimal cluster centroid spacings within a large set of random
numbers e.g. those produced by the many_points() function above!
"""
k = kmeans(coord_points, N)
return k[0]
def paths_func(path_info, km=km_points):
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
#lon1, lat1, lon2, lat2, dist = path_info[0], \
#path_info[1], path_info[2], path_info[3], \
#path_info[4]
dist = haversine2(lon1, lat1, lon2, lat2)
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(int((np.ceil(dist) + 1)/km), 100)
path = new_geodesic(lon1,lat1,lon2,lat2, npts)
#print("still going strong\n")
length = len(path)
lons = [lon1 for i in range(0,length)]
lats = [lat1 for i in range(0,length)]
path = np.column_stack((path,lons,lats))
return path
def HIST2D(nbins,paths, grad=False):
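    # Bins the path points into an nbins x nbins 2-D histogram; the per-cell counts
    # approximate inter-station path density across the survey area.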
H, xedges, yedges = np.histogram2d(paths[:,0],paths[:,1],bins=nbins)
#name = "path_density_2Dhist.png"
if grad:
H = np.abs(np.asarray(np.gradient(H)[0]))#name = "path_density_2Dhist_grad.png"
    # H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
    Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
return Hmasked
#fig = plt.figure()
#plt.pcolormesh(xedges,yedges,Hmasked)
#plt.xlabel('longitude (degrees)')
#plt.ylabel('longitude (degrees)')
#cbar = plt.colorbar()
#cbar.ax.set_ylabel('Counts')
#fig.savefig(name)
def latitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lat = degrees(asin(cos(alpha0)*sin(sigma)))
#alpha = atan2(tan(alpha0),cos(sigma))
return lat
def longitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lon = degrees(atan2(sin(alpha0)*sin(sigma), cos(sigma))) + degrees(lon0)
#alpha = atan2(tan(alpha0),cos(sigma))
return lon
vlat_func = np.vectorize(latitude)
vlon_func = np.vectorize(longitude)
def waypoint_init(path_info, km=km_points):
R = 6371
lon1, lat1, lon2, lat2, dist = radians(path_info[0]), \
radians(path_info[1]), radians(path_info[2]), \
radians(path_info[3]), radians(path_info[4])
#lon1, lat1, lon2, lat2, dist = map(radians, [path_info[0],path_info[1],path_info[2],path_info[3],path_info[4]])
lon_diff = lon2-lon1
alpha1 = atan2(sin(lon_diff),(cos(lat1)*tan(lat2)-sin(lat1)*cos(lon_diff)))
#alpha2 = atan2(sin(lon_diff),(-cos(lat2)*tan(lat1)+sin(lat2)*cos(lon_diff)))
#try:
#sigma12 = acos(sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(lon_diff))
#except:
#return
sigma01, alpha0 = atan2(tan(lat1), cos(alpha1)), asin(sin(alpha1)*cos(lat1))
#sigma02 = sigma01+sigma12
lon01 = atan2(sin(alpha0)*sin(sigma01), cos(sigma01))
lon0 = lon1 - lon01
npts = max(int((np.ceil(dist) + 1)/km), 100)
all_d = np.linspace(0,dist,npts)/R
lons, lats = vlon_func(all_d, sigma01, alpha0, lon0), vlat_func(all_d, sigma01, alpha0, lon0)
return np.column_stack((lons, lats))
t_total0 = datetime.datetime.now()
t0 = datetime.datetime.now()
ideal_path = 'ideal_coordinates.pickle'
#if no paths have been done before, start afresh!
if not os.path.exists(ideal_path):
M = 1e5
many_points = ps.points_in_shape(shape_path, M)
coords = cluster_points(many_points,N)
#else import already processed coordinates if the program has already done so.
else:
f = open(name=ideal_path, mode='rb')
coords = pickle.load(f)
f.close()
#generate N kmeans cluster points from massive M number of randomly distributed
#points inside the shape file.
lonmin = np.floor(min(coords[:,0]))
lonmax = np.ceil(max(coords[:,0]))
latmin = np.floor(min(coords[:,1]))
latmax = np.ceil(max(coords[:,1]))
print lonmin,lonmax,latmin,latmax
#coords1 = [coord1 for coord1 in coords for coord2 in coords]
#coords2 = [coord2 for coord1 in coords for coord2 in coords]
#columns = np.column_stack((coords1, coords2))
kappa = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
def spread_paths(nets):
#pool = mp.Pool()
#paths = pool.map(new_paths, nets)
#pool.close()
#pool.join()
paths = map(paths_func, nets)
#create a flattened numpy array of size 2xN from the paths created!
#paths = np.asarray(list(itertools.chain(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
#method is slowed without the b contiguous array
#b = np.ascontiguousarray(paths).view(np.dtype((np.void, paths.dtype.itemsize * paths.shape[1])))
#_, idx = np.unique(b, return_index=True)
#paths = np.unique(b).view(paths.dtype).reshape(-1, paths.shape[1])
#plt.figure()
#plt.scatter(paths[:,0],paths[:,1])
#name = uuid.uuid4()
#plt.savefig('{}.png'.format(name))
return paths
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, kappa)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
#paths = list(paths)
counter = 0
#cd Desktop/Link\ to\ SIMULA
|
nkhare/rockstor-core
|
src/rockstor/storageadmin/views/network.py
|
Python
|
gpl-3.0
| 10,047 | 0.001294 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from tempfile import mkstemp
from shutil import move
from django.db import transaction
from django.conf import settings
from rest_framework.response import Response
from storageadmin.models import (NetworkInterface, Appliance)
from storageadmin.util import handle_exception
from storageadmin.serializers import NetworkInterfaceSerializer
from system.osi import (config_network_device, get_net_config, update_issue)
from system.samba import update_samba_discovery
from system.services import superctl
import rest_framework_custom as rfc
import logging
logger = logging.getLogger(__name__)
class NetworkMixin(object):
@staticmethod
def _update_ni_obj(nio, values):
nio.dname = values.get('dname', None)
nio.mac = values.get('mac', None)
nio.method = values.get('method', 'manual')
nio.autoconnect = values.get('autoconnect', 'no')
nio.netmask = values.get('netmask', None)
        nio.ipaddr = values.get('ipaddr', None)
nio.gateway = values.get('gateway', None)
nio.dns_servers = values.get('dns_servers', None)
nio.ctype = values.get('ctype', None)
nio.dtype = values.get('dtype', None)
nio.dspeed = values.get('dspeed', None)
nio.state = values.get('state', None)
return nio
@staticmethod
def _update_nginx(ipaddr=None):
#update nginx config and restart the service
        conf = '%s/etc/nginx/nginx.conf' % settings.ROOT_DIR
fo, npath = mkstemp()
with open(conf) as ifo, open(npath, 'w') as tfo:
for line in ifo.readlines():
if (re.search('listen.*80 default_server', line) is not None):
substr = 'listen 80'
if (ipaddr is not None):
substr = 'listen %s:80' % ipaddr
line = re.sub(r'listen.*80', substr, line)
elif (re.search('listen.*443 default_server', line) is not None):
substr = 'listen 443'
if (ipaddr is not None):
substr = 'listen %s:443' % ipaddr
line = re.sub(r'listen.*443', substr, line)
tfo.write(line)
move(npath, conf)
superctl('nginx', 'restart')
class NetworkListView(rfc.GenericView, NetworkMixin):
serializer_class = NetworkInterfaceSerializer
def get_queryset(self, *args, **kwargs):
with self._handle_exception(self.request):
self._net_scan()
#to be deprecated soon
update_samba_discovery()
return NetworkInterface.objects.all()
@classmethod
@transaction.atomic
def _net_scan(cls):
config_d = get_net_config(all=True)
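        # Sync the NetworkInterface table with the devices reported by the OS:
        # update existing rows, create missing ones, and drop interfaces that vanished.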
for dconfig in config_d.values():
ni = None
if (NetworkInterface.objects.filter(
name=dconfig['name']).exists()):
ni = NetworkInterface.objects.get(name=dconfig['name'])
ni = cls._update_ni_obj(ni, dconfig)
else:
ni = NetworkInterface(name=dconfig.get('name', None),
dname=dconfig.get('dname', None),
dtype=dconfig.get('dtype', None),
dspeed=dconfig.get('dspeed', None),
mac=dconfig.get('mac', None),
method=dconfig.get('method', None),
autoconnect=dconfig.get('autoconnect', None),
netmask=dconfig.get('netmask', None),
ipaddr=dconfig.get('ipaddr', None),
gateway=dconfig.get('gateway', None),
dns_servers=dconfig.get('dns_servers', None),
ctype=dconfig.get('ctype', None),
state=dconfig.get('state', None))
ni.save()
devices = []
for ni in NetworkInterface.objects.all():
if (ni.dname not in config_d):
logger.debug('network interface(%s) does not exist in the '
'system anymore. Removing from db' % (ni.name))
ni.delete()
else:
devices.append(ni)
serializer = NetworkInterfaceSerializer(devices, many=True)
return Response(serializer.data)
def post(self, request):
with self._handle_exception(request):
return self._net_scan()
class NetworkDetailView(rfc.GenericView, NetworkMixin):
serializer_class = NetworkInterfaceSerializer
def get(self, *args, **kwargs):
try:
data = NetworkInterface.objects.get(name=self.kwargs['iname'])
serialized_data = NetworkInterfaceSerializer(data)
return Response(serialized_data.data)
except:
return Response()
@transaction.atomic
def delete(self, request, iname):
with self._handle_exception(request):
if (NetworkInterface.objects.filter(name=iname).exists()):
i = NetworkInterface.objects.get(name=iname)
i.delete()
return Response()
def _validate_netmask(self, request):
netmask = request.data.get('netmask', None)
e_msg = ('Provided netmask value(%s) is invalid. You can provide it '
'in a IP address format(eg: 255.255.255.0) or number of '
'bits(eg: 24)' % netmask)
if (netmask is None):
handle_exception(Exception(e_msg), request)
bits = 0
try:
bits = int(netmask)
except ValueError:
#assume ip address format was provided
            bits = sum([bin(int(x)).count('1') for x in netmask.split('.')])
if (bits < 1 or bits > 32):
e_msg = ('Provided netmask value(%s) is invalid. Number of '
'bits in netmask must be between 1-32' % netmask)
handle_exception(Exception(e_msg), request)
return bits
@transaction.atomic
def put(self, request, iname):
with self._handle_exception(request):
if (not NetworkInterface.objects.filter(name=iname).exists()):
                e_msg = ('Network interface(%s) does not exist.' % iname)
handle_exception(Exception(e_msg), request)
ni = NetworkInterface.objects.get(name=iname)
itype = request.data.get('itype')
if (itype != 'management'):
itype = 'io'
method = request.data.get('method')
ni.onboot = 'yes'
if (method == 'auto'):
config_network_device(ni.name)
elif (method == 'manual'):
ipaddr = request.data.get('ipaddr')
for i in NetworkInterface.objects.filter(ipaddr=ipaddr):
if (i.id != ni.id):
e_msg = ('IP: %s already in use by another '
'interface: %s' % (ni.ipaddr, i.name))
handle_exception(Exception(e_msg), request)
netmask = self._validate_netmask(request)
gateway = request.data.get('gateway', None)
dns_servers = request.data.get('dns_servers', None)
config_network_device(ni.name, dtype=ni.dtype, method='manual',
|
skimpax/python-rfxcom
|
tests/protocol/test_temperature.py
|
Python
|
bsd-3-clause
| 3,484 | 0 |
from unittest import TestCase
from rfxcom.protocol.temperature import Temperature
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class TemperatureTestCase(TestCase):
def setUp(self):
self.data = bytearray(b'\x08\x50\x02\x11\x70\x02\x00\xA7\x89')
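        # Packet layout, matching the expected parse below: length 0x08, type 0x50
        # (temperature), subtype 0x02, sequence 0x11, id 0x7002, temperature
        # 0x00A7 = 167 -> 16.7 C, and 0x89 -> signal level 8 / battery level 9.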
self.parser = Temperature()
def test_parse_bytes(self):
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.p
|
arser.load(self.data)
self.assertEquals(result, {
'packet_length': 8,
'packet_type': 80,
'packet_type_name': 'Temperature sensors',
|
'sequence_number': 17,
'packet_subtype': 2,
'packet_subtype_name':
'THC238/268,THN132,THWR288,THRN122,THN122,AW129/131',
'temperature': 16.7,
'id': '0x7002',
# 'channel': 2, TBC
'signal_level': 8,
'battery_level': 9
})
self.assertEquals(str(self.parser), "<Temperature ID:0x7002>")
def test_parse_bytes2(self):
self.data = bytearray(b'\x08\x50\x03\x02\xAE\x01\x00\x63\x59')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'packet_length': 8,
'packet_type': 80,
'packet_type_name': 'Temperature sensors',
'sequence_number': 2,
'packet_subtype': 3,
'packet_subtype_name': 'THWR800',
'temperature': 9.9,
'id': '0xAE01',
# 'channel': 1, TBC
'signal_level': 5,
'battery_level': 9
})
self.assertEquals(str(self.parser), "<Temperature ID:0xAE01>")
def test_parse_bytes_negative_temp(self):
self.data = bytearray(b'\x08\x50\x06\x02\xAE\x01\x80\x55\x59')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'packet_length': 8,
'packet_type': 80,
'packet_type_name': 'Temperature sensors',
'sequence_number': 2,
'packet_subtype': 6,
'packet_subtype_name': 'TS15C',
'temperature': -8.5,
'id': '0xAE01',
# 'channel': 1, TBC
'signal_level': 5,
'battery_level': 9
})
self.assertEquals(str(self.parser), "<Temperature ID:0xAE01>")
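    # Editor's note (inferred from the vectors above, not part of the original
    # tests): bytes 6-7 hold the temperature in tenths of a degree, with the top
    # bit of byte 6 as the sign flag, e.g. 0x00,0xA7 -> 16.7 and 0x80,0x55 -> -8.5.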
def test_validate_bytes_short(self):
data = self.data[:1]
with self.assertRaises(InvalidPacketLength):
self.parser.validate_packet(data)
def test_validate_unkown_packet_type(self):
self.data[1] = 0xFF
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketType):
self.parser.validate_packet(self.data)
def test_validate_unknown_sub_type(self):
self.data[2] = 0xEE
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketSubtype):
self.parser.validate_packet(self.data)
def test_log_name(self):
self.assertEquals(self.parser.log.name, 'rfxcom.protocol.Temperature')
|
ctongfei/nexus
|
torch/remove_classes.py
|
Python
|
mit
| 259 | 0.003861 |
#! /usr/bin/env python3
import sys
in_class = False
for l in sys.stdin:
if l.startswith("class"):
in_class = True
if in_class:
if l
|
.startswith("};"):
|
in_class = False
continue
else:
print(l, end='')
|
shinexwang/Classy
|
Main/webParser.py
|
Python
|
apache-2.0
| 10,545 | 0.000379 |
"""
Copyright 2013 Shine Wang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib
import re
from HTMLParser import HTMLParser
from courseClasses import Course, Lecture, Tutorial, Reserve
class CustomHTMLParser(HTMLParser):
"""this class reads a HTML stream, then parses out the "data" fields"""
def __init__(self, webData):
HTMLParser.__init__(self)
self.webData = webData
def handle_data(self, data):
"""takes out the data"""
self.webData.append(data.strip())
class WebParser:
""""A WebParser is created for each and every course,
to parse the corresponding web page"""
requestURL = "http://www.adm.uwaterloo.ca/cgi-bin/" \
"cgiwrap/infocour/salook.pl"
def __init__(self):
self.webData = []
self.index = -1
self.session = None
self.thisCourse = None
def run(self, courseString, sessionString):
"""this is the method that the main class can call
if successful, returns the Course class
if not, returns an error message"""
self.session = self.parseSession(sessionString)
if self.session is None:
return "SessionNameWrongError"
courseString = map(lambda x: x.upper(), courseString.split())
try:
self.thisCourse = Course(self.session, courseString[0],
courseString[1])
except:
return "CourseNameWrongError"
if self.getWebData(self.thisCourse):
return "WebPageError"
elif self.parseWebData():
return "CourseNotFoundError"
else:
self.processCourseInfo()
self.postProcess(self.thisCourse)
return self.thisCourse
def parseSession(self, sessionString):
try:
ret = "1"
ret += sessionString.split()[1][-2:] # last 2 digits of year
tempMap = (("fall", "9"), ("winter", "1"), ("spring", "5"))
for season in tempMap:
if season[0] in sessionString.lower():
ret += season[1]
return ret
except:
return None
def getWebData(self, course):
"""submits a POST query, initializes HTMLParser"""
try:
params = urllib.urlencode({"sess": course.session,
"subject": course.subject,
"cournum": course.catalogNumber})
page = urllib.urlopen(WebParser.requestURL, params)
parser = CustomHTMLParser(self.webData)
# we use .replace() because HTMLParser ignores " ",
            # which would screw up our table
parser.feed(page.read().replace(" ", " "))
except:
return "WebPageError"
def parseWebData(self):
"""We try to find the beginning of the desired table"""
# now, we find the start index and pass that on along
# with the webData
for i in xrange(len(self.webData)-3):
if self.webData[i] == self.thisCourse.subject \
and self.webData[i+2] == self.thisCourse.catalogNumber:
self.index = i
break
if self.index == -1: # website not found
return "CourseNotFound"
def processCourseInfo(self):
"""now, we do the heavy-duty processing of the data table"""
# sets basic attrs of thisCourse
self.thisCourse.units = self.webData[self.index+4]
self.thisCourse.title = self.webData[self.index+6]
while self.webData[self.index] != "Instructor":
self.index += 1
# processing row-by-row
while not self.endOfRow(self.webData[self.index]):
if self.webData[self.index] != "":
self.processSlot()
self.index += 1
if self.index == len(self.webData):
return
def processSlot(self):
"""we check to see if this is the BEGINNING of a valid row"""
if (self.webData[self.index+1][:3].upper() == "LEC"
or self.webData[self.index+1][:3].upper() == "LAB") \
and "ONLINE" not in self.webData[self.index+2]:
# we don't want online classes!
# processing a lecture row
lec = Lecture()
if self.processClass(lec, self.index, self.webData):
return
self.thisCourse.lectures.append(lec)
elif self.webData[self.index+1][:3].upper() == "TUT":
# processing a tutorial row
tut = Tutorial()
if self.processClass(tut, self.index, self.webData):
return
self.thisCourse.tutorials.append(tut)
elif self.webData[self.index][:7].upper() == "RESERVE":
# processing a reserve row
res = Reserve()
self.processReserve(res, self.index, self.webData)
if self.thisCourse.lectures:
self.thisCourse.lectures[-1].reserves.append(res)
# note: we leave out the TST (exam?) times for now
def processReserve(self, res, index, webData):
"""processing reservations for certain types of students"""
res.name = webData[index][9:]
# we remove the "only" suffix (which is annoyingly pointless)
if "only" in res.name:
res.name = res.name[:-5]
        # also, the "students" suffix
if "students" in res.name or "Students" in res.name:
res.name = res.name[:-9]
# now, we merge the match list
while not webData[index].isdigit():
index += 1
        # retrieving enrollment numbers
res.enrlCap = int(webData[index])
res.enrlTotal = int(webData[index+1])
def processClass(self, lec, index, webData):
"""we process a typical lecture or tutorial row"""
attr1 = ["classNumber", "compSec", "campusLocation"]
for i in xrange(len(attr1)):
setattr(lec, attr1[i], webData[index+i].strip())
index += 6
attr2 = ["enrlCap", "enrlTotal", "waitCap", "waitTotal"]
for i in xrange(len(attr2)):
setattr(lec, attr2[i], int(webData[index+i]))
index += 4
# parsing the "Times Days/Date" field
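        # Editor's note (illustrative, not in the original): a cell such as
        # "08:30-09:50MWF" would match below as start='08:30', end='09:50',
        # days='MWF'; cells without a time range (e.g. "TBA") fail the match.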
match = re.search(r"([:\d]+)-([:\d]+)(\w+)", webData[index])
if not match:
# we return an error message in the "TBA" case
return "NoTimeError"
attr3 = ["startTime", "endTime", "days"]
for i in xrange(len(attr3)):
setattr(lec, attr3[i], match.group(i+1).strip())
index += 1
if len(webData[index].split()) == 2:
# sometimes, no building, room, and instructor will be given
|
# this is mostly for Laurier courses
lec.building, lec.room = webData[index].split()
lec.instructor = webData[index+1].strip()
def endOfRow(self, data):
"""returns true if the current data-cell is the last cell
of this course; else - false"""
# the last cell is of the form: ##/##
|
-##/## or
# "Information last updated
if re.search(r"\d+/\d+-\d+/\d+", data) or \
"Information last updated" in data:
return True
else:
return False
def postProcess(self, course):
"""this function will convert the class times to minutes-past-
the-previous-midnight, and converts the days to numbers.
Also, some reservation-postprocessing"""
map(lambda x: x.calcMiscSeats(), course.lectures)
for lec in course.lectures:
lec.courseID = c
|
interhui/py_task
|
task/trigger/cron_trigger.py
|
Python
|
artistic-2.0
| 3,634 | 0.008255 |
# coding=utf-8
'''
cron trigger
@author: Huiyugeng
'''
import datetime
import trigger
class CronTrigger(trigger.Trigger):
def __init__(self, cron):
trigger.Trigger.__init__(self, 0, 1);
self.cron = cron
def _is_match(self):
parser = CronParser(self.cron)
_date = datetime.date.today()
_time = datetime.datetime.now()
return parser._is_match(_date, _time)
class CronParser():
def __init__(self, cron):
cron_item = cron.split(' ')
if len(cron_item) == 6 or len(cron_item) == 7:
self.second_set = self._parse_integer(cron_item[0], 0, 59)
self.minute_set = self._parse_integer(cron_item[1], 0, 59)
self.hour_set = self._parse_integer(cron_item[2], 0, 23)
self.day_of_month_set = self._parse_integer(cron_item[3], 1, 31)
self.month_set = self._parse_month(cron_item[4])
self.day_of_week_set = self._parse_day_of_week(cron_item[5])
            if len(cron_item) == 7:
                self.year_set = self._parse_integer(cron_item[6], 1970, 2100)
            else:
                # default to any year when the optional 7th field is omitted
                self.year_set = self._parse_integer('*', 1970, 2100)
def _parse_integer(self, value, min_val, max_val):
result = []
range_items = []
if ',' in value:
range_items = value.split(',')
else:
range_items.append(value)
for range_item in range_items:
temp_result = []
interval = 1
if '/' in range_item:
temp = range_item.split('/')
range_item = temp[0]
interval = int(temp[1])
|
if interval < 1:
interval = 1
if '*' in range_item:
temp_result.extend(self._add_to_set(min_val, max_val))
elif '-' in ran
|
ge_item:
item = range_item.split('-')
temp_result.extend(self._add_to_set(int(item[0]), int(item[1])))
else:
temp_result.append(int(range_item))
count = 0
for item in temp_result:
if count % interval == 0:
result.append(item)
count = count + 1
return result
def _add_to_set(self, start, end):
result = [i for i in range(start, end + 1)]
return result
def _parse_month(self, value):
months = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
for i in range(0, 12):
value = value.replace(months[i], str(i + 1))
return self._parse_integer(value, 1, 12);
def _parse_day_of_week(self, value):
day_of_weeks = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
for i in range(0, 7):
value = value.replace(day_of_weeks[i], str(i + 1));
return self._parse_integer(value, 1, 7);
def _is_match(self, _date, _time):
# In Python datetime's weekday Monday is 0 and Sunday is 6
day_of_week = _date.weekday() + 1
result = True and \
_time.second in self.second_set and \
_time.minute in self.minute_set and \
_time.hour in self.hour_set and \
_date.day in self.day_of_month_set and \
_date.month in self.month_set and \
_date.year in self.year_set and \
day_of_week in self.day_of_week_set
return result
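# Illustrative usage (editor's sketch, not part of the original module):
#   parser = CronParser('0 */15 9-17 * * MON-FRI 2015-2020')
#   parser._is_match(datetime.date.today(), datetime.datetime.now())
# Field order is: second minute hour day-of-month month day-of-week [year].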
|
ioef/tlslite-ng
|
tlslite/recordlayer.py
|
Python
|
lgpl-2.1
| 29,445 | 0.001834 |
# Copyright (c) 2014, Hubert Kario
#
# Efthimios Iosifidis - Speck Cipher Additions
# See the LICENSE file for legal information regarding use of this file.
"""Implementation of the TLS Record Layer protocol"""
import socket
import errno
import hashlib
from .constants import ContentType, CipherSuite
from .messages import RecordHeader3, RecordHeader2, Message
from .utils.cipherfactory import createAESGCM, createAES, createRC4, \
createTripleDES, createCHACHA20,createSPECK, createSPECK128GCM, createSPECK192GCM
from .utils.codec import Parser, Writer
from .utils.compat import compatHMAC
from .utils.cryptomath import getRandomBytes
from .utils.constanttime import ct_compare_digest, ct_check_cbc_mac_and_pad
from .errors import TLSRecordOverflow, TLSIllegalParameterException,\
TLSAbruptCloseError, TLSDecryptionFailed, TLSBadRecordMAC
from .mathtls import createMAC_SSL, createHMAC, PRF_SSL, PRF, PRF_1_2, \
PRF_1_2_SHA384
class RecordSocket(object):
"""Socket wrapper for reading and writing TLS Records"""
def __init__(self, sock):
"""
Assign socket to wrapper
@type sock: socket.socket
"""
self.sock = sock
self.version = (0, 0)
def _sockSendAll(self, data):
"""
Send all data through socket
@type data: bytearray
@param data: data to send
@raise socket.error: when write to socket failed
"""
while 1:
try:
bytesSent = self.sock.send(data)
except socket.error as why:
if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
yield 1
continue
raise
if bytesSent == len(data):
return
data = data[bytesSent:]
yield 1
def send(self, msg):
"""
Send the message through socket.
@type msg: bytearray
@param msg: TLS message to send
@raise socket.error: when write to socket failed
"""
data = msg.write()
header = RecordHeader3().create(self.version,
msg.contentType,
len(data))
data = header.write() + data
for result in self._sockSendAll(data):
yield result
def _sockRecvAll(self, length):
"""
Read exactly the amount of bytes specified in L{length} from raw socket.
@rtype: generator
@return: generator that will return 0 or 1 in case the socket is non
blocking and would block and bytearray in case the read finished
@raise TLSAbruptCloseError: when the socket closed
"""
buf = bytearray(0)
if length == 0:
yield buf
while True:
try:
socketBytes = self.sock.recv(length - len(buf))
except socket.error as why:
if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
yield 0
continue
else:
raise
#if the connection closed, raise socket error
if len(socketBytes) == 0:
raise TLSAbruptCloseError()
buf += bytearray(socketBytes)
if len(buf) == length:
yield buf
def _recvHeader(self):
"""Read a single record header from socket"""
#Read the next record header
buf = bytearray(0)
ssl2 = False
result = None
for result in self._sockRecvAll(1):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
if buf[0] in ContentType.all:
ssl2 = False
# SSLv3 record layer header is 5 bytes long, we already read 1
result = None
for result in self._sockRecvAll(4):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
# XXX this should be 'buf[0] & 128', otherwise hello messages longer
# than 127 bytes won't be properly parsed
elif buf[0] == 128:
ssl2 = True
# in SSLv2 we need to read 2 bytes in total to know the size of
# header, we already read 1
result = None
for result in self._sockRecvAll(1):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
else:
raise TLSIllegalParameterException(
"Record header type doesn't specify known type")
#Parse the record header
if ssl2:
record = RecordHeader2().parse(Parser(buf))
else:
record = RecordHeader3().parse(Parser(buf))
yield record
def recv(self):
"""
Read a single record from socket, handle SSLv2 and SSLv3 record layer
@rtype: generator
@return: generator that returns 0 or 1 in case the read would be
blocking or a tuple containing record header (object) and record
data (bytearray) read from socket
@raise socket.error: In case of network error
@raise TLSAbruptCloseError: When the socket was closed on the other
side in middle of record receiving
@raise TLSRecordOverflow: When the received record was longer than
allowed by TLS
@raise TLSIllegalParameterException: When the record header was
malformed
"""
record = None
for record in self._recvHeader():
if record in (0, 1):
yield record
else: break
assert record is not None
#Check the record header fields
# 18432 = 2**14 (basic record size limit) + 1024 (maximum compression
# overhead) + 1024 (maximum encryption overhead)
if record.length > 18432:
raise TLSRecordOverflow()
#Read the record contents
buf = bytearray(0)
result = None
for result in self._sockRecvAll(record.length):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
yield (record, buf)
class ConnectionState(object):
"""Preserve the connection state for reading and writing data to records"""
def __init__(self):
"""Create an instance with empty encryption and MACing contexts"""
self.macContext = None
self.encContext = None
self.fixedNonce = None
self.seqnum = 0
def getSeqNumBytes(self):
"""Return encoded sequence number and increment it."""
writer = Writer()
writer.add(self.seqnum, 8)
|
self.seqnum += 1
return writer.bytes
class RecordLayer(object):
"""
Implementation of TLS record layer protocol
@ivar version: the TLS version to use (tuple encoded as on the wire)
@ivar sock: underlying socket
@ivar client: whether the conne
|
ction should use encryption
@ivar encryptThenMAC: use the encrypt-then-MAC mechanism for record
integrity
"""
def __init__(self, sock):
self.sock = sock
self._recordSocket = RecordSocket(sock)
self._version = (0, 0)
self.client = True
self._writeState = ConnectionState()
self._readState = ConnectionState()
self._pendingWriteState = ConnectionState()
self._pendingReadState = ConnectionState()
self.fixedIVBlock = None
self.encryptThenMAC = False
@property
def blockSize(self):
"""Return the size of block used by current symmetric cipher (R/O)"""
return self._writeState.encContext.block_size
@property
def version(self):
"""Return the TLS version used by record layer"""
return self._version
@version.setter
def version(self, val):
"""Set the TLS version used by record layer"""
self._version = val
s
|
ddalex/p9
|
sign/admin.py
|
Python
|
mit
| 1,555 | 0.009646 |
from django.contrib import admin
from django.contrib.admin.filters import RelatedFieldListFilter
from .models import ClientLog, Client, Feedback
def client_id(obj):
return obj.client.externid
class AliveClientsRelatedFieldListFilter(RelatedFieldListFilter):
def __init__(self, field, request, *args, **kwargs):
field.rel.limit_choices_to = {'status': Client.STATUS_ALIVE }
super(AliveClientsRelatedFieldListFilter, self).__init__(field, request, *args, **kwargs)
class ClientLogAdmin(admin.ModelAdmin):
list_display = ('client', 'tag', 'log', 'updated')
list_filter = ('client', )
ordering = ('-updated',)
search_fields = ("client__ip", "client__externid", "log", "tag",)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "client":
kwargs["queryset"] = Client.objects.filter(status = Client.STATUS_ALIVE)
return super(ClientLogAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
admin.site.register(ClientLog, ClientLogAdmin)
class ClientAdmin(admin.ModelAdmin):
list_display = ("status", "externid", "ip", "updated", "created", "useragent")
list_filter =
|
("status", "useragent", "failures", "complets")
ordering = ("status", "-updated", "-created", )
search_fi
|
elds = ("ip", "useragent", "externid", )
admin.site.register(Client, ClientAdmin)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ("id", "useremail", "ip", "created")
ordering = ("-id",)
admin.site.register(Feedback, FeedbackAdmin)
|
AcerFeng/videoSpider
|
spider/models.py
|
Python
|
apache-2.0
| 2,458 | 0.003211 |
from sqlalchemy import Column, String, BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
import time
BaseModel = declarative_base()
class Video(BaseModel):
__tablename__ = 'video'
id = Column(BigInteger, primary_key=True, autoincrement=True)
name = Column(String(80), nullable=False)
image = Column(String(200))
desc = Column(String(100))
play_num = Column(String(50))
update_num = Column(String(50))
link = Column(String(200))
score = Column(String(10))
    platform = Column(String(10), nullable=False)  # source platform
    video_category = Column(String(10), nullable=False)  # top-level category: TV series, movie, variety show
    series_region = Column(String(20))  # TV series region: all trending, mainland, web series, Korean, American
    movie_region = Column(String(20))  # movie region: all trending, theatrical, mainland, Hong Kong, US
    veriety_region = Column(String(20))  # variety show category: hot
created_at = Column(BigInteger, default=time.time)
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/videoSpider?charset=utf8mb4')
BaseModel.metadata.create_all(engine)
"""
data = {
'name' : name.get_text(),
'image' : 'http:' + image.get('r-lazyload'),
'desc' : ' '.join(desc.get_text().strip().split()),
'play_number' : num.get_text(),
'update_status' : status,
|
'link' : link.get('href')
}
# Video types: TV series, movies, variety shows
Video_large_type = Enum('Video_large_type', ('Series', 'Movies', 'Variety'))
# TV series types: all trending, mainland, web series, Korean dramas, American dramas
Series_region = Enum('Series_region', ('All', 'Local', 'Net', 'SouthKorea', 'EuropeAndAmerica'))
# Movie types: all trending, theatrical, mainland, Hong Kong, American
Movie_region = Enum('Movie_region', ('All', 'Cinemas', 'Local', 'HongKong', 'America'))
# Variety show types: all trending
Varie
|
ty_type = Enum('Variety_type', ('Hot'))
"""
class RequestModel(object):
def __init__(self, source_url, platform, video_category, *, series_region=None, movie_region=None, veriety_region=None):
self.source_url = source_url
self.platform = platform
self.video_category = video_category
self.series_region = series_region
self.movie_region = movie_region
self.veriety_region = veriety_region
|
icemoon1987/stock_monitor
|
model/turtle.py
|
Python
|
gpl-2.0
| 6,761 | 0.00419 |
#!/usr/bin/env python
#coding=utf-8
import sys
sys.path.append("..")
import urllib
import myjson
from datetime import datetime, date, timedelta
import time
from define import *
from data_interface.stock_dataset import stock_dataset
class turtle(object):
"""
turtle model
"""
def get_mean(self, data, end_index, k):
if end_index < k-1:
return 0
else:
sum = 0
for num in data[end_index-k+1 : end_index+1]:
sum += num
return float(sum / (k * 1.0))
def get_max(self, data, end_index, k):
if end_index < k:
return 0
else:
tmp = data[end_index-k : end_index]
max = tmp[0]
for num in tmp:
if num > max:
max = num
return max
def get_max_date(self, dataset, end_index, k):
if end_index < k:
return 0
else:
tmp = dataset.data[end_index-k : end_index]
max = tmp[0].close_price
date = tmp[0].date
for num in tmp:
if num.close_price > max:
max = num.close_price
date = num.date
return (max, date)
def get_min(self, data, end_index, k):
if end_index < k:
return 0
else:
tmp = data[end_index-k : end_index]
min = tmp[0]
for num in tmp:
if num < min:
min = num
return min
def get_min_date(self, dataset, end_index, k):
if end_index < k:
return 0
else:
tmp = dataset.data[end_index-k : end_index]
min = tmp[0].close_price
date = tmp[0].date
for num in tmp:
if num.close_price < min:
min = num.close_price
date = num.date
return (min, date)
def get_trading_plan(self, dataset, date_str):
"""
        get trading plan of 28 lundong model, return an empty map when no decision can be made
choise:
-3: not enough data, do not trade
-2: date_str error
-1: unknown problem, do not trade
0: sell all
1: sell half
        2: close_price condition not met, do not trade
        3: mean line condition not met, do not trade
4: buy
        Strategy:
        1. I only use this trading model on hourly charts.
        2. It is generally only applied to broad market indices with strong trends, not to individual stocks.
        3. First plot the 10-hour and 100-hour moving averages, then the 50-hour high and low curves, plus the 25-hour low curve (TongDaXin users can copy the indicator source below; remember to overlay it on the chart).
        4. Buy condition: go long only when the 10-hour moving average is above the 100-hour moving average and the hourly close breaks above the previous 50-hour high.
        5. Exit condition: close half the position when the hourly close falls below the previous 25-hour low, and close it all when it falls below the previous 50-hour low.
"""
result = {}
result["choise"] = -1
# Get stock data by
|
date_str. If not exist, return.
data = dataset.get_data(date_str)
if data == None:
result["choise"] = -2
return result
data_index = dataset.get_data_index(date_str)
close_prices = [ item.close_price for item in data
|
set.data ]
result["close_price"] = close_prices[data_index]
result["10_mean"] = self.get_mean(close_prices, data_index, 10)
result["100_mean"] = self.get_mean(close_prices, data_index, 100)
result["50_max"] = self.get_max(close_prices, data_index, 50)
result["50_min"] = self.get_min(close_prices, data_index, 50)
result["25_min"] = self.get_min(close_prices, data_index, 25)
if result["10_mean"] == 0 or result["100_mean"] == 0 or result["50_max"] == 0 or result["50_min"] == 0 or result["25_min"] == 0:
result["choise"] = -3
elif result["close_price"] < result["50_min"]:
result["choise"] = 0
elif result["close_price"] < result["25_min"]:
result["choise"] = 1
elif result["close_price"] > result["50_max"]:
if result["10_mean"] < result["100_mean"]:
result["choise"] = 3
else:
result["choise"] = 4
else:
result["choise"] = 2
return result
def get_trading_plan3(self, dataset, date_str):
"""
        Strategy: https://www.jisilu.cn/question/66127
        1. Buy condition: the closing price exceeds the intraday high of the last 60 trading days (not the highest close).
        2. Sell condition: the closing price falls below the intraday low of the last 38 trading days.
        3. Otherwise keep the current position unchanged.
choise:
-3: not enough data, do not trade
-2: date_str error
-1: unknown problem, do not trade
0: sell all
1: sell half
        2: close_price condition not met, do not trade
        3: mean line condition not met, do not trade
4: buy
"""
# Get stock data by date_str. If not exist, return.
result = {}
result["file_end_date"] = dataset.data[-2].date #dataset.data[-1].date的数据为读取文件后加入的今天的数据
result["start_buy_date"] = dataset.data[0 - BUY_DAYS].date
result["start_sell_date"] = dataset.data[0 - SELL_DAYS].date
result["date"] = date_str
result["BUY_DAYS"] = str(BUY_DAYS)
result["SELL_DAYS"] = str(SELL_DAYS)
result["choise"] = -1
result["info"] = "unknown problem, do not trade"
data = dataset.get_data(date_str)
if data == None:
result["choise"] = -2
result["info"] = "date_str error"
return result
data_index = dataset.get_data_index(date_str)
result["close_price"] = dataset.data[data_index].close_price
result["max_date"] = self.get_max_date(dataset, data_index, BUY_DAYS)
result["min_date"] = self.get_min_date(dataset, data_index, SELL_DAYS)
result["BUY_DAYS"] = str(BUY_DAYS)
result["SELL_DAYS"] = str(SELL_DAYS)
if result["close_price"] > result["max_date"][0]:
result["choise"] = 4
result["info"] = "buy"
elif result["close_price"] < result["min_date"][0]:
result["choise"] = 0
result["info"] = "sell all"
elif result["close_price"] < result["max_date"][0] and result["close_price"] > result["min_date"][0]:
result["choise"] = 2
result["info"] = "hold on"
return result
if __name__ == '__main__':
pass
|
benjyw/pants
|
src/python/pants/backend/python/lint/isort/skip_field.py
|
Python
|
apache-2.0
| 542 | 0 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import PythonLibrary, Pytho
|
nTests
from pants.engine.target import BoolField
class SkipIsortField(BoolField):
alias = "skip_isort"
default = False
help = "If true, don't run isort on this target's code."
def rules
|
():
return [
PythonLibrary.register_plugin_field(SkipIsortField),
PythonTests.register_plugin_field(SkipIsortField),
]
|
tylerclair/canvas_admin_scripts
|
course_copy_csv.py
|
Python
|
mit
| 684 | 0.00731 |
import requests
import csv
from configparser import ConfigParser
config = ConfigParser()
config.read("config.cfg")
token = config.get("auth", "token")
domain = config.get("instance", "domain")
headers = {"Authorization" : "Bearer %s" % token}
source_course_id
|
= 311693
csv_file = ""
payload = {'migration_type': 'course_copy_importer', 'settings[source_course_id]': source_course_id}
with open(csv_file, 'r', newline='') as courses:
coursesreader = csv.reader(courses)
for course in coursesreader:
u
|
ri = domain + "/api/v1/courses/sis_course_id:%s/content_migrations" % course
r = requests.post(uri, headers=headers,data=payload)
        print(r.status_code, course)
|
Osmose/pontoon
|
pontoon/base/tests/test_placeables.py
|
Python
|
bsd-3-clause
| 1,460 | 0.002055 |
from django_nose.tools import assert_equal
from pontoon.base.tests import TestCase
from pontoon.base.utils import Newli
|
neEscapePlaceable, mark_placeables
class PlaceablesTests(TestCase):
def test_newline_escape_placeable(self):
"""Test detecting newline escape sequences"""
placeable = NewlineEscapePlaceable
assert_equal(placeable.parse(u'A string\\n')[1], placeable([u'\\n
|
']))
assert_equal(placeable.parse(u'\\nA string')[0], placeable([u'\\n']))
assert_equal(placeable.parse(u'A\\nstring')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'A string'), None)
assert_equal(placeable.parse(u'A\nstring'), None)
def test_mark_newline_escape_placeables(self):
"""Test detecting newline escape sequences"""
assert_equal(
mark_placeables(u'A string\\n'),
u'A string<mark class="placeable" title="Escaped newline">\\n</mark>'
)
assert_equal(
mark_placeables(u'\\nA string'),
u'<mark class="placeable" title="Escaped newline">\\n</mark>A string'
)
assert_equal(
mark_placeables(u'A\\nstring'),
u'A<mark class="placeable" title="Escaped newline">\\n</mark>string'
)
assert_equal(
mark_placeables(u'A string'),
u'A string'
)
assert_equal(
mark_placeables(u'A\nstring'),
u'A\nstring'
)
|
akretion/l10n-brazil
|
l10n_br_account/models/account_tax.py
|
Python
|
agpl-3.0
| 4,286 | 0 |
# Copyright (C) 2009 - TODAY Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
class AccountTax(models.Model):
_inherit = 'account.tax'
fiscal_tax_ids = fields.Many2many(
comodel_name='l10n_br_fiscal.tax',
relation='l10n_br_fiscal_account_tax_rel',
        column1='account_tax_id',
        column2='fiscal_tax_id',
string='Fiscal Taxes',
|
)
@api.multi
def compute_all(
self,
price_unit,
currency=None,
quantity=None,
product=None,
partner=None,
fiscal_taxes=None,
operation_line=False,
ncm=None,
nbm=None,
cest=None,
discount_value=None,
insurance_value=None,
other_costs_value=None,
freight_value=None,
fiscal_price=Non
|
e,
fiscal_quantity=None,
uot=None,
icmssn_range=None
):
""" Returns all information required to apply taxes
        (in self + their children in case of a tax group).
We consider the sequence of the parent for group of taxes.
Eg. considering letters as taxes and alphabetic order
as sequence :
[G, B([A, D, F]), E, C] will be computed as [A, D, F, C, E, G]
RETURN: {
'total_excluded': 0.0, # Total without taxes
'total_included': 0.0, # Total with taxes
'taxes': [{ # One dict for each tax in self
# and their children
'id': int,
'name': str,
'amount': float,
'sequence': int,
'account_id': int,
'refund_account_id': int,
'analytic': boolean,
}]
} """
taxes_results = super().compute_all(
price_unit, currency, quantity, product, partner)
if not fiscal_taxes:
fiscal_taxes = self.env['l10n_br_fiscal.tax']
product = product or self.env['product.product']
# FIXME Should get company from document?
fiscal_taxes_results = fiscal_taxes.compute_taxes(
company=self.env.user.company_id,
partner=partner,
product=product,
price_unit=price_unit,
quantity=quantity,
uom_id=product.uom_id,
fiscal_price=fiscal_price or price_unit,
fiscal_quantity=fiscal_quantity or quantity,
uot_id=uot or product.uot_id,
ncm=ncm or product.ncm_id,
nbm=nbm or product.nbm_id,
cest=cest or product.cest_id,
discount_value=discount_value,
insurance_value=insurance_value,
other_costs_value=other_costs_value,
freight_value=freight_value,
operation_line=operation_line,
icmssn_range=icmssn_range)
account_taxes_by_domain = {}
for tax in self:
tax_domain = tax.tax_group_id.fiscal_tax_group_id.tax_domain
account_taxes_by_domain.update({tax.id: tax_domain})
for account_tax in taxes_results['taxes']:
fiscal_tax = fiscal_taxes_results.get(
account_taxes_by_domain.get(account_tax.get('id'))
)
if fiscal_tax:
tax = self.filtered(lambda t: t.id == account_tax.get('id'))
if not fiscal_tax.get('tax_include') and not tax.deductible:
taxes_results['total_included'] += fiscal_tax.get(
'tax_value')
account_tax.update({
'id': account_tax.get('id'),
'name': '{0} ({1})'.format(
account_tax.get('name'),
fiscal_tax.get('name')
),
'amount': fiscal_tax.get('tax_value'),
'base': fiscal_tax.get('base'),
'tax_include': fiscal_tax.get('tax_include'),
})
if tax.deductible:
account_tax.update({
'amount': fiscal_tax.get('tax_value', 0.0) * -1,
})
return taxes_results
|
jkthompson/block-chain-analytics
|
block.py
|
Python
|
mit
| 10,671 | 0.023803 |
import struct
import hashlib
magic_number = 0xD9B4BEF9
block_prefix_format = 'I32s32sIII'
def read_uint1(stream):
return ord(stream.read(1))
def read_uint2(stream):
return struct.unpack('H', stream.read(2))[0]
def read_uint4(stream):
return struct.unpack('I', stream.read(4))[0]
def read_uint8(stream):
return struct.unpack('Q', stream.read(8))[0]
def read_hash32(stream):
return stream.read(32)[::-1] #reverse it since we are little endian
def read_merkle32(stream):
return stream.read(32)[::-1] #reverse it
def read_time(stream):
utctime = read_uint4(stream)
#Todo: convert to datetime object
return utctime
def read_varint(stream):
ret = read_uint1(stream)
if ret < 0xfd: #one byte int
return ret
    if ret == 0xfd: #uint16_t in next two bytes
return read_uint2(stream)
if ret == 0xfe: #uint32_t in next 4 bytes
return read_uint4(stream)
    if ret == 0xff: #uint64_t in next 8 bytes
return read_uint8(stream)
return -1
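# Editor's note (illustrative, not part of the original file): this decodes
# Bitcoin's "CompactSize" varint, where the first byte selects the width:
#   b'\x6a'                  -> 106       (single byte)
#   b'\xfd\xe8\x03'          -> 1000      (0xfd + uint16, little-endian on x86)
#   b'\xfe\x40\x42\x0f\x00'  -> 1000000   (0xfe + uint32)
# e.g. read_varint(io.BytesIO(b'\xfd\xe8\x03')) would return 1000.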
def get_hexstring(bytebuffer):
#return ''.join(('%x'%ord(a)) for a in bytebuffer)
return bytebuffer.encode('hex')
def find_magic_number(stream):
'''read byte stream until a magic number is found, returns None if end of stream is reached'''
while True:
byte = stream.read(1)
if not byte: return None # EOF
if (ord(byte) == 0xf9):
stream.seek(-1,1) # move back 1 byte and try to read all 4 bytes
magic = read_uint4(stream)
if (magic == 0xd9b4bef9):
return magic
class Tx_Input(object):
def __init__(self):
super(Tx_Input, self).__init__()
def parse(self, stream):
self.prevhash = read_hash32(stream)
self.prevtx_out_idx = read_uint4(stream)
self.txin_script_len = read_varint(stream)
# TODO in later modules we will convert scriptSig to its own class
self.scriptSig = stream.read(self.txin_script_len)
self.sequence_no = read_uint4(stream)
def updateTxDict(self,txDict):
'''txDict holds arrays of Tx_Input values'''
txDict['txIn_prevhash'] = txDict.get('txIn_prevhash', [])
txDict['txIn_prevhash'].append(get_hexstring(self.prevhash))
txDict['txIn_prevtx_out_idx'] = txDict.get('txIn_prevtx_out_idx', [])
txDict['txIn_prevtx_out_idx'].append(self.prevtx_out_idx)
txDict['txIn_txin_script_len'] = txDict.get('txIn_txin_script_len', [])
txDict['txIn_txin_script_len'] .append(self.txin_script_len)
txDict['txIn_scriptSig'] = txDict.get('txIn_scriptSig', [])
txDict['txIn_scriptSig'].append(get_hexstring(self.scriptSig))
txDict['txIn_sequence_no'] = txDict.get('txIn_sequence_no', [])
txDict['txIn_sequence_no'].append(self.sequence_no)
return txDict
def __str__(self):
return 'PrevHash: %s \nPrev Tx out index: %d \nTxin Script Len: %d \nscriptSig: %s \nSequence: %8x' % \
(get_hexstring(self.prevhash),
self.prevtx_out_idx,
self.txin_script_len,
get_hexstring(self.scriptSig),
self.sequence_no)
def __repr__(self):
        return self.__str__()
class Tx_Output(object):
def __init__(self):
|
super(Tx_Output, self).__init__()
pass
def parse(self, stream):
self.value = read_uint8(stream)
self.txout_script_len = read_varint(stream)
self.scriptPubKey = stream.read(self.txout_script_len)
def updateTxDict(self,txDict):
'''txDict holds arrays of Tx_O
|
utput values'''
txDict['txOut_value'] = txDict.get('txOut_value', [])
txDict['txOut_value'].append(self.value)
txDict['txOut_script_len'] = txDict.get('txOut_script_len', [])
txDict['txOut_script_len'].append(self.txout_script_len)
txDict['txOut_scriptPubKey'] = txDict.get('txOut_scriptPubKey', [])
txDict['txOut_scriptPubKey'].append(get_hexstring(self.scriptPubKey))
return txDict
def __str__(self):
return 'Value (satoshis): %d (%f btc)\nTxout Script Len: %d\nscriptPubKey: %s' %\
(self.value, (1.0*self.value)/100000000.00,
self.txout_script_len,
get_hexstring(self.scriptPubKey))
def __repr__(self):
        return self.__str__()
class Transaction(object):
"""Holds one Transaction as part of a block"""
def __init__(self):
super(Transaction, self).__init__()
self.version = None
self.in_cnt = None
self.inputs = None
self.out_cnt = None
self.outputs = None
self.lock_time = None
def parse(self,stream):
#TODO: error checking
self.version = read_uint4(stream)
self.in_cnt = read_varint(stream)
self.inputs = []
if self.in_cnt > 0:
for i in range(0, self.in_cnt):
input = Tx_Input()
input.parse(stream)
self.inputs.append(input)
self.out_cnt = read_varint(stream)
self.outputs = []
if self.out_cnt > 0:
for i in range(0, self.out_cnt):
output = Tx_Output()
output.parse(stream)
self.outputs.append(output)
self.lock_time = read_uint4(stream)
def updateTxDict(self,txDict):
txDict['tx_version'] = self.version
txDict['in_cnt'] = self.in_cnt
txDict['out_cnt'] = self.out_cnt
txDict['lock_time'] = self.lock_time
for i in range(self.in_cnt):
txDict = self.inputs[i].updateTxDict(txDict)
for i in range(self.out_cnt):
txDict = self.outputs[i].updateTxDict(txDict)
return txDict
def __str__(self):
s = 'Version: %d\nInputs count: %d\n---Inputs---\n%s\nOutputs count: %d\n---Outputs---\n%s\nLock_time:%8x' % (self.version, self.in_cnt,
'\n'.join(str(i) for i in self.inputs),
self.out_cnt,
'\n'.join(str(o) for o in self.outputs),
self.lock_time)
return s
class BlockHeader(object):
"""BlockHeader represents the header of the block"""
def __init__(self):
super( BlockHeader, self).__init__()
self.version = None
self.prevhash = None
self.merklehash = None
self.time = None
self.bits = None
self.nonce = None
self.blockprefix = None
self.blockhash = None
def parse(self, stream):
#TODO: error checking
self.version = read_uint4(stream)
self.prevhash = read_hash32(stream)
self.merklehash = read_merkle32(stream)
self.time = read_time(stream)
self.bits = read_uint4(stream)
self.nonce = read_uint4(stream)
# construct the prefix and hash
self.blockprefix = ( struct.pack("<L", self.version) + self.prevhash[::-1] + \
self.merklehash[::-1] + struct.pack("<LLL", self.time, self.bits, self.nonce))
self.blockhash = hashlib.sha256(hashlib.sha256(self.blockprefix).digest()).digest()[::-1]
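        # Editor's note (illustrative): the 80-byte header is re-serialized in
        # little-endian wire order (stored hashes reversed back) and hashed with
        # double SHA-256; the final [::-1] flips the digest into the big-endian
        # form block explorers show, e.g. the Bitcoin genesis header hashes to
        # 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f.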
def updateTxDict(self, txDict):
txDict['version'] = self.version
txDict['prevhash'] = get_hexstring(self.prevhash)
txDict['merklehash'] = get_hexstring(self.merklehash)
txDict['time'] = self.time
txDict['bits'] = self.bits
txDict['nonce'] = self.nonce
txDict['blockprefix'] = get_hexstring(self.blockprefix)
txDict['blockhash'] = get_hexstring(self.blockhash)
return txDict
def __str__(self):
return "\n\t\tVersion: %d \n\t\tPreviousHash: %s \n\t\tMerkle: %s \n\t\tTime: %8x \n\t\tBits: %8x \n\t\tNonce: %8x \n\t\tPrefix: %s \n\t\tBlockHash: %s \n\t\t" % (self.version, \
get_hexstring(self.prevhash), \
get_hexstring(self.merklehash), \
self.time, \
self.bits, \
self.nonce, \
get_hexstring(self.blockprefix), \
get_hexstring(self.blockhash))
def __repr__(self):
        return self.__str__()
class Block(object):
"""A block to be parsed from file"""
def __init__(self):
self.magic_no = -1
self.blocksize = 0
self.blockheader = None
self.transaction_cnt = 0
self.transactions = None
def parseBlock(self, bf):
self.magic_no = find_magic_number(bf)
|
synth3tk/the-blue-alliance
|
controllers/api/api_event_controller.py
|
Python
|
mit
| 6,883 | 0.001017 |
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from database.event_query import EventListQuery
from helpers.award_helper import AwardHelper
from helpers.district_helper import DistrictHelper
from helpers.event_insights_helper import EventInsightsHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventController, self).__init__(*args, **kw)
self.event_key = self.request.route_kwargs["event_key"]
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
@property
def _validators(self):
return [("event_id_validator", self.event_key)]
def _set_event(self, event_key):
self.event = Event.get_by_id(event_key)
if self.event is None:
self._errors = json.dumps({"404": "%s event not found" % self.event_key})
self.abort(404)
def _track_call(self, event_key):
self._track_call_defer('event', event
|
_key)
def _render(self, event_key):
self._set_event(event_key)
event_dict = ModelToDict.eventConverter(self.event)
return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventTeamsController, self).__init__(*args, **kw)
self._parti
|
al_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/teams', event_key)
def _render(self, event_key):
self._set_event(event_key)
teams = filter(None, self.event.teams)
team_dicts = [ModelToDict.teamConverter(team) for team in teams]
return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventMatchesController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/matches', event_key)
def _render(self, event_key):
self._set_event(event_key)
matches = self.event.matches
match_dicts = [ModelToDict.matchConverter(match) for match in matches]
return json.dumps(match_dicts, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}" # (event_key)
CACHE_VERSION = 5
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventStatsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/stats', event_key)
def _render(self, event_key):
self._set_event(event_key)
stats = {}
matchstats = self.event.matchstats
if matchstats:
stats.update(matchstats)
year_specific = EventInsightsHelper.calculate_event_insights(self.event.matches, self.event.year)
if year_specific:
stats['year_specific'] = year_specific
return json.dumps(stats)
class ApiEventRankingsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventRankingsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/rankings', event_key)
def _render(self, event_key):
self._set_event(event_key)
ranks = json.dumps(Event.get_by_id(event_key).rankings)
if ranks is None or ranks == 'null':
return '[]'
else:
return ranks
class ApiEventAwardsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventAwardsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/awards', event_key)
def _render(self,event_key):
self._set_event(event_key)
award_dicts = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/district_points', event_key)
def _render(self, event_key):
self._set_event(event_key)
points = DistrictHelper.calculate_event_points(self.event)
return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}" # (year)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
@property
def _validators(self):
return []
def _track_call(self, *args, **kw):
self._track_call_defer('event/list', self.year)
def _render(self, year=None):
if self.year < 1992 or self.year > datetime.now().year + 1:
self._errors = json.dumps({"404": "No events found for %s" % self.year})
self.abort(404)
events = EventListQuery(self.year).fetch()
event_list = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(event_list, ensure_ascii=True)
|
pympler/pympler
|
test/test_process.py
|
Python
|
apache-2.0
| 5,151 | 0.002136 |
"""
Check the measured process sizes. If we are on a platform which supports
multiple measuring facilities (e.g. Linux), check if the reported sizes match.
This should help to protect against scaling errors (e.g. Byte vs KiB) or using
the wrong value for a different measure (e.g. resident in physical memory vs
virtual memory size).
"""
import sys
import unittest
from unittest import mock
from pympler import process
class ProcessMemoryTests(unittest.TestCase):
def _match_sizes(self, pi1, pi2, ignore=[]):
"""
Match sizes by comparing each set field. Process size may change
inbetween two measurements.
"""
if pi1.available and pi2.available:
for arg in ('vsz', 'rss', 'data_segment', 'shared_segment',
'stack_segment', 'code_segment'):
if arg in ignore:
continue
size1 = getattr(pi1, arg)
size2 = getattr(pi2, arg)
if size1 and size2:
delta = abs(size1 - size2)
# Allow for a difference of the size of two pages or 5%
if delta > pi1.pagesize * 2 and delta > size1 * 0.05:
self.fail("%s mismatch: %d != %d" % (arg, size1, size2))
if pi1.pagefaults and pi2.pagefaults:
# If both records report pagefaults compare the reported
# number. If a pagefault happens after taking the first
# snapshot and before taking the second the latter will show a
# higher pagefault number. In that case take another snapshot
# with the first variant and check it's now reporting a higher
# number as well. We assume pagefaults statistics are
# monotonic.
if pi1.pagefaults < pi2.pagefaults:
pi1.update()
if pi1.pagefaults < pi2.pagefaults:
pf1 = pi1.pagefaults
pf2 = pi2.pagefaults
self.fail("Pagefault mismatch: %d != %d" % (pf1, pf2))
else:
self.assertEqual(pi1.page
|
faults, pi2.pagefaults)
if pi1.pagesize and pi2.pagesize:
self.assertEqual(pi1.pagesize, pi2.pagesize)
def test_ps_vs_proc_sizes(self):
'''Test process sizes match: ps util
|
vs /proc/self/stat
'''
psinfo = process._ProcessMemoryInfoPS()
procinfo = process._ProcessMemoryInfoProc()
self._match_sizes(psinfo, procinfo)
def test_ps_vs_getrusage(self):
'''Test process sizes match: ps util vs getrusage
'''
psinfo = process._ProcessMemoryInfoPS()
try:
resinfo = process._ProcessMemoryInfoResource()
except AttributeError:
pass
else:
self._match_sizes(psinfo, resinfo, ignore=['rss'])
if psinfo.available and resinfo.available:
self.assertTrue(resinfo.rss >= psinfo.rss)
def test_proc_vs_getrusage(self):
'''Test process sizes match: /proc/self/stat util vs getrusage
'''
procinfo = process._ProcessMemoryInfoProc()
try:
resinfo = process._ProcessMemoryInfoResource()
except AttributeError:
pass
else:
self._match_sizes(procinfo, resinfo, ignore=['rss'])
if procinfo.available and resinfo.available:
self.assertTrue(resinfo.rss >= procinfo.rss)
def test_get_current_threads(self):
'''Test thread info is extracted.'''
tinfos = process.get_current_threads()
for tinfo in tinfos:
self.assertEqual(type(tinfo.ident), int)
self.assertEqual(type(tinfo.name), type(''))
self.assertEqual(type(tinfo.daemon), type(True))
self.assertNotEqual(tinfo.ident, 0)
def test_proc(self):
'''Test reading proc stats with mock data.'''
mock_stat = mock.mock_open(read_data='22411 (cat) R 22301 22411 22301 34818 22411 4194304 82 0 0 0 0 0 0 0 20 0 1 0 709170 8155136 221 18446744073709551615 94052544688128 94052544719312 140729623469552 0 0 0 0 0 0 0 0 0 17 6 0 0 0 0 0 94052546816624 94052546818240 94052566347776 140729623473446 140729623473466 140729623473466 140729623478255 0')
mock_status = mock.mock_open(read_data='Name: cat\n\nVmData: 2 kB\nMultiple colons: 1:1')
with mock.patch('builtins.open', new_callable=mock.mock_open) as mock_file:
mock_file.side_effect = [mock_stat.return_value, mock_status.return_value]
procinfo = process._ProcessMemoryInfoProc()
self.assertTrue(procinfo.available)
self.assertEqual(procinfo.vsz, 8155136)
self.assertEqual(procinfo.data_segment, 2048)
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [ ProcessMemoryTests, ]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(map(tclass, names))
if not unittest.TextTestRunner().run(suite).wasSuccessful():
sys.exit(1)
|
fbradyirl/home-assistant
|
tests/components/homekit/common.py
|
Python
|
apache-2.0
| 317 | 0 |
"""Collection of fixtures and functions for the HomeKit t
|
ests."""
from unittest.mock import patch
def patch_debounce():
"""Return patch for debounce method."""
return patch(
"homeassistant.components.homekit.accessories.debounce",
lambda f: lambda
|
*args, **kwargs: f(*args, **kwargs),
)
|
userzimmermann/python-jinjatools
|
jinjatools/env.py
|
Python
|
gpl-3.0
| 1,555 | 0.001286 |
# python-jinjatools
#
# Various tools for Jinja2,
# including new filters and tests based on python-moretools,
# a JinjaLoader class for Django,
# and a simple JinjaBuilder class for SCons.
#
# Copyright (C) 2011-2015 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# python-jinjatools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-jinjatools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-jinjatools. If not, see <http://www.gnu.org/licenses/>.
__all__
|
= ['Environment']
from itertools import chain
import jinja2
class Environment(jinja2.
|
Environment):
def __init__(self, filters={}, tests={}, globals={}, **kwargs):
jinja2.Environment.__init__(self, **kwargs)
morefilters = __import__('jinjatools.filters').filters.filters
for name, func in chain(morefilters.items(), filters.items()):
self.filters[name] = func
for name, func in tests.items():
self.tests[name] = func
for name, value in globals.items():
self.globals[name] = value
# from .filters import filters as morefilters
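# Illustrative usage (editor's sketch, not part of the original module):
#   env = Environment(filters={'shout': lambda s: s.upper()},
#                     globals={'project': 'demo'},
#                     loader=jinja2.DictLoader({'t': '{{ project|shout }}'}))
#   env.get_template('t').render()   # -> 'DEMO'
# Extra filters/tests/globals are merged on top of those from jinjatools.filters.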
|
CenturylinkTechnology/ansible-modules-extras
|
clustering/consul_session.py
|
Python
|
gpl-3.0
| 9,707 | 0.001959 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GN
|
U General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: consul_session
short_description: "manipulate consul sessions"
description:
- allows the addition, modification and deletion of sessions in a consul
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In depth documentation for worki
|
ng with
sessions can be found here http://www.consul.io/docs/internals/sessions.html
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
author: "Steve Gargan @sgargan"
options:
state:
description:
- whether the session should be present i.e. created if it doesn't
exist, or absent, removed if present. If created, the ID for the
session is returned in the output. If absent, the name or ID is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
specifying info, node or list for the state; for node or info, the
node name or session id is required as parameter.
required: false
choices: ['present', 'absent', 'info', 'node', 'list']
default: present
name:
description:
- the name that should be associated with the session. This is opaque
to Consul and not required.
required: false
default: None
delay:
description:
- the optional lock delay that can be attached to the session when it
        is created. Locks for invalidated sessions are blocked from being
acquired until this delay has expired. Durations are in seconds
default: 15
required: false
node:
description:
      - the name of the node with which the session will be associated.
        By default this is the name of the agent.
required: false
default: None
datacenter:
description:
- name of the datacenter in which the session exists or should be
created.
required: false
default: None
checks:
description:
- a list of checks that will be used to verify the session health. If
all the checks fail, the session will be invalidated and any locks
associated with the session will be release and can be acquired once
the associated lock delay has expired.
required: false
default: None
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
behavior:
description:
- the optional behavior that can be attached to the session when it
is created. This can be set to either ‘release’ or ‘delete’. This
controls the behavior when a session is invalidated.
default: release
required: false
version_added: "2.2"
"""
EXAMPLES = '''
- name: register basic session with consul
consul_session:
name: session1
- name: register a session with an existing check
consul_session:
name: session_with_check
checks:
- existing_check_name
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20s
- name: retrieve info about session by id
consul_session: id=session_id state=info
- name: retrieve active sessions
consul_session: state=list
'''
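# Editor's addition (illustrative only, not part of the module's EXAMPLES string):
# the 'behavior' option documented above could be exercised like
#   - name: register a session whose invalidation deletes rather than releases locks
#     consul_session:
#       name: session_with_delete_behavior
#       behavior: delete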
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
def execute(module):
state = module.params.get('state')
if state in ['info', 'list', 'node']:
lookup_sessions(module)
elif state == 'present':
update_session(module)
else:
remove_session(module)
def lookup_sessions(module):
datacenter = module.params.get('datacenter')
state = module.params.get('state')
consul_client = get_consul_api(module)
try:
if state == 'list':
sessions_list = consul_client.session.list(dc=datacenter)
#ditch the index, this can be grabbed from the results
if sessions_list and sessions_list[1]:
sessions_list = sessions_list[1]
module.exit_json(changed=True,
sessions=sessions_list)
elif state == 'node':
node = module.params.get('node')
if not node:
module.fail_json(
msg="node name is required to retrieve sessions for node")
sessions = consul_client.session.node(node, dc=datacenter)
module.exit_json(changed=True,
node=node,
sessions=sessions)
elif state == 'info':
session_id = module.params.get('id')
if not session_id:
module.fail_json(
msg="session_id is required to retrieve indvidual session info")
session_by_id = consul_client.session.info(session_id, dc=datacenter)
module.exit_json(changed=True,
session_id=session_id,
sessions=session_by_id)
except Exception as e:
module.fail_json(msg="Could not retrieve session info %s" % e)
def update_session(module):
name = module.params.get('name')
delay = module.params.get('delay')
checks = module.params.get('checks')
datacenter = module.params.get('datacenter')
node = module.params.get('node')
behavior = module.params.get('behavior')
consul_client = get_consul_api(module)
try:
session = consul_client.session.create(
name=name,
behavior=behavior,
node=node,
lock_delay=delay,
dc=datacenter,
checks=checks
)
module.exit_json(changed=True,
session_id=session,
name=name,
behavior=behavior,
delay=delay,
checks=checks,
node=node)
except Exception as e:
module.fail_json(msg="Could not create/update session %s" % e)
def remove_session(module):
session_id = module.params.get('id')
if not session_id:
module.fail_json(msg="""A session id must be supplied in order to
remove a session.""")
consul_client = get_consul_api(module)
try:
consul_client.session.destroy(session_id)
module.exit_json(changed=True,
session_id=session_id)
except Exception as e:
module.fail_json(msg="Could not remove session with id '%s' %s" % (
session_id, e))
def get_consul_api(module):
return consul.Consul(host=module.params.get('host'),
p
|
haypo/trollius
|
tests/test_sslproto.py
|
Python
|
apache-2.0
| 2,350 | 0 |
"""Tests for asyncio/sslproto.py."""
try:
import ssl
except ImportError:
ssl = None
import trollius as asyncio
from trollius import ConnectionResetError
from trollius import sslproto
from trollius import test_utils
from trollius.test_utils import mock
from trollius.test_utils import unittest
@unittest.skipIf(ssl is None, 'No ssl module')
class SslProtoHandshakeTests(test_utils.TestCase):
def setUp
|
(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def ssl_protocol(self, waiter=None):
sslcontext = test_utils.dummy_ssl_context()
app_proto = asyncio.Protocol()
proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter)
self.addCleanup(proto._app_transport.close)
return proto
|
def connection_made(self, ssl_proto, do_handshake=None):
transport = mock.Mock()
sslpipe = mock.Mock()
sslpipe.shutdown.return_value = b''
if do_handshake:
sslpipe.do_handshake.side_effect = do_handshake
else:
def mock_handshake(callback):
return []
sslpipe.do_handshake.side_effect = mock_handshake
with mock.patch('trollius.sslproto._SSLPipe', return_value=sslpipe):
ssl_proto.connection_made(transport)
def test_cancel_handshake(self):
        # Python issue #23197: cancelling a handshake must not raise an
# exception or log an error, even if the handshake failed
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
handshake_fut = asyncio.Future(loop=self.loop)
def do_handshake(callback):
exc = Exception()
callback(exc)
handshake_fut.set_result(None)
return []
waiter.cancel()
self.connection_made(ssl_proto, do_handshake)
with test_utils.disable_logger():
self.loop.run_until_complete(handshake_fut)
def test_eof_received_waiter(self):
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
self.connection_made(ssl_proto)
ssl_proto.eof_received()
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionResetError)
if __name__ == '__main__':
unittest.main()
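# The test case above can also be run directly from the repository root
# (illustrative invocation, assuming trollius and its test utilities are importable):
#   python tests/test_sslproto.py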
|
LMescheder/AdversarialVariationalBayes
|
avb/iaf/models/full0.py
|
Python
|
mit
| 698 | 0.004298 |
import tensorflow as tf
|
from tensorflow.contrib import slim as slim
from avb.ops import *
import math
def encoder(x, config, is_training=True):
df_dim = config['df_dim']
z_dim = config['z_dim']
a_dim = config['iaf_a_dim']
# Center x at 0
x = 2*x - 1
net = flatten_spatial(x)
net = slim.fully_connected(net, 300, activation_fn=tf.nn.so
|
ftplus, scope="fc_0")
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1")
zmean = slim.fully_connected(net, z_dim, activation_fn=None)
log_zstd = slim.fully_connected(net, z_dim, activation_fn=None)
a = slim.fully_connected(net, a_dim, activation_fn=None)
return zmean, log_zstd, a
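# A minimal sketch of wiring this encoder up (config keys follow the lookups above;
# the input shape is illustrative only and not taken from this repository):
# config = {'df_dim': 64, 'z_dim': 32, 'iaf_a_dim': 32}
# x = tf.placeholder(tf.float32, [None, 28, 28, 1])
# zmean, log_zstd, a = encoder(x, config, is_training=True)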
|
gupon/ConnectorC4D
|
ConnectorC4D.py
|
Python
|
mit
| 6,128 | 0.046671 |
"""
2015 gupon.jp
Connector for C4D Python Generator
"""
import c4d, math, itertools, random
from c4d.modules import mograph as mo
#userdata id
ID_SPLINE_TYPE = 2
ID_SPLINE_CLOSED = 4
ID_SPLINE_INTERPOLATION = 5
ID_
|
SPLINE_SUB = 6
ID_SPLINE_ANGLE = 8
ID_SPLINE_MAXIMUMLENGTH = 9
ID_USE_SCREEN_DIST = 10
ID_USE_MAXSEG = 15
ID_MAXSEG_NUM = 13
ID_USE_CENTER = 19
ID_CENTER_OBJ = 18
class Point:
def __init__(self, p):
self.world = p
sel
|
f.screen = c4d.Vector(0)
def calc2D(self, bd):
self.screen = bd.WS(self.world)
self.screen.z = 0
class PointGroup:
def __init__(self):
self.points = []
def AddPoint(self, point):
self.points.append(Point(point))
def Make2DPoints(self):
bd = doc.GetRenderBaseDraw()
for point in self.points:
point.calc2D(bd)
def MakeCombsWith(self, target):
combs = []
for pA in self.points:
for pB in target.points:
combs.append([pA, pB])
return combs
def MakeCombsInOrder(self):
combs = []
for i,pA in enumerate(self.points):
if i == len(self.points)-1:
combs.append([pA, self.points[0]])
else:
combs.append([pA, self.points[i+1]])
return combs
def GetPoint(self, index):
return self.points[index]
def GetAllPoints(self):
return self.points
def GetNumPoints(self):
return len(self.points)
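# Illustrative use of PointGroup (hypothetical values; inside Cinema 4D the points
# would normally come from object matrices as in GetPointsFromObjects below):
# group = PointGroup()
# group.AddPoint(c4d.Vector(0, 100, 0))
# group.AddPoint(c4d.Vector(100, 0, 0))
# pairs = group.MakeCombsInOrder()  # consecutive pairs, wrapping back to the first point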
def SetSplineGUI():
UD = op.GetUserDataContainer()
intermediatePoints = op[c4d.ID_USERDATA, ID_SPLINE_INTERPOLATION]
for id, bc in UD:
if id[1].id == ID_SPLINE_SUB:
if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_NATURAL \
or intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_UNIFORM:
bc[c4d.DESC_HIDE] = False
else:
bc[c4d.DESC_HIDE] = True
if id[1].id == ID_SPLINE_ANGLE:
if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_ADAPTIVE \
or intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_SUBDIV:
bc[c4d.DESC_HIDE] = False
else:
bc[c4d.DESC_HIDE] = True
if id[1].id == ID_SPLINE_MAXIMUMLENGTH:
if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_SUBDIV:
bc[c4d.DESC_HIDE] = False
else:
bc[c4d.DESC_HIDE] = True
if id[1].id == ID_MAXSEG_NUM:
bc[c4d.DESC_HIDE] = not op[c4d.ID_USERDATA, ID_USE_MAXSEG]
if id[1].id == ID_CENTER_OBJ:
bc[c4d.DESC_HIDE] = not op[c4d.ID_USERDATA, ID_USE_CENTER]
op.SetUserDataContainer(id, bc)
def SetSplineAttributes(obj):
obj[c4d.SPLINEOBJECT_TYPE] = op[c4d.ID_USERDATA, ID_SPLINE_TYPE]
obj[c4d.SPLINEOBJECT_CLOSED] = op[c4d.ID_USERDATA, ID_SPLINE_CLOSED]
obj[c4d.SPLINEOBJECT_INTERPOLATION] = op[c4d.ID_USERDATA, ID_SPLINE_INTERPOLATION]
obj[c4d.SPLINEOBJECT_SUB] = op[c4d.ID_USERDATA, ID_SPLINE_SUB]
obj[c4d.SPLINEOBJECT_ANGLE] = op[c4d.ID_USERDATA, ID_SPLINE_ANGLE]
obj[c4d.SPLINEOBJECT_MAXIMUMLENGTH] = op[c4d.ID_USERDATA, ID_SPLINE_MAXIMUMLENGTH]
obj.Message(c4d.MSG_UPDATE)
def GetPointsFromObjects(targetList):
step = op[c4d.ID_USERDATA, 12]
# add every points to list
pointGroups = []
baseMg = op.GetMg()
for target in targetList:
if target != None :
group = PointGroup()
moData = mo.GeGetMoData(target)
if moData==None:
group.AddPoint(target.GetMg().off * ~baseMg)
else:
if not moData.GetCount():
continue
moList = moData.GetArray(c4d.MODATA_MATRIX)
clonerMg = target.GetMg()
for i,data in enumerate(moList):
if i % step == 0:
group.AddPoint(data.off * clonerMg * ~baseMg)
pointGroups.append(group)
return pointGroups
def SetCombinations(pointGroups, obj):
bd = doc.GetRenderBaseDraw()
maxDist = op[c4d.ID_USERDATA, 1]
excludeSame = op[c4d.ID_USERDATA, 11]
maxSegNum = op[c4d.ID_USERDATA, 13]
useMaxSeg = op[c4d.ID_USERDATA, 15]
useCenter = op[c4d.ID_USERDATA, ID_USE_CENTER]
useScreenDist = op[c4d.ID_USERDATA, 10]
if useScreenDist:
for group in pointGroups:
group.Make2DPoints()
frame = bd.GetSafeFrame()
baseLength = frame["cr"] - frame["cl"]
maxDist = baseLength * maxDist/1000
_combs = []
inOrder = False
# if inOrder:
# for group in pointGroups:
# _combs = _combs + group.MakeCombsInOrder()
if useCenter:
target = op[c4d.ID_USERDATA, ID_CENTER_OBJ]
if target:
pA = Point(target.GetMg().off * ~op.GetMg())
for group in pointGroups:
for pB in group.GetAllPoints():
_combs.append([pA, pB])
else:
print "no target found"
return
else:
if excludeSame:
numGroups = len(pointGroups)
for i in range(numGroups-1):
groupA = pointGroups[i]
for j in range(i+1, numGroups):
groupB = pointGroups[j]
_combs = _combs + groupA.MakeCombsWith(groupB)
else:
allPoints = []
for group in pointGroups:
allPoints = allPoints + group.GetAllPoints()
numPoints = len(allPoints)
for i in range(numPoints-1):
for j in range(i+1, numPoints):
_combs.append([allPoints[i], allPoints[j]])
combs = []
for comb in _combs:
v0 = comb[0].screen if useScreenDist else comb[0].world
v1 = comb[1].screen if useScreenDist else comb[1].world
if c4d.Vector(v1 - v0).GetLength() < maxDist:
combs.append(comb)
random.shuffle(combs)
obj.ResizeObject(len(combs) * 2)
for i, comb in enumerate(combs):
a = comb[0].world
b = comb[1].world
addP = True
if useMaxSeg:
if maxSegNum:
acnt = 0
bcnt = 0
for p in obj.GetAllPoints():
if p == a: acnt += 1
if p == b: bcnt += 1
if acnt >= maxSegNum or bcnt >= maxSegNum:
addP = False
break
else:
addP = False
if addP:
obj.SetPoint(i * 2 + 0, a)
obj.SetPoint(i * 2 + 1, b)
obj.MakeVariableTag(c4d.Tsegment, len(combs))
for i in range(len(combs)):
obj.SetSegment(i, 2, False)
def main():
random.seed(100)
obj = c4d.BaseObject(c4d.Ospline)
targetListData = op[c4d.ID_USERDATA, 3]
numTargets = targetListData.GetObjectCount()
if numTargets < 1:
return obj
targetList = []
for i in range(numTargets):
targetList.append(targetListData.ObjectFromIndex(doc, i))
pointGroups = GetPointsFromObjects(targetList)
if len(pointGroups) < 1:
return obj
SetCombinations(pointGroups, obj)
SetSplineGUI()
SetSplineAttributes(obj)
return obj
|
grlurton/hiv_retention_metrics
|
src/models/cohort_analysis_function.py
|
Python
|
mit
| 4,874 | 0.008822 |
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
#### Utilities
def get_first_visit_date(data_patient):
''' Determines the first visit for a given patient'''
#IDEA Could be parallelized in Dask
data_patient['first_visit_date'] = min(data_patient.visit_date)
return data_patient
def subset_analysis_data(data, date_analysis):
''' Function that subsets the full dataset to only the data available for a certain analysis date'''
if type(data.date_entered.iloc[0]) is str :
data.date_entered = pd.to_datetime(data.date_entered)
data = data[data.date_entered < date_analysis]
return data
def subset_cohort(data, horizon_date, horizon_time, bandwidth):
    ''' Function that subsets data to the cohort of patients who initiated care between horizon_time + bandwidth and horizon_time days before horizon_date '''
horizon_date = pd.to_datetime(horizon_date)
data['first_visit_date'] = pd.to_datetime(data['first_visit_date'])
cohort_data = data[(data['first_visit_date'] >= horizon_date - relativedelta(days=horizon_time + bandwidth)) &
(data['first_visit_date'] < horizon_date - relativedelta(days=horizon_time))]
return cohort_data
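# For example, with horizon_date='2018-01-01', horizon_time=365 and bandwidth=30,
# this keeps patients whose first visit fell roughly between 2016-12-02 and
# 2017-01-01 (illustrative dates only).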
#### Standard reporting
def status_patient(data_patient, reference_date, grace_period):
''' Determines the status of a patient at a given reference_date, given the data available at a given analysis_date
TODO Also select the available data for Death and Transfer and other outcomes based on data entry time
'''
#IDEA Could be par
|
allelized in Dask
data_patient = get_first_visit_date(data_patient)
date_out = pd.NaT
date_last_appointment = pd.to_datetime(max(data_patient.next_visit_date))
late_time = reference_date - date_last_appointment
if late_time.days >
|
grace_period:
status = 'LTFU'
date_out = date_last_appointment
if late_time.days <= grace_period:
status = 'Followed'
if (data_patient.reasonDescEn.iloc[0] is not np.nan) & (pd.to_datetime(data_patient.discDate.iloc[0]) < reference_date):
status = data_patient.reasonDescEn.iloc[0]
date_out = pd.to_datetime(data_patient.discDate.iloc[0])
return pd.DataFrame([{'status': status,
'late_time': late_time,
'last_appointment': date_last_appointment,
'date_out':date_out ,
'first_visit_date':data_patient.first_visit_date.iloc[0],
'facility':data_patient.facility.iloc[0]}])
def horizon_outcome(data_cohort, reference_date, horizon_time):
# TODO Make sure dates are dates
data_cohort['first_visit_date'] = pd.to_datetime(data_cohort['first_visit_date']) #TODO This conversion should happen earlier
data_cohort.loc[:, 'horizon_date'] = data_cohort['first_visit_date'] + np.timedelta64(horizon_time, 'D')
data_cohort.loc[: , 'horizon_status'] = data_cohort['status']
# If the patient exited the cohort after his horizon date, still consider him followed
# BUG This is marginally invalid, for example if a patient was considered LTFU before he died
data_cohort.horizon_status[~(data_cohort['status'] == 'Followed') & (data_cohort['date_out'] > data_cohort['horizon_date'])] = 'Followed'
return data_cohort
## Transversal description only
def n_visits(data, month):
reporting_month = pd.to_datetime(data['visit_date']).dt.to_period('M')
n_vis = sum(reporting_month == month)
return n_vis
def make_report(data, reference_date, date_analysis, grace_period, horizon_time, cohort_width):
assert reference_date <= date_analysis, 'You should not analyze a period before you have the data (date of analysis is before reference date)'
if type(reference_date) is str :
reference_date = pd.to_datetime(reference_date)
if type(date_analysis) is str:
date_analysis = pd.to_datetime(date_analysis)
report_data = subset_analysis_data(data, date_analysis)
if len(report_data) > 0:
month = reference_date.to_period('M') - 1
n_visits_month = report_data.groupby('facility').apply(n_visits, month)
df_status = report_data.groupby('patient_id').apply(status_patient, reference_date, 90)
cohort_data = subset_cohort(df_status, reference_date, horizon_time, cohort_width)
# print(df_status.head())
horizon_outcome_data = horizon_outcome(cohort_data, month, 365)
transversal_reports = df_status.groupby('facility').status.value_counts()
longitudinal_reports = horizon_outcome_data.groupby('facility').status.value_counts()
out_reports = {'transversal':transversal_reports,
'longitudinal':longitudinal_reports,
'n_visits':n_visits_month}
return out_reports
# QUESTION What are the form_types
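# Example call (illustrative; the input DataFrame is assumed to carry the columns
# used above, e.g. 'patient_id', 'visit_date', 'next_visit_date', 'date_entered',
# 'facility', 'reasonDescEn' and 'discDate'):
# reports = make_report(visits, reference_date='2018-06-01', date_analysis='2018-07-01',
#                       grace_period=90, horizon_time=365, cohort_width=30)
# reports['transversal']   # per-facility counts of current patient status
# reports['longitudinal']  # per-facility outcomes for the one-year cohort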
|
MikeLing/shogun
|
examples/undocumented/python/graphical/classifier_perceptron_graphical.py
|
Python
|
gpl-3.0
| 2,302 | 0.032146 |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
from shogun import RealFeatures, BinaryLabels
from shogun import Perceptron
from shogun import MSG_INFO
# 2D data
_DIM = 2
# To get the nice message that the perceptron has converged
dummy = BinaryLabels()
dummy.io.set_loglev
|
el(MSG_INFO)
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(
|
_DIM,n))+distance
Y = np.array(np.random.randn(_DIM,n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X,Y))
feats_train = RealFeatures(fm_train_real)
labels = BinaryLabels(label_train_twoclass)
perceptron = Perceptron(feats_train, labels)
perceptron.set_learn_rate(learn_rate)
perceptron.set_max_iter(max_iter)
perceptron.set_initialize_hyperplane(False)
# Find limits for visualization
x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
for i in xrange(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.set_w(np.random.random(2))
perceptron.set_bias(np.random.random())
# Run the perceptron algorithm
perceptron.train()
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get_bias()
w = perceptron.get_w()
hx = np.linspace(x_min-1,x_max+1)
hy = -w[1]/w[0] * hx
plt.plot(hx, -1/w[1]*(w[0]*hx+b))
# Plot the two-class data
plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
if __name__=='__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
|
spacy-io/spaCy
|
spacy/lang/ru/__init__.py
|
Python
|
mit
| 905 | 0.001105 |
from typing import Optional
from thinc.api import Model
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import RussianLemmatizer
from ...language import Language
class RussianDefaults(Language.Defaults):
tokenizer_exceptions = TOK
|
ENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = S
|
TOP_WORDS
class Russian(Language):
lang = "ru"
Defaults = RussianDefaults
@Russian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "pymorphy2", "overwrite": False},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
):
return RussianLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)
__all__ = ["Russian"]
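# Typical usage (a sketch; the pymorphy2 package must be installed for the default
# "pymorphy2" lemmatizer mode registered above):
# import spacy
# nlp = spacy.blank("ru")
# nlp.add_pipe("lemmatizer", config={"mode": "pymorphy2"})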
|
WindowsPhoneForensics/find_my_texts_wp8
|
find_my_texts_wp8/wp8_sms_integrated.py
|
Python
|
gpl-3.0
| 24,866 | 0.004987 |
#! /usr/bin/env python
# Python script to parse SMStext messages from a Windows 8.0 phone's store.vol file
# Author: cheeky4n6monkey@gmail.com (Adrian Leong)
#
# Special Thanks to Detective Cindy Murphy (@cindymurph) and the Madison, WI Police Department (MPD)
# for the test data and encouragement.
# Thanks also to JoAnn Gibb (Ohio Attorney Generals Office) and Brian McGarry (Garda) for providing testing
# data/feedback.
#
# WARNING: This program is provided "as-is" and has been tested with 2 types of Windows Phone 8.0
# (Nokia Lumia 520, HTC PM23300)
# See http://cheeky4n6monkey.blogspot.com/ for further details.
# Copyright (C) 2014, 2015 Adrian Leong (cheeky4n6monkey@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You can view the GNU General Public License at <http://www.gnu.org/licenses/>
"""
Data Notes:
===========
\\Users\WPCOMMSSERVICES\APPDATA\Local\Unistore\store.vol contains SMS text messages, contact and limited MMS
information.
\\Users\WPCOMMSSERVICES\APPDATA\Local\UserData\Phone contains call log information.
\SharedData\Comms\Unistore\data contains various .dat files for MMS messages
From analysis of MPD store.vol test data (Nokia 520 Windows 8 phone) there are two areas of focus (tables?) for SMS data
Area 1 = The "SMStext" content area. Each SMS message has its own record within this area.
Each content record seems to follow one of these structures:
[?][FILETIME1][?][FILETIME2][?][PHONE0][[1 byte]["IPM.SMStext" string][1 byte][PHONE1][1 byte][PHONE2][1 byte][PHONE3][1 byte][Received Message][?][FILETIME3][?][FILETIME4]
or
[?][FILE
|
TIME1][?][FILETIME2][?]["IPM.SMStext" string][1 byte][Sent Message][?][FILETIME3][?][FILETIME4]
? = unknown / varying number of bytes
All strings are Unicode UTF-16-LE and null terminated
FILETIMEs are 8 byte LE and record the number of 100 ns intervals since 1 JAN 1601 (ie MS FILETIME)
For MPD test data, there seems to consistently be:
0xBF bytes between FILETIME
|
2 and "SMStext" for Sent SMS (0xB7 bytes between start of "IPM.SMStext" and start of
FILETIME2)
0xEA bytes between FILETIME2 and "SMStext" for Recvd SMS (subject to length of PHONE0)
For the supplied OHIO data, There seems to consistently be:
0xB4 bytes between FILETIME2 and "SMStext" for Sent SMS
0xDF bytes between FILETIME2 and "SMStext" for Recvd SMS (subject to length of PHONE0)
CHECK YOUR DATA OFFSETS! They will probably vary between phones / data sets.
Unfortunately, sent SMS does not record the destination phone number in Area 1 records.
For these, we need to check an area of store.vol we'll call Area 2. The records in Area 2 look like:
[?][FILETIMEX][0x1B bytes]["SMS" string][1 byte][PHONEX][?]
Note: the Area 2 record formats seemed consistent between the Nokia 520 and HTC phones.
FILETIMEX value seems to correspond exactly to an Area 1 record's FILETIME2 field.
So we might be able to find out the destination number of a sent SMS by doing a search of Area2 fields for a specific
FILETIMEX value.
This seems to work well with our MPD test data.
Program Notes:
==============
Given a specified input store.vol and output TSV filename, this script will
- Search for "SMStext" entries (in Area 1 ie "Message" table) and store the sent/recvd direction, FILETIME2, Text
message, Offset of the Text Message and PHONE1.
- For any sent SMS, it will also look up the destination phone number (in Area 2 ie "Recipient" table) using
FILETIME2 / FILETIMEX as a key.
- Print out results to a nominated Tab Separated Variable file format (screen output is not typically large enough)
Known Issues:
- Offsets might have to be adjusted between phones/datasets particularly between the start of FILETIME2 and the start
of "SMStext".
This script version tries an experimental method of calculating the offset so the user doesn't have to
(theoretically).
- There may be unprintable characters in null term string fields AFTER the NULL but before the 0x1 field marker. Added
goto_next_field function to handle these.
- If the script does not detect Unicode digits 0x11 bytes before the start of "SMStext", it ass-umes that the message is
a Sent SMS (ie no numbers). This also means that SMS with one/two digit phone numbers might not be identified
correctly as received.
Change history:
v2014-08-30:
- Revised for non-printable characters appearing after the null in nullterm unicode strings but before the 0x1.
- Assumes each field is demarcated by 0x01 bytes.
- Also adjusted the max offset range for Sent SMS FILETIME2 based on test data. Increased it to 0xEA (from 0xC4).
v2014-09-01:
- Changed logic so that if we don't see Unicode digits before "SMStext", the script assumes the message is a Sent SMS
(no numbers).
- Decreased Sent SMS "find_timestamp" min parameter based on 1SEP data to x7D (from 0xAF)
v2014-09-05:
- Added trace output for when the script skips record extractions (ie when it can't find/read fields)
- Adjusted minimum "find_timestamp" parameters based on MPD log data to 0x9B for received SMS
v2014-09-29:
- Modified read_nullterm_unistring so it returns whatever valid characters it has read on a bad read exception.
Previously, it was returning an empty string. This was done to handle emoticons ...
v2014-10-05:
- Renamed script from "win8sms-ex2.py" to "wp8-find_my_texts_wp8.py"
v2015-07-10:
- Changed script to search for hex strings in chunks of CHUNK_SIZE rather than in one big read
(makes it quicker when running against whole .bin files). Thanks to Boss Rob :)
v2015-07-12:
- Removed "all_indices" function which was commented out in previous version
- Adjusted some comments
"""
import struct
import sys
import string
import datetime
import codecs
from optparse import OptionParser
import os
__author__ = 'Adrian Leong'
__version__ = "wp8_sms_integrated.py v2015-07-12(modified)"
def read_nullterm_unistring(f):
"""
    Read in unicode chars one at a time until a null char ie "0x00 0x00".
    On a bad read this returns whatever valid characters were read so far (see the
    v2014-09-29 change note above); otherwise it filters out returns/newlines and
    returns the string read.
    :param f: open file object positioned at the start of the UTF-16-LE string
    :type f: file
    :return: the decoded string (possibly partial)
    :rtype: str
    """
readstrg = ""
terminated_flag = True
unprintablechars = False
begin = f.tell()
while (terminated_flag):
try:
# print "char at " + hex(f.tell()).rstrip("L")
readchar = f.read(1)
if (ord(readchar) == 0): # bailout if null char
terminated_flag = False
if (terminated_flag):
if (readchar in string.printable) and (readchar != "\r") and (readchar != "\n"):
readstrg += readchar
else:
readstrg += " "
unprintablechars = True
# print "unprintable at " + hex(f.tell()-1).rstrip("L")
except (IOError, ValueError):
print ("Warning ... bad unicode string at offset " + hex(begin).rstrip("L"))
exctype, value = sys.exc_info()[:2]
print ("Exception type = ", exctype, ", value = ", value)
# readstrg = ""
return readstrg # returns partial strings
if (unprintablechars):
print ("String substitution(s) due to unrecognized/unprintable characters at " + hex(begin).rstrip("L"))
return readstrg
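# Note on the FILETIME handling below: converting an 8-byte FILETIME value to a unix
# epoch uses the standard offset of 116444736000000000 (the number of 100 ns intervals
# between 1 Jan 1601 and 1 Jan 1970), i.e. roughly:
#   unix_seconds = (mstime - 116444736000000000) / 10000000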
def read_filetime(f):
"""
Author - Adrian Leong
Read in 8 byte MS FILETIME (number of 100 ns since 1 Jan 1601) and
Returns equivalent unix epoch offset or 0 on error
:param f:
:type f:
:return:
:rtype:
"""
begin = f.tell()
try:
# print "time at offset: " + str(begin)
mstime = struct.unpack('<Q', f.read(8))[0]
except struct.error:
print ("Bad FILETIME extraction at " + hex(begin).rstrip("L"))
|
jawilson/home-assistant
|
homeassistant/components/utility_meter/__init__.py
|
Python
|
apache-2.0
| 7,390 | 0.000947 |
"""Support for tracking consumption over given periods of time."""
from datetime import timedelta
import logging
from croniter import croniter
import voluptuous as vol
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import CONF_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_TARIFF,
CONF_CRON_PATTERN,
CONF_METER,
CONF_METER_DELTA_VALUES,
CONF
|
_METER_NET_CONSUMPTION,
CONF_METER_OFFSET,
CONF_METER_TYPE,
CONF_SOURCE_SENSOR,
CONF_TARIFF,
CONF_TARIFF_ENTITY,
CONF_TARIFFS,
DATA_TARIFF_SENSORS,
DATA_UTILITY,
DOMAIN,
METER_TYPES,
SERVICE_RESET,
SERVICE_SELECT_NEXT_TARIFF,
SERVICE_SELECT_TARIFF,
SIGNAL_RESET_METER,
)
_LOGGER = logging.getLogger(__name__)
TARIFF_ICON = "mdi:clock-outline"
ATTR_TARIFFS = "tariffs
|
"
DEFAULT_OFFSET = timedelta(hours=0)
def validate_cron_pattern(pattern):
"""Check that the pattern is well-formed."""
if croniter.is_valid(pattern):
return pattern
raise vol.Invalid("Invalid pattern")
def period_or_cron(config):
"""Check that if cron pattern is used, then meter type and offsite must be removed."""
if CONF_CRON_PATTERN in config and CONF_METER_TYPE in config:
raise vol.Invalid(f"Use <{CONF_CRON_PATTERN}> or <{CONF_METER_TYPE}>")
if (
CONF_CRON_PATTERN in config
and CONF_METER_OFFSET in config
and config[CONF_METER_OFFSET] != DEFAULT_OFFSET
):
raise vol.Invalid(
f"When <{CONF_CRON_PATTERN}> is used <{CONF_METER_OFFSET}> has no meaning"
)
return config
def max_28_days(config):
"""Check that time period does not include more then 28 days."""
if config.days >= 28:
raise vol.Invalid(
"Unsupported offset of more then 28 days, please use a cron pattern."
)
return config
METER_CONFIG_SCHEMA = vol.Schema(
vol.All(
{
vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_METER_TYPE): vol.In(METER_TYPES),
vol.Optional(CONF_METER_OFFSET, default=DEFAULT_OFFSET): vol.All(
cv.time_period, cv.positive_timedelta, max_28_days
),
vol.Optional(CONF_METER_DELTA_VALUES, default=False): cv.boolean,
vol.Optional(CONF_METER_NET_CONSUMPTION, default=False): cv.boolean,
vol.Optional(CONF_TARIFFS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_CRON_PATTERN): validate_cron_pattern,
},
period_or_cron,
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.slug: METER_CONFIG_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
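# A minimal configuration.yaml entry accepted by this schema might look like the
# following (a sketch only; the exact option names come from the constants imported
# above, with 'source' and 'cycle' assumed for CONF_SOURCE_SENSOR and CONF_METER_TYPE):
#
# utility_meter:
#   monthly_energy:
#     source: sensor.energy_total
#     cycle: monthly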
async def async_setup(hass, config):
"""Set up an Utility Meter."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[DATA_UTILITY] = {}
register_services = False
for meter, conf in config.get(DOMAIN).items():
_LOGGER.debug("Setup %s.%s", DOMAIN, meter)
hass.data[DATA_UTILITY][meter] = conf
hass.data[DATA_UTILITY][meter][DATA_TARIFF_SENSORS] = []
if not conf[CONF_TARIFFS]:
# only one entity is required
hass.async_create_task(
discovery.async_load_platform(
hass,
SENSOR_DOMAIN,
DOMAIN,
[{CONF_METER: meter, CONF_NAME: conf.get(CONF_NAME, meter)}],
config,
)
)
else:
# create tariff selection
await component.async_add_entities(
[TariffSelect(meter, list(conf[CONF_TARIFFS]))]
)
hass.data[DATA_UTILITY][meter][CONF_TARIFF_ENTITY] = "{}.{}".format(
DOMAIN, meter
)
# add one meter for each tariff
tariff_confs = []
for tariff in conf[CONF_TARIFFS]:
tariff_confs.append(
{
CONF_METER: meter,
CONF_NAME: f"{meter} {tariff}",
CONF_TARIFF: tariff,
}
)
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, tariff_confs, config
)
)
register_services = True
if register_services:
component.async_register_entity_service(SERVICE_RESET, {}, "async_reset_meters")
component.async_register_entity_service(
SERVICE_SELECT_TARIFF,
{vol.Required(ATTR_TARIFF): cv.string},
"async_select_tariff",
)
component.async_register_entity_service(
SERVICE_SELECT_NEXT_TARIFF, {}, "async_next_tariff"
)
return True
class TariffSelect(RestoreEntity):
"""Representation of a Tariff selector."""
def __init__(self, name, tariffs):
"""Initialize a tariff selector."""
self._name = name
self._current_tariff = None
self._tariffs = tariffs
self._icon = TARIFF_ICON
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
if self._current_tariff is not None:
return
state = await self.async_get_last_state()
if not state or state.state not in self._tariffs:
self._current_tariff = self._tariffs[0]
else:
self._current_tariff = state.state
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_tariff
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {ATTR_TARIFFS: self._tariffs}
async def async_reset_meters(self):
"""Reset all sensors of this meter."""
_LOGGER.debug("reset meter %s", self.entity_id)
async_dispatcher_send(self.hass, SIGNAL_RESET_METER, self.entity_id)
async def async_select_tariff(self, tariff):
"""Select new option."""
if tariff not in self._tariffs:
_LOGGER.warning(
"Invalid tariff: %s (possible tariffs: %s)",
tariff,
", ".join(self._tariffs),
)
return
self._current_tariff = tariff
self.async_write_ha_state()
async def async_next_tariff(self):
"""Offset current index."""
current_index = self._tariffs.index(self._current_tariff)
new_index = (current_index + 1) % len(self._tariffs)
self._current_tariff = self._tariffs[new_index]
self.async_write_ha_state()
|
EnigmaBridge/ebstall.py
|
ebstall/osutil.py
|
Python
|
mit
| 18,396 | 0.002174 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import logging
import os
import platform
import re
import subprocess
import types
import util
import json
from ebstall.versions import Version
from ebstall.util import normalize_string
logger = logging.getLogger(__name__)
CLI_DEFAULTS_DEFAULT = dict(
packager='source'
)
CLI_DEFAULTS_DEBIAN = dict(
packager='apt-get'
)
CLI_DEFAULTS_CENTOS = dict(
packager='yum'
)
CLI_DEFAULTS_DARWIN = dict(
packager='source'
)
FLAVORS = {
'debian': 'debian',
'ubuntu': 'debian',
'kubuntu': 'debian',
'kali': 'debian',
'centos': 'redhat',
'centos linux': 'redhat',
'fedora': 'redhat',
'red hat enterprise linux server': 'redhat',
'rhel': 'redhat',
'amazon': 'redhat',
'amzn': 'redhat',
'gentoo': 'gentoo',
'gentoo base system': 'gentoo',
'darwin': 'darwin',
'opensuse': 'suse',
'suse': 'suse',
}
CLI_DEFAULTS = {
"default": CLI_DEFAULTS_DEFAULT,
"debian": CLI_DEFAULTS_DEBIAN,
"ubuntu": CLI_DEFAULTS_DEBIAN,
"centos": CLI_DEFAULTS_CENTOS,
"centos linux": CLI_DEFAULTS_CENTOS,
"fedora": CLI_DEFAULTS_CENTOS,
"red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
"rhel": CLI_DEFAULTS_CENTOS,
"amazon": CLI_DEFAULTS_CENTOS,
"amzn": CLI_DEFAULTS_CENTOS,
"gentoo": CLI_DEFAULTS_DEFAULT,
"gentoo base system": CLI_DEFAULTS_DEFAULT,
"darwin": CLI_DEFAULTS_DARWIN,
"opensuse": CLI_DEFAULTS_DEFAULT,
"suse": CLI_DEFAULTS_DEFAULT,
}
"""CLI defaults."""
# Start system
START_INITD = 'init.d'
ST
|
ART_SYSTEMD = 'systemd'
# Pkg manager
PKG_YUM = 'yum'
PKG_APT = 'apt-get'
FAMILY_REDHAT = 'redhat'
FAMILY_DEBIAN = 'debian'
# redhat / debian
YUMS = ['redhat', 'fedora', 'ce
|
ntos', 'rhel', 'amzn', 'amazon']
DEBS = ['debian', 'ubuntu', 'kali']
class OSInfo(object):
"""OS information, name, version, like - similarity"""
def __init__(self, name=None, version=None, version_major=None, like=None, family=None,
packager=None, start_system=None, has_os_release=False, fallback_detection=False, long_name=None,
*args, **kwargs):
self.name = name
self.long_name = long_name
self.version_major = version_major
self.version = version
self.like = like
self.family = family
self.packager = packager
self.start_system = start_system
self.has_os_release = has_os_release
self.fallback_detection = fallback_detection
def __str__(self):
return 'OSInfo(%r)' % json.dumps(self.to_json())
def __repr__(self):
return 'OSInfo(%r)' % json.dumps(self.to_json())
def to_json(self):
"""
Converts to the JSON
:return:
"""
js = collections.OrderedDict()
js['name'] = self.name
js['long_name'] = self.long_name
js['version_major'] = self.version_major
js['version'] = self.version
js['like'] = self.like
js['family'] = self.family
js['packager'] = self.packager
js['start_system'] = self.start_system
js['has_os_release'] = self.has_os_release
js['fallback_detection'] = self.fallback_detection
return js
class PackageInfo(object):
"""
Basic information about particular package
"""
def __init__(self, name, version, arch, repo, size=None, section=None):
self._version = None
self.name = name
self.version = version
self.arch = arch
self.repo = repo
self.size = size
self.section = section
@property
def version(self):
return self._version
@version.setter
def version(self, val):
self._version = Version(val)
def __str__(self):
return '%s-%s.%s' % (self.name, self.version, self.arch)
def __repr__(self):
return 'PackageInfo(name=%r, version=%r, arch=%r, repo=%r, size=%r, section=%r)' \
% (self.name, self.version, self.arch, self.repo, self.size, self.section)
def to_json(self):
"""
Converts to the JSON
:return:
"""
js = collections.OrderedDict()
js['name'] = self.name
js['version'] = str(self.version)
js['arch'] = self.arch
js['repo'] = self.repo
if self.size is not None:
js['size'] = self.size
if self.section is not None:
js['section'] = self.section
return js
@classmethod
def from_json(cls, js):
"""
Converts json dict to the object
:param js:
:return:
"""
obj = cls(name=js['name'], version=js['version'], arch=js['arch'], repo=js['repo'])
if 'size' in js:
obj.size = js['size']
if 'section' in js:
obj.section = js['section']
return obj
def get_os():
"""
Returns basic information about the OS.
:return: OSInfo
"""
# At first - parse os-release
ros = OSInfo()
os_release_path = '/etc/os-release'
if os.path.isfile(os_release_path):
ros.name = _get_systemd_os_release_var("ID", filepath=os_release_path)
ros.version = _get_systemd_os_release_var("VERSION_ID", filepath=os_release_path)
ros.like = _get_systemd_os_release_var("ID_LIKE", os_release_path).split(" ")
ros.long_name = _get_systemd_os_release_var("PRETTY_NAME", filepath=os_release_path)
ros.has_os_release = True
if not ros.long_name:
ros.long_name = _get_systemd_os_release_var("NAME", filepath=os_release_path)
# Try /etc/redhat-release and /etc/debian_version
if not ros.has_os_release or ros.like is None or ros.version is None or ros.name is None:
os_redhat_release(ros)
os_debian_version(ros)
os_issue(ros)
# like detection
os_like_detect(ros)
os_family_detect(ros)
# Major version
os_major_version(ros)
# Packager detection - yum / apt-get
os_packager(ros)
# Start system - init.d / systemd
os_start_system(ros)
return ros
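# Typical use (illustrative):
# info = get_os()
# if info.packager == PKG_YUM:
#     logger.info('Detected %s, installing packages with yum', info.long_name)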
def os_family_detect(ros):
"""
OS Family (redhat, debian, ...)
:param ros:
:return:
"""
if util.startswith(ros.like, YUMS):
ros.family = FAMILY_REDHAT
if util.startswith(ros.like, DEBS):
ros.family = FAMILY_DEBIAN
if ros.family is not None:
if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
ros.family = FAMILY_REDHAT
if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
ros.family = FAMILY_DEBIAN
return
def os_packager(ros):
if ros.like is not None:
if util.startswith(ros.like, YUMS):
ros.packager = PKG_YUM
if util.startswith(ros.like, DEBS):
ros.packager = PKG_APT
return ros
if ros.name is not None:
if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
ros.packager = PKG_YUM
if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
ros.packager = PKG_APT
return
if os.path.exists('/etc/yum'):
ros.packager = PKG_YUM
if os.path.exists('/etc/apt/sources.list'):
ros.packager = PKG_APT
def os_start_system(ros):
if os.path.exists('/etc/systemd'):
ros.start_system = START_SYSTEMD
else:
ros.start_system = START_INITD
return ros
def os_issue(ros):
if os.path.exists('/etc/issue'):
with open('/etc/issue', 'r') as fh:
issue = fh.readline().strip()
issue = re.sub(r'\\[a-z]', '', issue).strip()
match1 = re.match(r'^(.+?)\s+release\s+(.+?)$', issue, re.IGNORECASE)
match2 = re.match(r'^(.+?)\s+([0-9.]+)\s*(LTS)?$', issue, re.IGNORECASE)
if match1:
ros.long_name = match1.group(1).strip()
ros.version = match1.group(2).strip()
elif match2:
ros.long_name = match2.group(1).strip()
ros.version = match2.group(2).strip()
else:
ros.long_name = iss
|
wuga214/Django-Wuga
|
env/bin/viewer.py
|
Python
|
apache-2.0
| 1,056 | 0.000947 |
#!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
|
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="wh
|
ite")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|