text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
---|---|---|---|---|---|---
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from model import FreebieItem, Distributor, Contributor
import datetime
import logging
head = '''
<html>
<head>
<title>%s</title>
<script src="/static/sorttable.js"></script>
<style>
body {
background-color: #000000;
color: #FFFFFF;
}
input {
background-color: #000000;
color: #FF0000;
outline-color: #000000;
border-color: #FF0000;
}
table.sortable thead {
background-color:#202020;
color:#FFFFFF;
font-weight: bold;
cursor: default;
}
</style>
</head>
<body>
<b><a href="/freebielist/">Freebies</a> | <a href="/freebielist/distributors">Distributors</a> | <a href="/freebielist/contributors">Contributors</a></b><p>
'''
end = '''
</body>
</html>
'''
class Distributors(webapp.RequestHandler):
def get(self):
message = '''<h1>List of Distributors</h1>
<p>This lists all Distributors currently in the distribution system as of %s.</p>
<table class="sortable" border=\"1\">''' % datetime.datetime.utcnow().isoformat(' ')
message += '<tr><th>Row</th><th>Distributor</th><th>Key</th></tr><br />\n'
query = Distributor.gql("")
dists = []
for record in query:
s = '<td>%s</td><td>%s</td>\n' % (record.avname, record.avkey)
            if s not in dists:
                dists.append(s)
        for i, row in enumerate(dists, 1):
            message += '<tr><td>%d</td>%s' % (i, row)
message += "</table>"
self.response.out.write((head % 'Distributor List') + message + end)
class Contributors(webapp.RequestHandler):
def get(self):
message = '''<h1>List of Contributors</h1>
<p>This lists all Contributors currently in the distribution system as of %s.</p>
<table class="sortable" border=\"1\">''' % datetime.datetime.utcnow().isoformat(' ')
message += '<tr><th>Row</th><th>Contributor</th><th>Key</th></tr><br />\n'
query = Contributor.gql("")
dists = []
for record in query:
s = '<td>%s</td><td>%s</td>\n' % (record.avname, record.avkey)
            if s not in dists:
                dists.append(s)
        for i, row in enumerate(dists, 1):
            message += '<tr><td>%d</td>%s' % (i, row)
message += "</table>"
self.response.out.write((head % 'Contributor List') + message + end)
class MainPage(webapp.RequestHandler):
def get(self):
message = '''<h1>List of Freebie items</h1>
<p>This lists all items currently in the distribution system as of %s.</p>
<table class="sortable" border=\"1\">''' % datetime.datetime.utcnow().isoformat(' ')
message += '<tr><th>Row</th><th>Owner</th><th>Giver ID</th><th>Name</th><th>Version</th><th>Update Date</th><th>Distributor Location</th><th>Texture Key</th><th>Texture Server</th><th>Texture Updatetime</th></tr><br />\n'
query = FreebieItem.gql("")
content =[]
for record in query:
owner = record.freebie_owner
            if owner is None:
owner = '***Not assigned***'
            if record.freebie_texture_update is None:
i = -1
else:
i = record.freebie_texture_update
content += ['<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%d</td>\n' % (owner, record.freebie_giver, record.freebie_name, record.freebie_version, record.freebie_timedate, record.freebie_location, record.freebie_texture_key, record.freebie_texture_serverkey, i)]
content = sorted(content)
        for i, row in enumerate(content, 1):
            message += '<tr><td>%d</td>%s' % (i, row)
message += "</table>"
self.response.out.write((head % 'Freebie Items List') + message + end)
application = webapp.WSGIApplication([
(r'/.*?/distributors',Distributors),
(r'/.*?/contributors',Contributors),
('.*', MainPage)
],
debug=True)
def real_main():
run_wsgi_app(application)
def profile_main():
# This is the main function for profiling
# We've renamed our original main() above to real_main()
import cProfile, pstats, StringIO
prof = cProfile.Profile()
prof = prof.runctx("real_main()", globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
stats.sort_stats("time") # Or cumulative
stats.print_stats(80) # 80 = how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
logging.info("Profile data:\n%s", stream.getvalue())
if __name__ == "__main__":
profile_main()
| nirea/collardata | freebielist.py | Python | gpl-2.0 | 4,787 | 0.005849 |
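A note on the profile_main() wrapper in freebielist.py above: it relies on the Python 2 StringIO module, which only exists on the App Engine Python 2.7 runtime it targets. A rough Python 3 equivalent (a sketch, not part of the original repository) keeps the same cProfile/pstats flow but swaps in io.StringIO:

import cProfile
import io
import logging
import pstats

def profile_call(func):
    # Run func under cProfile and log the 20 most expensive entries,
    # sorted by internal time, mirroring profile_main() above.
    prof = cProfile.Profile()
    prof.runcall(func)
    stream = io.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    stats.sort_stats("time")
    stats.print_stats(20)
    logging.info("Profile data:\n%s", stream.getvalue())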
from PyQt5.QtDesigner import *
| ales-erjavec/anyqt | AnyQt/_backport/QtDesigner.py | Python | gpl-3.0 | 30 | 0.033333 |
# orm/persistence.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby, chain
from .. import sql, util, exc as sa_exc
from . import attributes, sync, exc as orm_exc, evaluator
from .base import state_str, _attr_as_key, _entity_descriptor
from ..sql import expression
from ..sql.base import _from_objects
from . import loading
def _bulk_insert(
mapper, mappings, session_transaction, isstates, return_defaults):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_insert()")
if isstates:
if return_defaults:
states = [(state, state.dict) for state in mappings]
mappings = [dict_ for (state, dict_) in states]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = (
(None, state_dict, params, mapper,
connection, value_params, has_all_pks, has_all_defaults)
for
state, state_dict, params, mp,
conn, value_params, has_all_pks,
has_all_defaults in _collect_insert_commands(table, (
(None, mapping, mapper, connection)
for mapping in mappings),
bulk=True, return_defaults=return_defaults
)
)
_emit_insert_statements(base_mapper, None,
cached_connections,
super_mapper, table, records,
bookkeeping=return_defaults)
if return_defaults and isstates:
identity_cls = mapper._identity_class
identity_props = [p.key for p in mapper._identity_key_props]
for state, dict_ in states:
state.key = (
identity_cls,
tuple([dict_[key] for key in identity_props])
)
def _bulk_update(mapper, mappings, session_transaction,
isstates, update_changed_only):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
def _changed_dict(mapper, state):
return dict(
(k, v)
for k, v in state.dict.items() if k in state.committed_state or k
in mapper._primary_key_propkeys
)
if isstates:
if update_changed_only:
mappings = [_changed_dict(mapper, state) for state in mappings]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_update()")
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = _collect_update_commands(None, table, (
(None, mapping, mapper, connection,
(mapping[mapper._version_id_prop.key]
if mapper._version_id_prop else None))
for mapping in mappings
), bulk=True)
_emit_update_statements(base_mapper, None,
cached_connections,
super_mapper, table, records,
bookkeeping=False)
def save_obj(
base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_update = []
states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
for (state, dict_, mapper, connection,
has_identity,
row_switch, update_version_id) in _organize_states_for_save(
base_mapper, states, uowtransaction
):
if has_identity or row_switch:
states_to_update.append(
(state, dict_, mapper, connection, update_version_id)
)
else:
states_to_insert.append(
(state, dict_, mapper, connection)
)
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
insert = _collect_insert_commands(table, states_to_insert)
update = _collect_update_commands(
uowtransaction, table, states_to_update)
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(
base_mapper, uowtransaction,
chain(
(
(state, state_dict, mapper, connection, False)
for state, state_dict, mapper, connection in states_to_insert
),
(
(state, state_dict, mapper, connection, True)
for state, state_dict, mapper, connection,
update_version_id in states_to_update
)
)
)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = list(_organize_states_for_post_update(
base_mapper,
states, uowtransaction))
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
update = (
(state, state_dict, sub_mapper, connection)
for
state, state_dict, sub_mapper, connection in states_to_update
if table in sub_mapper._pks_by_table
)
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, update,
post_update_cols)
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = list(_organize_states_for_delete(
base_mapper,
states,
uowtransaction))
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
mapper = table_to_mapper[table]
if table not in mapper._pks_by_table:
continue
elif mapper.inherits and mapper.passive_deletes:
continue
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, connection, \
update_version_id in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.was_already_deleted(existing):
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if (has_identity or row_switch) and mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
row_switch if row_switch else state,
row_switch.dict if row_switch else dict_,
mapper.version_id_col)
yield (state, dict_, mapper, connection,
has_identity, row_switch, update_version_id)
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
if mapper.version_id_col is not None:
update_version_id = \
mapper._get_committed_state_attr_by_column(
state, dict_,
mapper.version_id_col)
else:
update_version_id = None
yield (
state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
table, states_to_insert,
bulk=False, return_defaults=False):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
params = {}
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
eval_none = mapper._insert_cols_evaluating_none[table]
for propkey in set(propkey_to_col).intersection(state_dict):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if value is None and propkey not in eval_none:
continue
elif not bulk and isinstance(value, sql.ClauseElement):
value_params[col.key] = value
else:
params[col.key] = value
if not bulk:
for colkey in mapper._insert_cols_as_none[table].\
difference(params).difference(value_params):
params[colkey] = None
if not bulk or return_defaults:
has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_default_cols[table].\
issubset(params)
else:
has_all_defaults = True
else:
has_all_defaults = has_all_pks = True
if mapper.version_id_generator is not False \
and mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = \
mapper.version_id_generator(None)
yield (
state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults)
def _collect_update_commands(
uowtransaction, table, states_to_update,
bulk=False):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
for state, state_dict, mapper, connection, \
update_version_id in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
if bulk:
params = dict(
(propkey_to_col[propkey].key, state_dict[propkey])
for propkey in
set(propkey_to_col).intersection(state_dict).difference(
mapper._pk_keys_by_table[table])
)
has_all_defaults = True
else:
params = {}
for propkey in set(propkey_to_col).intersection(
state.committed_state):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if isinstance(value, sql.ClauseElement):
value_params[col] = value
# guard against values that generate non-__nonzero__
# objects for __eq__()
elif state.manager[propkey].impl.is_equal(
value, state.committed_state[propkey]) is not True:
params[col.key] = value
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_onupdate_default_cols[table].\
issubset(params)
else:
has_all_defaults = True
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
if not bulk and not (params or value_params):
# HACK: check for history in other tables, in case the
# history is only in a different table than the one
# where the version_id_col is. This logic was lost
# from 0.9 -> 1.0.0 and restored in 1.0.6.
for prop in mapper._columntoproperty.values():
history = (
state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE))
if history.added:
break
else:
# no net change, break
continue
col = mapper.version_id_col
params[col._label] = update_version_id
if (bulk or col.key not in params) and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
elif not (params or value_params):
continue
if bulk:
pk_params = dict(
(propkey_to_col[propkey]._label, state_dict.get(propkey))
for propkey in
set(propkey_to_col).
intersection(mapper._pk_keys_by_table[table])
)
else:
pk_params = {}
for col in pks:
propkey = mapper._columntoproperty[col].key
history = state.manager[propkey].impl.get_history(
state, state_dict, attributes.PASSIVE_OFF)
if history.added:
if not history.deleted or \
("pk_cascaded", state, col) in \
uowtransaction.attributes:
pk_params[col._label] = history.added[0]
params.pop(col.key, None)
else:
# else, use the old value to locate the row
pk_params[col._label] = history.deleted[0]
params[col.key] = history.added[0]
else:
pk_params[col._label] = history.unchanged[0]
if pk_params[col._label] is None:
raise orm_exc.FlushError(
"Can't update table %s using NULL for primary "
"key value on column %s" % (table, col))
if params or value_params:
params.update(pk_params)
yield (
state, state_dict, params, mapper,
connection, value_params, has_all_defaults)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
for state, state_dict, mapper, connection in states_to_update:
# assert table in mapper._pks_by_table
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col, passive=attributes.PASSIVE_OFF)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
yield params, connection
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
for state, state_dict, mapper, connection, \
update_version_id in states_to_delete:
if table not in mapper._pks_by_table:
continue
params = {}
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_committed_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table %s "
"using NULL for primary "
"key value on column %s" % (table, col))
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = update_version_id
yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update,
bookkeeping=True):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(
mapper.version_id_col == sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
return stmt
cached_stmt = base_mapper._memo(('update', table), update_stmt)
for (connection, paramkeys, hasvalue, has_all_defaults), \
records in groupby(
update,
lambda rec: (
rec[4], # connection
set(rec[2]), # set of parameter keys
bool(rec[5]), # whether or not we have "value" parameters
rec[6] # has_all_defaults
)
):
rows = 0
records = list(records)
statement = cached_stmt
# TODO: would be super-nice to not have to determine this boolean
# inside the loop here, in the 99.9999% of the time there's only
# one connection in use
assert_singlerow = connection.dialect.supports_sane_rowcount
assert_multirow = assert_singlerow and \
connection.dialect.supports_sane_multi_rowcount
allow_multirow = has_all_defaults and not needs_version_id
if bookkeeping and not has_all_defaults and \
mapper.base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
if hasvalue:
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
c = connection.execute(
statement.values(value_params),
params)
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
check_rowcount = True
else:
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
c = cached_connections[connection].\
execute(statement, params)
# TODO: why with bookkeeping=False?
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
check_rowcount = assert_multirow or (
assert_singlerow and
len(multiparams) == 1
)
c = cached_connections[connection].\
execute(statement, multiparams)
rows += c.rowcount
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(records), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert,
bookkeeping=True):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
cached_stmt = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(
insert,
lambda rec: (
rec[4], # connection
set(rec[2]), # parameter keys
bool(rec[5]), # whether we have "value" parameters
rec[6],
rec[7])):
statement = cached_stmt
if not bookkeeping or \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
if bookkeeping:
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
if state:
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
_postfetch_bulk_save(mapper_rec, state_dict, table)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
state_dict[prop.key] = pk
if bookkeeping:
if state:
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
else:
_postfetch_bulk_save(mapper_rec, state_dict, table)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (
rec[1], # connection
set(rec[0]) # parameter keys
)
):
connection = key[0]
multiparams = [params for params, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
statement = base_mapper._memo(('delete', table), delete_stmt)
for connection, recs in groupby(
delete,
lambda rec: rec[1] # connection
):
del_objects = [params for params, connection in recs]
connection = cached_connections[connection]
expected = len(del_objects)
rows_matched = -1
only_warn = False
if connection.dialect.supports_sane_multi_rowcount:
c = connection.execute(statement, del_objects)
if not need_version_id:
only_warn = True
rows_matched = c.rowcount
elif need_version_id:
if connection.dialect.supports_sane_rowcount:
rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
if base_mapper.confirm_deleted_rows and \
rows_matched > -1 and expected != rows_matched:
if only_warn:
util.warn(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
else:
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(state._unloaded_non_object)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
if mapper._version_id_prop.key in state.unloaded:
toload_now.extend([mapper._version_id_prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
returning_cols = result.context.compiled.returning
if mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
dict_[mapper._columntoproperty[col].key] = row[col]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[col].key)
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs)
if postfetch_cols:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _postfetch_bulk_save(mapper, dict_, table):
for m, equated_pairs in mapper._table_to_equated[table]:
sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
mapper = state.manager.mapper
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
self.mapper = self.query._bind_mapper()
self._validate_query_state()
def _validate_query_state(self):
for attr, methname, notset, op in (
('_limit', 'limit()', None, operator.is_),
('_offset', 'offset()', None, operator.is_),
('_order_by', 'order_by()', False, operator.is_),
('_group_by', 'group_by()', False, operator.is_),
('_distinct', 'distinct()', False, operator.is_),
(
'_from_obj',
'join(), outerjoin(), select_from(), or from_self()',
(), operator.eq)
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" %
(methname, )
)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
@util.dependencies("sqlalchemy.orm.query")
def _do_pre(self, querylib):
query = self.query
self.context = querylib.QueryContext(query)
if isinstance(query._entities[0], querylib._ColumnEntity):
# check for special case of query(table)
tables = set()
for ent in query._entities:
if not isinstance(ent, querylib._ColumnEntity):
tables.clear()
break
else:
tables.update(_from_objects(ent.column))
if len(tables) != 1:
raise sa_exc.InvalidRequestError(
"This operation requires only one Table or "
"entity be specified as the target."
)
else:
self.primary_table = tables.pop()
else:
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
target_cls = query._mapper_zero().class_
try:
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
# TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
context = query._compile_context()
select_stmt = context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
mapper=self.mapper,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values, update_kwargs):
super(BulkUpdate, self).__init__(query)
self.values = values
self.update_kwargs = update_kwargs
@classmethod
def factory(cls, query, synchronize_session, values, update_kwargs):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values, update_kwargs)
def _resolve_string_to_expr(self, key):
if self.mapper and isinstance(key, util.string_types):
attr = _entity_descriptor(self.mapper, key)
return attr.__clause_element__()
else:
return key
def _resolve_key_to_attrname(self, key):
if self.mapper and isinstance(key, util.string_types):
attr = _entity_descriptor(self.mapper, key)
return attr.property.key
elif isinstance(key, attributes.InstrumentedAttribute):
return key.key
elif hasattr(key, '__clause_element__'):
key = key.__clause_element__()
if self.mapper and isinstance(key, expression.ColumnElement):
try:
attr = self.mapper._columntoproperty[key]
except orm_exc.UnmappedColumnError:
return None
else:
return attr.key
else:
raise sa_exc.InvalidRequestError(
"Invalid expression type: %r" % key)
def _do_exec(self):
values = [
(self._resolve_string_to_expr(k), v)
for k, v in (
self.values.items() if hasattr(self.values, 'items')
else self.values)
]
if not self.update_kwargs.get('preserve_parameter_order', False):
values = dict(values)
update_stmt = sql.update(self.primary_table,
self.context.whereclause, values,
**self.update_kwargs)
self.result = self.query.session.execute(
update_stmt, params=self.query._params,
mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(
delete_stmt,
params=self.query._params,
mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
values = (self.values.items() if hasattr(self.values, 'items')
else self.values)
for key, value in values:
key = self._resolve_key_to_attrname(key)
if key is not None:
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
| sandan/sqlalchemy | lib/sqlalchemy/orm/persistence.py | Python | mit | 51,786 | 0.000019 |
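The persistence module above is internal to SQLAlchemy's unit of work; application code reaches these code paths through the public Session and Query APIs. A minimal sketch of how the entry points line up with the functions above (illustrative only: the User model and the in-memory SQLite engine are assumptions, and the calls shown are the 1.0/1.1-era API this file belongs to):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(bind=engine)

# Session.bulk_insert_mappings() feeds _bulk_insert() above.
session.bulk_insert_mappings(User, [{'name': 'a'}, {'name': 'b'}])

# Query.update() / Query.delete() run through the BulkUpdate / BulkDelete
# classes; synchronize_session selects the "evaluate", "fetch" or False
# strategy listed in their factory() methods.
session.query(User).filter(User.name == 'a').update(
    {User.name: 'z'}, synchronize_session='evaluate')
session.query(User).filter(User.name == 'z').delete(synchronize_session=False)
session.commit()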
#!/usr/bin/env python
import yaml
import pwd
import sys
import subprocess
import json
import os
__author__ = "Anoop P Alias"
__copyright__ = "Copyright Anoop P Alias"
__license__ = "GPL"
__email__ = "anoopalias01@gmail.com"
installation_path = "/opt/nDeploy" # Absolute Installation Path
if __name__ == "__main__":
# This script is mostly intended to be called from a cronjob
conf_list = os.listdir("/opt/nDeploy/hhvm.d")
for filename in conf_list:
cpaneluser, extension = filename.split('.')
        # if the user is not in /etc/passwd we don't proceed any further
try:
pwd.getpwnam(cpaneluser)
except KeyError:
sys.exit(0)
else:
# Update the userdata cache
            subprocess.call(['/scripts/updateuserdatacache', '--force', cpaneluser])
# Try loading the main userdata cache file
cpuserdatajson = "/var/cpanel/userdata/" + cpaneluser + "/main.cache"
with open(cpuserdatajson) as cpaneluser_data_stream:
json_parsed_cpaneluser = json.load(cpaneluser_data_stream)
main_domain = json_parsed_cpaneluser.get('main_domain')
# parked_domains = yaml_parsed_cpaneluser.get('parked_domains') # This data is irrelevant as parked domain list is in ServerAlias
# addon_domains_dict = json_parsed_cpaneluser.get('addon_domains') # So we know which addon is mapped to which sub-domain
sub_domains = json_parsed_cpaneluser.get('sub_domains')
# Since we have all domains now..check XtendWeb domain-data files for HHVM enabled
# Turn off HHVM if no domain using HHVM
hhvm_flag = False
with open(installation_path + "/domain-data/" + main_domain, 'r') as domain_data_stream:
yaml_parsed_domain_data = yaml.safe_load(domain_data_stream)
backend_category = yaml_parsed_domain_data.get('backend_category', None)
if backend_category == 'HHVM':
hhvm_flag = True
for the_sub_domain in sub_domains:
if the_sub_domain.startswith("*"):
subdom_config_dom = "_wildcard_."+the_sub_domain.replace('*.', '')
else:
subdom_config_dom = the_sub_domain
with open(installation_path + "/domain-data/" + subdom_config_dom, 'r') as domain_data_stream:
yaml_parsed_domain_data = yaml.safe_load(domain_data_stream)
backend_category = yaml_parsed_domain_data.get('backend_category', None)
if backend_category == 'HHVM':
hhvm_flag = True
if hhvm_flag is False:
# This means none of the domain has HHVM enabled and we can shut down HHVM for the user
subprocess.call(['systemctl', 'stop', 'ndeploy_hhvm@'+cpaneluser+'.service'])
subprocess.call(['systemctl', 'disable', 'ndeploy_hhvm@'+cpaneluser+'.service'])
if os.path.isfile(installation_path+"/conf/ndeploy_cluster.yaml"):
subprocess.call('ansible -i /opt/nDeploy/conf/nDeploy-cluster/hosts ndeployslaves -m systemd -a "name=ndeploy_hhvm@'+cpaneluser+'.service state=stopped enabled=no"', shell=True)
| AnoopAlias/XtendWeb | scripts/hhvm_ghost_hunter.py | Python | gpl-3.0 | 3,290 | 0.004863 |
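For context on hhvm_ghost_hunter.py above: each file under /opt/nDeploy/domain-data/ is a small YAML document, and backend_category is the only key the script inspects. A minimal sketch of the shape it expects (any keys beyond backend_category are an assumption, not taken from XtendWeb):

import yaml

# Hypothetical contents of /opt/nDeploy/domain-data/example.com
sample = "backend_category: HHVM\n"
parsed = yaml.safe_load(sample)
assert parsed.get('backend_category') == 'HHVM'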
class Corpus:
"""Interface for corpus
"""
def __init__(self):
pass
| rain1024/underthesea | underthesea/corpus/corpus.py | Python | gpl-3.0 | 87 | 0 |
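The Corpus class above is only an interface stub. A hypothetical subclass (the documents attribute and constructor argument are assumptions for illustration, not part of underthesea) might look like:

class PlainTextCorpus(Corpus):
    """Corpus backed by an in-memory list of plain-text documents."""
    def __init__(self, documents=None):
        self.documents = list(documents or [])

corpus = PlainTextCorpus(["first document", "second document"])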
import py
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC, WORD
from test_direct import BaseDirectGCTest
T = lltype.GcForwardReference()
T.become(lltype.GcStruct('pinning_test_struct2',
('someInt', lltype.Signed)))
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('pinning_test_struct1',
('someInt', lltype.Signed),
('next', lltype.Ptr(T)),
('data', lltype.Ptr(T))))
class PinningGCTest(BaseDirectGCTest):
def setup_method(self, meth):
BaseDirectGCTest.setup_method(self, meth)
max = getattr(meth, 'max_number_of_pinned_objects', 20)
self.gc.max_number_of_pinned_objects = max
def test_pin_can_move(self):
# even a pinned object is considered to be movable. Only the caller
# of pin() knows if it is currently movable or not.
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.can_move(adr)
assert self.gc.pin(adr)
assert self.gc.can_move(adr)
def test_pin_twice(self):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.pin(adr)
assert not self.gc.pin(adr)
def test_unpin_not_pinned(self):
# this test checks a requirement of the unpin() interface
ptr = self.malloc(S)
py.test.raises(Exception,
self.gc.unpin, llmemory.cast_ptr_to_adr(ptr))
def test__is_pinned(self):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert not self.gc._is_pinned(adr)
assert self.gc.pin(adr)
assert self.gc._is_pinned(adr)
self.gc.unpin(adr)
assert not self.gc._is_pinned(adr)
def test_prebuilt_not_pinnable(self):
ptr = lltype.malloc(T, immortal=True)
self.consider_constant(ptr)
assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
self.gc.collect()
assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
# XXX test with multiple mallocs, and only part of them is pinned
def test_random(self):
# scenario: create bunch of objects. randomly pin, unpin, add to
# stackroots and remove from stackroots.
import random
for i in xrange(10**3):
obj = self.malloc(T)
obj.someInt = 100
#
if random.random() < 0.5:
self.stackroots.append(obj)
print("+stack")
if random.random() < 0.5:
self.gc.pin(llmemory.cast_ptr_to_adr(obj))
print("+pin")
self.gc.debug_gc_step(random.randint(1, 4))
for o in self.stackroots[:]:
assert o.someInt == 100
o_adr = llmemory.cast_ptr_to_adr(o)
if random.random() < 0.1 and self.gc._is_pinned(o_adr):
print("-pin")
self.gc.unpin(o_adr)
if random.random() < 0.1:
print("-stack")
self.stackroots.remove(o)
class TestIncminimark(PinningGCTest):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
from rpython.memory.gc.incminimark import STATE_SCANNING, STATE_MARKING
def test_try_pin_gcref_containing_type(self):
# scenario: incminimark's object pinning can't pin objects that may
# contain GC pointers
obj = self.malloc(S)
assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj))
def test_pin_old(self):
# scenario: try pinning an old object. This should be not possible and
# we want to make sure everything stays as it is.
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # test assumption
self.gc.collect()
old_ptr = self.stackroots[0]
# now we try to pin it
old_adr = llmemory.cast_ptr_to_adr(old_ptr)
assert not self.gc.is_in_nursery(old_adr)
assert not self.gc.pin(old_adr)
assert self.gc.pinned_objects_in_nursery == 0
def pin_pin_pinned_object_count(self, collect_func):
# scenario: pin two objects that are referenced from stackroots. Check
# if the pinned objects count is correct, even after an other collection
pinned1_ptr = self.malloc(T)
pinned1_ptr.someInt = 100
self.stackroots.append(pinned1_ptr)
#
pinned2_ptr = self.malloc(T)
pinned2_ptr.someInt = 200
self.stackroots.append(pinned2_ptr)
#
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned1_ptr))
assert self.gc.pinned_objects_in_nursery == 1
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned2_ptr))
assert self.gc.pinned_objects_in_nursery == 2
#
collect_func()
#
assert self.gc.pinned_objects_in_nursery == 2
def test_pin_pin_pinned_object_count_minor_collection(self):
self.pin_pin_pinned_object_count(self.gc.minor_collection)
def test_pin_pin_pinned_object_count_major_collection(self):
self.pin_pin_pinned_object_count(self.gc.collect)
def pin_unpin_pinned_object_count(self, collect_func):
# scenario: pin an object and check the pinned object count. Unpin it
# and check the count again.
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.stackroots.append(pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
#
assert self.gc.pinned_objects_in_nursery == 0
assert self.gc.pin(pinned_adr)
assert self.gc.pinned_objects_in_nursery == 1
collect_func()
assert self.gc.pinned_objects_in_nursery == 1
self.gc.unpin(pinned_adr)
assert self.gc.pinned_objects_in_nursery == 0
collect_func()
assert self.gc.pinned_objects_in_nursery == 0
def test_pin_unpin_pinned_object_count_minor_collection(self):
self.pin_unpin_pinned_object_count(self.gc.minor_collection)
def test_pin_unpin_pinned_object_count_major_collection(self):
self.pin_unpin_pinned_object_count(self.gc.collect)
def pinned_obj_in_stackroot(self, collect_func):
# scenario: a pinned object that is part of the stack roots. Check if
# it is not moved
#
ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.is_in_nursery(adr) # to be sure
assert self.gc.pin(adr)
#
# the object shouldn't move from now on
collect_func()
#
# check if it is still at the same location as expected
adr_after_collect = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert self.gc.is_in_nursery(adr_after_collect)
assert adr == adr_after_collect
assert self.gc._is_pinned(adr)
assert ptr.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
def test_pinned_obj_in_stackroot_minor_collection(self):
self.pinned_obj_in_stackroot(self.gc.minor_collection)
def test_pinned_obj_in_stackroot_full_major_collection(self):
self.pinned_obj_in_stackroot(self.gc.collect)
def test_pinned_obj_in_stackroots_stepwise_major_collection(self):
# scenario: same as for 'pinned_obj_in_stackroot' with minor change
# that we do stepwise major collection and check in each step for
# a correct state
#
ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.is_in_nursery(adr)
assert self.gc.pin(adr)
#
# the object shouldn't move from now on. Do a full round of major
# steps and check each time for correct state
#
# check that we start at the expected point
assert self.gc.gc_state == self.STATE_SCANNING
done = False
while not done:
self.gc.debug_gc_step()
# check that the pinned object didn't move
ptr_after_collection = self.stackroots[0]
adr_after_collection = llmemory.cast_ptr_to_adr(ptr_after_collection)
assert self.gc.is_in_nursery(adr_after_collection)
assert adr == adr_after_collection
assert self.gc._is_pinned(adr)
assert ptr.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
# as the object is referenced from the stackroots, the gc internal
# 'old_objects_pointing_to_pinned' should be empty
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
#
# break condition
done = self.gc.gc_state == self.STATE_SCANNING
def pin_unpin_moved_stackroot(self, collect_func):
# scenario: test if the pinned object is moved after being unpinned.
# the second part of the scenario is the tested one. The first part
        # is already tested by other tests.
ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.pin(adr)
collect_func()
#
        # from here on the test really starts. previous logic is already tested
#
self.gc.unpin(adr)
assert not self.gc._is_pinned(adr)
assert self.gc.is_in_nursery(adr)
#
# now we do another collection and the object should be moved out of
# the nursery.
collect_func()
new_adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(new_adr)
assert self.stackroots[0].someInt == 100
with py.test.raises(RuntimeError) as exinfo:
ptr.someInt = 200
assert "freed" in str(exinfo.value)
def test_pin_unpin_moved_stackroot_minor_collection(self):
self.pin_unpin_moved_stackroot(self.gc.minor_collection)
def test_pin_unpin_moved_stackroot_major_collection(self):
self.pin_unpin_moved_stackroot(self.gc.collect)
def pin_referenced_from_old(self, collect_func):
# scenario: an old object points to a pinned one. Check if the pinned
# object is correctly kept in the nursery and not moved.
#
# create old object
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # validate our assumption
collect_func() # make it old: move it out of the nursery
old_ptr = self.stackroots[0]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
#
# create young pinned one and let the old one reference the young one
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
assert self.gc.is_in_nursery(pinned_adr)
assert old_ptr.next.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
#
# do a collection run and make sure the pinned one didn't move
collect_func()
assert old_ptr.next.someInt == pinned_ptr.someInt == 100
assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
assert self.gc.is_in_nursery(pinned_adr)
def test_pin_referenced_from_old_minor_collection(self):
self.pin_referenced_from_old(self.gc.minor_collection)
def test_pin_referenced_from_old_major_collection(self):
self.pin_referenced_from_old(self.gc.collect)
def test_pin_referenced_from_old_stepwise_major_collection(self):
# scenario: same as in 'pin_referenced_from_old'. However,
# this time we do a major collection step by step and check
# between steps that the states are as expected.
#
# create old object
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # validate our assumption
self.gc.minor_collection() # make it old: move it out of the nursery
old_ptr = self.stackroots[0]
old_adr = llmemory.cast_ptr_to_adr(old_ptr)
assert not self.gc.is_in_nursery(old_adr)
#
# create young pinned one and let the old one reference the young one
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
assert self.gc.is_in_nursery(pinned_adr)
assert old_ptr.next.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
#
# stepwise major collection with validation between steps
# check that we start at the expected point
assert self.gc.gc_state == self.STATE_SCANNING
done = False
while not done:
self.gc.debug_gc_step()
#
# make sure pinned object didn't move
assert old_ptr.next.someInt == pinned_ptr.someInt == 100
assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
assert self.gc.is_in_nursery(pinned_adr)
assert self.gc.pinned_objects_in_nursery == 1
#
# validate that the old object is part of the internal list
# 'old_objects_pointing_to_pinned' as expected.
should_be_old_adr = self.gc.old_objects_pointing_to_pinned.pop()
assert should_be_old_adr == old_adr
self.gc.old_objects_pointing_to_pinned.append(should_be_old_adr)
#
# break condition
done = self.gc.gc_state == self.STATE_SCANNING
def pin_referenced_from_old_remove_ref(self, collect_func):
# scenario: an old object points to a pinned one. We remove the
# reference from the old one. So nothing points to the pinned object.
# After this the pinned object should be collected (it's dead).
#
# Create the objects and get them to our initial state (this is not
# tested here, should be already tested by other tests)
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # check assumption
collect_func() # make it old
old_ptr = self.stackroots[0]
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
#
collect_func()
# from here on we have our initial state for this test.
#
# first check some basic assumptions.
assert self.gc.is_in_nursery(pinned_adr)
assert self.gc._is_pinned(pinned_adr)
# remove the reference
self.write(old_ptr, 'next', lltype.nullptr(T))
# from now on the pinned object is dead. Do a collection and make sure
# old object still there and the pinned one is gone.
collect_func()
assert self.stackroots[0].someInt == 900
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
with py.test.raises(RuntimeError) as exinfo:
pinned_ptr.someInt = 200
assert "freed" in str(exinfo.value)
def test_pin_referenced_from_old_remove_ref_minor_collection(self):
self.pin_referenced_from_old_remove_ref(self.gc.minor_collection)
def test_pin_referenced_from_old_remove_ref_major_collection(self):
self.pin_referenced_from_old_remove_ref(self.gc.collect)
def pin_referenced_from_old_remove_old(self, collect_func):
# scenario: an old object references a pinned object. After removing
# the stackroot reference to the old object, both objects (old and
# pinned) must be collected.
# This test is important as we expect unreachable pinned objects to
# be collected. At the same time we keep an internal list of objects
# pointing to pinned ones and must make sure that this list does not
# keep the old/pinned objects alive.
#
# create the objects and get them to the initial state for this test.
# Everything on the way to the initial state should be covered by
# other tests.
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
collect_func()
old_ptr = self.stackroots[0]
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
#
collect_func()
#
# now we have our initial state: old object referenced from stackroots.
# Old object referencing a young pinned one. Next step is to make some
# basic checks that we got the expected state.
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
assert pinned_ptr == old_ptr.next
#
# now we remove the old object from the stackroots...
self.stackroots.remove(old_ptr)
# ... and do a major collection (otherwise the old object wouldn't be
# gone).
self.gc.collect()
# check that both objects are gone
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
with py.test.raises(RuntimeError) as exinfo_old:
old_ptr.someInt = 800
assert "freed" in str(exinfo_old.value)
#
with py.test.raises(RuntimeError) as exinfo_pinned:
pinned_ptr.someInt = 200
assert "freed" in str(exinfo_pinned.value)
def test_pin_referenced_from_old_remove_old_minor_collection(self):
self.pin_referenced_from_old_remove_old(self.gc.minor_collection)
def test_pin_referenced_from_old_remove_old_major_collection(self):
self.pin_referenced_from_old_remove_old(self.gc.collect)
def pin_referenced_from_young_in_stackroots(self, collect_func):
# scenario: a young object is referenced from the stackroots. This
# young object points to a young pinned object. We check if everything
# behaves as expected after a collection: the young object is moved out
# of the nursery while the pinned one stays where it is.
#
root_ptr = self.malloc(S)
root_ptr.someInt = 900
self.stackroots.append(root_ptr)
assert self.stackroots[0] == root_ptr # validate assumption
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(root_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
# check both are in nursery
assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
assert self.gc.is_in_nursery(pinned_adr)
#
# no old object yet pointing to a pinned one
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
#
# now we do a collection and check if the result is as expected
collect_func()
#
# check if objects are where we expect them
root_ptr = self.stackroots[0]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
assert self.gc.is_in_nursery(pinned_adr)
# and as 'root_ptr' object is now old, it should be tracked specially
should_be_root_adr = self.gc.old_objects_pointing_to_pinned.pop()
assert should_be_root_adr == llmemory.cast_ptr_to_adr(root_ptr)
self.gc.old_objects_pointing_to_pinned.append(should_be_root_adr)
# check that old object still points to the pinned one as expected
assert root_ptr.next == pinned_ptr
def test_pin_referenced_from_young_in_stackroots_minor_collection(self):
self.pin_referenced_from_young_in_stackroots(self.gc.minor_collection)
def test_pin_referenced_from_young_in_stackroots_major_collection(self):
self.pin_referenced_from_young_in_stackroots(self.gc.collect)
def pin_referenced_from_prebuilt(self, collect_func):
# scenario: a prebuilt object points to a pinned object. Check if the
# pinned object doesn't move and is still accessible.
#
prebuilt_ptr = lltype.malloc(S, immortal=True)
prebuilt_ptr.someInt = 900
self.consider_constant(prebuilt_ptr)
prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr)
collect_func()
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(prebuilt_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
#
# check if everything is as expected
assert not self.gc.is_in_nursery(prebuilt_adr)
assert self.gc.is_in_nursery(pinned_adr)
assert pinned_ptr == prebuilt_ptr.next
assert pinned_ptr.someInt == 100
#
# do a collection and check again
collect_func()
assert self.gc.is_in_nursery(pinned_adr)
assert pinned_ptr == prebuilt_ptr.next
assert pinned_ptr.someInt == 100
def test_pin_referenced_from_prebuilt_minor_collection(self):
self.pin_referenced_from_prebuilt(self.gc.minor_collection)
def test_pin_referenced_from_prebuilt_major_collection(self):
self.pin_referenced_from_prebuilt(self.gc.collect)
def test_old_objects_pointing_to_pinned_not_exploding(self):
# scenario: two old objects, each pointing twice to a pinned object.
# The internal 'old_objects_pointing_to_pinned' list should always
# contain exactly two entries.
# In a previous implementation the list exploded (grew with every minor
# collection), hence this test.
old1_ptr = self.malloc(S)
old1_ptr.someInt = 900
self.stackroots.append(old1_ptr)
old2_ptr = self.malloc(S)
old2_ptr.someInt = 800
self.stackroots.append(old2_ptr)
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
self.write(old1_ptr, 'next', pinned_ptr)
self.write(old1_ptr, 'data', pinned_ptr)
self.write(old2_ptr, 'next', pinned_ptr)
self.write(old2_ptr, 'data', pinned_ptr)
self.gc.collect()
old1_ptr = self.stackroots[0]
old2_ptr = self.stackroots[1]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old1_ptr))
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old2_ptr))
# do multiple rounds to make sure
for _ in range(10):
assert self.gc.old_objects_pointing_to_pinned.length() == 2
self.gc.debug_gc_step()
def pin_shadow_1(self, collect_func):
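# scenario: a pinned object that also has a shadow (allocated by
# gc.id()). While pinned it must stay in the nursery across a
# collection; after unpinning, the next collection moves it out of
# the nursery (into its shadow).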
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
ptr.someInt = 100
assert self.gc.pin(adr)
self.gc.id(ptr) # allocate shadow
collect_func()
assert self.gc.is_in_nursery(adr)
assert ptr.someInt == 100
self.gc.unpin(adr)
collect_func() # move to shadow
adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(adr)
def test_pin_shadow_1_minor_collection(self):
self.pin_shadow_1(self.gc.minor_collection)
def test_pin_shadow_1_major_collection(self):
self.pin_shadow_1(self.gc.collect)
def test_malloc_different_types(self):
# scenario: malloc two objects of different type and pin them. Do a
# minor and major collection in between. This test showed a bug that was
# present in a previous implementation of pinning.
obj1 = self.malloc(T)
self.stackroots.append(obj1)
assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1))
#
self.gc.collect()
#
obj2 = self.malloc(T)
self.stackroots.append(obj2)
assert self.gc.pin(llmemory.cast_ptr_to_adr(obj2))
def test_objects_to_trace_bug(self):
# scenario: in a previous implementation there was a bug caused by a
# dead pointer inside 'objects_to_trace'. The first major collection
# step added the pointer to the list; right after that step the object
# was unpinned and freed by the next minor collection, leaving a dead
# pointer in the list.
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 101
self.stackroots.append(pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
self.gc.debug_gc_step()
self.gc.unpin(pinned_adr)
self.gc.debug_gc_step()
def pin_shadow_2(self, collect_func):
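# scenario: same as 'pin_shadow_1', but the shadow is allocated via
# gc.identityhash() instead of gc.id().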
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
ptr.someInt = 100
assert self.gc.pin(adr)
self.gc.identityhash(ptr) # allocate shadow
collect_func()
assert self.gc.is_in_nursery(adr)
assert ptr.someInt == 100
self.gc.unpin(adr)
collect_func() # move to shadow
adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(adr)
def test_pin_shadow_2_minor_collection(self):
self.pin_shadow_2(self.gc.minor_collection)
def test_pin_shadow_2_major_collection(self):
self.pin_shadow_2(self.gc.collect)
def test_pin_nursery_top_scenario1(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: no minor collection happened, only three mallocs
# and pins
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | pinned | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^ ^
# | |
# nursery_free -+ |
# nursery_top -+
#
assert adr3 < self.gc.nursery_free
assert self.gc.nursery_free < self.gc.nursery_top
def test_pin_nursery_top_scenario2(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: after first GC minor collection
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | pinned | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^
# |
# +- nursery_free
# +- nursery_top
#
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery_top
assert self.gc.nursery_top == self.gc.nursery
assert self.gc.nursery_top < adr3
def test_pin_nursery_top_scenario3(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: after unpinning first object and a minor
# collection
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | empty | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^ ^
# | |
# | +- nursery_top
# +- nursery_free
#
self.gc.unpin(adr1)
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery
assert self.gc.nursery_top > self.gc.nursery_free
assert self.gc.nursery_top < adr2
def test_pin_nursery_top_scenario4(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: after unpinning first & second object and a minor
# collection
#
# +- nursery
# |
# v
# +-----------------+--------+---------------------...---+
# | empty | pinned | empty |
# +-----------------+--------+---------------------...---+
# ^ ^
# | |
# | +- nursery_top
# +- nursery_free
#
self.gc.unpin(adr1)
self.gc.unpin(adr2)
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery
assert self.gc.nursery_free < self.gc.nursery_top
assert self.gc.nursery_top < adr3
def test_pin_nursery_top_scenario5(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: no minor collection happened, only three mallocs
# and pins
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | pinned | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^ ^
# | |
# nursery_free -+ |
# nursery_top -+
#
assert adr3 < self.gc.nursery_free
assert self.gc.nursery_free < self.gc.nursery_top
# scenario: unpin everything and minor collection
#
# +- nursery
# |
# v
# +----------------------------------+-------------...---+
# | reset arena | empty (not reset) |
# +----------------------------------+-------------...---+
# ^ ^
# | |
# +- nursery_free |
# nursery_top -+
#
self.gc.unpin(adr1)
self.gc.unpin(adr2)
self.gc.unpin(adr3)
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery
assert self.gc.nursery_top > self.gc.nursery_free
def fill_nursery_with_pinned_objects(self):
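# helper: allocate as many T objects as fit into the nursery, pin
# each of them and keep them alive via the stackroots.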
typeid = self.get_type_id(T)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
raw_size = llmemory.raw_malloc_usage(size)
object_mallocs = self.gc.nursery_size // raw_size
for instance_nr in xrange(object_mallocs):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
self.gc.pin(adr)
def test_full_pinned_nursery_pin_fail(self):
self.fill_nursery_with_pinned_objects()
# nursery should be full now, at least no space for another `T`.
# Next malloc should fail.
py.test.raises(Exception, self.malloc, T)
def test_full_pinned_nursery_arena_reset(self):
# there were some bugs regarding the 'arena_reset()' calls at
# the end of the minor collection. This test brought them to light.
self.fill_nursery_with_pinned_objects()
self.gc.collect()
def test_pinning_limit(self):
assert self.gc.max_number_of_pinned_objects == 5
for instance_nr in xrange(self.gc.max_number_of_pinned_objects):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
assert self.gc.pin(adr)
#
# now we reached the maximum amount of pinned objects
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
assert not self.gc.pin(adr)
test_pinning_limit.max_number_of_pinned_objects = 5
def test_full_pinned_nursery_pin_fail(self):
typeid = self.get_type_id(T)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
raw_size = llmemory.raw_malloc_usage(size)
object_mallocs = self.gc.nursery_size // raw_size
# make sure we do not run into the pinning limit, as we are testing
# not the limiter but the case of a nursery full of pinned objects.
assert object_mallocs < self.gc.max_number_of_pinned_objects
for instance_nr in xrange(object_mallocs):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
self.gc.pin(adr)
#
# nursery should be full now, at least no space for another `T`.
# Next malloc should fail.
py.test.raises(Exception, self.malloc, T)
test_full_pinned_nursery_pin_fail.max_number_of_pinned_objects = 50
def test_pin_bug1(self):
#
# * the nursery contains a pinned object 'ptr1'
#
# * outside the nursery is another object 'ptr2' pointing to 'ptr1'
#
# * during one incremental tracing step, we see 'ptr2' but don't
# trace 'ptr1' right now: it is left behind on the trace-me-later
# list
#
# * then we run the program, unpin 'ptr1', and remove it from 'ptr2'
#
# * at the next minor collection, we free 'ptr1' because we don't
# find anything pointing to it (it is removed from 'ptr2'),
# but 'ptr1' is still in the trace-me-later list
#
# * the trace-me-later list is deep enough that 'ptr1' is not
# seen right now! it is only seen at some later minor collection
#
# * at that later point, crash, because 'ptr1' in the nursery was
# overwritten
#
ptr2 = self.malloc(S)
ptr2.someInt = 102
self.stackroots.append(ptr2)
self.gc.collect()
ptr2 = self.stackroots[-1] # now outside the nursery
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.write(ptr2, 'data', ptr1)
res = self.gc.pin(adr1)
assert res
self.gc.minor_collection()
assert self.gc.gc_state == self.STATE_SCANNING
self.gc.major_collection_step()
assert self.gc.objects_to_trace.tolist() == [adr2]
assert self.gc.more_objects_to_trace.tolist() == []
self.gc.TEST_VISIT_SINGLE_STEP = True
self.gc.minor_collection()
assert self.gc.gc_state == self.STATE_MARKING
self.gc.major_collection_step()
assert self.gc.objects_to_trace.tolist() == []
assert self.gc.more_objects_to_trace.tolist() == [adr2]
self.write(ptr2, 'data', lltype.nullptr(T))
self.gc.unpin(adr1)
assert ptr1.someInt == 101
self.gc.minor_collection() # should free 'ptr1'
py.test.raises(RuntimeError, "ptr1.someInt")
assert self.gc.gc_state == self.STATE_MARKING
self.gc.major_collection_step() # should not crash reading 'ptr1'!
del self.gc.TEST_VISIT_SINGLE_STEP
| jptomo/rpython-lang-scheme | rpython/memory/gc/test/test_object_pinning.py | Python | mit | 38,271 | 0.001698 |
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
class Automation(Plugin):
def __init__(self):
addEvent('app.load', self.setCrons)
if not Env.get('dev'):
addEvent('app.load', self.addMovies)
addEvent('setting.save.automation.hour.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12))
def addMovies(self):
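# Collect IMDb ids from all automation providers, add every movie
# that has not been added before (tracked via the
# 'automation.added.<imdb_id>' property), then trigger a search for
# each newly added movie.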
movies = fireEvent('automation.get_movies', merge = True)
movie_ids = []
for imdb_id in movies:
prop_name = 'automation.added.%s' % imdb_id
added = Env.prop(prop_name, default = False)
if not added:
added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_library = True, single = True)
if added_movie:
movie_ids.append(added_movie['id'])
Env.prop(prop_name, True)
for movie_id in movie_ids:
movie_dict = fireEvent('movie.get', movie_id, single = True)
fireEvent('searcher.single', movie_dict)
| coolbombom/CouchPotatoServer | couchpotato/core/plugins/automation/main.py | Python | gpl-3.0 | 1,352 | 0.016272 |
#!/usr/bin/env python
# Standard packages
import sys
import cyvcf2
import argparse
import re
import geneimpacts
from cyvcf2 import VCF
def get_effects(variant, annotation_keys):
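# Split the snpEff "ANN" INFO field into its comma-separated
# annotations and wrap each one in a geneimpacts.SnpEff effect object.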
effects = []
effects += [geneimpacts.SnpEff(e, annotation_keys) for e in variant.INFO.get("ANN").split(",")]
return effects
def get_top_impact(effects):
top_impact = geneimpacts.Effect.top_severity(effects)
if isinstance(top_impact, list):
top_impact = top_impact[0]
return top_impact
def get_genes(effects):
genes_list = []
for effect in effects:
if effect.gene not in genes_list:
genes_list.append(effect.gene)
return genes_list
def get_transcript_effects(effects):
transcript_effects = dict()
for effect in effects:
if effect.transcript is not None:
transcript_effects[effect.transcript] = "{biotype}|{effect}".format(biotype=effect.biotype,
effect=effect.impact_severity)
return transcript_effects
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--annotated_vcf', help="snpEff annotated VCF file to scan")
parser.add_argument('-o', '--output', help="File for output information")
args = parser.parse_args()
sys.stdout.write("Parsing VCFAnno VCF with CyVCF2\n")
reader = cyvcf2.VCFReader(args.annotated_vcf)
desc = reader["ANN"]["Description"]
annotation_keys = [x.strip("\"'") for x in re.split("\s*\|\s*", desc.split(":", 1)[1].strip('" '))]
sys.stdout.write("Parsing VCFAnno VCF\n")
vcf = VCF(args.annotated_vcf)
for variant in vcf:
effects = get_effects(variant, annotation_keys)
top_impact = get_top_impact(effects)
gene_effects = dict()
for effect in effects:
if effect.gene not in gene_effects.keys():
if effect.transcript is not None:
| GastonLab/ddb-scripts | specialist/scan_multi-gene_annotated_snpEff.py | Python | mit | 1,956 | 0.00409 |
#
# Copyright (C) 2013 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import utils
def all_free_variables(edges):
return utils.unions(edges, lambda edge: edge.get_free_vars())
def get_variable_sources(inscriptions):
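# Map each variable name to the uid of the first non-bulk inscription
# that binds it (None while the variable is only bound by a bulk
# inscription).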
sources = {}
for inscription in inscriptions:
if not inscription.is_expr_variable():
continue
if sources.get(inscription.expr):
continue
if inscription.is_bulk():
sources[inscription.expr] = None
else:
sources[inscription.expr] = inscription.uid
return sources
def is_dependant(inscription1, inscription2):
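# True if inscription1 has to be processed after inscription2: either
# inscription2 precedes inscription1 on the same edge, or inscription2
# binds a variable that inscription1 uses.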
if inscription1.edge is inscription2.edge and \
inscription2.index < inscription1.index:
return True
if not inscription2.is_expr_variable():
return False
return inscription2.expr in inscription1.get_foreign_variables()
def analyze_transition(tr):
variable_sources = {} # string -> uid - which inscriptions carry input variables
reuse_tokens = {} # uid -> uid - identification number of token for the output inscription
fresh_tokens = [] # (uid, type) - what tokens has to be created for output
used_tokens = [] # [uid] - Tokens from input inscriptions that are reused on output
variable_sources_out = {} # string -> uid or None
bulk_overtake = [] # [uid]
overtaken_variables = set()
def inscription_out_weight(inscription):
# Reorder edges, bulk edges first, because we want them sent first.
# Otherwise it can cause problems like in sending results in the "workers" example
s = inscription.config.get("seq")
if s is None:
seq = 0
else:
seq = int(s) * 3
if inscription.is_bulk():
return seq
# Unconditional edges have higher priority
if inscription.is_conditioned():
return seq + 2
else:
return seq + 1
def inscription_in_weight(inscription):
if inscription.is_conditioned():
return 1
else:
return 0
inscriptions_in = sum((edge.inscriptions for edge in tr.edges_in), [])
inscriptions_in.sort(key=inscription_in_weight)
inscriptions_out = sum((edge.inscriptions for edge in tr.edges_out), [])
inscriptions_out.sort(key=inscription_out_weight)
variable_sources = get_variable_sources(inscriptions_in)
# Order input inscriptions by variable dependency
inscriptions_in = utils.topological_ordering(inscriptions_in, is_dependant)
if inscriptions_in is None:
raise utils.PtpException("Circular variable dependency", tr.get_source())
# Try to reuse tokens
for inscription in inscriptions_out:
if inscription.is_bulk() or not inscription.is_local():
continue # Bulk and nonlocal edges cannot reuse tokens
if not inscription.is_expr_variable():
continue # The current implementation reuses tokens only for variable expressions
if inscription.is_collective():
continue # Collective operations cannot reuse tokens
token_uid = variable_sources.get(inscription.expr)
if token_uid is None or token_uid in used_tokens:
# Variable is not taken from input as token
# or token is already reused --> reusage not possible
continue
reuse_tokens[inscription.uid] = token_uid
used_tokens.append(token_uid)
# Setup fresh variables where token was not reused
for inscription in inscriptions_out:
if not inscription.is_expr_variable():
continue # We are interested only in variables
variable = inscription.expr
if variable in variable_sources:
# Variable is taken from input, so we do not have to deal with it here
continue
if variable in variable_sources_out:
# Variable already prepared for output
continue
if inscription.is_bulk():
# No token, just build variable
variable_sources_out[variable] = None
continue
if inscription.is_local():
# Local send, we prepare token
fresh_tokens.append((inscription.uid, inscription.edge.place.type))
variable_sources_out[variable] = inscription.uid
reuse_tokens[inscription.uid] = inscription.uid # Use this fresh new token
else:
# Just create variable
variable_sources_out[variable] = None
for inscription in reversed(inscriptions_out):
# Now we are checking overtake. It has to be done in reversed order
# because overtake has to be the last operation on a variable
if not inscription.is_bulk() or not inscription.is_expr_variable():
continue # We are interested only in variables and bulk inscriptions
if inscription.expr not in overtaken_variables:
overtaken_variables.add(inscription.expr)
bulk_overtake.append(inscription.uid)
for inscription in inscriptions_out:
for variable in inscription.get_other_variables():
if variable not in variable_sources and \
variable not in variable_sources_out:
variable_sources_out[variable] = None
tr.inscriptions_in = inscriptions_in
tr.inscriptions_out = inscriptions_out
tr.variable_sources = variable_sources
tr.reuse_tokens = reuse_tokens
tr.variable_sources_out = variable_sources_out
tr.fresh_tokens = fresh_tokens
tr.bulk_overtake = bulk_overtake
| MrPablozOne/kaira | ptp/base/analysis.py | Python | gpl-3.0 | 6,178 | 0.003561 |
import mock
from olympia.amo.tests import addon_factory, TestCase, user_factory
from olympia.ratings.models import Rating
from olympia.ratings.tasks import addon_rating_aggregates
class TestAddonRatingAggregates(TestCase):
# Prevent <Rating>.refresh() from being fired when setting up test data,
# since it'd call addon_rating_aggregates too early.
@mock.patch.object(Rating, 'refresh', lambda x, update_denorm=False: None)
def test_addon_rating_aggregates(self):
addon = addon_factory()
addon2 = addon_factory()
# Add a purely unlisted add-on. It should not be considered when
# calculating bayesian rating for the other add-ons.
addon3 = addon_factory(total_ratings=3, average_rating=4)
self.make_addon_unlisted(addon3)
# Create a few ratings with various scores.
user = user_factory()
# Add an old rating that should not be used to calculate the average,
# because the same user posts a new one right after that.
old_rating = Rating.objects.create(
addon=addon, rating=1, user=user, is_latest=False, body=u'old')
new_rating = Rating.objects.create(addon=addon, rating=3, user=user,
body=u'new')
Rating.objects.create(addon=addon, rating=3, user=user_factory(),
body=u'foo')
Rating.objects.create(addon=addon, rating=2, user=user_factory())
Rating.objects.create(addon=addon, rating=1, user=user_factory())
# On another addon as well.
Rating.objects.create(addon=addon2, rating=1, user=user_factory())
Rating.objects.create(addon=addon2, rating=1, user=user_factory(),
body=u'two')
# addon_rating_aggregates should ignore replies, so let's add one.
Rating.objects.create(
addon=addon, rating=5, user=user_factory(), reply_to=new_rating)
# Make sure old_rating is considered old, new_rating considered new.
old_rating.reload()
new_rating.reload()
assert old_rating.is_latest is False
assert new_rating.is_latest is True
# Make sure total_ratings hasn't been updated yet (because we are
# mocking Rating.refresh()).
addon.reload()
addon2.reload()
assert addon.total_ratings == 0
assert addon2.total_ratings == 0
assert addon.bayesian_rating == 0
assert addon.average_rating == 0
assert addon2.bayesian_rating == 0
assert addon2.average_rating == 0
assert addon.text_ratings_count == 0
assert addon2.text_ratings_count == 0
# Trigger the task and test results.
addon_rating_aggregates([addon.pk, addon2.pk])
addon.reload()
addon2.reload()
assert addon.total_ratings == 4
assert addon2.total_ratings == 2
assert addon.bayesian_rating == 1.9821428571428572
assert addon.average_rating == 2.25
assert addon2.bayesian_rating == 1.375
assert addon2.average_rating == 1.0
assert addon.text_ratings_count == 2
assert addon2.text_ratings_count == 1
# Trigger the task with a single add-on.
Rating.objects.create(addon=addon2, rating=5, user=user_factory(),
body=u'xxx')
addon2.reload()
assert addon2.total_ratings == 2
addon_rating_aggregates(addon2.pk)
addon2.reload()
assert addon2.total_ratings == 3
assert addon2.text_ratings_count == 2
assert addon.bayesian_rating == 1.9821428571428572
assert addon.average_rating == 2.25
assert addon2.bayesian_rating == 1.97915
assert addon2.average_rating == 2.3333
| tsl143/addons-server | src/olympia/ratings/tests/test_tasks.py | Python | bsd-3-clause | 3,772 | 0 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
##################################################################
# Documentation
##################################################################
# Imports
from __future__ import absolute_import, unicode_literals, print_function
try:
from cPickle import dump, load
except ImportError:
from _pickle import dump, load
from collections import Counter
from copy import deepcopy
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.layers.embeddings import Embedding
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.regularizers import l2
from keras.utils import to_categorical
from six import iteritems
from sklearn.utils.class_weight import compute_class_weight
from tempfile import mkstemp
import abc
import numpy as np
import os
from cgsa.base import BaseAnalyzer
from cgsa.utils.common import LOGGER, is_relevant, normlex
from .layers import CUSTOM_OBJECTS, DFLT_INITIALIZER, EMPTY_IDX, UNK_IDX
from .layers.word2vec import Word2Vec
from .utils import ModelMGPU, N_GPUS
##################################################################
# Variables and Constants
# default dimensionality for task-specific vectors
DFLT_VDIM = 100
DFLT_N_EPOCHS = 24 # 24
EMPTY_TOK = "%EMPTY%"
UNK_TOK = "%UNK%"
DICT_OFFSET = 1
UNK_PROB = 1e-4
L2_COEFF = 1e-4
EMB_INDICES_NAME = "embedding_indices"
# LBA Results for Different Optimizers:
# sgd: Macro: 10.33%; Micro: 36.2623%;
# rmsprop: Macro: 30.84%; Micro: 44.5902%;
# adagrad: Macro: 35.45%; Micro: 61.5738%;
# adadelta: 30.84%; Micro: 44.5902%;
# adam: Macro: 30.84%; Micro: 44.5902%;
# nadam: 30.84%; Micro: 44.5902%;
DFLT_TRAIN_PARAMS = {"optimizer": "adagrad",
"metrics": ["categorical_accuracy"],
"loss": "categorical_hinge"}
##################################################################
# Methods
##################################################################
# Class
class DLBaseAnalyzer(BaseAnalyzer):
"""Class for DeepLearning-based sentiment analysis.
Attributes:
"""
def __init__(self, w2v=False, lstsq=False, embeddings=None, **kwargs):
"""Class constructor.
Args:
w2v (bool): use word2vec embeddings
lstsq (bool): use the least squares method
embeddings (cgsa.utils.word2vec.Word2Vec or None): pretrained
embeddings
"""
super(DLBaseAnalyzer, self).__init__()
self.name = "DLBaseAnalyzer"
# boolean flags indicating whether to use external embeddings
self._w2v = w2v
self._lstsq = lstsq
# actual external embeddings
self._embeddings = embeddings
# mapping from words to their embedding indices in `self._embs` or
# `self.W_EMB`
self._w2i = {EMPTY_TOK: EMPTY_IDX, UNK_TOK: UNK_IDX}
self._pad_value = EMPTY_IDX
# mapping from words to their embeddings (will be initialized after
# training the network, if `w2v` or `lstsq` are true)
self._embs = None
# least squares matrix (will be initialized after training the network,
# if `lstsq` is true)
self._lstsq_mtx = None
self.ndim = -1 # vector dimensionality will be initialized later
self.intm_dim = -1
self._model = None
self._model_path = None
self._trained = False
self._n_epochs = DFLT_N_EPOCHS
# mapping from word to its embedding index
self._aux_keys = set((0, 1))
self._max_seq_len = -1
self._min_width = 0
self._n_y = 0
self._train_params = deepcopy(DFLT_TRAIN_PARAMS)
self._fit_params = {}
# variables needed for training
self._w_stat = self._pred_class = None
self.W_EMB = self._cost = self._dev_cost = None
# initialize functions to None
self._reset_funcs()
# set up functions for obtaining word embeddings at train and test
# times
self._init_wemb_funcs()
def train(self, train_x, train_y, dev_x, dev_y,
a_grid_search, a_multi_gpu):
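# Prepare and digitize the data, build the network, then fit it with
# early stopping, checkpointing and TensorBoard logging (optionally
# replicated across multiple GPUs).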
self._start_training()
self._logger.debug("Training %s...", self.name)
self._logger.debug("Preparing dataset...")
train_x, train_y, dev_x, dev_y = self._prepare_data(
train_x, train_y, dev_x, dev_y
)
self._logger.debug("Dataset ready...")
# initialize the network
self._logger.debug("Initializing the network...")
# self._update_fit_params(train_y)
self._init_nn()
self._logger.debug("Network ready...")
# initialize callbacks
_, ofname = mkstemp(suffix=".hdf5", prefix=self.name + '.')
try:
early_stop = EarlyStopping(patience=3, verbose=1)
chck_point = ModelCheckpoint(
filepath=ofname, monitor="val_categorical_accuracy",
mode="auto", verbose=1,
save_weights_only=True,
save_best_only=True
)
tensorboard = TensorBoard(
log_dir=os.environ.get("TENSORBOARD_DIR", "/tmp"),
histogram_freq=1, batch_size=32,
write_graph=True, write_grads=True
)
if a_multi_gpu:
train_model = ModelMGPU(self._model)
self._fit_params["batch_size"] = 32 * N_GPUS
train_model.compile(**self._train_params)
else:
train_model = self._model
train_model.fit(train_x, train_y,
validation_data=(dev_x, dev_y),
epochs=self._n_epochs,
callbacks=[early_stop, chck_point, tensorboard],
**self._fit_params)
self._model.load_weights(ofname)
self._finish_training()
finally:
os.remove(ofname)
self._logger.debug("%s trained", self.name)
def predict_proba(self, msg, yvec):
wseq = self._tweet2wseq(msg)
embs = np.array(
self._pad(len(wseq), self._pad_value)
+ [self.get_test_w_emb(w) for w in wseq], dtype="int32")
ret = self._model.predict(np.asarray([embs]),
batch_size=1,
verbose=2)
yvec[:] = ret[0]
def predict_proba_raw(self, messages):
yvecs = np.zeros((len(messages), self._n_y))
for i, msg_i in enumerate(messages):
self.predict_proba(msg_i, yvecs[i])
return yvecs
def restore(self, embs):
"""Restore members which could not be serialized.
Args:
embs (cgsa.utils.word2vec.Word2Vec or None): pretrained
embeddings
"""
self._embeddings = embs
self._logger = LOGGER
self._init_wemb_funcs()
def reset(self):
"""Remove members which cannot be serialized.
"""
# set functions to None
self._reset_funcs()
self._embeddings = None
self.W_EMB = None
super(DLBaseAnalyzer, self).reset()
def save(self, path):
"""Dump model to disc.
Args:
a_path (str): file path at which to store the model
Returns:
void:
"""
# set functions to None
model_path = path + ".h5"
self._model.save(model_path)
self._model_path = os.path.basename(model_path)
# all paths are relative
model = self._model
self._model = None
with open(path, "wb") as ofile:
dump(self, ofile)
self._model = model
def _load(self, a_path):
super(DLBaseAnalyzer, self)._load(a_path)
self._model = load_model(
os.path.join(a_path, self._model_path),
custom_objects=CUSTOM_OBJECTS
)
@abc.abstractmethod
def _init_nn(self):
"""Initialize neural network.
"""
raise NotImplementedError
def _extract_feats(self, a_tweet):
pass
def _start_training(self):
"""Prepare for training.
"""
self._trained = False
def _finish_training(self):
"""Finalize the trained network.
"""
self._logger.info("Finalizing network")
if self._lstsq or self._w2v:
emb_layer_idx = self._get_layer_idx()
if self._lstsq:
# Extract embeddings from the network
task_embs = self._model.layers[emb_layer_idx].get_weights()
assert len(task_embs) == 1, \
("Unmatching number of trained paramaters:"
" {:d} instead of {:d}").format(
len(task_embs), 1)
task_embs = task_embs[0]
# extract only embeddings of known words
START_IDX = UNK_IDX + 1
w2v_embs = self._embs
# Compute the least square matrix
self._logger.info("Computing transform matrix for"
" task-specific embeddings.")
self._lstsq_mtx, res, rank, _ = np.linalg.lstsq(
w2v_embs[START_IDX:], task_embs[START_IDX:]
)
self._logger.info("Transform matrix computed"
" (rank: %d, residuals: %f).",
rank, sum(res))
self._embs = task_embs
# pop embedding layer and modify the first layer coming after it to
# accept plain embeddings as input
self._recompile_model(emb_layer_idx)
self._pad_value = self._embs[EMPTY_IDX]
self._logger.info("Network finalized")
self._trained = True
def _get_layer_idx(self):
"""Return the index of embedding layer in the model.
Args:
name (str): name of the layer (IGNORED)
Returns:
int: index of embedding layer
"""
return 0
def _recompile_model(self, emb_layer_idx):
"""Change model by removing the embedding layer and .
Args:
emb_layer_idx (int): index of the embedding layer
Returns:
void:
Note:
modifies `self._model` in place
"""
layers = self._model.layers
emb_layer = layers.pop(emb_layer_idx)
first_layer = layers.pop(emb_layer_idx)
layer_config = first_layer.get_config()
layer_config["input_shape"] = (None, emb_layer.output_dim)
new_layer = first_layer.__class__.from_config(
layer_config
)
new_layer.build((emb_layer.input_dim, emb_layer.output_dim))
new_layer.set_weights(first_layer.get_weights())
layers.insert(emb_layer_idx, new_layer)
self._model = self._model.__class__(layers=layers)
self._model.compile(**self._train_params)
def _init_wemb_funcs(self):
"""Initialize functions for obtaining word embeddings.
"""
if self.ndim < 0:
self.ndim = DFLT_VDIM
if self._w2v:
self._embeddings.load()
self.ndim = self._embeddings.ndim
self.init_w_emb = self._init_w2v_emb
self.get_train_w_emb_i = self._get_train_w2v_emb_i
if self._trained:
self.get_test_w_emb = self._get_test_w2v_emb
else:
self.get_test_w_emb = self._get_train_w2v_emb_i
elif self._lstsq:
self._embeddings.load()
self.ndim = self._embeddings.ndim
self.init_w_emb = self._init_w2v_emb
self.get_train_w_emb_i = self._get_train_w2v_emb_i
if self._trained:
self.get_test_w_emb = self._get_test_w2v_lstsq_emb
else:
self.get_test_w_emb = self._get_train_w2v_emb_i
else:
# checked
self.init_w_emb = self._init_w_emb
self.get_train_w_emb_i = self._get_train_w_emb_i
self.get_test_w_emb = self._get_test_w_emb_i
def _reset_funcs(self):
"""Set all compiled theano functions to None.
Note:
modifies instance variables in place
"""
self.get_train_w_emb_i = None
self.get_test_w_emb_i = None
self.init_w_emb = None
def _init_w_emb(self):
"""Initialize task-specific word embeddings.
"""
self.W_EMB = Embedding(len(self._w2i), self.ndim,
embeddings_initializer=DFLT_INITIALIZER,
embeddings_regularizer=l2(L2_COEFF))
def _init_w2v_emb(self):
"""Initialize word2vec embedding matrix.
"""
self._embeddings.load()
self.ndim = self._embeddings.ndim
self._embs = np.empty((len(self._w2i), self.ndim))
self._embs[EMPTY_IDX, :] *= 0
self._embs[UNK_IDX, :] = 1e-2 # prevent zeros in this row
for w, i in iteritems(self._w2i):
if i == EMPTY_IDX or i == UNK_IDX:
continue
self._embs[i] = self._embeddings[w]
# initialize custom keras layer
self.W_EMB = Word2Vec(self._embs, trainable=self._lstsq)
# We unload embeddings every time before the training to free more
# memory. Feel free to comment the line below, if you have plenty of
# RAM.
self._embeddings.unload()
def _get_train_w_emb_i(self, a_word):
"""Obtain embedding index for the given word.
Args:
a_word (str):
word whose embedding index should be retrieved
Returns:
int:
embedding index of the given word
"""
a_word = normlex(a_word)
if a_word in self._w2i:
return self._w2i[a_word]
elif self._w_stat[a_word] < 2 and np.random.binomial(1, UNK_PROB):
return UNK_IDX
else:
i = self._w2i[a_word] = len(self._w2i)
return i
def _get_test_w_emb_i(self, a_word):
"""Obtain embedding index for the given word.
Args:
a_word (str):
word whose embedding index should be retrieved
Returns:
int:
embedding index of the given word
"""
a_word = normlex(a_word)
return self._w2i.get(a_word, UNK_IDX)
def _get_train_w2v_emb_i(self, a_word):
"""Obtain embedding index for the given word.
Args:
a_word (str):
word whose embedding index should be retrieved
Returns:
int: embedding index of the given word
"""
a_word = normlex(a_word)
if a_word in self._w2i:
return self._w2i[a_word]
elif a_word in self._embeddings:
i = self._w2i[a_word] = len(self._w2i)
return i
else:
return UNK_IDX
def _get_test_w2v_emb(self, a_word):
"""Obtain embedding index for the given word.
Args:
a_word (str):
word whose embedding index should be retrieved
Returns:
np.array:
embedding of the input word
"""
a_word = normlex(a_word)
emb_i = self._w2i.get(a_word)
if emb_i is None:
if a_word in self._embeddings:
return self._embeddings[a_word]
return self._embs[UNK_IDX]
return self._embs[emb_i]
def _get_test_w2v_lstsq_emb(self, a_word):
"""Obtain embedding index for the given word.
Args:
a_word (str):
word whose embedding index should be retrieved
Returns:
np.array:
embedding of the input word
"""
a_word = normlex(a_word)
emb_i = self._w2i.get(a_word)
if emb_i is None:
if a_word in self._embeddings:
return np.dot(self._embeddings[a_word],
self._lstsq_mtx)
return self._embs[UNK_IDX]
return self._embs[emb_i]
def _prepare_data(self, train_x, train_y, dev_x, dev_y):
"""Provide train/test split and digitize the data.
"""
if not dev_x:
n = len(train_x)
n_dev = int(n / 15)
idcs = list(range(n))
np.random.shuffle(idcs)
def get_split(data, idcs):
return [data[i] for i in idcs]
dev_x = get_split(train_x, idcs[:n_dev])
dev_y = get_split(train_y, idcs[:n_dev])
train_x = get_split(train_x, idcs[n_dev:])
train_y = get_split(train_y, idcs[n_dev:])
# convert tweets to word indices
train_x, dev_x = self._digitize_data(train_x, dev_x)
self._n_y = len(set(train_y) | set(dev_y))
train_y = to_categorical(np.asarray(train_y))
dev_y = to_categorical(np.asarray(dev_y))
return (train_x, train_y, dev_x, dev_y)
def _compute_w_stat(self, train_x):
"""Compute word frequencies on the corpus.
Args:
train_x (list[list[str]]): training instances
Returns:
void:
Note:
modifies instance variables in place
"""
self._w_stat = Counter(w for t in train_x for w in t)
def _digitize_data(self, train_x, dev_x):
"""Convert sequences of words to sequences of word indices.
Args:
train_x (list[list[str]]): training set
dev_x (list[list[str]]): development set
Returns:
2-tuple[list, list]: digitized training and development sets
"""
train_x = [self._tweet2wseq(x) for x in train_x]
dev_x = [self._tweet2wseq(x) for x in dev_x]
self._compute_w_stat(train_x)
self._wseq2emb_ids(train_x, self.get_train_w_emb_i)
self._wseq2emb_ids(dev_x, self.get_test_w_emb)
train_x = self._pad_sequences(train_x)
dev_x = self._pad_sequences(dev_x)
return (train_x, dev_x)
def _pad(self, xlen, pad_value=EMPTY_IDX):
"""Add indices or vectors of empty words to match minimum filter length.
Args:
xlen (int): length of the input instance
"""
return [pad_value] * max(0, self._min_width - xlen)
def _pad_sequences(self, x):
"""Make all input instances of equal length.
Args:
x (list[np.array]): list of embedding indices
Returns:
x: list of embedding indices of equal lengths
"""
return pad_sequences(x)
def _tweet2wseq(self, msg):
"""Convert tweet to a sequence of word lemmas if these words are informative.
Args:
msg (cgsa.data.Tweet): input message
Return:
list: lemmas of informative words
"""
return [normlex(w.lemma)
for w in msg if is_relevant(w.form)]
def _wseq2emb_ids(self, data, w2i):
"""Convert sequence of words to embedding indices.
Args:
data (list[str]): list of input words
w2i (func): function to convert words to embedding indices
Return:
list[int]: list of embedding indices
"""
for i, inst_i in enumerate(data):
data[i] = np.asarray(
self._pad(len(inst_i))
+ [w2i(w) for w in inst_i], dtype="int32")
def _update_fit_params(self, train_y):
"""Add class weights to the training parameters.
Args:
train_y (list[np.array]): labels of training instances
Returns:
void:
Note:
modifies `self._train_params` in place
"""
return
y_labels = np.argmax(train_y, axis=-1)
class_weights = compute_class_weight("balanced",
np.unique(y_labels),
y_labels)
sample_weights = np.array([class_weights[y_i]
for y_i in y_labels])
self._fit_params["sample_weight"] = sample_weights
self._logger.debug("Class weights: %r", class_weights)
| WladimirSidorenko/CGSA | cgsa/dl/base.py | Python | mit | 20,205 | 0.000099 |
"""
kombu.transport.pyamqplib
=========================
amqplib transport.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
import socket
try:
from ssl import SSLError
except ImportError:
class SSLError(Exception): # noqa
pass
from amqplib import client_0_8 as amqp
from amqplib.client_0_8 import transport
from amqplib.client_0_8.channel import Channel as _Channel
from amqplib.client_0_8.exceptions import AMQPConnectionException
from amqplib.client_0_8.exceptions import AMQPChannelException
from kombu.transport import base
from kombu.utils.encoding import str_to_bytes
DEFAULT_PORT = 5672
# amqplib's handshake mistakenly identifies as protocol version 1191,
# this breaks in RabbitMQ tip, which no longer falls back to
# 0-8 for unknown ids.
transport.AMQP_PROTOCOL_HEADER = str_to_bytes("AMQP\x01\x01\x08\x00")
class Connection(amqp.Connection): # pragma: no cover
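# amqplib Connection extended with basic.return dispatch, waiting on
# multiple channels at once, and per-read timeouts.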
def _dispatch_basic_return(self, channel, args, msg):
reply_code = args.read_short()
reply_text = args.read_shortstr()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
exc = AMQPChannelException(reply_code, reply_text, (50, 60))
if channel.events["basic_return"]:
for callback in channel.events["basic_return"]:
callback(exc, exchange, routing_key, msg)
else:
raise exc
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self._method_override = {(60, 50): self._dispatch_basic_return}
def drain_events(self, allowed_methods=None, timeout=None):
"""Wait for an event on any channel."""
return self.wait_multi(self.channels.values(), timeout=timeout)
def wait_multi(self, channels, allowed_methods=None, timeout=None):
"""Wait for an event on a channel."""
chanmap = dict((chan.channel_id, chan) for chan in channels)
chanid, method_sig, args, content = self._wait_multiple(
chanmap.keys(), allowed_methods, timeout=timeout)
channel = chanmap[chanid]
if content \
and channel.auto_decode \
and hasattr(content, 'content_encoding'):
try:
content.body = content.body.decode(content.content_encoding)
except Exception:
pass
amqp_method = self._method_override.get(method_sig) or \
channel._METHOD_MAP.get(method_sig, None)
if amqp_method is None:
raise Exception('Unknown AMQP method (%d, %d)' % method_sig)
if content is None:
return amqp_method(channel, args)
else:
return amqp_method(channel, args, content)
def read_timeout(self, timeout=None):
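# Read one AMQP method, temporarily applying `timeout` to the
# underlying socket and restoring the previous timeout afterwards;
# SSL "timed out" errors are translated into socket.timeout.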
if timeout is None:
return self.method_reader.read_method()
sock = self.transport.sock
prev = sock.gettimeout()
sock.settimeout(timeout)
try:
try:
return self.method_reader.read_method()
except SSLError, exc:
# http://bugs.python.org/issue10272
if "timed out" in str(exc):
raise socket.timeout()
raise
finally:
sock.settimeout(prev)
def _wait_multiple(self, channel_ids, allowed_methods, timeout=None):
for channel_id in channel_ids:
method_queue = self.channels[channel_id].method_queue
for queued_method in method_queue:
method_sig = queued_method[0]
if (allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40)):
method_queue.remove(queued_method)
method_sig, args, content = queued_method
return channel_id, method_sig, args, content
# Nothing queued, need to wait for a method from the peer
read_timeout = self.read_timeout
channels = self.channels
wait = self.wait
while 1:
channel, method_sig, args, content = read_timeout(timeout)
if (channel in channel_ids) \
and ((allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40))):
return channel, method_sig, args, content
# Not the channel and/or method we were looking for. Queue
# this method for later
channels[channel].method_queue.append((method_sig, args, content))
#
# If we just queued up a method for channel 0 (the Connection
# itself) it's probably a close method in reaction to some
# error, so deal with it right away.
#
if channel == 0:
wait()
def channel(self, channel_id=None):
try:
return self.channels[channel_id]
except KeyError:
return Channel(self, channel_id)
class Message(base.Message):
"""A message received by the broker.
.. attribute:: body
The message body.
.. attribute:: delivery_tag
The message delivery tag, uniquely identifying this message.
.. attribute:: channel
The channel instance the message was received on.
"""
def __init__(self, channel, msg, **kwargs):
props = msg.properties
super(Message, self).__init__(channel,
body=msg.body,
delivery_tag=msg.delivery_tag,
content_type=props.get("content_type"),
content_encoding=props.get("content_encoding"),
delivery_info=msg.delivery_info,
properties=msg.properties,
headers=props.get("application_headers"),
**kwargs)
class Channel(_Channel, base.StdChannel):
Message = Message
events = {"basic_return": []}
def __init__(self, *args, **kwargs):
self.no_ack_consumers = set()
super(Channel, self).__init__(*args, **kwargs)
def prepare_message(self, message_data, priority=None,
content_type=None, content_encoding=None, headers=None,
properties=None):
"""Encapsulate data into a AMQP message."""
return amqp.Message(message_data, priority=priority,
content_type=content_type,
content_encoding=content_encoding,
application_headers=headers,
**properties)
def message_to_python(self, raw_message):
"""Convert encoded message body back to a Python value."""
return self.Message(self, raw_message)
def close(self):
try:
super(Channel, self).close()
finally:
self.connection = None
def basic_consume(self, *args, **kwargs):
consumer_tag = super(Channel, self).basic_consume(*args, **kwargs)
if kwargs["no_ack"]:
self.no_ack_consumers.add(consumer_tag)
return consumer_tag
def basic_cancel(self, consumer_tag, **kwargs):
self.no_ack_consumers.discard(consumer_tag)
return super(Channel, self).basic_cancel(consumer_tag, **kwargs)
class Transport(base.Transport):
Connection = Connection
default_port = DEFAULT_PORT
# it's very annoying that amqplib sometimes raises AttributeError
# if the connection is lost, but nothing we can do about that here.
connection_errors = (AMQPConnectionException,
socket.error,
IOError,
OSError,
AttributeError)
channel_errors = (AMQPChannelException, )
def __init__(self, client, **kwargs):
self.client = client
self.default_port = kwargs.get("default_port") or self.default_port
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return connection.drain_events(**kwargs)
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.client
for name, default_value in self.default_connection_params.items():
if not getattr(conninfo, name, None):
setattr(conninfo, name, default_value)
if conninfo.hostname == "localhost":
conninfo.hostname = "127.0.0.1"
conn = self.Connection(host=conninfo.host,
userid=conninfo.userid,
password=conninfo.password,
login_method=conninfo.login_method,
virtual_host=conninfo.virtual_host,
insist=conninfo.insist,
ssl=conninfo.ssl,
connect_timeout=conninfo.connect_timeout)
conn.client = self.client
return conn
def close_connection(self, connection):
"""Close the AMQP broker connection."""
connection.client = None
connection.close()
def verify_connection(self, connection):
return connection.channels is not None
@property
def default_connection_params(self):
return {"userid": "guest", "password": "guest",
"port": self.default_port,
"hostname": "localhost", "login_method": "AMQPLAIN"}
| mzdaniel/oh-mainline | vendor/packages/kombu/kombu/transport/pyamqplib.py | Python | agpl-3.0 | 9,517 | 0.002102 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-14 06:27
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edgar', '0007_auto_20170706_2215'),
]
operations = [
migrations.AddField(
model_name='edgardocumentcontent',
name='urls',
field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True), blank=True, help_text='URL we parsed out of the content', null=True, size=None),
),
]
| MiningTheDisclosures/conflict-minerals-data | conflict_minerals_data/edgar/migrations/0008_edgardocumentcontent_urls.py | Python | mit | 628 | 0.001592 |
import vanilla
import urlparse
import fnmatch
import base64
import bencode
import struct
import socket
import peers
import posixpath
from eventlet.green import zmq
import cPickle as pickle
import eventlet.queue
import fairywren
import itertools
import logging
import array
def sendBencodedWsgiResponse(env,start_response,responseDict):
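#Bencode the response dictionary and send it as a non-cacheable
#200 text/plain WSGI response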
headers = [('Content-Type','text/plain')]
headers.append(('Cache-Control','no-cache'))
start_response('200 OK',headers)
yield bencode.bencode(responseDict)
def getClientAddress(environ):
try:
return environ['HTTP_X_FORWARDED_FOR'].split(',')[-1].strip()
except KeyError:
return environ['REMOTE_ADDR']
def dottedQuadToInt(dq):
#Change the peer IP into an integer
try:
peerIp = socket.inet_aton(dq)
except socket.error:
raise ValueError('Not a valid IP address: %s' % dq)
#Convert from network byte order to integer
try:
peerIp, = struct.unpack('!I',peerIp)
except struct.error:
raise ValueError('Serious wtf, how did this fail')
return peerIp
class Tracker(object):
def __init__(self,auth,peers,pathDepth):
self.auth = auth
self.peers = peers
self.pathDepth = pathDepth
self.announceLog = logging.getLogger('fairywren.announce')
self.trackerLog = logging.getLogger('fairywren.tracker')
self.afterAnnounce = []
self.trackerLog.info('Created')
def addAfterAnnounce(self,callback):
self.afterAnnounce.append(callback)
def getScrape(self,info_hashes):
"""Return a dictionary object that contains a tracker scrape.
@param info_hashes: list of info_hashes to include in the scrape
"""
retval = {}
retval['files'] = {}
for info_hash in info_hashes:
result = {}
result['downloaded'] = 0
result['complete'] = self.peers.getNumberOfSeeds(info_hash)
result['incomplete'] = self.peers.getNumberOfLeeches(info_hash)
retval['files'][info_hash] = result
return retval
def announce(self,env,start_response):
#Extract and normalize the path
#Posix path may not be the best approach here but
#no alternative has been found
pathInfo = posixpath.normpath(env['PATH_INFO'])
#Split the path into components. Drop the first
#since it should always be the empty string
pathComponents = pathInfo.split('/')[1+self.pathDepth:]
#A SHA512 encoded in base64 is 88 characters
#but the last two are always '==' so
#86 is used here
if len(pathComponents) !=2 or len(pathComponents[0]) != 86 or pathComponents[1] != 'announce':
return vanilla.http_error(404,env,start_response)
#Only GET requests are valid
if env['REQUEST_METHOD'] != 'GET':
return vanilla.http_error(405,env,start_response)
#Add the omitted equals signs back in
secretKey = pathComponents[0] + '=='
#base64 decode the secret key
try:
secretKey = base64.urlsafe_b64decode(secretKey)
except TypeError:
return vanilla.http_error(404,env,start_response)
#Extract the IP of the peer
peerIp = getClientAddress(env)
peerIpAsString = peerIp
try:
peerIp = dottedQuadToInt(peerIp)
except ValueError:
return vanilla.http_error(500,env,start_response)
#Parse the query string. Absence indicates error
if 'QUERY_STRING' not in env:
return vanilla.http_error(400,env,start_response)
query = urlparse.parse_qs(env['QUERY_STRING'])
#List of tuples. Each tuple is
#
#Parameter name
#default value (if any)
#type conversion, side-effect free callable
params = []
def validateInfoHash(info_hash):
#Info hashes are a SHA1 hash, and are always 20 bytes
if len(info_hash) != 20:
raise ValueError("Length " + str(len(info_hash)) + ' not acceptable')
return info_hash
params.append(('info_hash',None,validateInfoHash))
def validatePeerId(peer_id):
#Peer IDs are a string chosen by the peer to identify itself
#and are always 20 bytes
if len(peer_id) != 20:
raise ValueError("Improper Length")
return peer_id
params.append(('peer_id',None,validatePeerId))
def validatePort(port):
port = int(port)
#Ipv4 ports should not be higher than this value
if port > 2 ** 16 - 1 or port <= 0:
raise ValueError("Port outside of range")
return port
def validateByteCount(byteCount):
byteCount = int(byteCount)
if byteCount < 0:
raise ValueError('byte count cannot be negative')
return byteCount
params.append(('port',None,validatePort))
params.append(('uploaded',None,validateByteCount))
params.append(('downloaded',None,validateByteCount))
params.append(('left',None,validateByteCount))
#If the client doesn't specify the compact parameter, it is
#safe to assume that compact responses are understood. So a
#default value of 1 is used. Additionally, any non zero
#value provided assumes the client wants a compact response
params.append(('compact',1,int))
def validateEvent(event):
event = event.lower()
if event not in ['started','stopped','completed']:
raise ValueError("Unknown event")
return event
params.append(('event','update',validateEvent))
maxNumWant = 35
def limitNumWant(numwant):
numwant = int(numwant)
if numwant < 0:
raise ValueError('numwant cannot be negative')
numwant = min(numwant,maxNumWant)
return numwant
params.append(('numwant',maxNumWant,limitNumWant))
#Dictionary holding parameters to query
p = dict()
#Use the params to generate the parameters
for param,defaultValue,typeConversion in params:
#If the parameter is in the query, extract the first
#occurence and type convert if requested
if param in query:
p[param] = query[param][0]
if typeConversion:
try:
p[param] = typeConversion(p[param])
except ValueError as e:
return vanilla.http_error(400,env,start_response,msg='bad value for ' + param)
#If the parameter is not in the query, then
#use a default value is present. Otherwise this is an error
else:
if defaultValue == None:
return vanilla.http_error(400,env,start_response,msg='missing ' + param)
p[param] = defaultValue
#Make sure the secret key is valid
userId = self.auth.authenticateSecretKey(secretKey)
if userId == None:
response = {}
response['failure reason'] = 'failed to authenticate secret key'
return sendBencodedWsgiResponse(env,start_response,response)
#Make sure the info hash is allowed
torrentId = self.auth.authorizeInfoHash(p['info_hash'])
if torrentId == None:
response = {}
response['failure reason'] = 'unauthorized info hash'
return sendBencodedWsgiResponse(env,start_response,response)
#Construct the peers entry
peer = peers.Peer(peerIp,p['port'],p['left'])
#This is the basic response format
response = {}
response['interval'] = 5*60
response['complete'] = 0
response['incomplete'] = 0
response['peers'] = []
#This value is set to True if the number of seeds or leeches
#changes in the course of processing this result
change = False
#This value is set to true if the peer is added, false if removed
addPeer = False
#For all 3 cases here just return peers
if p['event'] in ['started','completed','update']:
response['complete'] = self.peers.getNumberOfSeeds(p['info_hash'])
response['incomplete'] = self.peers.getNumberOfLeeches(p['info_hash'])
change = self.peers.updatePeer(p['info_hash'],peer)
if change:
addPeer = True
peersForResponse = self.peers.getPeers(p['info_hash'])
#Return a compact response or a traditional response
#based on what is requested
if p['compact'] != 0:
peerStruct = struct.Struct('!IH')
maxSize = p['numwant'] * peerStruct.size
peersBuffer = array.array('c')
for peer in itertools.islice(peersForResponse,0,p['numwant']):
peersBuffer.fromstring(peerStruct.pack(peer.ip,peer.port))
response['peers'] = peersBuffer.tostring()
else:
for peer in itertools.islice(peersForResponse,0,p['numwant']):
#For non-compact responses, use a bogus peerId. Hardly any client
#uses this type of response anyways. There is no real meaning to the
#peer ID except informal agreements.
response['peers'].append({'peer id':'0'*20,'ip':socket.inet_ntoa(struct.pack('!I',peer.ip)),'port':peer.port})
#For stop event, just remove the peer. Don't return anything
elif p['event'] == 'stopped':
change = self.peers.removePeer(p['info_hash'],peer)
addPeer = False
#Log the successful announce
self.announceLog.info('%s:%d %s,%s,%d',peerIpAsString,p['port'],p['info_hash'].encode('hex').upper(),p['event'],p['left'])
for callback in self.afterAnnounce:
callback(userId,p['info_hash'],peerIpAsString,p['port'],p['peer_id'])
return sendBencodedWsgiResponse(env,start_response,response)
def __call__(self,env,start_response):
return self.announce(env,start_response)
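# Illustrative announce request handled above (all values are hypothetical):
#   GET /<86-char urlsafe-base64 secret key>/announce?info_hash=...&peer_id=...
#       &port=51413&uploaded=0&downloaded=0&left=0&compact=1&event=started
# The '==' padding of the base64 secret key is omitted from the URL and re-added
# by announce() before decoding.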
|
hydrogen18/fairywren
|
tracker.py
|
Python
|
mit
| 8,937 | 0.053933 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021-2022 Daniel Estevez <daniel@destevez.net>
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from gnuradio import gr, digital
import pmt
from ...hier.sync_to_pdu_packed import sync_to_pdu_packed
from ...hdlc_deframer import hdlc_crc_check
# HDLC 0x7e flag
_syncword = '01111110'
class crop_and_check_crc(gr.basic_block):
"""
Helper block to crop using the final 0x7e flag and check CRC-16
"""
def __init__(self):
gr.basic_block.__init__(
self,
name='crop_and_check_crc',
in_sig=[],
out_sig=[])
self.crc_check = hdlc_crc_check()
self.message_port_register_in(pmt.intern('in'))
self.set_msg_handler(pmt.intern('in'), self.handle_msg)
self.message_port_register_out(pmt.intern('out'))
def handle_msg(self, msg_pmt):
msg = pmt.cdr(msg_pmt)
if not pmt.is_u8vector(msg):
print('[ERROR] Received invalid message type. Expected u8vector')
return
packet = pmt.u8vector_elements(msg)
start = 0
while True:
try:
idx = packet[start:].index(0x7e)
except ValueError:
return
start += idx + 1
p = packet[:idx]
if self.crc_check.fcs_ok(p):
p = p[:-2]
self.message_port_pub(
pmt.intern('out'),
pmt.cons(pmt.PMT_NIL, pmt.init_u8vector(len(p), p)))
return
class yusat_deframer(gr.hier_block2):
"""
Hierarchical block to deframe YUSAT ad-hoc AX.25-like protocol
The input is a float stream of soft symbols. The output are PDUs
with YUSAT frames.
Args:
options: Options from argparse
"""
def __init__(self, options=None):
gr.hier_block2.__init__(
self,
'yusat_deframer',
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(0, 0, 0))
self.message_port_register_hier_out('out')
self.slicer = digital.binary_slicer_fb()
# We hope that 256 bytes is long enough to contain the full packet
self.deframer = sync_to_pdu_packed(
packlen=256, sync=_syncword, threshold=0)
self.crop = crop_and_check_crc()
self.connect(self, self.slicer, self.deframer)
self.msg_connect((self.deframer, 'out'), (self.crop, 'in'))
self.msg_connect((self.crop, 'out'), (self, 'out'))
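# Illustrative flowgraph usage (surrounding blocks are assumed, not part of this file):
#   tb = gr.top_block()
#   deframer = yusat_deframer()
#   tb.connect(soft_symbol_source, deframer)              # float stream of soft symbols in
#   tb.msg_connect((deframer, 'out'), (pdu_sink, 'in'))   # YUSAT frames out as PDUs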
|
daniestevez/gr-satellites
|
python/components/deframers/yusat_deframer.py
|
Python
|
gpl-3.0
| 2,570 | 0 |
# Generated by Django 3.0.5 on 2020-04-17 14:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import easy_thumbnails.fields
import userena.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('primary_contact', models.ForeignKey(help_text='Contact for org.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('name', 'primary_contact')},
},
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mugshot', easy_thumbnails.fields.ThumbnailerImageField(blank=True, help_text='A personal image displayed in your profile.', upload_to=userena.models.upload_to_mugshot, verbose_name='mugshot')),
('privacy', models.CharField(choices=[('open', 'Open'), ('registered', 'Registered'), ('closed', 'Closed')], default='registered', help_text='Designates who can view your profile.', max_length=15, verbose_name='privacy')),
('email', models.CharField(blank=True, max_length=250, null=True)),
('score', models.IntegerField(default=1)),
('last_activity', models.DateTimeField(auto_now_add=True)),
('openbadge_id', models.CharField(blank=True, max_length=250, null=True)),
('organization', models.ForeignKey(blank=True, help_text="If '------', no Organization records share the email domain.", null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organization')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'permissions': (('view_profile', 'Can view profile'),),
'abstract': False,
'default_permissions': ('add', 'change', 'delete'),
},
),
migrations.CreateModel(
name='UserAuthorization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('authorized', models.BooleanField(help_text='Check this to approve member access.')),
('permission_granted_on', models.DateTimeField(auto_now_add=True)),
('user_accepted_terms_on', models.DateTimeField(blank=True, null=True)),
('permissions_granted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='permissions_granted_by', to=settings.AUTH_USER_MODEL)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('user_profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='accounts.UserProfile')),
],
),
migrations.CreateModel(
name='EmailDomain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email_domain', models.CharField(max_length=50)),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='accounts.Organization')),
],
),
]
|
ngageoint/geoq
|
geoq/accounts/migrations/0001_initial.py
|
Python
|
mit
| 3,881 | 0.004638 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-13 11:29
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DashboardStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('graph_key', models.CharField(help_text='it needs to be one word unique. ex. auth, mygraph', max_length=90, unique=True, verbose_name='graph key')),
('graph_title', models.CharField(db_index=True, help_text='heading title of graph box', max_length=90, verbose_name='graph title')),
('model_app_name', models.CharField(help_text='ex. auth / dialer_cdr', max_length=90, verbose_name='app name')),
('model_name', models.CharField(help_text='ex. User', max_length=90, verbose_name='model name')),
('date_field_name', models.CharField(help_text='ex. date_joined', max_length=90, verbose_name='date field name')),
('operation_field_name', models.CharField(blank=True, help_text='The field you want to aggregate, ex. amount', max_length=90, null=True, verbose_name='Operate field name')),
('type_operation_field_name', models.CharField(blank=True, choices=[(b'Count', b'Count'), (b'Sum', b'Sum'), (b'Avg', b'Avg'), (b'Max', b'Max'), (b'Min', b'Min'), (b'StdDev', b'StdDev'), (b'Variance', b'Variance')], help_text='choose the type operation what you want to aggregate, ex. Sum', max_length=90, null=True, verbose_name='Choose Type operation')),
('is_visible', models.BooleanField(default=True, verbose_name='visible')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('updated_date', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'dashboard_stats',
'verbose_name': 'dashboard stats',
'verbose_name_plural': 'dashboard stats',
},
),
migrations.CreateModel(
name='DashboardStatsCriteria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criteria_name', models.CharField(db_index=True, help_text='it needs to be one word unique. Ex. status, yesno', max_length=90, verbose_name='criteria name')),
('criteria_fix_mapping', jsonfield.fields.JSONField(blank=True, help_text='a JSON dictionary of key-value pairs that will be used for the criteria', null=True, verbose_name='fixed criteria / value')),
('dynamic_criteria_field_name', models.CharField(blank=True, help_text='ex. for call records - disposition', max_length=90, null=True, verbose_name='dynamic criteria field name')),
('criteria_dynamic_mapping', jsonfield.fields.JSONField(blank=True, help_text='a JSON dictionary of key-value pairs that will be used for the criteria', null=True, verbose_name='dynamic criteria / value')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('updated_date', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'dash_stats_criteria',
'verbose_name': 'dashboard stats criteria',
'verbose_name_plural': 'dashboard stats criteria',
},
),
migrations.AddField(
model_name='dashboardstats',
name='criteria',
field=models.ManyToManyField(blank=True, to='admin_tools_stats.DashboardStatsCriteria'),
),
]
|
barseghyanartur/django-admin-tools-stats
|
admin_tools_stats/migrations/0001_initial.py
|
Python
|
mit
| 3,865 | 0.004398 |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []
ovs_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("Integration bridge to use.")),
cfg.BoolOpt('enable_tunneling', default=False,
help=_("Enable tunneling support.")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("Tunnel bridge to use.")),
cfg.StrOpt('int_peer_patch_port', default='patch-tun',
help=_("Peer patch port in integration bridge for tunnel "
"bridge.")),
cfg.StrOpt('tun_peer_patch_port', default='patch-int',
help=_("Peer patch port in tunnel bridge for integration "
"bridge.")),
cfg.StrOpt('local_ip', default='',
help=_("Local IP address of GRE tunnel endpoints.")),
cfg.ListOpt('bridge_mappings',
default=DEFAULT_BRIDGE_MAPPINGS,
help=_("List of <physical_network>:<bridge>. "
"Deprecated for ofagent.")),
cfg.StrOpt('tenant_network_type', default='local',
help=_("Network type for tenant networks "
"(local, vlan, gre, vxlan, or none).")),
cfg.ListOpt('network_vlan_ranges',
default=DEFAULT_VLAN_RANGES,
help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
"or <physical_network>.")),
cfg.ListOpt('tunnel_id_ranges',
default=DEFAULT_TUNNEL_RANGES,
help=_("List of <tun_min>:<tun_max>.")),
cfg.StrOpt('tunnel_type', default='',
help=_("The type of tunnels to use when utilizing tunnels, "
"either 'gre' or 'vxlan'.")),
cfg.BoolOpt('use_veth_interconnection', default=False,
help=_("Use veths instead of patch ports to interconnect the "
"integration bridge to physical bridges.")),
#added by jiahaojie 00209498
cfg.StrOpt('user_interface_driver',
default='neutron.agent.linux.interface.OVSInterfaceDriver',
help='Driver used to create user devices.'),
cfg.StrOpt('vm_interface',
default='eth0',
help='Virtual machine device used to get user port.'),
cfg.IntOpt('vm_device_mtu', default=1350,
help=_('MTU setting for device.')),
cfg.BoolOpt('enable_vtep',
default=False,
help='Used to enable the VTEP function.'),
]
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
cfg.BoolOpt('minimize_polling',
default=True,
help=_("Minimize polling by monitoring ovsdb for interface "
"changes.")),
cfg.IntOpt('ovsdb_monitor_respawn_interval',
default=constants.DEFAULT_OVSDBMON_RESPAWN,
help=_("The number of seconds to wait before respawning the "
"ovsdb monitor after losing communication with it.")),
cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
help=_("Network types supported by the agent "
"(gre and/or vxlan).")),
cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
help=_("The UDP port to use for VXLAN tunnels.")),
cfg.IntOpt('veth_mtu',
help=_("MTU size of veth interfaces")),
cfg.BoolOpt('l2_population', default=False,
help=_("Use ML2 l2population mechanism driver to learn "
"remote MAC and IPs and improve tunnel scalability.")),
cfg.BoolOpt('arp_responder', default=False,
help=_("Enable local ARP responder if it is supported. "
"Requires OVS 2.1 and ML2 l2population driver. "
"Allows the switch (when supporting an overlay) "
"to respond to an ARP request locally without "
"performing a costly ARP broadcast into the overlay.")),
cfg.BoolOpt('dont_fragment', default=True,
help=_("Set or un-set the don't fragment (DF) bit on "
"outgoing IP packet carrying GRE/VXLAN tunnel.")),
cfg.BoolOpt('enable_distributed_routing', default=False,
help=_("Make the l2 agent run in DVR mode.")),
cfg.ListOpt('l2pop_network_types', default=['flat', 'vlan', 'vxlan'],
help=_("L2pop network types supported by the agent.")),
cfg.BoolOpt('enable_port_multi_device', default=False,
help=_("Port has multiple devices on bridge for XenServer.")),
]
qos_opts = [
cfg.BoolOpt('enable_dscp_vlanpcp_mapping', default=False,
help=_("Enable dscp map vlan pcp")),
]
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
cfg.CONF.register_opts(qos_opts, "qos")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
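# Illustrative configuration snippet consuming the groups registered above
# (file name and values are assumptions, not taken from this module):
#   [OVS]
#   integration_bridge = br-int
#   enable_tunneling = True
#   tunnel_bridge = br-tun
#   local_ip = 192.0.2.10
#   [AGENT]
#   tunnel_types = gre,vxlan
#   polling_interval = 2
#   [qos]
#   enable_dscp_vlanpcp_mapping = False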
|
Hybrid-Cloud/Hybrid-Cloud-Patches-For-Tricircle
|
hybrid-cloud/neutron/plugins/openvswitch/common/config.py
|
Python
|
gpl-2.0
| 5,912 | 0.000507 |
#author CongThuc 12/13/2015
import MySQLdb
from database.DBHelper import DBHelper
from database.DBConnectManager import DBConnectManager
from resourcefactories.AnalysisInitDefaultValue import AnalysisInitDefaultValue
db_helper = DBHelper()
class DataUtils:
def __init__(self):
print "init DataUtils"
def get_ActivitiesFromDB(self, db_connector):
activities = []
if db_connector is not None:
try:
query = "select * from activities"
activities = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return activities
def get_ActivitiesFromXML(self, db_connector):
activities = []
if db_connector is not None:
try:
query = "select * from activities_from_xml"
activities = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return activities
def get_PermissionFromDB(self, db_connector):
permissions = []
if db_connector is not None:
try:
query = "select * from permissions"
permissions = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return permissions
def get_PermissionFromXML(self, db_connector):
permissions = []
if db_connector is not None:
try:
query = "select * from permissions_from_xml"
permissions = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return permissions
def get_PermissionAnalysis(self, db_connector):
permission_detail = []
if db_connector is not None:
try:
query = "select permission_name, srcClass, srcMethod, srcMethodDes, dstClass, dstMethod, dstMethodDes " \
"from permission_analysis P1 INNER JOIN permissions P2 ON P1.permission_id = P2.id;"
permission_detail = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return permission_detail
def get_PackageFilter_Activity(self,db_connector, activities):
packages = []
if activities:
for ac in activities:
if db_connector is not None:
try:
select_stmt = "SELECT * FROM package_analysis WHERE srcClass like %(ac_name)s"
cursor = db_connector.cursor()
cursor.execute(select_stmt, { 'ac_name': "%" + ac[1]+ "%"})
rows = cursor.fetchall()
packages.extend(rows)
except Exception as e:
print e
return packages
def get_SensitiveAPIs(self, db_connector, table):
packages = []
if db_connector is not None:
for sen_APIs in AnalysisInitDefaultValue.Sensitive_APIs:
try:
select_stmt = "SELECT package_id, dstClass, dstMethod, dstMethodDes, srcClass, srcMethod, srcMethodDes FROM " + table + " WHERE dstMethod like %(sen_APIs)s"
cursor = db_connector.cursor()
cursor.execute(select_stmt, {'sen_APIs': "%" + sen_APIs + "%"})
rows = cursor.fetchall()
packages.extend(rows)
except Exception as e:
print e
return packages
def get_SensitiveAPIsFromDB(self, db_connector):
sensitive_apis = []
if db_connector is not None:
try:
query = "select * from sensitive_apis"
sensitive_apis = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return sensitive_apis
|
congthuc/androguard-2.0-custom
|
database/DataUtils.py
|
Python
|
apache-2.0
| 4,034 | 0.008676 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Repo'
db.delete_table('package_repo')
# Deleting field 'Package.repo'
db.delete_column('package_package', 'repo_id')
def backwards(self, orm):
# Adding model 'Repo'
db.create_table('package_repo', (
('slug_regex', self.gf('django.db.models.fields.CharField')(max_length='100', blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('user_url', self.gf('django.db.models.fields.CharField')(max_length='100', blank=True)),
('handler', self.gf('django.db.models.fields.CharField')(default='package.handlers.unsupported', max_length='200')),
('repo_regex', self.gf('django.db.models.fields.CharField')(max_length='100', blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length='50')),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('is_supported', self.gf('django.db.models.fields.BooleanField')(default=False)),
('user_regex', self.gf('django.db.models.fields.CharField')(max_length='100', blank=True)),
('is_other', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('package', ['Repo'])
# Adding field 'Package.repo'
db.add_column('package_package', 'repo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['package.Repo'], null=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'package.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'show_pypi': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'title_plural': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'blank': 'True'})
},
'package.commit': {
'Meta': {'ordering': "['-commit_date']", 'object_name': 'Commit'},
'commit_date': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']"})
},
'package.package': {
'Meta': {'ordering': "['title']", 'object_name': 'Package'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Category']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modifier'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'participants': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pypi_downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pypi_home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'pypi_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'related_packages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_packages_rel_+'", 'blank': 'True', 'to': "orm['package.Package']"}),
'repo_commits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'repo_forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'}),
'repo_watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'usage': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'package.packageexample': {
'Meta': {'ordering': "['title']", 'object_name': 'PackageExample'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'package.version': {
'Meta': {'ordering': "['-created']", 'object_name': 'Version'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': "'100'", 'blank': "''"}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['package']
|
miketheman/opencomparison
|
package/migrations/0015_auto__del_repo__del_field_package_repo.py
|
Python
|
mit
| 10,959 | 0.008304 |
#!/usr/bin/python
import os, subprocess
amsDecode = "/usr/local/bin/amsDecode"
path = "/usr/local/bin"
specDataFile = "specData.csv"
f = open("processFile.log", "w")
if os.path.exists(specDataFile):
os.remove(specDataFile)
for fileName in os.listdir('.'):
if fileName.endswith('.bin'):
#print 'file :' + fileName
cmnd = [amsDecode,
fileName,
"-t -95",
"-b",
"68",
"468" ]
subprocess.call(cmnd,stdout=f)
f.close()
|
jennyb/amsDecode
|
processBinFiles.py
|
Python
|
gpl-2.0
| 451 | 0.046563 |
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
def addspace(text):
if text:
text += " "
return text
class PliExtraInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
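# Each caid_data entry below is: (caid range start, caid range end, full CA system
# name, short label used in the crypto bar, show the label even when unavailable).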
self.caid_data = (
( "0x100", "0x1ff", "Seca", "S", True ),
( "0x500", "0x5ff", "Via", "V", True ),
( "0x600", "0x6ff", "Irdeto", "I", True ),
( "0x900", "0x9ff", "NDS", "Nd", True ),
( "0xb00", "0xbff", "Conax", "Co", True ),
( "0xd00", "0xdff", "CryptoW", "Cw", True ),
( "0xe00", "0xeff", "PowerVU", "P", False ),
("0x1000", "0x10FF", "Tandberg", "TB", False ),
("0x1700", "0x17ff", "Beta", "B", True ),
("0x1800", "0x18ff", "Nagra", "N", True ),
("0x2600", "0x2600", "Biss", "Bi", False ),
("0x4ae0", "0x4ae1", "Dre", "D", False ),
("0x4aee", "0x4aee", "BulCrypt", "B1", False ),
("0x5581", "0x5581", "BulCrypt", "B2", False )
)
self.ca_table = (
("CryptoCaidSecaAvailable", "S", False),
("CryptoCaidViaAvailable", "V", False),
("CryptoCaidIrdetoAvailable", "I", False),
("CryptoCaidNDSAvailable", "Nd", False),
("CryptoCaidConaxAvailable", "Co", False),
("CryptoCaidCryptoWAvailable", "Cw", False),
("CryptoCaidPowerVUAvailable", "P", False),
("CryptoCaidBetaAvailable", "B", False),
("CryptoCaidNagraAvailable", "N", False),
("CryptoCaidBissAvailable", "Bi", False),
("CryptoCaidDreAvailable", "D", False),
("CryptoCaidBulCrypt1Available","B1", False),
("CryptoCaidBulCrypt2Available","B2", False),
("CryptoCaidTandbergAvailable", "TB", False),
("CryptoCaidSecaSelected", "S", True),
("CryptoCaidViaSelected", "V", True),
("CryptoCaidIrdetoSelected", "I", True),
("CryptoCaidNDSSelected", "Nd", True),
("CryptoCaidConaxSelected", "Co", True),
("CryptoCaidCryptoWSelected", "Cw", True),
("CryptoCaidPowerVUSelected", "P", True),
("CryptoCaidBetaSelected", "B", True),
("CryptoCaidNagraSelected", "N", True),
("CryptoCaidBissSelected", "Bi", True),
("CryptoCaidDreSelected", "D", True),
("CryptoCaidBulCrypt1Selected", "B1", True),
("CryptoCaidBulCrypt2Selected", "B2", True),
("CryptoCaidTandbergSelected", "TB", True),
)
self.ecmdata = GetEcmInfo()
self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
if info.getInfo(iServiceInformation.sIsCrypted) == 1:
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
def createCryptoBar(self, info):
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
color="\c0000??00"
else:
color = "\c007?7?7?"
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
color="\c00????00"
except:
pass
if color != "\c007?7?7?" or caid_entry[4]:
if res: res += " "
res += color + caid_entry[3]
res += "\c00??????"
return res
def createCryptoSpecial(self, info):
caid_name = "FTA"
try:
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x:%04x:%04x:%04x" % (int(self.current_caid,16), int(self.current_provid,16), info.getInfo(iServiceInformation.sSID), int(self.current_ecmpid,16))
except:
pass
return ""
def createResolution(self, info):
xres = info.getInfo(iServiceInformation.sVideoWidth)
if xres == -1:
return ""
yres = info.getInfo(iServiceInformation.sVideoHeight)
mode = ("i", "p", " ")[info.getInfo(iServiceInformation.sProgressive)]
fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
return str(xres) + "x" + str(yres) + mode + fps
def createVideoCodec(self, info):
return ("MPEG2", "AVC", "MPEG1", "MPEG4-VC", "VC1", "VC1-SM", "HEVC", "")[info.getInfo(iServiceInformation.sVideoType)]
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
tsid = info.getInfo(iServiceInformation.sTSID)
onid = info.getInfo(iServiceInformation.sONID)
if vpid < 0 : vpid = 0
if apid < 0 : apid = 0
if pcrpid < 0 : pcrpid = 0
if sidpid < 0 : sidpid = 0
if tsid < 0 : tsid = 0
if onid < 0 : onid = 0
return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createTransponderInfo(self, fedata, feraw):
if not feraw:
return ""
elif "DVB-T" in feraw.get("tuner_type"):
tmp = addspace(self.createChannelNumber(fedata, feraw)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
else:
tmp = addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \
+ addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw))
def createFrequency(self, feraw):
frequency = feraw.get("frequency")
if frequency:
if "DVB-T" in feraw.get("tuner_type"):
return str(int(frequency / 1000000. + 0.5))
else:
return str(int(frequency / 1000 + 0.5))
return ""
def createChannelNumber(self, fedata, feraw):
return "DVB-T" in feraw.get("tuner_type") and fedata.get("channel") or ""
def createSymbolRate(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
bandwidth = fedata.get("bandwidth")
if bandwidth:
return bandwidth
else:
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate / 1000)
return ""
def createPolarization(self, fedata):
return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
code_rate_lp = fedata.get("code_rate_lp")
code_rate_hp = fedata.get("code_rate_hp")
if code_rate_lp and code_rate_hp:
return code_rate_lp + "-" + code_rate_hp
else:
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
if fedata.get("tuner_type") == _("Terrestrial"):
constellation = fedata.get("constellation")
if constellation:
return constellation
else:
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
return fedata.get("system") or ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
return ""
def createOrbPosOrTunerSystem(self, fedata,feraw):
orbpos = self.createOrbPos(feraw)
if orbpos != "":
return orbpos
return self.createTunerSystem(fedata)
def createProviderName(self, info):
return info.getInfoString(iServiceInformation.sProvider)
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
if self.type == "CryptoInfo":
self.getCryptoInfo(info)
if config.usage.show_cryptoinfo.value:
return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
else:
return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)
if self.type == "CryptoBar":
self.getCryptoInfo(info)
return self.createCryptoBar(info)
if self.type == "CryptoSpecial":
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
if self.type == "ResolutionString":
return self.createResolution(info)
if self.type == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
if not feraw:
feraw = info.getInfoObject(iServiceInformation.sTransponderData)
fedata = ConvertToHumanReadable(feraw)
else:
fedata = self.fedata
if self.type == "All":
self.getCryptoInfo(info)
if config.usage.show_cryptoinfo.value:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n" \
+ addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
else:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
+ addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "PIDInfo":
return self.createPIDInfo(info)
if not feraw:
return ""
if self.type == "ServiceInfo":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \
+ addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) \
+ addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "TransponderInfo":
return self.createTransponderInfo(fedata, feraw)
if self.type == "TransponderFrequency":
return self.createFrequency(feraw)
if self.type == "TransponderSymbolRate":
return self.createSymbolRate(fedata, feraw)
if self.type == "TransponderPolarization":
return self.createPolarization(fedata)
if self.type == "TransponderFEC":
return self.createFEC(fedata, feraw)
if self.type == "TransponderModulation":
return self.createModulation(fedata)
if self.type == "OrbitalPosition":
return self.createOrbPos(feraw)
if self.type == "TunerType":
return self.createTunerType(feraw)
if self.type == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata,feraw)
if self.type == "TerrestrialChannelNumber":
return self.createChannelNumber(fedata, feraw)
return _("invalid type")
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
request_caid = None
for x in self.ca_table:
if x[0] == self.type:
request_caid = x[1]
request_selected = x[2]
break
if request_caid is None:
return False
if info.getInfo(iServiceInformation.sIsCrypted) != 1:
return False
data = self.ecmdata.getEcmData()
if data is None:
return False
current_caid = data[1]
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if caid_entry[3] == request_caid:
if request_selected:
if int(caid_entry[0], 16) <= int(current_caid, 16) <= int(caid_entry[1], 16):
return True
else: # request available
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
return True
except:
pass
return False
boolean = property(getBool)
def changed(self, what):
if what[0] == self.CHANGED_SPECIFIC:
self.updateFEdata = False
if what[1] == iPlayableService.evNewProgramInfo:
self.updateFEdata = True
if what[1] == iPlayableService.evEnd:
self.feraw = self.fedata = None
Converter.changed(self, what)
elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
self.updateFEdata = False
Converter.changed(self, what)
|
Taapat/enigma2-openpli-vuplus
|
lib/python/Components/Converter/PliExtraInfo.py
|
Python
|
gpl-2.0
| 12,757 | 0.031199 |
# -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from unittest import TestCase
from unittest.mock import patch, MagicMock
import sqlalchemy as sa
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
from crate.client.cursor import Cursor
fake_cursor = MagicMock(name='fake_cursor')
FakeCursor = MagicMock(name='FakeCursor', spec=Cursor)
FakeCursor.return_value = fake_cursor
class SqlAlchemyBulkTest(TestCase):
def setUp(self):
self.engine = sa.create_engine('crate://')
Base = declarative_base(bind=self.engine)
class Character(Base):
__tablename__ = 'characters'
name = sa.Column(sa.String, primary_key=True)
age = sa.Column(sa.Integer)
self.character = Character
self.session = Session()
@patch('crate.client.connection.Cursor', FakeCursor)
def test_bulk_save(self):
chars = [
self.character(name='Arthur', age=35),
self.character(name='Banshee', age=26),
self.character(name='Callisto', age=37),
]
fake_cursor.description = ()
fake_cursor.rowcount = len(chars)
fake_cursor.executemany.return_value = [
{'rowcount': 1},
{'rowcount': 1},
{'rowcount': 1},
]
self.session.bulk_save_objects(chars)
(stmt, bulk_args), _kwargs = fake_cursor.executemany.call_args
expected_stmt = "INSERT INTO characters (name, age) VALUES (?, ?)"
self.assertEqual(expected_stmt, stmt)
expected_bulk_args = (
('Arthur', 35),
('Banshee', 26),
('Callisto', 37)
)
self.assertEqual(expected_bulk_args, bulk_args)
|
crate/crate-python
|
src/crate/client/sqlalchemy/tests/bulk_test.py
|
Python
|
apache-2.0
| 2,714 | 0 |
def is_lazy_user(user):
""" Return True if the passed user is a lazy user. """
# Anonymous users are not lazy.
if user.is_anonymous:
return False
# Check the user backend. If the lazy signup backend
# authenticated them, then the user is lazy.
backend = getattr(user, 'backend', None)
if backend == 'lazysignup.backends.LazySignupBackend':
return True
# Otherwise, we have to fall back to checking the database.
from lazysignup.models import LazyUser
return bool(LazyUser.objects.filter(user=user).count() > 0)
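# Illustrative call site (assumed view code, not part of this module):
#   if is_lazy_user(request.user):
#       ...  # e.g. prompt the visitor to convert the temporary account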
|
danfairs/django-lazysignup
|
lazysignup/utils.py
|
Python
|
bsd-3-clause
| 570 | 0 |
from djpcms import test
from djpcms.core.exceptions import AlreadyRegistered
import djpcms
class TestSites(test.TestCase):
def testMake(self):
self.assertRaises(AlreadyRegistered,djpcms.MakeSite,__file__)
site = djpcms.MakeSite(__file__, route = '/extra/')
self.assertEqual(site.route,'/extra/')
def testClenUrl(self):
p = self.makepage(bit = 'test')
self.assertEqual(p.url,'/test/')
res = self.get('/test', status = 302, response = True)
self.assertEqual(res['location'],'http://testserver/test/')
res = self.get('/test////', status = 302, response = True)
self.assertEqual(res['location'],'http://testserver/test/')
|
strogo/djpcms
|
tests/regression/routes/tests.py
|
Python
|
bsd-3-clause
| 741 | 0.026991 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.deprecated.browser import Page
from weboob.capabilities import NotAvailable
from weboob.capabilities.pricecomparison import Product, Shop, Price
class IndexPage(Page):
def get_token(self):
input = self.parser.select(self.document.getroot(), 'div#localisation input#recherche_recherchertype__token', 1)
return input.attrib['value']
def iter_products(self):
for li in self.parser.select(self.document.getroot(), 'div#choix_carbu ul li'):
input = li.find('input')
label = li.find('label')
product = Product(input.attrib['value'])
product.name = unicode(label.text.strip())
if '&' in product.name:
# "E10 & SP95" produces a non-supported table.
continue
yield product
class ComparisonResultsPage(Page):
def get_product_name(self):
th = self.document.getroot().cssselect('table#tab_resultat tr th')
if th and len(th) == 9:
return u'%s' % th[5].find('a').text
def iter_results(self, product=None):
price = None
product.name = self.get_product_name()
for tr in self.document.getroot().cssselect('table#tab_resultat tr'):
tds = self.parser.select(tr, 'td')
if tds and len(tds) == 9 and product is not None:
price = Price('%s.%s' % (product.id, tr.attrib['id']))
price.product = product
price.cost = Decimal(tds[5].text.replace(',', '.'))
price.currency = u'€'
shop = Shop(price.id)
shop.name = unicode(tds[3].text.strip())
shop.location = unicode(tds[2].text.strip())
price.shop = shop
price.set_empty_fields(NotAvailable)
yield price
class ShopInfoPage(Page):
def get_info(self):
return self.parser.tostring(self.parser.select(self.document.getroot(), 'div.infos', 1))
|
laurent-george/weboob
|
modules/prixcarburants/pages.py
|
Python
|
agpl-3.0
| 2,744 | 0.001094 |
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
This module supports monitoring TObject deletions.
.. warning::
This is not recommended for production
"""
from __future__ import absolute_import
from weakref import ref
import ctypes
from ctypes import CFUNCTYPE, py_object, addressof, c_int
from .. import compiled as C
from .. import QROOT, log
from ..utils.cinterface import callback, objectproxy_realaddress
__all__ = [
'monitor_deletion',
'monitor_object_deletion',
]
def monitor_deletion():
"""
Function for checking for correct deletion of weakref-able objects.
Example usage::
monitor, is_alive = monitor_deletion()
obj = set()
monitor(obj, "obj")
assert is_alive("obj") # True because there is a ref to `obj` is_alive
del obj
assert not is_alive("obj") # True because there `obj` is deleted
"""
monitors = {}
def set_deleted(x):
def _(weakref):
del monitors[x]
return _
def monitor(item, name):
monitors[name] = ref(item, set_deleted(name))
def is_alive(name):
return monitors.get(name, None) is not None
return monitor, is_alive
cleanuplog = log["memory.cleanup"]
cleanuplog.show_stack()
# Add python to the include path
C.add_python_includepath()
C.register_code("""
#ifndef __CINT__
#include <Python.h>
#endif
#include <TObject.h>
#include <TPython.h>
class RootpyObjectCleanup : public TObject {
public:
typedef void (*CleanupCallback)(PyObject*);
CleanupCallback _callback;
RootpyObjectCleanup(CleanupCallback callback) : _callback(callback) {}
virtual void RecursiveRemove(TObject* object) {
// When arriving here, object->ClassName() will _always_ be TObject
// since we're called by ~TObject, and virtual method calls don't
// work as expected from there.
PyObject* o = TPython::ObjectProxy_FromVoidPtr(object, "TObject");
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
PyObject *ptype, *pvalue, *ptraceback;
PyErr_Fetch(&ptype, &pvalue, &ptraceback);
_callback(o);
PyErr_Restore(ptype, pvalue, ptraceback);
PyGILState_Release(gstate);
}
ClassDef(RootpyObjectCleanup, 0);
};
ClassImp(RootpyObjectCleanup);
""", ["RootpyObjectCleanup"])
MONITORED = {}
@CFUNCTYPE(None, py_object)
def on_cleanup(tobject):
# Note, when we arrive here, tobject is in its ~TObject, and hence the
# subclass part of the object doesn't exist, in some sense. Hence why we
# store information about the object on the MONITORED dict.
addr = objectproxy_realaddress(tobject)
if addr in MONITORED:
args = MONITORED[addr]
fn, args = args[0], args[1:]
fn(tobject, *args)
del MONITORED[addr]
initialized = False
def init():
global initialized
if initialized: return
initialized = True
cleanup = C.RootpyObjectCleanup(callback(on_cleanup))
cleanups = QROOT.gROOT.GetListOfCleanups()
cleanups.Add(cleanup)
import atexit
@atexit.register
def exit():
# Needed to ensure we don't get called after ROOT has gone away
cleanups.RecursiveRemove(cleanup)
def monitor_object_deletion(o, fn=lambda *args: None):
init()
# Required so that GetListOfCleanups().RecursiveRemove() is called.
o.SetBit(o.kMustCleanup)
args = fn, type(o).__name__, o.GetName(), o.GetTitle(), repr(o)
MONITORED[objectproxy_realaddress(o)] = args
|
qbuat/rootpy
|
rootpy/memory/deletion.py
|
Python
|
gpl-3.0
| 3,682 | 0.000543 |
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request, abort, make_response
from futu_server_api import *
from db import save_update_token
from db import delete_tokens
from db import list_cards
import logging
import logging.config
import json
app = Flask(__name__)
logging.config.fileConfig('./conf/log.ini')
no_db_logger = logging.getLogger()
def check_parameters(pjson):
if not pjson or not 'app_account' in pjson or not 'card' in pjson or not 'appid' in pjson:
no_db_logger.info('No Parameter')
abort(400)
cli = {'account':pjson['app_account'], 'card':pjson['card'], 'appid':pjson['appid']}
return client(cli['account'], cli['card'], cli['appid'])
def log_handler(myjson, mytitle):
if 'ClientWarning' in myjson:
return '%s' % myjson['ClientWarning']
elif myjson['result_code'] == 0:
return 'SUCCESS'
else:
return 'FAIL ,REASON OF FAILURE:%s ,PARAMETER:%s' % (myjson['error_msg'], request.json)
@app.route('/')
def hello_world():
no_db_logger.info('server start#####')
return 'hello 22222222 world!'
@app.route('/api/v1/tradetoken', methods=['POST'])
def trade_token():
trade_pswd = request.json['trade_pswd']
account = request.json['app_account']
card = request.json['card']
appid = request.json['appid']
cc = check_parameters(request.json)
message = cc.get_trade_token(trade_pswd)
if message['result_code'] != 0 and message['error_msg'] == 'didn\'t get accesstoken':
no_db_logger.info('didn\'t get accesstoken')
return json.dumps({'result_code':2,'error_msg':'didn\'t get accesstoken'}, ensure_ascii=False)
if message['result_code'] == 0:
token = message['data']['trade_token']
save_update_token(account, appid, None, token, card, True)
return jsonify(**message)
@app.route('/api/v1/account', methods=['POST'])
def get_account_detail():
cc = check_parameters(request.json)
message = cc.get_account_detail()
logtext = log_handler(message, '获取账户信息')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/account/cash', methods=['POST'])
def get_account_cash():
cc = check_parameters(request.json)
message = cc.get_account_cash()
logtext = log_handler(message, '获取账户现金')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/account/portfolio', methods=['POST'])
def get_account_portfolio():
cc = check_parameters(request.json)
message = cc.get_account_portfolio()
logtext = log_handler(message, '获取账户持仓')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/get_list_orders', methods=['POST'])
def get_list_orders():
date_begin = request.json['date_begin']
date_end = request.json['date_end']
cc = check_parameters(request.json)
message = cc.get_list_orders()
logtext = log_handler(message, '获取订单列表')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/get_list_trades', methods=['POST'])
def get_list_trades():
cc = check_parameters(request.json)
message = cc.get_list_trades()
logtext = log_handler(message, '获取交易列表')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/place_order', methods=['POST'])
def place_order():
code = request.json['code']
quantity = request.json['quantity']
price = request.json['price']
side = request.json['side']
ltype = request.json['type']
cc = check_parameters(request.json)
message = cc.place_order(code, quantity, price, side, ltype)
logtext = log_handler(message, '下单')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/change_order', methods=['POST'])
def change_order():
order_id = request.json['order_id']
quantity = request.json['quantity']
price = request.json['price']
cc = check_parameters(request.json)
message = cc.change_order(order_id, quantity, price)
logtext = log_handler(message, '改单')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/api/v1/cancle_order', methods=['POST'])
def cancle_order():
order_id = request.json['order_id']
cc = check_parameters(request.json)
message = cc.cancel_order(order_id)
logtext = log_handler(message, '撤单')
no_db_logger.info(logtext)
return json.dumps(message, ensure_ascii=False)
@app.route('/ap1/v1/save_token', methods=['POST'])
def save_token():
account = request.json['app_account']
appid = request.json['appid']
market = request.json['market']
token = request.json['token']
card = request.json['card']
card_desc = request.json['text']
DB_result = save_update_token(account, appid, market, token, card, False, card_desc)
if DB_result == 'success':
no_db_logger.info('token save success')
return json.dumps({'result_code':0,'error_msg':''}, ensure_ascii=False)
else:
no_db_logger.info('token save fail')
return json.dumps({'result_code':1,'error_msg':'token保存失败'}, ensure_ascii=False)
@app.route('/api/v1/delete_token', methods=['POST'])
def delete_token():
appid = request.json['appid']
account = request.json['app_account']
DB_result = delete_tokens(account, appid)
if DB_result == 'success':
no_db_logger.info('token delete success')
return json.dumps({'result_code':0,'error_msg':''}, ensure_ascii=False)
else:
no_db_logger.info('token delete fail')
return json.dumps({'result_code':1,'error_msg':'token删除失败'}, ensure_ascii=False)
@app.route('/api/v1/list_card', methods=['POST'])
def list_card():
appid = request.json['appid']
account = request.json['app_account']
cards = list_cards(account, appid)
message = dict(cards=cards)
if isinstance(cards, list):
no_db_logger.info('list cards success')
return json.dumps({'result_code':0,'error_msg':'','data':message}, ensure_ascii=False)
else:
no_db_logger.info('list cards fail')
return json.dumps({'result_code':1,'error_msg':'查询账户卡号失败'}, ensure_ascii=False)
if __name__ == '__main__':
app.run()
|
zznn/futu-openAPI
|
app/mainapp.py
|
Python
|
apache-2.0
| 6,014 | 0.02758 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Modules
import sys
import pygame
from pygame.locals import *
# Constants
venx = 640
veny = 448
# Classes
class Pieza(pygame.sprite.Sprite): # 64x64 px tile size
def __init__(self, tipo):
pygame.sprite.Sprite.__init__(self)
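        # 'tipo' selects the sprite image: 0 board square, 1 laser, 2 target,
        # 3 mirrored target, 4 mirror, 5 transverse mirror, 6 tunnel,
        # 7 block, 8 large block, 9 portal.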
if tipo == 0:
self.image = load_image("tablero.png", True)
elif tipo == 1:
self.image = load_image("laser.png", True)
elif tipo == 2:
self.image = load_image("diana.png", True)
elif tipo == 3:
self.image = load_image("diana_espejo.png", True)
elif tipo == 4:
self.image = load_image("espejo.png", True)
elif tipo == 5:
self.image = load_image("espejotraves.png", True)
elif tipo == 6:
self.image = load_image("tunel.png", True)
elif tipo == 7:
self.image = load_image("bloqueo.png", True)
elif tipo == 8:
self.image = load_image("bloqueo_g.png", True)
elif tipo == 9:
self.image = load_image("portal.png", True)
        else:
            tipo = 0
            self.image = load_image("tablero.png", True)
        # Every sprite needs a rect so it can be positioned and blitted.
        self.rect = self.image.get_rect()
# Funciones
def load_image(filename, transparent=False):
try:
image = pygame.image.load(filename)
    except pygame.error:
        raise SystemExit("Could not load image: " + filename)
image = image.convert()
if transparent:
color = image.get_at((0, 0))
image.set_colorkey(color, RLEACCEL)
return image
#------------------------------------------
def main():
screen = pygame.display.set_mode((venx, veny))
pygame.display.set_caption("Laser Game")
background_image = load_image('fondo.png')
    pieza = Pieza(0)  # a board tile used as the test sprite
while True:
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
screen.blit(background_image, (0, 0))
        screen.blit(pieza.image, pieza.rect)
pygame.display.flip()
return 0
if __name__ == '__main__':
pygame.init()
main()
|
LordSprit/Laser
|
main.py
|
Python
|
gpl-2.0
| 2,036 | 0.000983 |
from Chip import OpCodeDefinitions
from Tests.OpCodeTests.OpCodeTestBase import OpCodeTestBase
class TestNopOpCode(OpCodeTestBase):
def test_nop_implied_command_calls_nop_method(self):
self.assert_opcode_execution(OpCodeDefinitions.nop_implied_command, self.target.get_nop_command_executed)
|
jeroanan/Nes2
|
Tests/OpCodeTests/TestNopOpCode.py
|
Python
|
bsd-3-clause
| 306 | 0.003268 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
import boto.dynamodb2
class CertVerificationTest(unittest.TestCase):
dynamodb2 = True
ssl = True
def test_certs(self):
for region in boto.dynamodb2.regions():
c = region.connect()
c.list_tables()
|
lochiiconnectivity/boto
|
tests/integration/dynamodb2/test_cert_verification.py
|
Python
|
mit
| 1,511 | 0 |
import operator
import os
import shutil
import tempfile
import unittest
import warnings
import tables
from mock import Mock
from numpy import array
from numpy.testing import assert_array_equal
from sapphire.analysis import process_events
TEST_DATA_FILE = 'test_data/process_events.h5'
DATA_GROUP = '/s501'
class ProcessEventsTests(unittest.TestCase):
def setUp(self):
warnings.filterwarnings('ignore')
self.data_path = self.create_tempfile_from_testdata()
self.data = tables.open_file(self.data_path, 'a')
self.proc = process_events.ProcessEvents(self.data, DATA_GROUP, progress=False)
def tearDown(self):
warnings.resetwarnings()
self.data.close()
os.remove(self.data_path)
def test_get_traces_for_event(self):
event = self.proc.source[0]
self.assertEqual(self.proc.get_traces_for_event(event)[12][3], 1334)
def test__find_unique_row_ids(self):
ext_timestamps = self.proc.source.col('ext_timestamp')
enumerated_timestamps = list(enumerate(ext_timestamps))
enumerated_timestamps.sort(key=operator.itemgetter(1))
ids_in = [id for id, _ in enumerated_timestamps]
ids = self.proc._find_unique_row_ids(enumerated_timestamps)
self.assertEqual(ids, ids_in)
enumerated_timestamps = [(0, 1), (1, 1), (3, 2), (2, 2)]
ids = self.proc._find_unique_row_ids(enumerated_timestamps)
self.assertEqual(ids, [0, 3])
        # Must be sorted by timestamp or the result will be different.
enumerated_timestamps = [(0, 1), (3, 2), (1, 1), (2, 2)]
ids = self.proc._find_unique_row_ids(enumerated_timestamps)
self.assertNotEqual(ids, [0, 3])
def test__reconstruct_time_from_traces(self):
event = self.proc.source[10]
times = self.proc._reconstruct_time_from_traces(event)
self.assertEqual(times[0], 162.5)
self.assertEqual(times[2], -999)
event['pulseheights'][0] = -1
times = self.proc._reconstruct_time_from_traces(event)
self.assertEqual(times[0], -1)
def test__reconstruct_time_from_trace(self):
trace = [220, 222, 224, 222, 220]
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 200), 0)
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 203), 2)
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 205), -999)
def test_first_above_threshold(self):
trace = [0, 2, 4, 2, 0]
self.assertEqual(self.proc.first_above_threshold(trace, 1), 1)
self.assertEqual(self.proc.first_above_threshold(trace, 3), 2)
self.assertEqual(self.proc.first_above_threshold(trace, 4), 2)
self.assertEqual(self.proc.first_above_threshold(trace, 5), -999)
# @patch.object(process_events.FindMostProbableValueInSpectrum, 'find_mpv')
def test__process_pulseintegrals(self):
self.proc.limit = 1
# mock_find_mpv.return_value = (-999, False)
        # Because of the small data sample the fit fails for detector 1
self.assertEqual(self.proc._process_pulseintegrals()[0][1], -999.)
self.assertAlmostEqual(self.proc._process_pulseintegrals()[0][3], 3.98951741969)
self.proc.limit = None
def create_tempfile_from_testdata(self):
tmp_path = self.create_tempfile_path()
data_path = self.get_testdata_path()
shutil.copyfile(data_path, tmp_path)
return tmp_path
def create_tempfile_path(self):
fd, path = tempfile.mkstemp('.h5')
os.close(fd)
return path
def get_testdata_path(self):
dir_path = os.path.dirname(__file__)
return os.path.join(dir_path, TEST_DATA_FILE)
class ProcessIndexedEventsTests(ProcessEventsTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.data_path = self.create_tempfile_from_testdata()
self.data = tables.open_file(self.data_path, 'a')
self.proc = process_events.ProcessIndexedEvents(self.data, DATA_GROUP, [0, 10], progress=False)
def test_process_traces(self):
timings = self.proc.process_traces()
self.assertEqual(timings[1][0], 162.5)
self.assertEqual(timings[1][1], -999)
def test_get_traces_for_indexed_event_index(self):
self.assertEqual(self.proc.get_traces_for_indexed_event_index(0)[12][3], 1334)
class ProcessEventsWithLINTTests(ProcessEventsTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.data_path = self.create_tempfile_from_testdata()
self.data = tables.open_file(self.data_path, 'a')
self.proc = process_events.ProcessEventsWithLINT(self.data, DATA_GROUP, progress=False)
def test__reconstruct_time_from_traces(self):
event = self.proc.source[10]
times = self.proc._reconstruct_time_from_traces(event)
self.assertAlmostEqual(times[0], 160.685483871)
self.assertEqual(times[2], -999)
def test__reconstruct_time_from_trace(self):
trace = [200, 220]
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 180), 0)
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 190), 0.5)
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 200), 1)
self.assertEqual(self.proc._reconstruct_time_from_trace(trace, 210), -999)
class ProcessEventsWithTriggerOffsetTests(ProcessEventsTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.data_path = self.create_tempfile_from_testdata()
self.data = tables.open_file(self.data_path, 'a')
self.proc = process_events.ProcessEventsWithTriggerOffset(self.data, DATA_GROUP, progress=False)
def test__reconstruct_time_from_traces(self):
event = self.proc.source[10]
times = self.proc._reconstruct_time_from_traces(event)
self.assertEqual(times[0], 162.5)
self.assertEqual(times[2], -999)
self.assertEqual(times[4], 165)
def test__reconstruct_time_from_traces_with_external(self):
self.proc.trigger = [0, 0, 0, 1]
event = self.proc.source[10]
times = self.proc._reconstruct_time_from_traces(event)
self.assertEqual(times[0], 162.5)
self.assertEqual(times[2], -999)
self.assertEqual(times[4], -999)
def test__first_above_thresholds(self):
# 2 detectors
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 200, 900]), [300, 400], 900), [2, 2, -999])
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 200, 400]), [300, 400], 400), [2, 2, -999])
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 350, 450, 550]), [300, 400], 550), [1, 2, -999])
# 4 detectors
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 200, 900]), [300, 400, 500], 900), [2, 2, 2])
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 200, 400]), [300, 400, 500], 400), [2, 2, -999])
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 350, 450, 550]), [300, 400, 500], 550), [1, 2, 3])
# No signal
self.assertEqual(self.proc._first_above_thresholds((x for x in [200, 250, 200, 2000]), [300, 400, 500], 250), [-999, -999, -999])
def test__first_value_above_threshold(self):
trace = [200, 200, 300, 200]
self.assertEqual(self.proc._first_value_above_threshold(trace, 200), (0, 200))
self.assertEqual(self.proc._first_value_above_threshold(trace, 250), (2, 300))
self.assertEqual(self.proc._first_value_above_threshold(trace, 250, 4), (6, 300))
self.assertEqual(self.proc._first_value_above_threshold(trace, 500), (-999, 0))
def test__reconstruct_trigger(self):
self.proc.trigger = (0, 0, False, 0)
low_idx = [-999, -999, -999, -999]
high_idx = [-999, -999, -999, -999]
result = -999
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
self.proc.trigger = (0, 0, True, 0)
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
# Standard two detector trigger
self.proc.trigger = (2, 0, False, 0)
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
high_idx = [-999, -999, 10, -999]
low_idx = [-999, -999, 3, -999]
result = -999
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
low_idx = [-999, 0, 3, 2]
result = 2
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
low_idx = [0, 2, 4, -999]
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
low_idx = [0, 2, 3, -999]
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
low_idx = [0, 2, -999, -999]
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
low_idx = [-999, -999, 3, 6]
result = 6
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
# Standard four detector trigger
self.proc.trigger = (3, 2, True, 0)
low_idx = [-999, -999, -999, -999]
high_idx = [-999, -999, -999, -999]
result = -999
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
# Trigger on low
low_idx = [7, 4, 1, -999]
high_idx = [-999, -999, -999, -999]
result = 7
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
high_idx = [8, 5, -999, -999]
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
high_idx = [8, 9, 2, -999]
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
# Trigger on high
high_idx = [-999, 5, 2, -999]
result = 5
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
# Other triggers
self.proc.trigger = (1, 2, False, 0)
low_idx = [1, 3, 5, 7]
high_idx = [2, 4, -999, -999]
result = 5
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
self.proc.trigger = (3, 0, False, 0)
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
self.proc.trigger = (0, 2, False, 0)
result = 4
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
self.proc.trigger = (0, 4, False, 0)
result = -999
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
self.proc.trigger = (1, 3, False, 0)
self.assertEqual(self.proc._reconstruct_trigger(low_idx, high_idx), result)
class ProcessEventsFromSourceTests(ProcessEventsTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.source_path = self.create_tempfile_from_testdata()
self.source_data = tables.open_file(self.source_path, 'r')
self.dest_path = self.create_tempfile_path()
self.dest_data = tables.open_file(self.dest_path, 'a')
self.proc = process_events.ProcessEventsFromSource(
self.source_data, self.dest_data, DATA_GROUP, DATA_GROUP)
def tearDown(self):
warnings.resetwarnings()
self.source_data.close()
os.remove(self.source_path)
self.dest_data.close()
os.remove(self.dest_path)
def test_process_and_store_results(self):
self.proc.process_and_store_results()
class ProcessEventsFromSourceWithTriggerOffsetTests(ProcessEventsFromSourceTests,
ProcessEventsWithTriggerOffsetTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.source_path = self.create_tempfile_from_testdata()
self.source_data = tables.open_file(self.source_path, 'r')
self.dest_path = self.create_tempfile_path()
self.dest_data = tables.open_file(self.dest_path, 'a')
self.proc = process_events.ProcessEventsFromSourceWithTriggerOffset(
self.source_data, self.dest_data, DATA_GROUP, DATA_GROUP)
class ProcessEventsFromSourceWithTriggerOffsetStationTests(ProcessEventsFromSourceTests,
ProcessEventsWithTriggerOffsetTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.source_path = self.create_tempfile_from_testdata()
self.source_data = tables.open_file(self.source_path, 'r')
self.dest_path = self.create_tempfile_path()
self.dest_data = tables.open_file(self.dest_path, 'a')
self.proc = process_events.ProcessEventsFromSourceWithTriggerOffset(
self.source_data, self.dest_data, DATA_GROUP, DATA_GROUP,
station=501)
def test__reconstruct_time_from_traces_with_external(self):
mock_trigger = Mock()
mock_trigger.return_value = ([(process_events.ADC_LOW_THRESHOLD,
process_events.ADC_HIGH_THRESHOLD)] * 4,
[0, 0, 0, 1])
self.proc.station.trigger = mock_trigger
event = self.proc.source[10]
times = self.proc._reconstruct_time_from_traces(event)
self.assertEqual(times[0], 162.5)
self.assertEqual(times[2], -999)
self.assertEqual(times[4], -999)
class ProcessSinglesTests(unittest.TestCase):
def setUp(self):
warnings.filterwarnings('ignore')
self.data_path = self.create_tempfile_from_testdata()
self.data = tables.open_file(self.data_path, 'a')
self.proc = process_events.ProcessSingles(self.data, DATA_GROUP,
progress=False)
def tearDown(self):
warnings.resetwarnings()
self.data.close()
os.remove(self.data_path)
def test_process_and_store_results(self):
self.proc.process_and_store_results()
# check for unique and sorted timestamps
singles_table = self.data.get_node(DATA_GROUP, 'singles')
ts = singles_table.col('timestamp')
unique_ts = array(sorted(set(ts)))
assert_array_equal(ts, unique_ts)
def create_tempfile_from_testdata(self):
tmp_path = self.create_tempfile_path()
data_path = self.get_testdata_path()
shutil.copyfile(data_path, tmp_path)
return tmp_path
def create_tempfile_path(self):
fd, path = tempfile.mkstemp('.h5')
os.close(fd)
return path
def get_testdata_path(self):
dir_path = os.path.dirname(__file__)
return os.path.join(dir_path, TEST_DATA_FILE)
class ProcessSinglesFromSourceTests(ProcessSinglesTests):
def setUp(self):
warnings.filterwarnings('ignore')
self.source_path = self.create_tempfile_from_testdata()
self.source_data = tables.open_file(self.source_path, 'r')
self.dest_path = self.create_tempfile_path()
self.dest_data = tables.open_file(self.dest_path, 'a')
self.proc = process_events.ProcessSinglesFromSource(
self.source_data, self.dest_data, DATA_GROUP, '/')
def tearDown(self):
warnings.resetwarnings()
self.source_data.close()
os.remove(self.source_path)
self.dest_data.close()
os.remove(self.dest_path)
def test_process_and_store_results(self):
self.proc.process_and_store_results()
# check for unique and sorted timestamps
singles_table = self.dest_data.get_node('/', 'singles')
ts = singles_table.col('timestamp')
unique_ts = array(sorted(set(ts)))
assert_array_equal(ts, unique_ts)
if __name__ == '__main__':
unittest.main()
|
HiSPARC/sapphire
|
sapphire/tests/analysis/test_process_events.py
|
Python
|
gpl-3.0
| 15,851 | 0.002713 |
SEFARIA_API_NODE = "https://www.sefaria.org/api/texts/"
CACHE_MONITOR_LOOP_DELAY_IN_SECONDS = 86400
CACHE_LIFETIME_SECONDS = 604800
category_colors = {
"Commentary": "#4871bf",
"Tanakh": "#004e5f",
"Midrash": "#5d956f",
"Mishnah": "#5a99b7",
"Talmud": "#ccb479",
"Halakhah": "#802f3e",
"Kabbalah": "#594176",
"Philosophy": "#7f85a9",
"Liturgy": "#ab4e66",
"Tanaitic": "#00827f",
"Parshanut": "#9ab8cb",
"Chasidut": "#97b386",
"Musar": "#7c406f",
"Responsa": "#cb6158",
"Apocrypha": "#c7a7b4",
"Other": "#073570",
"Quoting Commentary": "#cb6158",
"Sheets": "#7c406f",
"Community": "#7c406f",
"Targum": "#7f85a9",
"Modern Works": "#7c406f",
"Modern Commentary": "#7c406f",
}
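# Per-platform layout parameters (pixel sizes, margins, font size, branding)
# used when rendering the generated images.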
platform_settings = {
"twitter": {
"font_size": 29,
"additional_line_spacing_he": 5,
"additional_line_spacing_en": -10,
"image_width": 506,
"image_height": 253,
"margin": 20,
"category_color_line_width": 7,
"sefaria_branding": False,
"branding_height": 0
},
"facebook": {
"font_size": 70,
"additional_line_spacing_he": 12,
"additional_line_spacing_en": -20,
"image_width": 1200,
"image_height": 630,
"margin": 40,
"category_color_line_width": 15,
"sefaria_branding": False,
"branding_height": 0
},
"instagram": {
"font_size": 70,
"additional_line_spacing_he": 12,
"additional_line_spacing_en": 0,
"image_width": 1040,
"image_height": 1040,
"margin": 40,
"category_color_line_width": 13,
"sefaria_branding": True,
"branding_height": 100
}
}
|
nassan/sefaria-embedded
|
constants.py
|
Python
|
gpl-3.0
| 1,909 | 0 |
#!/usr/bin/env python
# encoding: utf-8
"""
models.py
Created by Darcy Liu on 2012-03-03.
Copyright (c) 2012 Close To U. All rights reserved.
"""
from django.db import models
from django.contrib.auth.models import User
# class Setting(models.Model):
# sid = models.AutoField(primary_key=True)
# option = models.CharField(unique=True,max_length=128,verbose_name='Option')
# value = models.CharField(max_length=256,verbose_name='Value')
class Minisite(models.Model):
key = models.AutoField(primary_key=True)
name = models.CharField(max_length=256,verbose_name='name')
slug = models.CharField(unique=True,max_length=128,verbose_name='slug')
meta = models.TextField(blank=True, verbose_name='meta')
description = models.TextField(blank=True, verbose_name='description')
author = models.ForeignKey(User,verbose_name='author')
created = models.DateTimeField(auto_now_add=True,verbose_name='created')
updated = models.DateTimeField(auto_now=True,verbose_name='updated')
def __unicode__(self):
result = self.name
return unicode(result)
class Page(models.Model):
key = models.AutoField(primary_key=True)
name = models.CharField(max_length=256,verbose_name='name')
slug = models.CharField(max_length=128,verbose_name='slug')
    # mode: insite or standalone
Mode_Choices = (
('0', 'insite'),
        ('1', 'standalone'),
)
mode = models.CharField(verbose_name='format',max_length=1,default=0,choices=Mode_Choices)
#content-type
mime = models.CharField(max_length=64,default='text/html;charset=utf-8',verbose_name='mime')
#format
Format_Choices = (
('0', 'txt'),
('1', 'html'),
('2', 'markdown'),
('3', 'textile'),
)
format = models.CharField(verbose_name='format',max_length=1,default=0,choices=Format_Choices)
text = models.TextField(blank=True, verbose_name='content')
script = models.TextField(blank=True, verbose_name='script')
style = models.TextField(blank=True, verbose_name='style')
text_html = models.TextField(blank=True, verbose_name='html')
minisite = models.ForeignKey(Minisite,verbose_name='minisite')
author = models.ForeignKey(User,verbose_name='author')
created = models.DateTimeField(auto_now_add=True,verbose_name='created')
updated = models.DateTimeField(auto_now=True,verbose_name='updated')
def __unicode__(self):
result = self.name
return unicode(result)
class Meta:
unique_together = (('slug', 'minisite'),)
|
darcyliu/storyboard
|
home/models.py
|
Python
|
mit
| 2,582 | 0.013555 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from itertools import combinations
__all__ = [
'network_complement'
]
def network_complement(network, cls=None):
"""Generate the complement network of a network.
The complement of a graph G is the graph H with the same vertices
but whose edges consists of the edges not present in the graph G [1]_.
Parameters
----------
    network : :class:`~compas.datastructures.Network`
        A network.
    cls : Type[:class:`~compas.datastructures.Network`], optional
        The network class of the complement.
        Defaults to the type of ``network``.
Returns
-------
:class:`~compas.datastructures.Network`
The complement network.
References
----------
.. [1] Wolfram MathWorld. *Graph complement*.
Available at: http://mathworld.wolfram.com/GraphComplement.html.
Examples
--------
>>> import compas
>>> from compas.datastructures import Network
>>> from compas.datastructures import network_complement
>>> network = Network.from_obj(compas.get('lines.obj'))
>>> complement = network_complement(network)
>>> any(complement.has_edge(u, v, directed=False) for u, v in network.edges())
False
"""
if not cls:
cls = type(network)
nodes = [network.node_coordinates(key) for key in network.nodes()]
edges = [(u, v) for u, v in combinations(network.nodes(), 2) if not network.has_edge(u, v, directed=False)]
return cls.from_nodes_and_edges(nodes, edges)
|
compas-dev/compas
|
src/compas/datastructures/network/complementarity.py
|
Python
|
mit
| 1,442 | 0.001387 |
"""Inspectors allow you to visually browse through the members of
various python objects. To open an inspector, import this module, and
execute inspector.inspect(anObject) I start IDLE with this command
line: idle.py -c "from inspector import inspect"
so that I can just type: inspect(anObject) any time."""
__all__ = ['inspect', 'inspectorFor', 'Inspector', 'ModuleInspector', 'ClassInspector', 'InstanceInspector', 'FunctionInspector', 'InstanceMethodInspector', 'CodeInspector', 'ComplexInspector', 'DictionaryInspector', 'SequenceInspector', 'SliceInspector', 'InspectorWindow']
from direct.showbase.TkGlobal import *
from Tkinter import *
import Pmw
### public API
def inspect(anObject):
inspector = inspectorFor(anObject)
inspectorWindow = InspectorWindow(inspector)
inspectorWindow.open()
return inspectorWindow
### private
def inspectorFor(anObject):
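    # Build a key such as 'DictType' or 'ListType' from the object's type name
    # and look up the matching Inspector subclass; unknown types fall back to
    # the generic Inspector.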
typeName = type(anObject).__name__.capitalize() + 'Type'
if typeName in _InspectorMap:
inspectorName = _InspectorMap[typeName]
else:
print("Can't find an inspector for " + typeName)
inspectorName = 'Inspector'
inspector = globals()[inspectorName](anObject)
return inspector
### initializing
def initializeInspectorMap():
global _InspectorMap
notFinishedTypes = ['BufferType', 'EllipsisType', 'FrameType', 'TracebackType', 'XRangeType']
_InspectorMap = {
'Builtin_function_or_methodType': 'FunctionInspector',
'BuiltinFunctionType': 'FunctionInspector',
'BuiltinMethodType': 'FunctionInspector',
'ClassType': 'ClassInspector',
'CodeType': 'CodeInspector',
'ComplexType': 'Inspector',
'DictionaryType': 'DictionaryInspector',
'DictType': 'DictionaryInspector',
'FileType': 'Inspector',
'FloatType': 'Inspector',
'FunctionType': 'FunctionInspector',
'Instance methodType': 'InstanceMethodInspector',
'InstanceType': 'InstanceInspector',
'IntType': 'Inspector',
'LambdaType': 'Inspector',
'ListType': 'SequenceInspector',
'LongType': 'Inspector',
'MethodType': 'FunctionInspector',
'ModuleType': 'ModuleInspector',
'NoneType': 'Inspector',
'SliceType': 'SliceInspector',
'StringType': 'SequenceInspector',
'TupleType': 'SequenceInspector',
'TypeType': 'Inspector',
'UnboundMethodType': 'FunctionInspector'}
for each in notFinishedTypes:
_InspectorMap[each] = 'Inspector'
### Classes
class Inspector:
def __init__(self, anObject):
self.object = anObject
self.lastPartNumber = 0
self.initializePartsList()
self.initializePartNames()
def __str__(self):
return __name__ + '(' + str(self.object) + ')'
def initializePartsList(self):
self._partsList = []
keys = self.namedParts()
keys.sort()
for each in keys:
self._partsList.append(each)
#if not callable(getattr(self.object, each)):
# self._partsList.append(each)
def initializePartNames(self):
self._partNames = ['up'] + [str(each) for each in self._partsList]
def title(self):
"Subclasses may override."
return self.objectType().__name__.capitalize()
def getLastPartNumber(self):
return self.lastPartNumber
def selectedPart(self):
return self.partNumber(self.getLastPartNumber())
def namedParts(self):
return dir(self.object)
def stringForPartNumber(self, partNumber):
object = self.partNumber(partNumber)
doc = None
if callable(object):
try:
doc = object.__doc__
except:
pass
if doc:
return (str(object) + '\n' + str(doc))
else:
return str(object)
def partNumber(self, partNumber):
self.lastPartNumber = partNumber
if partNumber == 0:
return self.object
else:
part = self.privatePartNumber(partNumber)
return getattr(self.object, part)
def inspectorFor(self, part):
return inspectorFor(part)
def privatePartNumber(self, partNumber):
return self._partsList[partNumber - 1]
def partNames(self):
return self._partNames
def objectType(self):
return type(self.object)
###
class ModuleInspector(Inspector):
def namedParts(self):
return ['__dict__']
class ClassInspector(Inspector):
def namedParts(self):
return ['__bases__'] + self.object.__dict__.keys()
def title(self):
return self.object.__name__ + ' Class'
class InstanceInspector(Inspector):
def title(self):
return self.object.__class__.__name__
def namedParts(self):
return ['__class__'] + dir(self.object)
###
class FunctionInspector(Inspector):
def title(self):
return self.object.__name__ + "()"
class InstanceMethodInspector(Inspector):
def title(self):
return str(self.object.im_class) + "." + self.object.__name__ + "()"
class CodeInspector(Inspector):
def title(self):
return str(self.object)
###
class ComplexInspector(Inspector):
def namedParts(self):
return ['real', 'imag']
###
class DictionaryInspector(Inspector):
def initializePartsList(self):
Inspector.initializePartsList(self)
keys = self.object.keys()
keys.sort()
for each in keys:
self._partsList.append(each)
def partNumber(self, partNumber):
self.lastPartNumber = partNumber
if partNumber == 0:
return self.object
key = self.privatePartNumber(partNumber)
if key in self.object:
return self.object[key]
else:
return getattr(self.object, key)
class SequenceInspector(Inspector):
def initializePartsList(self):
Inspector.initializePartsList(self)
for each in range(len(self.object)):
self._partsList.append(each)
def partNumber(self, partNumber):
self.lastPartNumber = partNumber
if partNumber == 0:
return self.object
index = self.privatePartNumber(partNumber)
        if isinstance(index, int):
return self.object[index]
else:
return getattr(self.object, index)
class SliceInspector(Inspector):
def namedParts(self):
return ['start', 'stop', 'step']
### Initialization
initializeInspectorMap()
class InspectorWindow:
def __init__(self, inspector):
self.inspectors = [inspector]
def topInspector(self):
return self.inspectors[len(self.inspectors) - 1]
def selectedPart(self):
return self.topInspector().selectedPart()
def inspectedObject(self):
return self.topInspector().object
def open(self):
self.top= Toplevel()
self.top.geometry('650x315')
self.createViews()
self.update()
#Private - view construction
def createViews(self):
self.createMenus()
# Paned widget for dividing two halves
self.framePane = Pmw.PanedWidget(self.top, orient = HORIZONTAL)
self.createListWidget()
self.createTextWidgets()
self.framePane.pack(expand = 1, fill = BOTH)
def setTitle(self):
self.top.title('Inspecting: ' + self.topInspector().title())
def createListWidget(self):
listFrame = self.framePane.add('list')
listWidget = self.listWidget = Pmw.ScrolledListBox(
listFrame, vscrollmode = 'static')
listWidget.pack(side=LEFT, fill=BOTH, expand=1)
# If you click in the list box, take focus so you can navigate
# with the cursor keys
listbox = listWidget.component('listbox')
listbox.bind('<ButtonPress-1>',
lambda e, l = listbox: l.focus_set())
listbox.bind('<ButtonRelease-1>', self.listSelectionChanged)
listbox.bind('<Double-Button-1>', self.popOrDive)
listbox.bind('<ButtonPress-3>', self.popupMenu)
listbox.bind('<KeyRelease-Up>', self.listSelectionChanged)
listbox.bind('<KeyRelease-Down>', self.listSelectionChanged)
listbox.bind('<KeyRelease-Left>', lambda e, s = self: s.pop())
listbox.bind('<KeyRelease-Right>', lambda e, s = self: s.dive())
listbox.bind('<Return>', self.popOrDive)
def createTextWidgets(self):
textWidgetsFrame = self.framePane.add('textWidgets')
self.textPane = Pmw.PanedWidget(textWidgetsFrame, orient = VERTICAL)
textFrame = self.textPane.add('text', size = 200)
self.textWidget = Pmw.ScrolledText(
textFrame, vscrollmode = 'static', text_state = 'disabled')
self.textWidget.pack(fill=BOTH, expand=1)
commandFrame = self.textPane.add('command')
self.commandWidget = Pmw.ScrolledText(
commandFrame, vscrollmode = 'static')
self.commandWidget.insert(1.0, '>>> ')
self.commandWidget.pack(fill = BOTH, expand = 1)
self.commandWidget.component('text').bind(
'<KeyRelease-Return>', self.evalCommand)
self.textPane.pack(expand = 1, fill = BOTH)
def createMenus(self):
self.menuBar = Menu(self.top)
self.top.config(menu=self.menuBar)
inspectMenu = Menu(self.menuBar)
self.menuBar.add_cascade(label='Inspect', menu=inspectMenu)
inspectMenu.add_command(label='Pop', command=self.pop)
inspectMenu.add_command(label='Dive', command=self.dive)
inspectMenu.add_command(label='Inspect', command=self.inspect)
helpMenu = Menu(self.menuBar)
self.menuBar.add_cascade(label='Help', menu=helpMenu)
helpMenu.add_command(label='Instructions', command=self.showHelp)
def fillList(self):
self.listWidget.delete(0, END)
for each in self.topInspector().partNames():
self.listWidget.insert(END, each)
self.listWidget.select_clear(0)
# Event Handling
def listSelectionChanged(self, event):
partNumber = self.selectedIndex()
if partNumber == None:
partNumber = 0
string = self.topInspector().stringForPartNumber(partNumber)
self.textWidget.component('text').configure(state = 'normal')
self.textWidget.delete('1.0', END)
self.textWidget.insert(END, string)
self.textWidget.component('text').configure(state = 'disabled')
def popOrDive(self, event):
"""The list has been double-clicked. If the selection is 'self' then pop,
otherwise dive into the selected part"""
if self.selectedIndex() == 0:
self.pop()
else:
self.dive()
def evalCommand(self, event):
"""Eval text in commandWidget"""
insertPt = self.commandWidget.index(INSERT)
commandLineStart = self.commandWidget.search(
'>>> ', INSERT, backwards = 1)
if commandLineStart:
commandStart = self.commandWidget.index(
commandLineStart + ' + 4 chars')
command = self.commandWidget.get(commandStart,
commandStart + ' lineend')
if command:
partDict = { 'this': self.selectedPart(),
'object': self.topInspector().object }
result = eval(command, partDict)
self.commandWidget.insert(INSERT, repr(result) + '\n>>> ')
self.commandWidget.see(INSERT)
# Menu Events
def inspect(self):
inspector = self.inspectorForSelectedPart()
if inspector == None:
return
InspectorWindow(inspector).open()
def pop(self):
if len(self.inspectors) > 1:
self.inspectors = self.inspectors[:-1]
self.update()
def dive(self):
inspector = self.inspectorForSelectedPart()
if inspector == None:
return
self.inspectors.append(inspector)
self.update()
def update(self):
self.setTitle()
self.fillList()
# What is active part in this inspector
partNumber = self.topInspector().getLastPartNumber()
self.listWidget.select_clear(0)
self.listWidget.activate(partNumber)
self.listWidget.select_set(partNumber)
self.listSelectionChanged(None)
# Make sure selected item is visible
self.listWidget.see(partNumber)
# Make sure left side of listbox visible
self.listWidget.xview_moveto(0.0)
# Grab focus in listbox
self.listWidget.component('listbox').focus_set()
def showHelp(self):
help = Toplevel(tkroot)
help.title("Inspector Help")
frame = Frame(help)
frame.pack()
text = Label(
frame, justify = LEFT,
text = "ListBox shows selected object's attributes\nDouble click or use right arrow on an instance variable to dive down.\nDouble click self or use left arrow to pop back up.\nUse up and down arrow keys to move from item to item in the current level.\n\nValue box (upper right) shows current value of selected item\n\nCommand box (lower right) is used to evaluate python commands\nLocal variables 'object' and 'this' are defined as the current object being inspected\nand the current attribute selected."
)
text.pack()
#Private
def selectedIndex(self):
indicies = map(int, self.listWidget.curselection())
if len(indicies) == 0:
return None
partNumber = indicies[0]
return partNumber
def inspectorForSelectedPart(self):
partNumber = self.selectedIndex()
if partNumber == None:
return None
part = self.topInspector().partNumber(partNumber)
return self.topInspector().inspectorFor(part)
def popupMenu(self, event):
print(event)
partNumber = self.selectedIndex()
print(partNumber)
if partNumber == None:
return
part = self.topInspector().partNumber(partNumber)
print(part)
from panda3d.core import NodePath
from direct.fsm import ClassicFSM
popupMenu = None
if isinstance(part, NodePath):
popupMenu = self.createPopupMenu(
part,
[('Explore', NodePath.explore),
('Place', NodePath.place),
('Set Color', NodePath.rgbPanel)])
elif isinstance(part, ClassicFSM.ClassicFSM):
import FSMInspector
popupMenu = self.createPopupMenu(
part,
[('Inspect ClassicFSM', FSMInspector.FSMInspector)])
print(popupMenu)
if popupMenu:
popupMenu.post(event.widget.winfo_pointerx(),
event.widget.winfo_pointery())
def createPopupMenu(self, part, menuList):
popupMenu = Menu(self.top, tearoff = 0)
for item, func in menuList:
popupMenu.add_command(
label = item,
command = lambda p = part, f = func: f(p))
return popupMenu
|
ee08b397/panda3d
|
direct/src/tkpanels/Inspector.py
|
Python
|
bsd-3-clause
| 15,271 | 0.00681 |
#example
from kivy.base import runTouchApp
from kivy.lang import Builder
from kivy.garden.light_indicator import Light_indicator
from kivy.uix.button import Button
# Load the KV UI and run the app
runTouchApp(Builder.load_file('example.kv'))
|
CAES-Python/CAES_Kivy_Garden
|
garden.light_indicator/example.py
|
Python
|
mit
| 229 | 0.026201 |
# Copyright 2020 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation using a Hidden Markov Model
Given an ROI timeseries, this class uses an annealed fitting procedure to
segment the timeseries into events with stable activity patterns. After
learning the signature activity pattern of each event, the model can then be
applied to other datasets to identify a corresponding sequence of events.
Full details are available in:
Christopher Baldassano, Janice Chen, Asieh Zadbood,
Jonathan W Pillow, Uri Hasson, Kenneth A Norman
Discovering event structure in continuous narrative perception and memory
Neuron, Volume 95, Issue 3, 709 - 721.e5
https://doi.org/10.1016/j.neuron.2017.06.041
This class also extends the model described in the Neuron paper:
1) It allows transition matrices that are composed of multiple separate
chains of events rather than a single linear path. This allows a model to
contain patterns for multiple event sequences (e.g. narratives), and
fit probabilities along each of these chains on a new, unlabeled timeseries.
To use this option, pass in an event_chain vector labeling which events
belong to each chain, define event patterns using set_event_patterns(),
then fit to a new dataset with find_events.
2) To obtain better fits when the underlying event structure contains
events that vary substantially in length, the split_merge option allows
the fit() function to re-distribute events during fitting. The number of
merge/split proposals is controlled by split_merge_proposals, which
controls how thorough versus fast the fitting process is.
"""
# Authors: Chris Baldassano and Cătălin Iordan (Princeton University)
import numpy as np
from scipy import stats
import logging
import copy
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.exceptions import NotFittedError
import itertools
from . import _utils as utils # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"EventSegment",
]
class EventSegment(BaseEstimator):
"""Class for event segmentation of continuous fMRI data
Parameters
----------
n_events: int
Number of segments to learn
step_var: Callable[[int], float] : default 4 * (0.98 ** (step - 1))
The Gaussian variance to use during fitting, as a function of the
number of steps. Should decrease slowly over time.
n_iter: int, default: 500
Maximum number of steps to run during fitting
event_chains: ndarray with length = n_events
Array with unique value for each separate chain of events, each linked
in the order they appear in the array
split_merge: bool, default: False
Determines whether merge/split proposals are used during fitting with
fit(). This can improve fitting performance when events are highly
uneven in size, but requires additional time
split_merge_proposals: int, default: 1
Number of merges and splits to consider at each step. Computation time
scales as O(proposals^2) so this should usually be a small value
Attributes
----------
p_start, p_end: length n_events+1 ndarray
initial and final prior distributions over events
P: n_events+1 by n_events+1 ndarray
HMM transition matrix
ll_ : ndarray with length = number of training datasets
Log-likelihood for training datasets over the course of training
segments_: list of (time by event) ndarrays
Learned (soft) segmentation for training datasets
event_var_ : float
Gaussian variance at the end of learning
event_pat_ : voxel by event ndarray
Learned mean patterns for each event
"""
def _default_var_schedule(step):
return 4 * (0.98 ** (step - 1))
def __init__(self, n_events=2,
step_var=_default_var_schedule,
n_iter=500, event_chains=None,
split_merge=False, split_merge_proposals=1):
self.n_events = n_events
self.step_var = step_var
self.n_iter = n_iter
self.split_merge = split_merge
self.split_merge_proposals = split_merge_proposals
if event_chains is None:
self.event_chains = np.zeros(n_events)
else:
self.event_chains = event_chains
def _fit_validate(self, X):
"""Validate input to fit()
Validate data passed to fit(). Includes a transpose operation to
change the row/column order of X and z-scoring in time.
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented
Returns
-------
X: list of voxel by time ndarrays
"""
if len(np.unique(self.event_chains)) > 1:
raise RuntimeError("Cannot fit chains, use set_event_patterns")
# Copy X into a list and transpose
X = copy.deepcopy(X)
if type(X) is not list:
X = [X]
for i in range(len(X)):
X[i] = check_array(X[i])
X[i] = X[i].T
# Check that number of voxels is consistent across datasets
n_dim = X[0].shape[0]
for i in range(len(X)):
assert (X[i].shape[0] == n_dim)
# Double-check that data is z-scored in time
for i in range(len(X)):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
return X
def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = self._fit_validate(X)
n_train = len(X)
n_dim = X[0].shape[0]
self.classes_ = np.arange(self.n_events)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1], self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i], mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
if step > 1 and self.split_merge:
curr_ll = np.mean(self.ll_[-1, :])
self.ll_[-1, :], log_gamma, mean_pat = \
self._split_merge(X, log_gamma, iteration_var, curr_ll)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob
def _forward_backward(self, logprob):
"""Runs forward-backward algorithm on observation log probs
Given the log probability of each timepoint being generated by
each event, run the HMM forward-backward algorithm to find the
probability that each timepoint belongs to each event (based on the
transition priors in p_start, p_end, and P)
See https://en.wikipedia.org/wiki/Forward-backward_algorithm for
mathematical details
Parameters
----------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
Returns
-------
log_gamma : time by event ndarray
Log probability of each timepoint belonging to each event
ll : float
Log-likelihood of fit
"""
logprob = copy.copy(logprob)
t = logprob.shape[0]
logprob = np.hstack((logprob, float("-inf") * np.ones((t, 1))))
# Initialize variables
log_scale = np.zeros(t)
log_alpha = np.zeros((t, self.n_events + 1))
log_beta = np.zeros((t, self.n_events + 1))
# Set up transition matrix, with final sink state
self.p_start = np.zeros(self.n_events + 1)
self.p_end = np.zeros(self.n_events + 1)
self.P = np.zeros((self.n_events + 1, self.n_events + 1))
label_ind = np.unique(self.event_chains, return_inverse=True)[1]
n_chains = np.max(label_ind) + 1
# For each chain of events, link them together and then to sink state
for c in range(n_chains):
chain_ind = np.nonzero(label_ind == c)[0]
self.p_start[chain_ind[0]] = 1 / n_chains
self.p_end[chain_ind[-1]] = 1 / n_chains
p_trans = (len(chain_ind) - 1) / t
if p_trans >= 1:
raise ValueError('Too few timepoints')
for i in range(len(chain_ind)):
self.P[chain_ind[i], chain_ind[i]] = 1 - p_trans
if i < len(chain_ind) - 1:
self.P[chain_ind[i], chain_ind[i+1]] = p_trans
else:
self.P[chain_ind[i], -1] = p_trans
self.P[-1, -1] = 1
# Forward pass
for i in range(t):
if i == 0:
log_alpha[0, :] = self._log(self.p_start) + logprob[0, :]
else:
log_alpha[i, :] = self._log(np.exp(log_alpha[i - 1, :])
.dot(self.P)) + logprob[i, :]
log_scale[i] = np.logaddexp.reduce(log_alpha[i, :])
log_alpha[i] -= log_scale[i]
# Backward pass
log_beta[-1, :] = self._log(self.p_end) - log_scale[-1]
for i in reversed(range(t - 1)):
obs_weighted = log_beta[i + 1, :] + logprob[i + 1, :]
offset = np.max(obs_weighted)
log_beta[i, :] = offset + self._log(
np.exp(obs_weighted - offset).dot(self.P.T)) - log_scale[i]
# Combine and normalize
log_gamma = log_alpha + log_beta
log_gamma -= np.logaddexp.reduce(log_gamma, axis=1, keepdims=True)
ll = np.sum(log_scale[:(t - 1)]) + np.logaddexp.reduce(
log_alpha[-1, :] + log_scale[-1] + self._log(self.p_end))
log_gamma = log_gamma[:, :-1]
return log_gamma, ll
def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape)
def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy()
def find_events(self, testing_data, var=None, scramble=False):
"""Applies learned event segmentation to new testing dataset
After fitting an event segmentation using fit() or setting event
patterns directly using set_event_patterns(), this function finds the
same sequence of event patterns in a new testing dataset.
Parameters
----------
testing_data: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
var: float or 1D ndarray of length equal to the number of events
default: uses variance that maximized training log-likelihood
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance. If fit() has not previously
been run, this must be specifed (cannot be None).
scramble: bool : default False
If true, the order of the learned events are shuffled before
fitting, to give a null distribution
Returns
-------
segments : time by event ndarray
The resulting soft segmentation. segments[t,e] = probability
that timepoint t is in event e
test_ll : float
Log-likelihood of model fit
"""
if var is None:
if not hasattr(self, 'event_var_'):
raise NotFittedError(("Event variance must be provided, if "
"not previously set by fit()"))
else:
var = self.event_var_
if not hasattr(self, 'event_pat_'):
raise NotFittedError(("The event patterns must first be set "
"by fit() or set_event_patterns()"))
if scramble:
mean_pat = self.event_pat_[:, np.random.permutation(self.n_events)]
else:
mean_pat = self.event_pat_
logprob = self._logprob_obs(testing_data.T, mean_pat, var)
lg, test_ll = self._forward_backward(logprob)
segments = np.exp(lg)
return segments, test_ll
def predict(self, X):
"""Applies learned event segmentation to new testing dataset
Alternative function for segmenting a new dataset after using
fit() to learn a sequence of events, to comply with the sklearn
Classifier interface
Parameters
----------
X: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
Returns
-------
Event label for each timepoint
"""
check_is_fitted(self, ["event_pat_", "event_var_"])
X = check_array(X)
segments, test_ll = self.find_events(X)
return np.argmax(segments, axis=1)
def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var
def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
segments = np.exp(lg)
return segments, test_ll
def _split_merge(self, X, log_gamma, iteration_var, curr_ll):
"""Attempt to improve log-likelihood with a merge/split
The simulated annealing used in fit() is susceptible to getting
stuck in a local minimum if there are some very short events. This
function attempts to find
a) pairs of neighboring events that are highly similar, to merge
b) events that can be split into two dissimilar events
It then tests to see whether simultaneously merging one of the
pairs from (a) and splitting one of the events from (b) can improve
the log-likelihood. The number of (a)/(b) pairs tested is determined
by the split_merge_proposals class attribute.
Parameters
----------
X: list of voxel by time ndarrays
fMRI datasets being fit
log_gamma : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset
iteration_var : float
Current variance in simulated annealing
curr_ll: float
Log-likelihood of current model
Returns
-------
return_ll : ndarray with length equal to length of X
Log-likelihood after merge/split (same as curr_ll if no
merge/split improved curr_ll)
return_lg : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset (same as log_gamma if no merge/split
improved curr_ll)
return_mp : voxel by event ndarray
Mean patterns of events (after possible merge/split)
"""
# Compute current probabilities and mean patterns
n_train = len(X)
n_dim = X[0].shape[0]
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# For each event, merge its probability distribution
# with the next event, and also split its probability
# distribution at its median into two separate events.
# Use these new event probability distributions to compute
# merged and split event patterns.
merge_pat = np.empty((n_train, n_dim, self.n_events))
split_pat = np.empty((n_train, n_dim, 2 * self.n_events))
for i, sp in enumerate(seg_prob): # Iterate over datasets
m_evprob = np.zeros((sp.shape[0], sp.shape[1]))
s_evprob = np.zeros((sp.shape[0], 2 * sp.shape[1]))
cs = np.cumsum(sp, axis=0)
for e in range(sp.shape[1]):
# Split distribution at midpoint and normalize each half
mid = np.where(cs[:, e] >= 0.5)[0][0]
cs_first = cs[mid, e] - sp[mid, e]
cs_second = 1 - cs_first
s_evprob[:mid, 2 * e] = sp[:mid, e] / cs_first
s_evprob[mid:, 2 * e + 1] = sp[mid:, e] / cs_second
# Merge distribution with next event distribution
m_evprob[:, e] = sp[:, e:(e + 2)].mean(1)
# Weight data by distribution to get event patterns
merge_pat[i, :, :] = X[i].dot(m_evprob)
split_pat[i, :, :] = X[i].dot(s_evprob)
# Average across datasets
merge_pat = np.mean(merge_pat, axis=0)
split_pat = np.mean(split_pat, axis=0)
# Correlate the current event patterns with the split and
# merged patterns
merge_corr = np.zeros(self.n_events)
split_corr = np.zeros(self.n_events)
for e in range(self.n_events):
split_corr[e] = np.corrcoef(mean_pat[:, e],
split_pat[:, (2 * e):(2 * e + 2)],
rowvar=False)[0, 1:3].max()
merge_corr[e] = np.corrcoef(merge_pat[:, e],
mean_pat[:, e:(e + 2)],
rowvar=False)[0, 1:3].min()
merge_corr = merge_corr[:-1]
# Find best merge/split candidates
# A high value of merge_corr indicates that a pair of events are
# very similar to their merged pattern, and are good candidates for
# being merged.
# A low value of split_corr indicates that an event's pattern is
# very dissimilar from the patterns in its first and second half,
# and is a good candidate for being split.
best_merge = np.flipud(np.argsort(merge_corr))
best_merge = best_merge[:self.split_merge_proposals]
best_split = np.argsort(split_corr)
best_split = best_split[:self.split_merge_proposals]
# For every pair of merge/split candidates, attempt the merge/split
# and measure the log-likelihood. If any are better than curr_ll,
# accept this best merge/split
mean_pat_last = mean_pat.copy()
return_ll = curr_ll
return_lg = copy.deepcopy(log_gamma)
return_mp = mean_pat.copy()
for m_e, s_e in itertools.product(best_merge, best_split):
if m_e == s_e or m_e+1 == s_e:
# Don't attempt to merge/split same event
continue
# Construct new set of patterns with merge/split
mean_pat_ms = np.delete(mean_pat_last, s_e, axis=1)
mean_pat_ms = np.insert(mean_pat_ms, [s_e, s_e],
split_pat[:, (2 * s_e):(2 * s_e + 2)],
axis=1)
mean_pat_ms = np.delete(mean_pat_ms,
[m_e + (s_e < m_e), m_e + (s_e < m_e) + 1],
axis=1)
mean_pat_ms = np.insert(mean_pat_ms, m_e + (s_e < m_e),
merge_pat[:, m_e], axis=1)
# Measure log-likelihood with these new patterns
ll_ms = np.zeros(n_train)
log_gamma_ms = list()
for i in range(n_train):
logprob = self._logprob_obs(X[i],
mean_pat_ms, iteration_var)
lg, ll_ms[i] = self._forward_backward(logprob)
log_gamma_ms.append(lg)
# If better than best ll so far, save to return to fit()
            if ll_ms.mean() > np.mean(return_ll):
return_mp = mean_pat_ms.copy()
return_ll = ll_ms
for i in range(n_train):
return_lg[i] = log_gamma_ms[i].copy()
logger.debug("Identified merge %d,%d and split %d",
m_e, m_e+1, s_e)
return return_ll, return_lg, return_mp
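# Illustrative sketch (added for clarity, not part of the original class): the
# median-split step used in _split_merge, shown on a toy single-event
# probability column. The array values below are made up.
#
#   sp_e = np.array([0.1, 0.2, 0.4, 0.2, 0.1])   # one event's column of sp
#   cs = np.cumsum(sp_e)                         # [0.1, 0.3, 0.7, 0.9, 1.0]
#   mid = np.where(cs >= 0.5)[0][0]              # 2, the median timepoint
#   cs_first = cs[mid] - sp_e[mid]               # mass strictly before the median (0.3)
#   first_half = sp_e[:mid] / cs_first           # renormalized to sum to 1
#   second_half = sp_e[mid:] / (1 - cs_first)    # renormalized to sum to 1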
|
brainiak/brainiak
|
brainiak/eventseg/event.py
|
Python
|
apache-2.0
| 26,617 | 0 |
"""bubble - re-emit a log record with superdomain
| bubble [field=hostname] [parts=2]
adds 'superhost' field
"""
import sys,splunk.Intersplunk
import re
ipregex = r"(?P<ip>((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d))"
ip_rex = re.compile(ipregex)
def super_domain(host, output_parts):
parts = host.split(".")
num_parts = len(parts)
if output_parts > num_parts:
return host
if ip_rex.match(host):
host = '.'.join(parts[:-output_parts])
else:
host = '.'.join(parts[-output_parts:])
return host
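# Illustrative examples (added; the outputs follow from super_domain above, the
# inputs are hypothetical):
#   super_domain("www.sales.example.com", 2)  -> "example.com"   keep the last 2 labels
#   super_domain("www.sales.example.com", 5)  -> unchanged       more labels requested than exist
#   super_domain("10.1.2.3", 2)               -> "10.1"          IPs keep the leading octets instead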
def add_superhost(results, field, num_parts):
for r in results:
if field not in r:
continue
d = super_domain(r[field], num_parts)
r['superhost'] = d
yield r
try:
keywords, options = splunk.Intersplunk.getKeywordsAndOptions()
field = options.get('field', 'hostname')
num_parts = int(options.get('parts', 2))
results,dummyresults,settings = splunk.Intersplunk.getOrganizedResults()
results = list(add_superhost(results, field, num_parts))
except:
import traceback
stack = traceback.format_exc()
results = splunk.Intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
splunk.Intersplunk.outputResults( results )
|
JustinAzoff/splunk-scripts
|
bubble.py
|
Python
|
mit
| 1,305 | 0.010728 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import cPickle as pickle
except ImportError:
import pickle
import os.path
class FileCache(dict):
def __init__(self, filename):
self.filename = os.path.abspath(filename)
try:
self.update(pickle.load(open(self.filename)))
except:
pass
def __setitem__(self, key, value):
super(FileCache, self).__setitem__(key, value)
pickle.dump(self, open(self.filename, 'w'))
def set(self, key, value):
self.__setitem__(key, value)
def get_stats(self):
pass
try:
import pylibmc as memcache
except ImportError:
import memcache
class Cache(object):
def __init__(self, servers=None, default='.cache', **kargs):
if servers is None:
self.cache = memcache.Client(**kargs)
else:
self.cache = memcache.Client(servers, **kargs)
if not self.cache.get_stats():
self.cache = FileCache(default)
def __getitem__(self, key):
return self.cache.get(key)
def __setitem__(self, key, value):
self.cache.set(key, value)
def get(self, key):
return self.cache.get(key)
def set(self, key, value):
self.cache.set(key, value)
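if __name__ == '__main__':
    # Usage sketch (added for illustration; the server address and keys are
    # hypothetical). Cache transparently falls back to the pickle-backed
    # FileCache when no memcached server answers get_stats().
    cache = Cache(servers=['127.0.0.1:11211'], default='.cache')
    cache['feed:last'] = {'etag': 'abc123'}
    print(cache.get('feed:last'))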
|
moonfruit/yyfeed
|
lib/yyfeed/util/cache.py
|
Python
|
apache-2.0
| 1,274 | 0.000785 |
# This file is part of MyPaint.
# Copyright (C) 2015 by Andrew Chadwick <a.t.chadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
## Imports
import math
from numpy import isfinite
import collections
import weakref
from logging import getLogger
logger = getLogger(__name__)
from gettext import gettext as _
import gi
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
import gui.mode
import gui.overlays
import gui.style
import gui.drawutils
import lib.helpers
import gui.cursor
## Class defs
class _Phase:
"""Enumeration of the states that an InkingMode can be in"""
CAPTURE = 0
ADJUST = 1
_NODE_FIELDS = ("x", "y", "pressure", "xtilt", "ytilt", "time")
class _Node (collections.namedtuple("_Node", _NODE_FIELDS)):
"""Recorded control point, as a namedtuple.
Node tuples have the following 6 fields, in order
* x, y: model coords, float
* pressure: float in [0.0, 1.0]
* xtilt, ytilt: float in [-1.0, 1.0]
* time: absolute seconds, float
"""
class _EditZone:
"""Enumeration of what the pointer is on in the ADJUST phase"""
EMPTY_CANVAS = 0
CONTROL_NODE = 1 # see target_node_index
REJECT_BUTTON = 2
ACCEPT_BUTTON = 3
class InkingMode (gui.mode.ScrollableModeMixin,
gui.mode.BrushworkModeMixin,
gui.mode.DragMode):
## Metadata properties
ACTION_NAME = "InkingMode"
pointer_behavior = gui.mode.Behavior.PAINT_FREEHAND
scroll_behavior = gui.mode.Behavior.CHANGE_VIEW
permitted_switch_actions = (
set(gui.mode.BUTTON_BINDING_ACTIONS).union([
'RotateViewMode',
'ZoomViewMode',
'PanViewMode',
])
)
## Metadata methods
@classmethod
def get_name(cls):
return _(u"Inking")
def get_usage(self):
return _(u"Draw, and then adjust smooth lines")
@property
def inactive_cursor(self):
return None
@property
def active_cursor(self):
if self.phase == _Phase.ADJUST:
if self.zone == _EditZone.CONTROL_NODE:
return self._crosshair_cursor
elif self.zone != _EditZone.EMPTY_CANVAS: # assume button
return self._arrow_cursor
return None
## Class config vars
# Input node capture settings:
MAX_INTERNODE_DISTANCE_MIDDLE = 30 # display pixels
MAX_INTERNODE_DISTANCE_ENDS = 10 # display pixels
MAX_INTERNODE_TIME = 1/100.0 # seconds
# Captured input nodes are then interpolated with a spline.
# The code tries to make nice smooth input for the brush engine,
# but avoids generating too much work.
INTERPOLATION_MAX_SLICE_TIME = 1/200.0 # seconds
INTERPOLATION_MAX_SLICE_DISTANCE = 20 # model pixels
INTERPOLATION_MAX_SLICES = MAX_INTERNODE_DISTANCE_MIDDLE * 5
# In other words, limit to a set number of interpolation slices
# per display pixel at the time of stroke capture.
## Initialization & lifecycle methods
def __init__(self, **kwargs):
logger.info("Initializing %r", self)
super(InkingMode, self).__init__(**kwargs)
self.phase = _Phase.CAPTURE
self.zone = _EditZone.EMPTY_CANVAS
self._overlays = {} # keyed by tdw
self._reset_nodes()
self._reset_capture_data()
self._reset_adjust_data()
self._task_queue = collections.deque() # (cb, args, kwargs)
self._task_queue_runner_id = None
self._click_info = None # (button, zone)
self._current_override_cursor = None
# Button pressed while drawing
# Not every device sends button presses, but evdev ones
# do, and this is used as a workaround for an evdev bug:
# https://github.com/mypaint/mypaint/issues/223
self._button_down = None
self._last_good_raw_pressure = 0.0
self._last_good_raw_xtilt = 0.0
self._last_good_raw_ytilt = 0.0
def _reset_nodes(self):
self.nodes = [] # nodes that met the distance+time criteria
def _reset_capture_data(self):
self._last_event_node = None # node for the last event
self._last_node_evdata = None # (xdisp, ydisp, tmilli) for nodes[-1]
def _reset_adjust_data(self):
self.zone = _EditZone.EMPTY_CANVAS
self.target_node_index = None
self._dragged_node_start_pos = None
def _ensure_overlay_for_tdw(self, tdw):
overlay = self._overlays.get(tdw)
if not overlay:
overlay = InkingModeOverlay(self, tdw)
tdw.display_overlays.append(overlay)
self._overlays[tdw] = overlay
return overlay
def _is_active(self):
for mode in self.doc.modes:
if mode is self:
return True
return False
def _discard_overlays(self):
for tdw, overlay in self._overlays.items():
tdw.display_overlays.remove(overlay)
tdw.queue_draw()
self._overlays.clear()
def enter(self, **kwds):
"""Enters the mode: called by `ModeStack.push()` etc."""
super(InkingMode, self).enter(**kwds)
if not self._is_active():
self._discard_overlays()
self._ensure_overlay_for_tdw(self.doc.tdw)
self._arrow_cursor = self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME,
gui.cursor.Name.ARROW,
)
self._crosshair_cursor = self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME,
gui.cursor.Name.CROSSHAIR_OPEN_PRECISE,
)
def leave(self, **kwds):
"""Leaves the mode: called by `ModeStack.pop()` etc."""
if not self._is_active():
self._discard_overlays()
self._stop_task_queue_runner(complete=True)
super(InkingMode, self).leave(**kwds) # supercall will commit
def checkpoint(self, flush=True, **kwargs):
"""Sync pending changes from (and to) the model
If called with flush==False, this is an override which just
redraws the pending stroke with the current brush settings and
color. This is the behavior our testers expect:
https://github.com/mypaint/mypaint/issues/226
When this mode is left for another mode (see `leave()`), the
pending brushwork is committed properly.
"""
if flush:
# Commit the pending work normally
self._start_new_capture_phase(rollback=False)
super(InkingMode, self).checkpoint(flush=flush, **kwargs)
else:
# Queue a re-rendering with any new brush data
# No supercall
self._stop_task_queue_runner(complete=False)
for tdw in self._overlays.keys():
self._queue_draw_buttons(tdw)
self._queue_redraw_all_nodes(tdw)
self._queue_redraw_curve(tdw)
def _start_new_capture_phase(self, rollback=False):
"""Let the user capture a new ink stroke"""
if rollback:
self._stop_task_queue_runner(complete=False)
self.brushwork_rollback_all()
else:
self._stop_task_queue_runner(complete=True)
self.brushwork_commit_all()
for tdw in self._overlays.keys():
self._queue_draw_buttons(tdw)
self._queue_redraw_all_nodes(tdw)
self._reset_nodes()
self._reset_capture_data()
self._reset_adjust_data()
self.phase = _Phase.CAPTURE
## Raw event handling (prelight & zone selection in adjust phase)
def button_press_cb(self, tdw, event):
current_layer = tdw.doc._layers.current
if not (tdw.is_sensitive and current_layer.get_paintable()):
return False
self._update_zone_and_target(tdw, event.x, event.y)
if self.phase == _Phase.ADJUST:
if self.zone in (_EditZone.REJECT_BUTTON,
_EditZone.ACCEPT_BUTTON):
button = event.button
if button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
self._click_info = (button, self.zone)
return False
# FALLTHRU: *do* allow drags to start with other buttons
elif self.zone == _EditZone.EMPTY_CANVAS:
self._start_new_capture_phase(rollback=False)
assert self.phase == _Phase.CAPTURE
# FALLTHRU: *do* start a drag
elif self.phase == _Phase.CAPTURE:
# XXX Not sure what to do here.
# XXX Click to append nodes?
# XXX but how to stop that and enter the adjust phase?
# XXX Click to add a 1st & 2nd (=last) node only?
# XXX but needs to allow a drag after the 1st one's placed.
pass
else:
raise NotImplementedError("Unrecognized zone %r", self.zone)
# Update workaround state for evdev dropouts
self._button_down = event.button
self._last_good_raw_pressure = 0.0
self._last_good_raw_xtilt = 0.0
self._last_good_raw_ytilt = 0.0
# Supercall: start drags etc
return super(InkingMode, self).button_press_cb(tdw, event)
def button_release_cb(self, tdw, event):
current_layer = tdw.doc._layers.current
if not (tdw.is_sensitive and current_layer.get_paintable()):
return False
if self.phase == _Phase.ADJUST:
if self._click_info:
button0, zone0 = self._click_info
if event.button == button0:
if self.zone == zone0:
if zone0 == _EditZone.REJECT_BUTTON:
self._start_new_capture_phase(rollback=True)
assert self.phase == _Phase.CAPTURE
elif zone0 == _EditZone.ACCEPT_BUTTON:
self._start_new_capture_phase(rollback=False)
assert self.phase == _Phase.CAPTURE
self._click_info = None
self._update_zone_and_target(tdw, event.x, event.y)
return False
# (otherwise fall through and end any current drag)
elif self.phase == _Phase.CAPTURE:
# XXX Not sure what to do here: see above
pass
else:
raise NotImplementedError("Unrecognized zone %r", self.zone)
# Update workaround state for evdev dropouts
self._button_down = None
self._last_good_raw_pressure = 0.0
self._last_good_raw_xtilt = 0.0
self._last_good_raw_ytilt = 0.0
# Supercall: stop current drag
return super(InkingMode, self).button_release_cb(tdw, event)
def motion_notify_cb(self, tdw, event):
current_layer = tdw.doc._layers.current
if not (tdw.is_sensitive and current_layer.get_paintable()):
return False
self._update_zone_and_target(tdw, event.x, event.y)
return super(InkingMode, self).motion_notify_cb(tdw, event)
def _update_zone_and_target(self, tdw, x, y):
"""Update the zone and target node under a cursor position"""
new_zone = _EditZone.EMPTY_CANVAS
if self.phase == _Phase.ADJUST and not self.in_drag:
new_target_node_index = None
# Test buttons for hits
overlay = self._ensure_overlay_for_tdw(tdw)
hit_dist = gui.style.FLOATING_BUTTON_RADIUS
button_info = [
(_EditZone.ACCEPT_BUTTON, overlay.accept_button_pos),
(_EditZone.REJECT_BUTTON, overlay.reject_button_pos),
]
for btn_zone, btn_pos in button_info:
if btn_pos is None:
continue
btn_x, btn_y = btn_pos
d = math.hypot(btn_x - x, btn_y - y)
if d <= hit_dist:
new_target_node_index = None
new_zone = btn_zone
break
# Test nodes for a hit, in reverse draw order
if new_zone == _EditZone.EMPTY_CANVAS:
hit_dist = gui.style.DRAGGABLE_POINT_HANDLE_SIZE + 12
new_target_node_index = None
for i, node in reversed(list(enumerate(self.nodes))):
node_x, node_y = tdw.model_to_display(node.x, node.y)
d = math.hypot(node_x - x, node_y - y)
if d > hit_dist:
continue
new_target_node_index = i
new_zone = _EditZone.CONTROL_NODE
break
# Draw changes to the prelit node
if new_target_node_index != self.target_node_index:
if self.target_node_index is not None:
self._queue_draw_node(tdw, self.target_node_index)
self.target_node_index = new_target_node_index
if self.target_node_index is not None:
self._queue_draw_node(tdw, self.target_node_index)
# Update the zone, and assume any change implies a button state
# change as well (for now...)
if self.zone != new_zone:
self.zone = new_zone
self._queue_draw_buttons(tdw)
# Update the "real" inactive cursor too:
if not self.in_drag:
cursor = None
if self.phase == _Phase.ADJUST:
if self.zone == _EditZone.CONTROL_NODE:
cursor = self._crosshair_cursor
elif self.zone != _EditZone.EMPTY_CANVAS: # assume button
cursor = self._arrow_cursor
if cursor is not self._current_override_cursor:
tdw.set_override_cursor(cursor)
self._current_override_cursor = cursor
## Redraws
def _queue_draw_buttons(self, tdw):
overlay = self._ensure_overlay_for_tdw(tdw)
overlay.update_button_positions()
positions = (
overlay.reject_button_pos,
overlay.accept_button_pos,
)
for pos in positions:
if pos is None:
continue
r = gui.style.FLOATING_BUTTON_ICON_SIZE
r += max(
gui.style.DROP_SHADOW_X_OFFSET,
gui.style.DROP_SHADOW_Y_OFFSET,
)
r += gui.style.DROP_SHADOW_BLUR
x, y = pos
tdw.queue_draw_area(x-r, y-r, 2*r+1, 2*r+1)
def _queue_draw_node(self, tdw, i):
node = self.nodes[i]
x, y = tdw.model_to_display(node.x, node.y)
x = math.floor(x)
y = math.floor(y)
size = math.ceil(gui.style.DRAGGABLE_POINT_HANDLE_SIZE * 2)
tdw.queue_draw_area(x-size, y-size, size*2+1, size*2+1)
def _queue_redraw_all_nodes(self, tdw):
for i in xrange(len(self.nodes)):
self._queue_draw_node(tdw, i)
def _queue_redraw_curve(self, tdw):
model = tdw.doc
self._stop_task_queue_runner(complete=False)
if len(self.nodes) < 2:
return
self._queue_task(self.brushwork_rollback, model)
self._queue_task(
self.brushwork_begin, model,
description=_("Inking"),
abrupt=True,
)
interp_state = {"t_abs": self.nodes[0].time}
for p_1, p0, p1, p2 in gui.drawutils.spline_iter(self.nodes):
self._queue_task(
self._draw_curve_segment,
model,
p_1, p0, p1, p2,
state=interp_state
)
self._start_task_queue_runner()
def _draw_curve_segment(self, model, p_1, p0, p1, p2, state):
"""Draw the curve segment between the middle two points"""
last_t_abs = state["t_abs"]
dtime_p0_p1_real = p1[-1] - p0[-1]
steps_t = dtime_p0_p1_real / self.INTERPOLATION_MAX_SLICE_TIME
dist_p1_p2 = math.hypot(p1[0]-p2[0], p1[1]-p2[1])
steps_d = dist_p1_p2 / self.INTERPOLATION_MAX_SLICE_DISTANCE
steps_max = float(self.INTERPOLATION_MAX_SLICES)
steps = math.ceil(min(steps_max, max(steps_t, steps_d)))
for i in xrange(int(steps) + 1):
t = i / steps
point = gui.drawutils.spline_4p(t, p_1, p0, p1, p2)
x, y, pressure, xtilt, ytilt, t_abs = point
pressure = lib.helpers.clamp(pressure, 0.0, 1.0)
xtilt = lib.helpers.clamp(xtilt, -1.0, 1.0)
ytilt = lib.helpers.clamp(ytilt, -1.0, 1.0)
t_abs = max(last_t_abs, t_abs)
dtime = t_abs - last_t_abs
self.stroke_to(
model, dtime, x, y, pressure, xtilt, ytilt,
auto_split=False,
)
last_t_abs = t_abs
state["t_abs"] = last_t_abs
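    # Worked example (added for clarity; the numbers are illustrative): with
    # dtime_p0_p1_real = 0.05 s and dist_p1_p2 = 30 model px,
    #   steps_t = 0.05 / INTERPOLATION_MAX_SLICE_TIME   = 10.0
    #   steps_d = 30 / INTERPOLATION_MAX_SLICE_DISTANCE = 1.5
    #   steps   = ceil(min(150, max(10.0, 1.5)))        = 10
    # so this segment is rendered as 11 stroke_to() calls (i = 0..10).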
def _queue_task(self, callback, *args, **kwargs):
"""Append a task to be done later in an idle cycle"""
self._task_queue.append((callback, args, kwargs))
def _start_task_queue_runner(self):
"""Begin processing the task queue, if not already going"""
if self._task_queue_runner_id is not None:
return
idler_id = GLib.idle_add(self._task_queue_runner_cb)
self._task_queue_runner_id = idler_id
def _stop_task_queue_runner(self, complete=True):
"""Halts processing of the task queue, and clears it"""
if self._task_queue_runner_id is None:
return
if complete:
for (callback, args, kwargs) in self._task_queue:
callback(*args, **kwargs)
self._task_queue.clear()
GLib.source_remove(self._task_queue_runner_id)
self._task_queue_runner_id = None
def _task_queue_runner_cb(self):
"""Idle runner callback for the task queue"""
try:
callback, args, kwargs = self._task_queue.popleft()
except IndexError: # queue empty
self._task_queue_runner_id = None
return False
else:
callback(*args, **kwargs)
return True
## Drag handling (both capture and adjust phases)
def drag_start_cb(self, tdw, event):
self._ensure_overlay_for_tdw(tdw)
if self.phase == _Phase.CAPTURE:
self._reset_nodes()
self._reset_capture_data()
self._reset_adjust_data()
node = self._get_event_data(tdw, event)
self.nodes.append(node)
self._queue_draw_node(tdw, 0)
self._last_node_evdata = (event.x, event.y, event.time)
self._last_event_node = node
elif self.phase == _Phase.ADJUST:
if self.target_node_index is not None:
node = self.nodes[self.target_node_index]
self._dragged_node_start_pos = (node.x, node.y)
else:
raise NotImplementedError("Unknown phase %r" % self.phase)
def drag_update_cb(self, tdw, event, dx, dy):
if self.phase == _Phase.CAPTURE:
node = self._get_event_data(tdw, event)
if not self._last_node_evdata: # e.g. after an undo while dragging
append_node = True
else:
dx = event.x - self._last_node_evdata[0]
dy = event.y - self._last_node_evdata[1]
dist = math.hypot(dy, dx)
dt = event.time - self._last_node_evdata[2]
max_dist = self.MAX_INTERNODE_DISTANCE_MIDDLE
if len(self.nodes) < 2:
max_dist = self.MAX_INTERNODE_DISTANCE_ENDS
append_node = (
dist > max_dist and
dt > self.MAX_INTERNODE_TIME
)
if append_node:
self.nodes.append(node)
self._queue_draw_node(tdw, len(self.nodes)-1)
self._queue_redraw_curve(tdw)
self._last_node_evdata = (event.x, event.y, event.time)
self._last_event_node = node
elif self.phase == _Phase.ADJUST:
if self._dragged_node_start_pos:
x0, y0 = self._dragged_node_start_pos
disp_x, disp_y = tdw.model_to_display(x0, y0)
disp_x += event.x - self.start_x
disp_y += event.y - self.start_y
x, y = tdw.display_to_model(disp_x, disp_y)
node = self.nodes[self.target_node_index]
self._queue_draw_node(tdw, self.target_node_index)
self.nodes[self.target_node_index] = node._replace(x=x, y=y)
# FIXME: The curve redraw is a bit flickery.
# Perhaps dragging to adjust should only draw an
# armature during the drag, leaving the redraw to
# the stop handler.
self._queue_redraw_curve(tdw)
self._queue_draw_node(tdw, self.target_node_index)
else:
raise NotImplementedError("Unknown phase %r" % self.phase)
def drag_stop_cb(self, tdw):
if self.phase == _Phase.CAPTURE:
if not self.nodes:
return
node = self._last_event_node
# TODO: maybe rewrite the last node here so it's the right
# TODO: distance from the end?
if self.nodes[-1] is not node:
self.nodes.append(node)
self._reset_capture_data()
self._reset_adjust_data()
if len(self.nodes) > 1:
self.phase = _Phase.ADJUST
self._queue_redraw_all_nodes(tdw)
self._queue_redraw_curve(tdw)
self._queue_draw_buttons(tdw)
else:
self._reset_nodes()
tdw.queue_draw()
elif self.phase == _Phase.ADJUST:
self._dragged_node_start_pos = None
self._queue_redraw_curve(tdw)
self._queue_draw_buttons(tdw)
else:
raise NotImplementedError("Unknown phase %r" % self.phase)
## Interrogating events
def _get_event_data(self, tdw, event):
x, y = tdw.display_to_model(event.x, event.y)
xtilt, ytilt = self._get_event_tilt(tdw, event)
return _Node(
x=x, y=y,
pressure=self._get_event_pressure(event),
xtilt=xtilt, ytilt=ytilt,
time=(event.time / 1000.0),
)
def _get_event_pressure(self, event):
# FIXME: CODE DUPLICATION: copied from freehand.py
pressure = event.get_axis(Gdk.AxisUse.PRESSURE)
if pressure is not None:
if not isfinite(pressure):
pressure = None
else:
pressure = lib.helpers.clamp(pressure, 0.0, 1.0)
if pressure is None:
pressure = 0.0
if event.state & Gdk.ModifierType.BUTTON1_MASK:
pressure = 0.5
# Workaround for buggy evdev behaviour.
# Events sometimes get a zero raw pressure reading when the
# pressure reading has not changed. This results in broken
# lines. As a workaround, forbid zero pressures if there is a
# button pressed down, and substitute the last-known good value.
# Detail: https://github.com/mypaint/mypaint/issues/223
if self._button_down is not None:
if pressure == 0.0:
pressure = self._last_good_raw_pressure
elif pressure is not None and isfinite(pressure):
self._last_good_raw_pressure = pressure
return pressure
def _get_event_tilt(self, tdw, event):
# FIXME: CODE DUPLICATION: copied from freehand.py
xtilt = event.get_axis(Gdk.AxisUse.XTILT)
ytilt = event.get_axis(Gdk.AxisUse.YTILT)
if xtilt is None or ytilt is None or not isfinite(xtilt+ytilt):
return (0.0, 0.0)
if tdw.mirrored:
xtilt *= -1.0
if tdw.rotation != 0:
tilt_angle = math.atan2(ytilt, xtilt) - tdw.rotation
tilt_magnitude = math.sqrt((xtilt**2) + (ytilt**2))
xtilt = tilt_magnitude * math.cos(tilt_angle)
ytilt = tilt_magnitude * math.sin(tilt_angle)
# Evdev workaround. X and Y tilts suffer from the same
# problem as pressure for fancier devices.
if self._button_down is not None:
if xtilt == 0.0:
xtilt = self._last_good_raw_xtilt
else:
self._last_good_raw_xtilt = xtilt
if ytilt == 0.0:
ytilt = self._last_good_raw_ytilt
else:
self._last_good_raw_ytilt = ytilt
return (xtilt, ytilt)
class InkingModeOverlay (gui.overlays.Overlay):
"""Overlay for an InkingMode's adjustable points"""
def __init__(self, inkmode, tdw):
super(InkingModeOverlay, self).__init__()
self._inkmode = weakref.proxy(inkmode)
self._tdw = weakref.proxy(tdw)
self._button_pixbuf_cache = {}
self.accept_button_pos = None
self.reject_button_pos = None
def update_button_positions(self):
num_nodes = float(len(self._inkmode.nodes))
if num_nodes == 0:
            self.accept_button_pos = None
            self.reject_button_pos = None
            return
x = sum(n.x for n in self._inkmode.nodes) / num_nodes
y = max(n.y for n in self._inkmode.nodes)
x, y = self._tdw.model_to_display(x, y)
r = gui.style.FLOATING_BUTTON_RADIUS
y += 2 * r
margin = 2 * r
alloc = self._tdw.get_allocation()
view_x0, view_y0 = alloc.x, alloc.y
view_x1, view_y1 = view_x0+alloc.width, view_y0+alloc.height
k = 1.333
self.accept_button_pos = (
lib.helpers.clamp(x-k*r, view_x0 + margin, view_x1 - margin),
lib.helpers.clamp(y, view_y0 + margin, view_y1 - margin),
)
self.reject_button_pos = (
lib.helpers.clamp(x+k*r, view_x0 + margin, view_x1 - margin),
lib.helpers.clamp(y, view_y0 + margin, view_y1 - margin),
)
def _get_button_pixbuf(self, name):
cache = self._button_pixbuf_cache
pixbuf = cache.get(name)
if not pixbuf:
pixbuf = gui.drawutils.load_symbolic_icon(
icon_name=name,
size=gui.style.FLOATING_BUTTON_ICON_SIZE,
fg=(0, 0, 0, 1),
            )
            cache[name] = pixbuf
        return pixbuf
def paint(self, cr):
"""Draw adjustable nodes to the screen"""
mode = self._inkmode
# Control nodes
radius = gui.style.DRAGGABLE_POINT_HANDLE_SIZE
alloc = self._tdw.get_allocation()
for i, node in enumerate(mode.nodes):
x, y = self._tdw.model_to_display(node.x, node.y)
node_on_screen = (
x > alloc.x - radius*2 and
y > alloc.y - radius*2 and
x < alloc.x + alloc.width + radius*2 and
y < alloc.y + alloc.height + radius*2
)
if not node_on_screen:
continue
color = gui.style.PASSIVE_ITEM_COLOR
if mode.phase == _Phase.ADJUST:
if i == mode.target_node_index:
color = gui.style.ACTIVE_ITEM_COLOR
else:
color = gui.style.EDITABLE_ITEM_COLOR
gui.drawutils.render_round_floating_color_chip(
cr=cr, x=x, y=y,
color=color,
radius=radius,
)
# Buttons
if mode.phase == _Phase.ADJUST and not mode.in_drag:
self.update_button_positions()
radius = gui.style.FLOATING_BUTTON_RADIUS
button_info = [
(
"mypaint-ok-symbolic",
self.accept_button_pos,
_EditZone.ACCEPT_BUTTON,
),
(
"mypaint-trash-symbolic",
self.reject_button_pos,
_EditZone.REJECT_BUTTON,
),
]
for icon_name, pos, zone in button_info:
if pos is None:
continue
x, y = pos
if mode.zone == zone:
color = gui.style.ACTIVE_ITEM_COLOR
else:
color = gui.style.EDITABLE_ITEM_COLOR
icon_pixbuf = self._get_button_pixbuf(icon_name)
gui.drawutils.render_round_floating_button(
cr=cr, x=x, y=y,
color=color,
pixbuf=icon_pixbuf,
radius=radius,
)
|
prescott66/mypaint
|
gui/inktool.py
|
Python
|
gpl-2.0
| 28,795 | 0.000903 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import weakref
import IECore
import Gaffer
import GafferUI
def appendDefinitions( menuDefinition, prefix ) :
menuDefinition.append( prefix + "/About Gaffer...", { "command" : about } )
menuDefinition.append( prefix + "/Preferences...", { "command" : preferences } )
menuDefinition.append( prefix + "/Documentation...", { "command" : IECore.curry( GafferUI.showURL, os.path.expandvars( "$GAFFER_ROOT/doc/gaffer/html/index.html" ) ) } )
menuDefinition.append( prefix + "/Quit", { "command" : quit, "shortCut" : "Ctrl+Q" } )
def quit( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
unsavedNames = []
for script in application["scripts"].children() :
if script["unsavedChanges"].getValue() :
f = script["fileName"].getValue()
f = f.rpartition( "/" )[2] if f else "untitled"
unsavedNames.append( f )
if unsavedNames :
dialogue = GafferUI.ConfirmationDialogue(
"Discard Unsaved Changes?",
"The following files have unsaved changes : \n\n" +
"\n".join( [ " - " + n for n in unsavedNames ] ) +
"\n\nDo you want to discard the changes and quit?",
confirmLabel = "Discard and Quit"
)
if not dialogue.waitForConfirmation( parentWindow=scriptWindow ) :
return
# Defer the actual removal of scripts till an idle event - removing all
# the scripts will result in the removal of the window our menu item is
# parented to, which would cause a crash as it's deleted away from over us.
GafferUI.EventLoop.addIdleCallback( IECore.curry( __removeAllScripts, application ) )
def __removeAllScripts( application ) :
for script in application["scripts"].children() :
application["scripts"].removeChild( script )
return False # remove idle callback
__aboutWindow = None
def about( menu ) :
global __aboutWindow
if __aboutWindow is not None and __aboutWindow() :
window = __aboutWindow()
else :
window = GafferUI.AboutWindow( Gaffer.About )
__aboutWindow = weakref.ref( window )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
scriptWindow.addChildWindow( window )
window.setVisible( True )
__preferencesWindows = weakref.WeakKeyDictionary()
def preferences( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
global __preferencesWindows
window = __preferencesWindows.get( application, None )
if window is not None and window() :
window = window()
else :
window = GafferUI.Dialogue( "Preferences" )
closeButton = window._addButton( "Close" )
window.__closeButtonConnection = closeButton.clickedSignal().connect( __closePreferences )
saveButton = window._addButton( "Save" )
window.__saveButtonConnection = saveButton.clickedSignal().connect( __savePreferences )
nodeUI = GafferUI.NodeUI.create( application["preferences"] )
window._setWidget( nodeUI )
__preferencesWindows[application] = weakref.ref( window )
scriptWindow.addChildWindow( window )
window.setVisible( True )
def __closePreferences( button ) :
button.ancestor( type=GafferUI.Window ).setVisible( False )
def __savePreferences( button ) :
scriptWindow = button.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
application.savePreferences()
button.ancestor( type=GafferUI.Window ).setVisible( False )
|
cedriclaunay/gaffer
|
python/GafferUI/ApplicationMenu.py
|
Python
|
bsd-3-clause
| 5,283 | 0.038236 |
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
db = SQLAlchemy()
class BaseTable(db.Model):
__abstract__ = True
updated = db.Column(db.DateTime, default=func.now(), onupdate=func.current_timestamp())
created = db.Column(db.DateTime, default=func.now())
# Server -> Namespace -> Repository -> Branch -> Commit -> Deploy -> Log
class Server(BaseTable):
__tablename__ = 'server'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
namespace = relationship("Namespace", order_by="Namespace.id", backref="server")
class Namespace(BaseTable):
__tablename__ = 'namespace'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
server_id = db.Column(db.Integer, db.ForeignKey('server.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
repository = relationship("Repository", order_by="Repository.id", backref="namespace")
class Repository(BaseTable):
__tablename__ = 'repository'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
namespace_id = db.Column(db.Integer, db.ForeignKey('namespace.id'))
branch = relationship("Branch", order_by="Branch.updated.desc()", backref="repository")
class Branch(BaseTable):
__tablename__ = 'branch'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
repository_id = db.Column(db.Integer, db.ForeignKey('repository.id'))
commit = relationship("Commit", order_by="Commit.created.desc()", backref="branch")
class Commit(BaseTable):
__tablename__ = 'commit'
id = db.Column(db.Integer, primary_key=True)
sha = db.Column(db.String(40))
name = db.Column(db.String(255))
description = db.Column(db.String(1024))
status = db.Column(db.Enum('ERROR', 'WARNING', 'OK', 'UNKNOWN', 'RUNNING', name='commit_status_type'))
runtime = db.Column(db.Integer)
branch_id = db.Column(db.Integer, db.ForeignKey('branch.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
deploy = relationship("Deploy", order_by="Deploy.id", backref="commit")
class Deploy(BaseTable):
__tablename__ = 'deploy'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
uri = db.Column(db.String(1024))
status = db.Column(db.Enum('ERROR', 'WARNING', 'OK', 'UNKNOWN', 'RUNNING', name='deploy_status_type'))
runtime = db.Column(db.Integer)
commit_id = db.Column(db.Integer, db.ForeignKey('commit.id'))
log = relationship("Log", order_by="Log.id", backref="deploy")
class Log(BaseTable):
__tablename__ = 'log'
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.String(1024))
status = db.Column(db.Enum('ERROR', 'WARNING', 'OK', 'UNKNOWN', name='log_status_type'))
deploy_id = db.Column(db.Integer, db.ForeignKey('deploy.id'))
class User(BaseTable):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
email = db.Column(db.String(255))
password = db.Column(db.String(255))
commit = relationship("Commit", order_by="Commit.id", backref="user")
namespace = relationship("Namespace", order_by="Namespace.id", backref="user")
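# Usage sketch (added for illustration; assumes a Flask app configured elsewhere,
# and the names below are hypothetical). Rows follow the
# Server -> Namespace -> Repository hierarchy via the backrefs defined above.
#
#   db.init_app(app)
#   with app.app_context():
#       db.create_all()
#       server = Server(name='git.example.com')
#       ns = Namespace(name='acme', server=server)
#       repo = Repository(name='website', namespace=ns)
#       db.session.add(server)
#       db.session.commit()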
|
Salamek/git-deploy
|
git_deploy/database.py
|
Python
|
gpl-3.0
| 3,246 | 0.021873 |
"""
The MIT License (MIT)
Copyright (c) [2015-2018] [Andrew Annex]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .spiceypy import *
from .utils import support_types
__author__ = 'AndrewAnnex'
# Default setting for error reporting so that programs don't just exit out!
erract("set", 10, "return")
errdev("set", 10, "null")
|
drbitboy/SpiceyPy
|
spiceypy/__init__.py
|
Python
|
mit
| 1,313 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Julien Chaumont"
__copyright__ = "Copyright 2014, Julien Chaumont"
__licence__ = "MIT"
__version__ = "1.0.2"
__contact__ = "julienc91 [at] outlook.fr"
import flickrapi
import os, sys
import re
from config import *
class ShFlickr:
##
# Connexion to Flickr.
#
def __init__(self):
self.flickr = flickrapi.FlickrAPI(API_KEY, API_SECRET)
(token, frob) = self.flickr.get_token_part_one(perms='delete')
if not token:
raw_input("Press ENTER after you authorized this program")
self.flickr.get_token_part_two((token, frob))
##
# Get the list of files to synchronize with Flickr.
# @param folder Path to the main folder
# @return A tuple (photos_to_sync, photosets_to_create) where photos_to_sync
# is the list of files to synchronize for each subfolder, and
# photoset_ids is the list of albums with their respective id on Flickr,
# or None if the album does not exist yet.
#
def synclist(self, folder=PICTURE_FOLDER_PATH):
print "Getting the list of pictures to synchronize..."
subfolders = [lfile for lfile in os.listdir(unicode(folder))
if os.path.isdir(os.path.join(folder, lfile))
and re.match(SUBFOLDERS_REGEXP, lfile)]
photosets = self.flickr.photosets_getList(user_id=USER_ID)
photos_to_sync = {}
photoset_ids = {}
for subfolder in subfolders:
subfolder = subfolder.encode("UTF-8")
# Check if the album already exists on Flickr
photoset_id = None
for photoset in photosets.find('photosets').findall('photoset'):
photoset_title = photoset.find('title').text
if type(photoset_title) == unicode:
photoset_title = photoset_title.encode("UTF-8")
if photoset_title == subfolder:
photoset_id = str(photoset.attrib['id'])
break
photoset_ids[subfolder] = photoset_id
# Get the list of pictures to synchronize within this album
photos_to_sync[subfolder] = self.synclist_subfolder(os.path.join(folder, subfolder), photoset_id)
return photos_to_sync, photoset_ids
##
# Get the list of pictures to synchronize within an album.
# @param subfolder Complete path to the subfolder to synchronize
# @param photoset_id Id of the album on Flickr, or None of the album does not exist yet
# @return The list of the pictures to synchronize.
#
def synclist_subfolder(self, subfolder, photoset_id=None):
files = [lfile for lfile in os.listdir(unicode(subfolder))
if lfile.endswith(PICTURE_EXTENSIONS)]
files_to_sync = []
if photoset_id is not None:
# Find which file were not uploaded
photoset = list(self.flickr.walk_set(photoset_id))
for lfile in files:
lfile = lfile.encode("UTF-8")
found = False
for photo in photoset:
photo = photo.get('title')
if type(photo) == unicode:
photo = photo.encode("UTF-8")
if photo == lfile:
found = True
break
if not found:
files_to_sync.append(lfile)
else:
for lfile in files:
files_to_sync.append(lfile)
return files_to_sync
##
# Performs the upload.
# @param photos_to_sync A dictionary containing the list of
# pictures to upload for each subfolder.
# @param photoset_ids Dict of albums and their Flickr ids.
# @param folder Path to the main folder.
#
    def upload(self, photos_to_sync, photoset_ids={}, folder=PICTURE_FOLDER_PATH):
for subfolder in sorted(photos_to_sync):
count = 1
total = len(photos_to_sync[subfolder])
len_count = len(str(total))
consecutive_errors = 0
print "Album %s: %s photos to synchronize" % (subfolder, total)
for photo in sorted(photos_to_sync[subfolder]):
print "%-*s/%s\t %s" % (len_count, count, total, photo)
nb_errors = 0
done = False
while nb_errors < MAX_RETRIES and not done:
try:
path = os.path.join(folder, subfolder, photo).encode("UTF-8")
photo = photo.encode("UTF-8")
response = self.flickr.upload(filename=path,
title=photo,
is_public=VISIBLE_PUBLIC,
is_family=VISIBLE_FAMILY,
is_friend=VISIBLE_FRIEND)
except KeyboardInterrupt:
print "Exit by user request"
return
except:
nb_errors += 1
consecutive_errors += 1
if consecutive_errors >= MAX_CONSECUTIVE_ERRORS:
                            print "%d failed uploads in a row, aborting." % MAX_CONSECUTIVE_ERRORS
return
else:
print "Error, retrying upload (%s/%s)" % (nb_errors, MAX_RETRIES)
else:
photo_id = response.find('photoid').text
done = True
count += 1
consecutive_errors = 0
if photoset_ids[subfolder] is None:
print "Creating the remote album %s" % subfolder
response = self.flickr.photosets_create(title=subfolder,
primary_photo_id=photo_id)
photoset_ids[subfolder] = response.find('photoset').attrib['id']
else:
self.flickr.photosets_addPhoto(photoset_id=photoset_ids[subfolder],
photo_id=photo_id)
                if not done:
print "%s failed to upload" % photo
if __name__ == "__main__":
shflickr = ShFlickr()
photos_to_sync, photoset_ids = shflickr.synclist()
shflickr.upload(photos_to_sync, photoset_ids)
|
julienc91/ShFlickr
|
main.py
|
Python
|
mit
| 6,714 | 0.004319 |
# Create your views here.
from django.contrib.auth.models import Group
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
import os
from querystring_parser import parser
import simplejson
from simplejson import dumps
from social.backends.google import GooglePlusAuth
from madrona.features import get_feature_by_uid
import settings
from .models import *
from data_manager.models import *
from mp_settings.models import *
def show_planner(request, project=None, template='planner.html'):
try:
socket_url = settings.SOCKET_URL
except AttributeError:
socket_url = ''
try:
if project:
mp_settings = MarinePlannerSettings.objects.get(slug_name=project)
else:
mp_settings = MarinePlannerSettings.objects.get(active=True)
project_name = mp_settings.project_name
latitude = mp_settings.latitude
longitude = mp_settings.longitude
zoom = mp_settings.zoom
default_hash = mp_settings.default_hash
min_zoom = mp_settings.min_zoom
max_zoom = mp_settings.max_zoom
project_logo = mp_settings.project_logo
try:
if project_logo:
url_validator = URLValidator()
url_validator(project_logo)
except ValidationError, e:
project_logo = os.path.join(settings.MEDIA_URL, project_logo)
project_icon = mp_settings.project_icon
try:
url_validator = URLValidator()
url_validator(project_icon)
except ValidationError, e:
project_icon = os.path.join(settings.MEDIA_URL, project_icon)
project_home_page = mp_settings.project_home_page
enable_drawing = mp_settings.enable_drawing
bitly_registered_domain = mp_settings.bitly_registered_domain
bitly_username = mp_settings.bitly_username
bitly_api_key = mp_settings.bitly_api_key
except:
project_name = project_logo = project_icon = project_home_page = bitly_registered_domain = bitly_username = bitly_api_key = default_hash = ""
latitude = longitude = zoom = min_zoom = max_zoom = None
enable_drawing = False
context = {
'MEDIA_URL': settings.MEDIA_URL, 'SOCKET_URL': socket_url, 'login': 'true',
'project_name': project_name, 'latitude': latitude, 'longitude': longitude, 'zoom': zoom,
'default_hash': default_hash, 'min_zoom': min_zoom, 'max_zoom': max_zoom,
'project_logo': project_logo, 'project_icon': project_icon, 'project_home_page': project_home_page,
'enable_drawing': enable_drawing,
'bitly_registered_domain': bitly_registered_domain, 'bitly_username': bitly_username, 'bitly_api_key': bitly_api_key
}
    if request.user.is_authenticated():
context['session'] = request.session._session_key
if request.user.is_authenticated() and request.user.social_auth.all().count() > 0:
context['picture'] = request.user.social_auth.all()[0].extra_data.get('picture')
if settings.SOCIAL_AUTH_GOOGLE_PLUS_KEY:
context['plus_scope'] = ' '.join(GooglePlusAuth.DEFAULT_SCOPE)
context['plus_id'] = settings.SOCIAL_AUTH_GOOGLE_PLUS_KEY
if settings.UNDER_MAINTENANCE_TEMPLATE:
return render_to_response('under_maintenance.html',
RequestContext(request, context))
return render_to_response(template, RequestContext(request, context))
def show_embedded_map(request, project=None, template='map.html'):
try:
if project:
mp_settings = MarinePlannerSettings.objects.get(slug_name=project)
else:
mp_settings = MarinePlannerSettings.objects.get(active=True)
project_name = mp_settings.project_name
project_logo = mp_settings.project_logo
try:
if project_logo:
url_validator = URLValidator(verify_exists=False)
url_validator(project_logo)
except ValidationError, e:
project_logo = os.path.join(settings.MEDIA_URL, project_logo)
project_home_page = mp_settings.project_home_page
except:
project_name = project_logo = project_home_page = None
context = {
'MEDIA_URL': settings.MEDIA_URL,
'project_name': project_name,
'project_logo': project_logo,
'project_home_page': project_home_page
}
#context = {'MEDIA_URL': settings.MEDIA_URL}
return render_to_response(template, RequestContext(request, context))
def show_mobile_map(request, project=None, template='mobile-map.html'):
try:
if project:
mp_settings = MarinePlannerSettings.objects.get(slug_name=project)
else:
mp_settings = MarinePlannerSettings.objects.get(active=True)
print 'so far so good'
project_name = mp_settings.project_name
project_logo = mp_settings.project_logo
print project_name
print project_logo
# try:
# if project_logo:
# url_validator = URLValidator(verify_exists=False)
# url_validator(project_logo)
# except ValidationError, e:
# project_logo = os.path.join(settings.MEDIA_URL, project_logo)
print 'almost there...'
project_home_page = mp_settings.project_home_page
print 'here we go...'
latitude = mp_settings.latitude
print latitude
longitude = mp_settings.longitude
print longitude
zoom = mp_settings.zoom
print zoom
min_zoom = mp_settings.min_zoom
max_zoom = mp_settings.max_zoom
print min_zoom
print max_zoom
except:
project_name = project_logo = project_home_page = None
context = {
'MEDIA_URL': settings.MEDIA_URL,
# 'project_name': project_name,
# 'project_logo': project_logo,
# 'project_home_page': project_home_page
'latitude': latitude,
'longitude': longitude,
'zoom': zoom
}
#context = {'MEDIA_URL': settings.MEDIA_URL}
return render_to_response(template, RequestContext(request, context))
def get_sharing_groups(request):
from madrona.features import user_sharing_groups
from functools import cmp_to_key
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
json = []
sharing_groups = user_sharing_groups(request.user)
for group in sharing_groups:
members = []
for user in group.user_set.all():
if user.first_name.replace(' ', '') != '' and user.last_name.replace(' ', '') != '':
members.append(user.first_name + ' ' + user.last_name)
else:
members.append(user.username)
sorted_members = sorted(members, key=cmp_to_key(locale.strcoll))
json.append({
'group_name': group.name,
'group_slug': slugify(group.name)+'-sharing',
'members': sorted_members
})
return HttpResponse(dumps(json))
'''
'''
def share_bookmark(request):
group_names = request.POST.getlist('groups[]')
bookmark_uid = request.POST['bookmark']
bookmark = get_feature_by_uid(bookmark_uid)
viewable, response = bookmark.is_viewable(request.user)
if not viewable:
return response
#remove previously shared with groups, before sharing with new list
bookmark.share_with(None)
groups = []
for group_name in group_names:
groups.append(Group.objects.get(name=group_name))
bookmark.share_with(groups, append=False)
return HttpResponse("", status=200)
'''
'''
def get_bookmarks(request):
#sync the client-side bookmarks with the server side bookmarks
#update the server-side bookmarks and return the new list
try:
bookmark_dict = parser.parse(request.POST.urlencode())['bookmarks']
except:
bookmark_dict = {}
try:
#loop through the list from the client
#if user, bm_name, and bm_state match then skip
#otherwise, add to the db
for key,bookmark in bookmark_dict.items():
try:
Bookmark.objects.get(user=request.user, name=bookmark['name'], url_hash=bookmark['hash'])
except Bookmark.DoesNotExist:
new_bookmark = Bookmark(user=request.user, name=bookmark['name'], url_hash=bookmark['hash'])
new_bookmark.save()
            except:
                continue
#grab all bookmarks belonging to this user
#serialize bookmarks into 'name', 'hash' objects and return simplejson dump
content = []
bookmark_list = Bookmark.objects.filter(user=request.user)
for bookmark in bookmark_list:
sharing_groups = [group.name for group in bookmark.sharing_groups.all()]
content.append({
'uid': bookmark.uid,
'name': bookmark.name,
'hash': bookmark.url_hash,
'sharing_groups': sharing_groups
})
shared_bookmarks = Bookmark.objects.shared_with_user(request.user)
for bookmark in shared_bookmarks:
if bookmark not in bookmark_list:
username = bookmark.user.username
actual_name = bookmark.user.first_name + ' ' + bookmark.user.last_name
content.append({
'uid': bookmark.uid,
'name': bookmark.name,
'hash': bookmark.url_hash,
'shared': True,
'shared_by_username': username,
'shared_by_name': actual_name
})
return HttpResponse(simplejson.dumps(content), content_type="application/json", status=200)
except:
return HttpResponse(status=304)
def remove_bookmark(request):
try:
bookmark_uid = request.POST['uid']
bookmark = get_feature_by_uid(bookmark_uid)
viewable, response = bookmark.is_viewable(request.user)
if not viewable:
return response
bookmark.delete()
return HttpResponse(status=200)
except:
return HttpResponse(status=304)
def add_bookmark(request):
try:
bookmark = Bookmark(user=request.user, name=request.POST.get('name'), url_hash=request.POST.get('hash'))
bookmark.save()
sharing_groups = [group.name for group in bookmark.sharing_groups.all()]
content = []
content.append({
'uid': bookmark.uid,
'name': bookmark.name,
'hash': bookmark.url_hash,
'sharing_groups': sharing_groups
})
print 'returning content'
return HttpResponse(simplejson.dumps(content), content_type="application/json", status=200)
except:
return HttpResponse(status=304)
|
Ecotrust/PEW-EFH
|
mp/visualize/views.py
|
Python
|
apache-2.0
| 11,227 | 0.004186 |
# -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-04-02
# Original data: Rakić et al. 1998, https://doi.org/10.1364/AO.37.005271
import numpy as np
import matplotlib.pyplot as plt
# Lorentz-Drude (LD) model parameters
ωp = 9.03 #eV
f0 = 0.760
Γ0 = 0.053 #eV
f1 = 0.024
Γ1 = 0.241 #eV
ω1 = 0.415 #eV
f2 = 0.010
Γ2 = 0.345 #eV
ω2 = 0.830 #eV
f3 = 0.071
Γ3 = 0.870 #eV
ω3 = 2.969 #eV
f4 = 0.601
Γ4 = 2.494 #eV
ω4 = 4.304 #eV
f5 = 4.384
Γ5 = 2.214 #eV
ω5 = 13.32 #eV
Ωp = f0**.5 * ωp #eV
def LD(ω): #ω: eV
ε = 1-Ωp**2/(ω*(ω+1j*Γ0))
ε += f1*ωp**2 / ((ω1**2-ω**2)-1j*ω*Γ1)
ε += f2*ωp**2 / ((ω2**2-ω**2)-1j*ω*Γ2)
ε += f3*ωp**2 / ((ω3**2-ω**2)-1j*ω*Γ3)
ε += f4*ωp**2 / ((ω4**2-ω**2)-1j*ω*Γ4)
ε += f5*ωp**2 / ((ω5**2-ω**2)-1j*ω*Γ5)
return ε
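# The LD(ω) function above implements the Lorentz-Drude model used by Rakić et al.:
#   ε(ω) = 1 - f0·ωp²/(ω·(ω + i·Γ0)) + Σ_j f_j·ωp² / ((ω_j² - ω²) - i·ω·Γ_j),  j = 1..5
# The first term is the free-electron (Drude) part with plasma strength Ωp² = f0·ωp²;
# the sum is the bound-electron (Lorentz) part with oscillators (f_j, Γ_j, ω_j).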
ev_min=0.2
ev_max=5
npoints=200
eV = np.logspace(np.log10(ev_min), np.log10(ev_max), npoints)
μm = 4.13566733e-1*2.99792458/eV
ε = LD(eV)
n = (ε**.5).real
k = (ε**.5).imag
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
plt.figure(1)
plt.plot(eV, -ε.real, label="-ε1")
plt.plot(eV, ε.imag, label="ε2")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs eV
plt.figure(2)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(3)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
|
polyanskiy/refractiveindex.info-scripts
|
scripts/Rakic 1998 - Au (LD model).py
|
Python
|
gpl-3.0
| 2,100 | 0.028529 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_overlay_global
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Configures anycast gateway MAC of the switch.
description:
- Configures anycast gateway MAC of the switch.
author: Gabriele Gerbino (@GGabriele)
notes:
- Default restores params default value
- Supported MAC address format are "E.E.E", "EE-EE-EE-EE-EE-EE",
"EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE"
options:
anycast_gateway_mac:
description:
- Anycast gateway mac of the switch.
required: true
default: null
'''
EXAMPLES = '''
- nxos_overlay_global:
anycast_gateway_mac: "b.b.b"
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
existing:
description: k/v pairs of existing BGP configuration
returned: verbose mode
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "11.11.11.11", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
end_state:
description: k/v pairs of BGP configuration after module execution
returned: verbose mode
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "1.1.1.1", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac',
}
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
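# Example (added for clarity; the MAC value is hypothetical): for a running
# config that contains the line
#     fabric forwarding anycast-gateway-mac 000B.000B.000B
# get_value('anycast_gateway_mac', config, module) returns '000B.000B.000B'.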
def get_existing(module, args):
existing = {}
config = str(get_config(module))
for arg in args:
existing[arg] = get_value(arg, config, module)
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def get_commands(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value == 'default':
existing_value = existing_commands.get(key)
if existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if 'anycast-gateway-mac' in key:
value = normalize_mac(value, module)
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
candidate.add(commands, parents=[])
def normalize_mac(proposed_mac, module):
try:
if '-' in proposed_mac:
splitted_mac = proposed_mac.split('-')
if len(splitted_mac) != 6:
raise ValueError
for octect in splitted_mac:
if len(octect) != 2:
raise ValueError
elif '.' in proposed_mac:
splitted_mac = []
splitted_dot_mac = proposed_mac.split('.')
if len(splitted_dot_mac) != 3:
raise ValueError
for octect in splitted_dot_mac:
if len(octect) > 4:
raise ValueError
else:
                    # zfill pads to a total width, so pad each dot-separated
                    # group out to four hex digits
                    splitted_mac.append(octect.zfill(4))
elif ':' in proposed_mac:
splitted_mac = proposed_mac.split(':')
if len(splitted_mac) != 6:
raise ValueError
for octect in splitted_mac:
if len(octect) != 2:
raise ValueError
else:
raise ValueError
except ValueError:
module.fail_json(msg='Invalid MAC address format',
proposed_mac=proposed_mac)
joined_mac = ''.join(splitted_mac)
mac = [joined_mac[i:i+4] for i in range(0, len(joined_mac), 4)]
return '.'.join(mac).upper()
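# Worked examples for normalize_mac() (illustrative, derived from the branches
# above; they are not test cases shipped with the module):
#   normalize_mac('00:1b:54:c2:52:12', module) -> '001B.54C2.5212'
#   normalize_mac('00-1b-54-c2-52-12', module) -> '001B.54C2.5212'
#   normalize_mac('b.b.b', module)             -> '000B.000B.000B'
#   normalize_mac('00:1b:54', module)          -> fail_json('Invalid MAC address format')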
def main():
argument_spec = dict(
anycast_gateway_mac=dict(required=True, type='str'),
m_facts=dict(required=False, default=False, type='bool'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
args = [
'anycast_gateway_mac'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
result = {}
candidate = CustomNetworkConfig(indent=3)
invoke('get_commands', module, existing, proposed, candidate)
if not module.check_mode:
load_config(module, candidate)
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed
    result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
|
andreaso/ansible
|
lib/ansible/modules/network/nxos/nxos_overlay_global.py
|
Python
|
gpl-3.0
| 8,884 | 0.0009 |
#!/usr/bin/env python3
#
import re
import evdev
import subprocess
import time
import argparse
def process_test_line(line, controls):
tmp = line.strip()
fields = tmp.split()
operation = fields[1].lower()
if operation == 'receive':
target = fields[2].lower()
if target == 'syn':
return (operation, 0, 0, 0)
elif target == 'axis':
ctrl_type = evdev.ecodes.EV_ABS
else:
ctrl_type = evdev.ecodes.EV_KEY
control = int(fields[3])
value = int(fields[4])
else:
control_str = fields[2]
if not control_str in controls:
print('Warning: Control {0} unknown.'.format(control_str))
print(line)
return None
(ctrl_type, control) = controls[control_str]
value = int(fields[3])
return (operation, ctrl_type, control, value)
def read_config(fname):
sequence = []
devname = ''
controls = {}
f = open(fname)
test_re = re.compile('//\*(.*)$')
dev_re = re.compile('^\s*(grab\s+)?device\s+"([^"]+)"')
def_re = re.compile('^\s*(button|axis)\s+(\S+)\s*=\s*(\S+)')
for line in f:
m = test_re.match(line)
if m:
tst = process_test_line(line, controls)
if tst:
sequence.append(tst)
continue
m = dev_re.match(line)
if m:
devname = m.group(2)
continue
m = def_re.match(line)
if m:
if m.group(1) == 'axis':
controls[m.group(2)] = (evdev.ecodes.EV_ABS, int(m.group(3)));
else:
controls[m.group(2)] = (evdev.ecodes.EV_KEY, int(m.group(3)));
f.close()
return {'seq':sequence, 'devname': devname, 'controls': controls}
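# Illustrative snippet of a config file that read_config() understands. The
# exact grammar is an assumption reconstructed from the regexes above, not
# taken from evdevshift documentation:
#
#   device "Virtual gamepad"
#   button trigger = 288
#   axis   x       = 0
#   //* send trigger 1
#   //* receive button 288 1
#   //* receive syn
#
# 'send' lines reference controls by the names defined above; 'receive' lines
# name the raw event kind (button/axis/syn) followed by numeric code and value.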
def make_cap(config):
axes = []
buttons = []
# loops through keys of dictionary
for ctrl in config['controls']:
(ctrl_type, ctrl_id) = config['controls'][ctrl]
if ctrl_type == evdev.ecodes.EV_KEY:
buttons.append(ctrl_id)
else:
axes.append((ctrl_id, evdev.AbsInfo(0, 255, 0, 15, 0, 0)))
# sort the arrays
axes.sort()
buttons.sort()
cap = {}
if axes:
cap[evdev.ecodes.EV_ABS] = axes;
if buttons:
cap[evdev.ecodes.EV_KEY] = buttons;
return cap
def find_device(name):
patt = re.compile(name)
devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]
for device in devices:
if patt.match(device.name):
return device
parser = argparse.ArgumentParser(description = 'Test evdevshift using specially prepared config.')
parser.add_argument('--config', type=str, dest='arg')
args = parser.parse_args()
arg = args.arg
# read the config and prepare the caps of the source device
config = read_config(arg)
cap = make_cap(config)
# create the source device
ui = evdev.UInput(cap, name=config['devname'], vendor = 0xf30, product = 0x110, version=0x110)
eds_templ = subprocess.Popen(['./evdevshift_dbg', '--device={0}'.format(ui.device.fn), '--template=regrestest.conf'])
# start the evdevshift and point it to the config
eds = subprocess.Popen(['./evdevshift_dbg', '--config={0}'.format(arg)])
# temporary, to make sure the evdevshift started and created the device...
time.sleep(1)
# find the newly created device
dev = find_device('evdevshift')
print(dev)
#send the test sequence and check the outputs
sent = False  # no device event written yet, so no syn flush is pending
problems = 0
for ev in config['seq']:
if ev[0] == 'send':
print('=====================================')
print('Sending (type {0} code {1} val {2})'.format(ev[1], ev[2], ev[3]))
sent = True
ui.write(ev[1], ev[2], ev[3])
else:
if sent:
#print('syn')
ui.syn()
sent = False
# give the stuff some time to pass the events
# not nice, will need to rework to avoid races
time.sleep(0.1)
in_ev = dev.read_one()
if in_ev:
if (in_ev.type == ev[1]) and (in_ev.code == ev[2]) and (in_ev.value == ev[3]):
print('Response OK (type {0} code {1} val {2})'.format(ev[1], ev[2], ev[3]))
else:
problems += 1
print('Error: Expected (type {0} code {1} val {2})'.format(ev[1], ev[2], ev[3]))
print(' Received (type {0} code {1} val {2})'.format(in_ev.type, in_ev.code, in_ev.value))
print('=====================================')
print('Expected error (Read wrong number of bytes (-1)!)')
ui.close()
time.sleep(1)
if problems == 0:
print('\n\nNo problems encountered!')
else:
print('\n\n{0} problems found.'.format(problems))
|
uglyDwarf/evdevshift
|
scafold2.py
|
Python
|
mit
| 4,502 | 0.026877 |
#!/usr/bin/env python
import os
import re
import sys
import socket
import httplib
import urlparse
from urllib import urlencode
from urllib2 import urlopen
from argparse import ArgumentParser
from collections import OrderedDict
def _get_discover_url(given_discover_url, update_type):
if update_type == '4':
return given_discover_url
elif update_type == '6':
parsed_url = urlparse.urlsplit(given_discover_url)
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(parsed_url.netloc, parsed_url.port, socket.AF_INET6):
address, port, flow_info, scope_id = sockaddr
return urlparse.urlunsplit((parsed_url.scheme, '[' + address + ']', parsed_url.path, parsed_url.query, parsed_url.fragment))
raise ValueError('Cannot find an IPv6 address with the discovery URL {}'.format(given_discover_url))
else:
raise ValueError('Unknown update type {!r}'.format(update_type))
def _discover_own_address(discover_url):
response = urlopen(discover_url)
code, content = response.code, response.read()
if code != httplib.OK:
print >>sys.stderr, 'error: could not discover own address.'
print >>sys.stderr, 'server returned {}, {}'.format(code, content)
raise SystemExit(1)
parsed_response = re.search(r'Your IP address is\s*:\s*(?P<ip_address>(\d+\.\d+\.\d+\.\d+)|([0-9a-fA-F:]+))', content)
if parsed_response is None:
print >>sys.stderr, 'error: could not parse own IP properly'
print >>sys.stderr, 'server returned:', content
raise SystemExit(2)
return parsed_response.groupdict()['ip_address']
def _send_update(hostname, password, update_url, ip_address):
data = urlencode(OrderedDict(hostname=hostname, password=password, myip=ip_address))
response = urlopen(update_url, data)
content = response.read().strip()
if response.code != httplib.OK:
print >>sys.stderr, 'error: update failed. error is {}'.format(response.code)
print >>sys.stderr, content
raise SystemExit(3)
parsed_content = re.match(r'^(?P<key>badauth|nochg|good|noipv6)(\s(?P<value>.*))?$', content)
if parsed_content is None:
print >>sys.stderr, 'error: unknown returned response: {}'.format(content)
raise SystemExit(4)
key, value = parsed_content.groupdict()['key'], parsed_content.groupdict()['value']
if key == 'badauth':
print >>sys.stderr, 'error: the domain name and password do not match'
print >>sys.stderr, 'Make sure you are using a domain name that has been marked for dynamic updates,'
print >>sys.stderr, 'and that the password used is the update key (not your account password).'
raise SystemExit(5)
elif key == 'nochg':
print >>sys.stderr, 'no update required (IP is {})'.format(value)
elif key == 'noipv6':
print >>sys.stderr, 'cannot update ipv6 for this hostname'
elif key == 'good':
print >>sys.stderr, 'update complete: {}'.format(value)
def main():
parser = ArgumentParser()
parser.add_argument('hostname', help='The hostname (domain name) to be updated. Make sure this domain has been marked for dynamic DNS updating')
parser.add_argument('password', help='Update key for this domain (as generated from the zone management interface)')
parser.add_argument('-u', '--update-url', default='https://dyn.dns.he.net/nic/update',
help='URL to post the update to')
parser.add_argument('-d', '--discover-url', default='http://checkip.dns.he.net',
help='Service for discovery of own address')
parser.add_argument('-t', '--type', default='4',
help='Type of update: either "4" for IPv4 or "6" for IPv6')
parser.add_argument('-i', '--ip-address', default=None,
help='The IP address to be updated for this domain. Leave blank to auto-discover')
args = parser.parse_args()
if args.ip_address is None:
discover_url = _get_discover_url(args.discover_url, args.type)
args.ip_address = _discover_own_address(discover_url)
_send_update(args.hostname, args.password, args.update_url, args.ip_address)
if __name__ == '__main__':
main()
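# Example invocations (illustrative; hostname and update key are placeholders):
#   ./update_he_dns.py home.example.com MyUpdateKey              # update the IPv4 record
#   ./update_he_dns.py home.example.com MyUpdateKey -t 6         # update the IPv6 record
#   ./update_he_dns.py home.example.com MyUpdateKey -i 203.0.113.7   # skip auto-discovery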
|
popen2/he_dns
|
update_he_dns.py
|
Python
|
mit
| 4,258 | 0.005636 |
from django.conf.urls import patterns, include, url
from misago.threads.views.privatethreads import PrivateThreadsView
urlpatterns = patterns('',
url(r'^private-threads/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/sort-(?P<sort>[\w-]+)/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/sort-(?P<sort>[\w-]+)/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/show-(?P<show>[\w-]+)/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/show-(?P<show>[\w-]+)/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/sort-(?P<sort>[\w-]+)/show-(?P<show>[\w-]+)/$', PrivateThreadsView.as_view(), name='private_threads'),
url(r'^private-threads/sort-(?P<sort>[\w-]+)/show-(?P<show>[\w-]+)/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'),
)
# thread view
from misago.threads.views.privatethreads import ThreadView
urlpatterns += patterns('',
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/$', ThreadView.as_view(), name='private_thread'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/(?P<page>\d+)/$', ThreadView.as_view(), name='private_thread'),
)
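# For reference (illustrative, assuming these patterns are included at the
# project root), the two patterns above resolve paths such as:
#   /private-thread/weekend-plans-42/    -> ThreadView, first page
#   /private-thread/weekend-plans-42/3/  -> ThreadView, page 3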
# goto views
from misago.threads.views.privatethreads import (GotoLastView, GotoNewView,
GotoPostView)
urlpatterns += patterns('',
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/last/$', GotoLastView.as_view(), name='private_thread_last'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/new/$', GotoNewView.as_view(), name='private_thread_new'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/post-(?P<post_id>\d+)/$', GotoPostView.as_view(), name='private_thread_post'),
)
# reported posts views
from misago.threads.views.privatethreads import ReportedPostsListView
urlpatterns += patterns('',
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/reported-posts/$', ReportedPostsListView.as_view(), name='private_thread_reported'),
)
# participants views
from misago.threads.views.privatethreads import (ThreadParticipantsView,
EditThreadParticipantsView,
AddThreadParticipantsView,
RemoveThreadParticipantView,
LeaveThreadView)
urlpatterns += patterns('',
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/participants/$', ThreadParticipantsView.as_view(), name='private_thread_participants'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/edit-participants/$', EditThreadParticipantsView.as_view(), name='private_thread_edit_participants'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/remove-participant/(?P<user_id>\d+)/$', RemoveThreadParticipantView.as_view(), name='private_thread_remove_participant'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/add-participants/$', AddThreadParticipantsView.as_view(), name='private_thread_add_participants'),
url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/leave/$', LeaveThreadView.as_view(), name='private_thread_leave'),
)
# post views
from misago.threads.views.privatethreads import (QuotePostView, HidePostView,
UnhidePostView,
DeletePostView,
ReportPostView)
urlpatterns += patterns('',
url(r'^private-post/(?P<post_id>\d+)/quote/$', QuotePostView.as_view(), name='quote_private_post'),
url(r'^private-post/(?P<post_id>\d+)/unhide/$', UnhidePostView.as_view(), name='unhide_private_post'),
url(r'^private-post/(?P<post_id>\d+)/hide/$', HidePostView.as_view(), name='hide_private_post'),
url(r'^private-post/(?P<post_id>\d+)/delete/$', DeletePostView.as_view(), name='delete_private_post'),
url(r'^private-post/(?P<post_id>\d+)/report/$', ReportPostView.as_view(), name='report_private_post'),
)
# events view
from misago.threads.views.privatethreads import EventsView
urlpatterns += patterns('',
url(r'^edit-private-event/(?P<event_id>\d+)/$', EventsView.as_view(), name='edit_private_event'),
)
# posting views
from misago.threads.views.privatethreads import PostingView
urlpatterns += patterns('',
url(r'^start-private-thread/$', PostingView.as_view(), name='start_private_thread'),
url(r'^reply-private-thread/(?P<thread_id>\d+)/$', PostingView.as_view(), name='reply_private_thread'),
url(r'^edit-private_post/(?P<thread_id>\d+)/(?P<post_id>\d+)/edit/$', PostingView.as_view(), name='edit_private_post'),
)
|
390910131/Misago
|
misago/threads/urls/privatethreads.py
|
Python
|
gpl-2.0
| 5,045 | 0.008523 |
# -*- coding: utf-8 -*-
import ast
import os
import requests
import models
from config import config, sqla
from gevent.pool import Pool
from helpers import random_str, down
base_path = config.get('photo', 'path')
base_path = os.path.join(base_path, 'celebrity')
cookies = {
'bid': ''
}
def create_down(str_urls, douban_id, category):
urls = ast.literal_eval(str_urls or "[]")
path = os.path.join(base_path, category)
for url in urls:
filename = str(douban_id) + '_' + url.split('/')[-1].strip('?')
cookies['bid'] = random_str(11)
down(url, cookies, path, filename)
def create_requests_and_save_datas(douban_id):
session = sqla['session']
cookies['bid'] = random_str(11)
celebrity = session.query(models.Celebrity).filter_by(
douban_id=douban_id
).one()
cover_url = celebrity.cover
thumbnail_cover_url = celebrity.thumbnail_cover
photos_url = celebrity.photos
thumbnail_photos_url = celebrity.thumbnail_photos
down(
cover_url,
cookies,
os.path.join(base_path, 'cover'),
str(douban_id)+'_'+cover_url.split('/')[-1].strip('?')
)
down(
thumbnail_cover_url,
cookies,
os.path.join(base_path, 'thumbnail_cover'),
str(douban_id)+'_'+cover_url.split('/')[-1].strip('?')
)
create_down(photos_url, douban_id, 'photos')
create_down(thumbnail_photos_url, douban_id, 'thumbnail_photos')
def task(douban_ids, pool_number):
pool = Pool(pool_number)
for douban_id in douban_ids:
pool.spawn(
create_requests_and_save_datas,
douban_id=douban_id
)
pool.join()
|
billvsme/videoSpider
|
webs/douban/tasks/down_celebrity_images.py
|
Python
|
mit
| 1,702 | 0 |
"""A fast, lightweight, and secure session WSGI middleware for use with GAE."""
import datetime
import hashlib
import hmac
import logging
import os
import pickle
import threading
import time
from Cookie import CookieError, SimpleCookie
from base64 import b64decode, b64encode
from google.appengine.api import memcache
from google.appengine.ext import db
# Configurable cookie options
COOKIE_NAME_PREFIX = "DgU" # identifies a cookie as being one used by gae-sessions (so you can set cookies too)
COOKIE_PATH = "/"
DEFAULT_COOKIE_ONLY_THRESH = 10240 # 10KB: GAE only allows ~16000B in HTTP header - leave ~6KB for other info
DEFAULT_LIFETIME = datetime.timedelta(days=7)
# constants
SID_LEN = 43 # timestamp (10 chars) + underscore + md5 (32 hex chars)
SIG_LEN = 44 # base 64 encoded HMAC-SHA256
MAX_COOKIE_LEN = 4096
EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH
COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly'
COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure'
COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT'
COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len(
'expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150 # 150=safety margin (e.g., in case browser uses 4000 instead of 4096)
MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD
_tls = threading.local()
def get_current_session():
"""Returns the session associated with the current request."""
return _tls.current_session
def set_current_session(session):
"""Sets the session associated with the current request."""
_tls.current_session = session
def is_gaesessions_key(k):
return k.startswith(COOKIE_NAME_PREFIX)
class SessionModel(db.Model):
"""Contains session data. key_name is the session ID and pdump contains a
pickled dictionary which maps session variables to their values."""
pdump = db.BlobProperty()
class Session(object):
"""Manages loading, reading/writing key-value pairs, and saving of a session.
``sid`` - if set, then the session for that sid (if any) is loaded. Otherwise,
sid will be loaded from the HTTP_COOKIE (if any).
"""
DIRTY_BUT_DONT_PERSIST_TO_DB = 1
def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None):
self._accessed = False
self.sid = None
self.cookie_keys = []
self.cookie_data = None
self.data = {}
self.dirty = False # has the session been changed?
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.base_key = cookie_key
if sid:
self.__set_sid(sid, False)
self.data = None
else:
self.__read_cookie()
@staticmethod
def __compute_hmac(base_key, sid, text):
"""Computes the signature for text given base_key and sid."""
key = base_key + sid
return b64encode(hmac.new(key, text, hashlib.sha256).digest())
def __read_cookie(self):
"""Reads the HTTP Cookie and loads the sid and data from it (if any)."""
try:
# check the cookie to see if a session has been started
cookie = SimpleCookie(os.environ['HTTP_COOKIE'])
self.cookie_keys = filter(is_gaesessions_key, cookie.keys())
if not self.cookie_keys:
return # no session yet
self.cookie_keys.sort()
data = ''.join(cookie[k].value for k in self.cookie_keys)
i = SIG_LEN + SID_LEN
sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:]
pdump = b64decode(b64pdump)
actual_sig = Session.__compute_hmac(self.base_key, sid, pdump)
if sig == actual_sig:
self.__set_sid(sid, False)
# check for expiration and terminate the session if it has expired
if self.get_expiration() != 0 and time.time() > self.get_expiration():
return self.terminate()
if pdump:
self.data = self.__decode_data(pdump)
else:
self.data = None # data is in memcache/db: load it on-demand
else:
logging.warn('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump))
except (CookieError, KeyError, IndexError, TypeError):
# there is no cookie (i.e., no session) or the cookie is invalid
self.terminate(False)
def make_cookie_headers(self):
"""Returns a list of cookie headers to send (if any)."""
# expire all cookies if the session has ended
if not self.sid:
return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys]
if self.cookie_data is None:
return [] # no cookie headers need to be sent
# build the cookie header(s): includes sig, sid, and cookie_data
if self.is_ssl_only():
m = MAX_DATA_PER_COOKIE - 8
fmt = COOKIE_FMT_SECURE
else:
m = MAX_DATA_PER_COOKIE
fmt = COOKIE_FMT
sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data)
cv = sig + self.sid + b64encode(self.cookie_data)
num_cookies = 1 + (len(cv) - 1) / m
if self.get_expiration() > 0:
ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT)
else:
ed = ''
cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)]
# expire old cookies which aren't needed anymore
old_cookies = xrange(num_cookies, len(self.cookie_keys))
key = COOKIE_NAME_PREFIX + '%02d'
cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies]
return cookies + cookies_to_ax
def is_active(self):
"""Returns True if this session is active (i.e., it has been assigned a
session ID and will be or has been persisted)."""
return self.sid is not None
def is_ssl_only(self):
"""Returns True if cookies set by this session will include the "Secure"
attribute so that the client will only send them over a secure channel
like SSL)."""
return self.sid is not None and self.sid[-33] == 'S'
def is_accessed(self):
"""Returns True if any value of this session has been accessed."""
return self._accessed
def ensure_data_loaded(self):
"""Fetch the session data if it hasn't been retrieved it yet."""
self._accessed = True
if self.data is None and self.sid:
self.__retrieve_data()
def get_expiration(self):
"""Returns the timestamp at which this session will expire."""
try:
return int(self.sid[:-33])
except:
return 0
def __make_sid(self, expire_ts=None, ssl_only=False):
"""Returns a new session ID."""
# make a random ID (random.randrange() is 10x faster but less secure?)
if expire_ts is None:
expire_dt = datetime.datetime.now() + self.lifetime
expire_ts = int(time.mktime((expire_dt).timetuple()))
else:
expire_ts = int(expire_ts)
if ssl_only:
sep = 'S'
else:
sep = '_'
return ('%010d' % expire_ts) + sep + hashlib.md5(os.urandom(16)).hexdigest()
@staticmethod
def __encode_data(d):
"""Returns a "pickled+" encoding of d. d values of type db.Model are
protobuf encoded before pickling to minimize CPU usage & data size."""
# separate protobufs so we'll know how to decode (they are just strings)
eP = {} # for models encoded as protobufs
eO = {} # for everything else
for k, v in d.iteritems():
if isinstance(v, db.Model):
eP[k] = db.model_to_protobuf(v)
else:
eO[k] = v
return pickle.dumps((eP, eO), 2)
@staticmethod
def __decode_data(pdump):
"""Returns a data dictionary after decoding it from "pickled+" form."""
try:
eP, eO = pickle.loads(pdump)
for k, v in eP.iteritems():
eO[k] = db.model_from_protobuf(v)
except Exception, e:
logging.warn("failed to decode session data: %s" % e)
eO = {}
return eO
def regenerate_id(self, expiration_ts=None):
"""Assigns the session a new session ID (data carries over). This
should be called whenever a user authenticates to prevent session
fixation attacks.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session expiration time will not be changed.
"""
if self.sid or expiration_ts is not None:
self.ensure_data_loaded() # ensure we have the data before we delete it
if expiration_ts is None:
expiration_ts = self.get_expiration()
self.__set_sid(self.__make_sid(expiration_ts, self.is_ssl_only()))
self.dirty = True # ensure the data is written to the new session
def start(self, expiration_ts=None, ssl_only=False):
"""Starts a new session. expiration specifies when it will expire. If
        expiration is not specified, then self.lifetime will be used to
determine the expiration date.
Normally this method does not need to be called directly - a session is
automatically started when the first value is added to the session.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session will expire after the default ``lifetime`` has past
(as specified in ``SessionMiddleware``).
``ssl_only`` - Whether to specify the "Secure" attribute on the cookie
so that the client will ONLY transfer the cookie over a secure channel.
"""
self.dirty = True
self.data = {}
self.__set_sid(self.__make_sid(expiration_ts, ssl_only), True)
def terminate(self, clear_data=True):
"""Deletes the session and its data, and expires the user's cookie."""
if clear_data:
self.__clear_data()
self.sid = None
self.data = {}
self.dirty = False
if self.cookie_keys:
self.cookie_data = '' # trigger the cookies to expire
else:
self.cookie_data = None
def __set_sid(self, sid, make_cookie=True):
"""Sets the session ID, deleting the old session if one existed. The
session's data will remain intact (only the session ID changes)."""
if self.sid:
self.__clear_data()
self.sid = sid
self.db_key = db.Key.from_path(SessionModel.kind(), sid, namespace='')
# set the cookie if requested
if make_cookie:
self.cookie_data = '' # trigger the cookie to be sent
def __clear_data(self):
"""Deletes this session from memcache and the datastore."""
if self.sid:
memcache.delete(self.sid, namespace='') # not really needed; it'll go away on its own
try:
db.delete(self.db_key)
except:
pass # either it wasn't in the db (maybe cookie/memcache-only) or db is down => cron will expire it
def __retrieve_data(self):
"""Sets the data associated with this session after retrieving it from
memcache or the datastore. Assumes self.sid is set. Checks for session
expiration after getting the data."""
pdump = memcache.get(self.sid, namespace='')
if pdump is None:
# memcache lost it, go to the datastore
if self.no_datastore:
logging.info("can't find session data in memcache for sid=%s (using memcache only sessions)" % self.sid)
self.terminate(False) # we lost it; just kill the session
return
session_model_instance = db.get(self.db_key)
if session_model_instance:
pdump = session_model_instance.pdump
else:
logging.error("can't find session data in the datastore for sid=%s" % self.sid)
self.terminate(False) # we lost it; just kill the session
return
self.data = self.__decode_data(pdump)
def save(self, persist_even_if_using_cookie=False):
"""Saves the data associated with this session IF any changes have been
made (specifically, if any mutator methods like __setitem__ or the like
is called).
If the data is small enough it will be sent back to the user in a cookie
instead of using memcache and the datastore. If `persist_even_if_using_cookie`
evaluates to True, memcache and the datastore will also be used. If the
no_datastore option is set, then the datastore will never be used.
Normally this method does not need to be called directly - a session is
automatically saved at the end of the request if any changes were made.
"""
if not self.sid:
return # no session is active
if not self.dirty:
return # nothing has changed
dirty = self.dirty
self.dirty = False # saving, so it won't be dirty anymore
# do the pickling ourselves b/c we need it for the datastore anyway
pdump = self.__encode_data(self.data)
# persist via cookies if it is reasonably small
if len(pdump) * 4 / 3 <= self.cookie_only_thresh: # 4/3 b/c base64 is ~33% bigger
self.cookie_data = pdump
if not persist_even_if_using_cookie:
return
elif self.cookie_keys:
# latest data will only be in the backend, so expire data cookies we set
self.cookie_data = ''
memcache.set(self.sid, pdump, namespace='', time=self.get_expiration()) # may fail if memcache is down
# persist the session to the datastore
if dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB or self.no_datastore:
return
try:
SessionModel(key_name=self.sid, pdump=pdump).put()
except Exception, e:
logging.warning("unable to persist session to datastore for sid=%s (%s)" % (self.sid, e))
# Users may interact with the session through a dictionary-like interface.
def clear(self):
"""Removes all data from the session (but does not terminate it)."""
if self.sid:
self.data = {}
self.dirty = True
def get(self, key, default=None):
"""Retrieves a value from the session."""
self.ensure_data_loaded()
return self.data.get(key, default)
def has_key(self, key):
"""Returns True if key is set."""
self.ensure_data_loaded()
return key in self.data
def pop(self, key, default=None):
"""Removes key and returns its value, or default if key is not present."""
self.ensure_data_loaded()
self.dirty = True
return self.data.pop(key, default)
def pop_quick(self, key, default=None):
"""Removes key and returns its value, or default if key is not present.
The change will only be persisted to memcache until another change
necessitates a write to the datastore."""
self.ensure_data_loaded()
if self.dirty is False:
self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
return self.data.pop(key, default)
def set_quick(self, key, value):
"""Set a value named key on this session. The change will only be
persisted to memcache until another change necessitates a write to the
datastore. This will start a session if one is not already active."""
dirty = self.dirty
self[key] = value
if dirty is False or dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB:
self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
def __getitem__(self, key):
"""Returns the value associated with key on this session."""
self.ensure_data_loaded()
return self.data.__getitem__(key)
def __setitem__(self, key, value):
"""Set a value named key on this session. This will start a session if
one is not already active."""
self.ensure_data_loaded()
if not self.sid:
self.start()
self.data.__setitem__(key, value)
self.dirty = True
def __delitem__(self, key):
"""Deletes the value associated with key on this session."""
self.ensure_data_loaded()
self.data.__delitem__(key)
self.dirty = True
def __iter__(self):
"""Returns an iterator over the keys (names) of the stored values."""
self.ensure_data_loaded()
return self.data.iterkeys()
def __contains__(self, key):
"""Returns True if key is present on this session."""
self.ensure_data_loaded()
return self.data.__contains__(key)
def __str__(self):
"""Returns a string representation of the session."""
if self.sid:
self.ensure_data_loaded()
return "SID=%s %s" % (self.sid, self.data)
else:
return "uninitialized session"
class SessionMiddleware(object):
"""WSGI middleware that adds session support.
``cookie_key`` - A key used to secure cookies so users cannot modify their
content. Keys should be at least 32 bytes (RFC2104). Tip: generate your
key using ``os.urandom(64)`` but do this OFFLINE and copy/paste the output
into a string which you pass in as ``cookie_key``. If you use ``os.urandom()``
to dynamically generate your key at runtime then any existing sessions will
become junk every time your app starts up!
``lifetime`` - ``datetime.timedelta`` that specifies how long a session may last. Defaults to 7 days.
``no_datastore`` - By default all writes also go to the datastore in case
memcache is lost. Set to True to never use the datastore. This improves
    write performance but sessions may occasionally be lost.
``cookie_only_threshold`` - A size in bytes. If session data is less than this
threshold, then session data is kept only in a secure cookie. This avoids
memcache/datastore latency which is critical for small sessions. Larger
sessions are kept in memcache+datastore instead. Defaults to 10KB.
"""
def __init__(self, app, cookie_key, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH):
self.app = app
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.cookie_key = cookie_key
if not self.cookie_key:
raise ValueError("cookie_key MUST be specified")
if len(self.cookie_key) < 32:
raise ValueError(
"RFC2104 recommends you use at least a 32 character key. Try os.urandom(64) to make a key.")
def __call__(self, environ, start_response):
# initialize a session for the current user
_tls.current_session = Session(lifetime=self.lifetime, no_datastore=self.no_datastore,
cookie_only_threshold=self.cookie_only_thresh, cookie_key=self.cookie_key)
# create a hook for us to insert a cookie into the response headers
def my_start_response(status, headers, exc_info=None):
_tls.current_session.save() # store the session if it was changed
for ch in _tls.current_session.make_cookie_headers():
headers.append(('Set-Cookie', ch))
return start_response(status, headers, exc_info)
# let the app do its thing
return self.app(environ, my_start_response)
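# Minimal usage sketch (illustrative; ``my_wsgi_app`` and the key literal are
# placeholders -- generate a real key offline, e.g. with os.urandom(64)):
#
#   from gaesessions import SessionMiddleware, get_current_session
#
#   def my_wsgi_app(environ, start_response):
#       session = get_current_session()
#       session['visits'] = session.get('visits', 0) + 1
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return ['visit #%d' % session['visits']]
#
#   app = SessionMiddleware(my_wsgi_app, cookie_key='<paste 64 random bytes here>')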
class DjangoSessionMiddleware(object):
"""Django middleware that adds session support. You must specify the
session configuration parameters by modifying the call to ``SessionMiddleware``
in ``DjangoSessionMiddleware.__init__()`` since Django cannot call an
initialization method with parameters.
"""
def __init__(self):
fake_app = lambda environ, start_response: start_response
self.wrapped_wsgi_middleware = SessionMiddleware(fake_app, cookie_key='you MUST change this')
self.response_handler = None
def process_request(self, request):
self.response_handler = self.wrapped_wsgi_middleware(None, lambda status, headers, exc_info: headers)
request.session = get_current_session() # for convenience
def process_response(self, request, response):
if self.response_handler:
session_headers = self.response_handler(None, [], None)
for k, v in session_headers:
response[k] = v
self.response_handler = None
if hasattr(request, 'session') and request.session.is_accessed():
from django.utils.cache import patch_vary_headers
logging.info("Varying")
patch_vary_headers(response, ('Cookie',))
return response
def delete_expired_sessions():
"""Deletes expired sessions from the datastore.
If there are more than 500 expired sessions, only 500 will be removed.
Returns True if all expired sessions have been removed.
"""
now_str = unicode(int(time.time()))
q = db.Query(SessionModel, keys_only=True, namespace='')
key = db.Key.from_path('SessionModel', now_str + u'\ufffd', namespace='')
q.filter('__key__ < ', key)
results = q.fetch(500)
db.delete(results)
logging.info('gae-sessions: deleted %d expired sessions from the datastore' % len(results))
return len(results) < 500
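# Typical cleanup wiring (illustrative): call this from a cron-triggered
# request handler until it reports completion. The handler itself is an
# assumption and is not defined by this module:
#
#   while not delete_expired_sessions():
#       pass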
|
rhefner1/ghidonations
|
gaesessions/__init__.py
|
Python
|
apache-2.0
| 21,922 | 0.002144 |
import itchat, time, re
from itchat.content import *
import urllib2, urllib
import json
from watson_developer_cloud import ConversationV1
response={'context':{}}
@itchat.msg_register([TEXT])
def text_reply(msg):
global response
request_text = msg['Text'].encode('UTF-8')
conversation = ConversationV1(
username='9c359fba-0692-4afa-afb1-bd5bf4d7e367',
password='5Id2zfapBV6e',
version='2017-04-21')
# replace with your own workspace_id
workspace_id = 'd3e50587-f36a-4bdf-bf3e-38c382e8d63a'
print "request ==>", request_text
try:
type(eval(response))
except:
print "first call"
response = conversation.message(workspace_id=workspace_id, message_input={
'text': request_text}, context=response['context'])
else:
print "continue call"
response = conversation.message(workspace_id=workspace_id, message_input={
'text': request_text}, context=response['context'])
if len( response['output']['text']) >0:
response_text = response['output']['text'][0]
else:
response_text = "No message"
itchat.send( response_text, msg['FromUserName'])
itchat.auto_login()
itchat.run(debug=True)
|
Jonathanliu92251/watson-conversation
|
wechat/watson-wechat.py
|
Python
|
apache-2.0
| 1,140 | 0.032456 |
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.utils import plural_or_not
from robot.variables import is_list_var
class ArgumentValidator(object):
def __init__(self, argspec):
self._argspec = argspec
def validate(self, positional, named, dryrun=False):
if dryrun and any(is_list_var(arg) for arg in positional):
return
self._validate_no_multiple_values(positional, named, self._argspec)
self._validate_limits(positional, named, self._argspec)
self._validate_no_mandatory_missing(positional, named, self._argspec)
def _validate_limits(self, positional, named, spec):
count = len(positional) + self._named_positionals(named, spec)
if not spec.minargs <= count <= spec.maxargs:
self._raise_wrong_count(count, spec)
def _named_positionals(self, named, spec):
if not spec.supports_named:
return 0
return sum(1 for n in named if n in spec.positional)
def _raise_wrong_count(self, count, spec):
minend = plural_or_not(spec.minargs)
if spec.minargs == spec.maxargs:
expected = '%d argument%s' % (spec.minargs, minend)
elif not spec.varargs:
expected = '%d to %d arguments' % (spec.minargs, spec.maxargs)
else:
expected = 'at least %d argument%s' % (spec.minargs, minend)
if spec.kwargs:
expected = expected.replace('argument', 'non-keyword argument')
raise DataError("%s '%s' expected %s, got %d."
% (spec.type, spec.name, expected, count))
def _validate_no_multiple_values(self, positional, named, spec):
for name in spec.positional[:len(positional)]:
if name in named and spec.supports_named:
raise DataError("%s '%s' got multiple values for argument '%s'."
% (spec.type, spec.name, name))
def _validate_no_mandatory_missing(self, positional, named, spec):
for name in spec.positional[len(positional):spec.minargs]:
if name not in named:
raise DataError("%s '%s' missing value for argument '%s'."
% (spec.type, spec.name, name))
|
yamateh/robotframework
|
src/robot/running/arguments/argumentvalidator.py
|
Python
|
apache-2.0
| 2,829 | 0.000353 |
# -*- coding: utf-8 -*-
import nltk
import csv
import random
import codecs
import re
from nltk.corpus import stopwords
stopset = list(set(stopwords.words('spanish')))
hil_tweets = []
trump_tweets = []
bernie_tweets = []
cruz_tweets = []
classes = {}
def transform(temp):
if temp == "imo":
return "opinion"
elif temp == "inches":
return "inch"
elif temp == "including" or temp == "included" or temp == "includes":
return "include"
elif temp == "issued" or temp == "issues":
return "issue"
elif temp == "ppl":
return "people"
elif temp == "prices":
return "price"
elif temp == "say":
return "says"
elif temp == "shocked" or temp == "shocker" or temp == "shocking":
return "shock"
#elif temp == "sooooo" or temp == "soooo" or temp == "sooo" or temp == "soo":
# return "so"
return temp
def getPureWord(word):
#if str.startswith(word,'@'):
# return ""
#print word
temp = word.lower()
if str.startswith(temp,"http"):
return ""
temp = ''.join(e for e in temp if e.isalpha())
#if temp not in stop_words and temp !='':
if temp not in stopset and temp !='':
return transform(temp)
else:
return ""
def purifyText(input):
output = input.replace('\r','').replace('\n','')
op = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', output)
op1 = " ".join(getPureWord(w) for w in op.split())
return op1.strip()
def buildHash():
#Hillary, Bernie, Trump, Cruz, GOP, DEM
classes["trump"] = ["donald","trump","donaldtrump"]
classes["cruz"] = ["tedcruz","cruz","ted"]
classes["hillary"] = ["hillaryclinton","hillary","clinton"]
classes["bernie"] = ["berniesanders","bernie","sanders","bern"]
classes["gop"] = ["gop","gopdebate","republicans"]
classes["dem"] = ["dem","demdebate","democrats","Democratic","democrata","democrat"]
def getEntities(line):
line = line.lower()
op = set()
for key in classes:
temp = classes[key]
#print temp
for t in temp:
#print type(line)
if t.lower() in line:
op.add(key)
if key in op:
break
return list(op)
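# Illustrative behaviour of getEntities() (the order of the returned list is
# not guaranteed because it is built from a set):
#   getEntities("Bernie crushed it at the DemDebate") -> ['bernie', 'dem'] (in some order)
#   getEntities("nothing political here")             -> []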
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
yield [unicode(cell, 'utf-8') for cell in row]
# Process Tweet
def processTweet(tweet):
tweet = tweet.lower()
# Convert www.* or https?://* to URL
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))', 'URL', tweet)
# Convert @username to AT_USER
tweet = re.sub('@[^\s]+', 'AT_USER', tweet)
# Remove additional white spaces
tweet = re.sub('[\s]+', ' ', tweet)
# Replace #word with word
tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
# trim
tweet = tweet.strip('\'"')
return tweet
def tweet_word(words):
return dict([(word.decode('utf-8'), True) for word in words.split() if word.decode('utf-8') not in stopset])
buildHash()
test_set = []
for x in ['a', 'b', 'c', 'd', 'e']:
#for x in ['annotatedTrump2.csv']:
with codecs.open('../python/Annotated4/annotated.csva' + x, 'rb') as csvfile:
tweets = csv.reader(csvfile, delimiter=',', quotechar='\'')
for tweet in tweets:
if tweet[12] == 'berniePositive':
bernie_tweets.append(purifyText(tweet[13]))
elif tweet[12] == 'hillaryPositive':
hil_tweets.append(purifyText(tweet[13]))
elif tweet[12] == 'cruzPositive':
cruz_tweets.append(purifyText(tweet[13]))
elif tweet[12] == 'trumpPositive':
trump_tweets.append(purifyText(tweet[13]))
elif tweet[12] == 'nuetral':
test_set.append(tweet)
labeled_words = ([(word, 'hillary') for word in hil_tweets] + [(word, 'trump') for word in trump_tweets] + [(word, 'cruz') for word in cruz_tweets] + [(word, 'bernie') for word in bernie_tweets])
random.shuffle(labeled_words)
featuresets = [(tweet_word(n), classify) for (n, classify) in labeled_words]
train_set = featuresets
# Generating Test Set...
'''
for x in ['testTrump.csv']:
with codecs.open('../python/annotated2/' + x, 'rb') as csvfile:
tweets = csv.reader(csvfile, delimiter=',', quotechar='\'')
for tweet in tweets:
if tweet[7] == '0':
test_set.append(tweet)
'''
# Ref - http://www.nltk.org/api/nltk.classify.html
# ALGORITHMS = ['GIS', 'IIS', 'MEGAM', 'TADM']
algorithm = nltk.classify.MaxentClassifier.ALGORITHMS[1]
classifier = nltk.MaxentClassifier.train(train_set, algorithm, max_iter=3)
classifier.show_most_informative_features(10)
#print(nltk.classify.accuracy(classifier, test_set))
i = 1
with open("canoutput.csv", 'wb') as f:
for tweet in test_set:
op1 = purifyText(tweet[13])
op = getEntities(op1)
if "trump" in op or "bernie" in op or "hillary" in op or "cruz" in op:
result = classifier.classify(tweet_word(op1))
print tweet[13]
print result
#else:
# print result + "Positive"
i += 1
if i > 100:
break
|
karanjeets/CSCI-544
|
Experimental/classifier/tweet_can_es.py
|
Python
|
apache-2.0
| 5,270 | 0.009298 |
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
import socket
from ftplib import FTP
import paramiko
from pychron.media_storage.storage import RemoteStorage
class FTPStorage(RemoteStorage):
url_name = 'FTP'
    def put(self, src, dest):
        client = self._get_client()
        if client is None:
            # _get_client may return None (e.g. SFTP timeout or auth failure)
            return
        self._put(client, src, dest)
        self._close_client(client)
def _close_client(self, client):
client.quit()
def _get_client(self):
client = FTP(self.host)
client.login(self.username, self.password)
return client
def _put(self, client, src, dest):
head, ext = os.path.splitext(src)
if ext in ('.jpg', '.png'):
with open(src, 'rb') as rfile:
client.storbinary('STOR {}'.format(dest), rfile, 1024)
else:
with open(src, 'r') as rfile:
client.storlines('STOR {}'.format(dest), rfile)
class SFTPStorage(FTPStorage):
url_name = 'SFTP'
def _get_client(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(self.host, username=self.username, password=self.password, timeout=2)
except (socket.timeout, paramiko.AuthenticationException):
self.warning_dialog('Could not connect to server')
return
return ssh.open_sftp()
def _close_client(self, client):
client.close()
def _put(self, client, src, dest):
client.put(src, dest)
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/media_storage/ftp_storage.py
|
Python
|
apache-2.0
| 2,366 | 0.000423 |
from ctypes import *
from ctypes.wintypes import HANDLE
from ctypes.wintypes import BOOL
from ctypes.wintypes import LPCWSTR
_stdcall_libraries = {}
_stdcall_libraries['kernel32'] = WinDLL('kernel32')
from ctypes.wintypes import DWORD
from ctypes.wintypes import WORD
from ctypes.wintypes import BYTE
INVALID_HANDLE_VALUE = HANDLE(-1).value
class _SECURITY_ATTRIBUTES(Structure):
pass
LPSECURITY_ATTRIBUTES = POINTER(_SECURITY_ATTRIBUTES)
CreateEventW = _stdcall_libraries['kernel32'].CreateEventW
CreateEventW.restype = HANDLE
CreateEventW.argtypes = [LPSECURITY_ATTRIBUTES, BOOL, BOOL, LPCWSTR]
CreateEvent = CreateEventW # alias
CreateFileW = _stdcall_libraries['kernel32'].CreateFileW
CreateFileW.restype = HANDLE
CreateFileW.argtypes = [LPCWSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
CreateFile = CreateFileW # alias
class _OVERLAPPED(Structure):
pass
OVERLAPPED = _OVERLAPPED
class _COMSTAT(Structure):
pass
COMSTAT = _COMSTAT
class _DCB(Structure):
pass
DCB = _DCB
class _COMMTIMEOUTS(Structure):
pass
COMMTIMEOUTS = _COMMTIMEOUTS
GetLastError = _stdcall_libraries['kernel32'].GetLastError
GetLastError.restype = DWORD
GetLastError.argtypes = []
LPOVERLAPPED = POINTER(_OVERLAPPED)
LPDWORD = POINTER(DWORD)
GetOverlappedResult = _stdcall_libraries['kernel32'].GetOverlappedResult
GetOverlappedResult.restype = BOOL
GetOverlappedResult.argtypes = [HANDLE, LPOVERLAPPED, LPDWORD, BOOL]
ResetEvent = _stdcall_libraries['kernel32'].ResetEvent
ResetEvent.restype = BOOL
ResetEvent.argtypes = [HANDLE]
LPCVOID = c_void_p
WriteFile = _stdcall_libraries['kernel32'].WriteFile
WriteFile.restype = BOOL
WriteFile.argtypes = [HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED]
LPVOID = c_void_p
ReadFile = _stdcall_libraries['kernel32'].ReadFile
ReadFile.restype = BOOL
ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
CloseHandle = _stdcall_libraries['kernel32'].CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = [HANDLE]
ClearCommBreak = _stdcall_libraries['kernel32'].ClearCommBreak
ClearCommBreak.restype = BOOL
ClearCommBreak.argtypes = [HANDLE]
LPCOMSTAT = POINTER(_COMSTAT)
ClearCommError = _stdcall_libraries['kernel32'].ClearCommError
ClearCommError.restype = BOOL
ClearCommError.argtypes = [HANDLE, LPDWORD, LPCOMSTAT]
SetupComm = _stdcall_libraries['kernel32'].SetupComm
SetupComm.restype = BOOL
SetupComm.argtypes = [HANDLE, DWORD, DWORD]
EscapeCommFunction = _stdcall_libraries['kernel32'].EscapeCommFunction
EscapeCommFunction.restype = BOOL
EscapeCommFunction.argtypes = [HANDLE, DWORD]
GetCommModemStatus = _stdcall_libraries['kernel32'].GetCommModemStatus
GetCommModemStatus.restype = BOOL
GetCommModemStatus.argtypes = [HANDLE, LPDWORD]
LPDCB = POINTER(_DCB)
GetCommState = _stdcall_libraries['kernel32'].GetCommState
GetCommState.restype = BOOL
GetCommState.argtypes = [HANDLE, LPDCB]
LPCOMMTIMEOUTS = POINTER(_COMMTIMEOUTS)
GetCommTimeouts = _stdcall_libraries['kernel32'].GetCommTimeouts
GetCommTimeouts.restype = BOOL
GetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]
PurgeComm = _stdcall_libraries['kernel32'].PurgeComm
PurgeComm.restype = BOOL
PurgeComm.argtypes = [HANDLE, DWORD]
SetCommBreak = _stdcall_libraries['kernel32'].SetCommBreak
SetCommBreak.restype = BOOL
SetCommBreak.argtypes = [HANDLE]
SetCommMask = _stdcall_libraries['kernel32'].SetCommMask
SetCommMask.restype = BOOL
SetCommMask.argtypes = [HANDLE, DWORD]
SetCommState = _stdcall_libraries['kernel32'].SetCommState
SetCommState.restype = BOOL
SetCommState.argtypes = [HANDLE, LPDCB]
SetCommTimeouts = _stdcall_libraries['kernel32'].SetCommTimeouts
SetCommTimeouts.restype = BOOL
SetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]
WaitForSingleObject = _stdcall_libraries['kernel32'].WaitForSingleObject
WaitForSingleObject.restype = DWORD
WaitForSingleObject.argtypes = [HANDLE, DWORD]
ONESTOPBIT = 0 # Variable c_int
TWOSTOPBITS = 2 # Variable c_int
ONE5STOPBITS = 1
NOPARITY = 0 # Variable c_int
ODDPARITY = 1 # Variable c_int
EVENPARITY = 2 # Variable c_int
MARKPARITY = 3
SPACEPARITY = 4
RTS_CONTROL_HANDSHAKE = 2 # Variable c_int
RTS_CONTROL_DISABLE = 0 # Variable c_int
RTS_CONTROL_ENABLE = 1 # Variable c_int
SETRTS = 3
CLRRTS = 4
DTR_CONTROL_HANDSHAKE = 2 # Variable c_int
DTR_CONTROL_DISABLE = 0 # Variable c_int
DTR_CONTROL_ENABLE = 1 # Variable c_int
SETDTR = 5
CLRDTR = 6
MS_DSR_ON = 32 # Variable c_ulong
EV_RING = 256 # Variable c_int
EV_PERR = 512 # Variable c_int
EV_ERR = 128 # Variable c_int
SETXOFF = 1 # Variable c_int
EV_RXCHAR = 1 # Variable c_int
GENERIC_WRITE = 1073741824 # Variable c_long
PURGE_TXCLEAR = 4 # Variable c_int
FILE_FLAG_OVERLAPPED = 1073741824 # Variable c_int
EV_DSR = 16 # Variable c_int
MAXDWORD = 4294967295L # Variable c_uint
EV_RLSD = 32 # Variable c_int
ERROR_IO_PENDING = 997 # Variable c_long
MS_CTS_ON = 16 # Variable c_ulong
EV_EVENT1 = 2048 # Variable c_int
EV_RX80FULL = 1024 # Variable c_int
PURGE_RXABORT = 2 # Variable c_int
FILE_ATTRIBUTE_NORMAL = 128 # Variable c_int
PURGE_TXABORT = 1 # Variable c_int
SETXON = 2 # Variable c_int
OPEN_EXISTING = 3 # Variable c_int
MS_RING_ON = 64 # Variable c_ulong
EV_TXEMPTY = 4 # Variable c_int
EV_RXFLAG = 2 # Variable c_int
MS_RLSD_ON = 128 # Variable c_ulong
GENERIC_READ = 2147483648L # Variable c_ulong
EV_EVENT2 = 4096 # Variable c_int
EV_CTS = 8 # Variable c_int
EV_BREAK = 64 # Variable c_int
PURGE_RXCLEAR = 8 # Variable c_int
ULONG_PTR = c_ulong
INFINITE = 0xFFFFFFFFL
class N11_OVERLAPPED4DOLLAR_48E(Union):
pass
class N11_OVERLAPPED4DOLLAR_484DOLLAR_49E(Structure):
pass
N11_OVERLAPPED4DOLLAR_484DOLLAR_49E._fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD),
]
PVOID = c_void_p
N11_OVERLAPPED4DOLLAR_48E._anonymous_ = ['_0']
N11_OVERLAPPED4DOLLAR_48E._fields_ = [
('_0', N11_OVERLAPPED4DOLLAR_484DOLLAR_49E),
('Pointer', PVOID),
]
_OVERLAPPED._anonymous_ = ['_0']
_OVERLAPPED._fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_0', N11_OVERLAPPED4DOLLAR_48E),
('hEvent', HANDLE),
]
_SECURITY_ATTRIBUTES._fields_ = [
('nLength', DWORD),
('lpSecurityDescriptor', LPVOID),
('bInheritHandle', BOOL),
]
_COMSTAT._fields_ = [
('fCtsHold', DWORD, 1),
('fDsrHold', DWORD, 1),
('fRlsdHold', DWORD, 1),
('fXoffHold', DWORD, 1),
('fXoffSent', DWORD, 1),
('fEof', DWORD, 1),
('fTxim', DWORD, 1),
('fReserved', DWORD, 25),
('cbInQue', DWORD),
('cbOutQue', DWORD),
]
_DCB._fields_ = [
('DCBlength', DWORD),
('BaudRate', DWORD),
('fBinary', DWORD, 1),
('fParity', DWORD, 1),
('fOutxCtsFlow', DWORD, 1),
('fOutxDsrFlow', DWORD, 1),
('fDtrControl', DWORD, 2),
('fDsrSensitivity', DWORD, 1),
('fTXContinueOnXoff', DWORD, 1),
('fOutX', DWORD, 1),
('fInX', DWORD, 1),
('fErrorChar', DWORD, 1),
('fNull', DWORD, 1),
('fRtsControl', DWORD, 2),
('fAbortOnError', DWORD, 1),
('fDummy2', DWORD, 17),
('wReserved', WORD),
('XonLim', WORD),
('XoffLim', WORD),
('ByteSize', BYTE),
('Parity', BYTE),
('StopBits', BYTE),
('XonChar', c_char),
('XoffChar', c_char),
('ErrorChar', c_char),
('EofChar', c_char),
('EvtChar', c_char),
('wReserved1', WORD),
]
_COMMTIMEOUTS._fields_ = [
('ReadIntervalTimeout', DWORD),
('ReadTotalTimeoutMultiplier', DWORD),
('ReadTotalTimeoutConstant', DWORD),
('WriteTotalTimeoutMultiplier', DWORD),
('WriteTotalTimeoutConstant', DWORD),
]
__all__ = ['GetLastError', 'MS_CTS_ON', 'FILE_ATTRIBUTE_NORMAL',
'DTR_CONTROL_ENABLE', '_COMSTAT', 'MS_RLSD_ON',
'GetOverlappedResult', 'SETXON', 'PURGE_TXABORT',
'PurgeComm', 'N11_OVERLAPPED4DOLLAR_48E', 'EV_RING',
'ONESTOPBIT', 'SETXOFF', 'PURGE_RXABORT', 'GetCommState',
'RTS_CONTROL_ENABLE', '_DCB', 'CreateEvent',
'_COMMTIMEOUTS', '_SECURITY_ATTRIBUTES', 'EV_DSR',
'EV_PERR', 'EV_RXFLAG', 'OPEN_EXISTING', 'DCB',
'FILE_FLAG_OVERLAPPED', 'EV_CTS', 'SetupComm',
'LPOVERLAPPED', 'EV_TXEMPTY', 'ClearCommBreak',
'LPSECURITY_ATTRIBUTES', 'SetCommBreak', 'SetCommTimeouts',
'COMMTIMEOUTS', 'ODDPARITY', 'EV_RLSD',
'GetCommModemStatus', 'EV_EVENT2', 'PURGE_TXCLEAR',
'EV_BREAK', 'EVENPARITY', 'LPCVOID', 'COMSTAT', 'ReadFile',
'PVOID', '_OVERLAPPED', 'WriteFile', 'GetCommTimeouts',
'ResetEvent', 'EV_RXCHAR', 'LPCOMSTAT', 'ClearCommError',
'ERROR_IO_PENDING', 'EscapeCommFunction', 'GENERIC_READ',
'RTS_CONTROL_HANDSHAKE', 'OVERLAPPED',
'DTR_CONTROL_HANDSHAKE', 'PURGE_RXCLEAR', 'GENERIC_WRITE',
'LPDCB', 'CreateEventW', 'SetCommMask', 'EV_EVENT1',
'SetCommState', 'LPVOID', 'CreateFileW', 'LPDWORD',
'EV_RX80FULL', 'TWOSTOPBITS', 'LPCOMMTIMEOUTS', 'MAXDWORD',
'MS_DSR_ON', 'MS_RING_ON',
'N11_OVERLAPPED4DOLLAR_484DOLLAR_49E', 'EV_ERR',
'ULONG_PTR', 'CreateFile', 'NOPARITY', 'CloseHandle']
|
pacoqueen/cican
|
utils/gmapcatcher/gmapcatcher/pyGPSD/nmea/serial/win32.py
|
Python
|
gpl-3.0
| 9,044 | 0.006634 |
"""
Tests for the Gauges template tags and filters.
"""
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.gauges import GaugesNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(GAUGES_SITE_ID='1234567890abcdef0123456789')
class GaugesTagTestCase(TagTestCase):
"""
Tests for the ``gauges`` template tag.
"""
def test_tag(self):
self.assertEqual("""
<script type="text/javascript">
var _gauges = _gauges || [];
(function() {
var t = document.createElement('script');
t.type = 'text/javascript';
t.async = true;
t.id = 'gauges-tracker';
t.setAttribute('data-site-id', '1234567890abcdef0123456789');
t.src = '//secure.gaug.es/track.js';
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(t, s);
})();
</script>
""", self.render_tag('gauges', 'gauges'))
def test_node(self):
self.assertEqual(
"""
<script type="text/javascript">
var _gauges = _gauges || [];
(function() {
var t = document.createElement('script');
t.type = 'text/javascript';
t.async = true;
t.id = 'gauges-tracker';
t.setAttribute('data-site-id', '1234567890abcdef0123456789');
t.src = '//secure.gaug.es/track.js';
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(t, s);
})();
</script>
""", GaugesNode().render(Context()))
@override_settings(GAUGES_SITE_ID=None)
def test_no_account_number(self):
self.assertRaises(AnalyticalException, GaugesNode)
@override_settings(GAUGES_SITE_ID='123abQ')
def test_wrong_account_number(self):
self.assertRaises(AnalyticalException, GaugesNode)
@override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
def test_render_internal_ip(self):
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.1.1.1'
context = Context({'request': req})
r = GaugesNode().render(context)
self.assertTrue(r.startswith(
'<!-- Gauges disabled on internal IP address'), r)
self.assertTrue(r.endswith('-->'), r)
|
pjdelport/django-analytical
|
analytical/tests/test_tag_gauges.py
|
Python
|
mit
| 2,359 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
def get_ki2_list(parser):
parser.add_argument('-p', '--path_2chkifu',
default='~/data/shogi/2chkifu/',
help='2chkifu.zipを展開したディレクトリ')
args = parser.parse_args()
path_2chkifu = args.path_2chkifu
sub_dir_list = ['00001', '10000', '20000', '30000', '40000']
path_ki2_list = []
# Extract paths of KI2 files
for sub_dir in sub_dir_list:
path_dir = os.path.expanduser(os.path.join(path_2chkifu, sub_dir))
ki2files = os.listdir(path_dir)
for ki2file in ki2files:
path_ki2_list.append(os.path.join(path_dir, ki2file))
return sorted(path_ki2_list)
|
tosh1ki/pyogi
|
doc/sample_code/get_ki2_list.py
|
Python
|
mit
| 741 | 0 |
from flask import Blueprint, jsonify, request
routes_api = Blueprint('routes_api', __name__)
@routes_api.route('/v1/routes', methods=['GET'])
def routes_get():
'''
    Get a list of routes.
    Handler for GET /v1/routes.
'''
return jsonify()
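# Illustrative wiring (the application module below is assumed, not part of this file):
#
#   from flask import Flask
#   from routes import routes_api
#
#   app = Flask(__name__)
#   app.register_blueprint(routes_api)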
|
ridindirtyatl/truffle-api
|
routes.py
|
Python
|
agpl-3.0
| 261 | 0 |
"""
Support for manual alarms.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.manual/
"""
import copy
import datetime
import logging
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
import homeassistant.util.dt as dt_util
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS, STATE_ALARM_DISARMED, STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED, CONF_PLATFORM, CONF_NAME, CONF_CODE,
CONF_DELAY_TIME, CONF_PENDING_TIME, CONF_TRIGGER_TIME,
CONF_DISARM_AFTER_TRIGGER)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_time
CONF_CODE_TEMPLATE = 'code_template'
DEFAULT_ALARM_NAME = 'HA Alarm'
DEFAULT_DELAY_TIME = datetime.timedelta(seconds=0)
DEFAULT_PENDING_TIME = datetime.timedelta(seconds=60)
DEFAULT_TRIGGER_TIME = datetime.timedelta(seconds=120)
DEFAULT_DISARM_AFTER_TRIGGER = False
SUPPORTED_STATES = [STATE_ALARM_DISARMED, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS, STATE_ALARM_TRIGGERED]
SUPPORTED_PRETRIGGER_STATES = [state for state in SUPPORTED_STATES
if state != STATE_ALARM_TRIGGERED]
SUPPORTED_PENDING_STATES = [state for state in SUPPORTED_STATES
if state != STATE_ALARM_DISARMED]
ATTR_PRE_PENDING_STATE = 'pre_pending_state'
ATTR_POST_PENDING_STATE = 'post_pending_state'
def _state_validator(config):
config = copy.deepcopy(config)
for state in SUPPORTED_PRETRIGGER_STATES:
if CONF_DELAY_TIME not in config[state]:
config[state][CONF_DELAY_TIME] = config[CONF_DELAY_TIME]
if CONF_TRIGGER_TIME not in config[state]:
config[state][CONF_TRIGGER_TIME] = config[CONF_TRIGGER_TIME]
for state in SUPPORTED_PENDING_STATES:
if CONF_PENDING_TIME not in config[state]:
config[state][CONF_PENDING_TIME] = config[CONF_PENDING_TIME]
return config
def _state_schema(state):
schema = {}
if state in SUPPORTED_PRETRIGGER_STATES:
schema[vol.Optional(CONF_DELAY_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta)
schema[vol.Optional(CONF_TRIGGER_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta)
if state in SUPPORTED_PENDING_STATES:
schema[vol.Optional(CONF_PENDING_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta)
return vol.Schema(schema)
PLATFORM_SCHEMA = vol.Schema(vol.All({
vol.Required(CONF_PLATFORM): 'manual',
vol.Optional(CONF_NAME, default=DEFAULT_ALARM_NAME): cv.string,
vol.Exclusive(CONF_CODE, 'code validation'): cv.string,
vol.Exclusive(CONF_CODE_TEMPLATE, 'code validation'): cv.template,
vol.Optional(CONF_DELAY_TIME, default=DEFAULT_DELAY_TIME):
vol.All(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_PENDING_TIME, default=DEFAULT_PENDING_TIME):
vol.All(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_TRIGGER_TIME, default=DEFAULT_TRIGGER_TIME):
vol.All(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_DISARM_AFTER_TRIGGER,
default=DEFAULT_DISARM_AFTER_TRIGGER): cv.boolean,
vol.Optional(STATE_ALARM_ARMED_AWAY, default={}):
_state_schema(STATE_ALARM_ARMED_AWAY),
vol.Optional(STATE_ALARM_ARMED_HOME, default={}):
_state_schema(STATE_ALARM_ARMED_HOME),
vol.Optional(STATE_ALARM_ARMED_NIGHT, default={}):
_state_schema(STATE_ALARM_ARMED_NIGHT),
vol.Optional(STATE_ALARM_ARMED_CUSTOM_BYPASS, default={}):
_state_schema(STATE_ALARM_ARMED_CUSTOM_BYPASS),
vol.Optional(STATE_ALARM_DISARMED, default={}):
_state_schema(STATE_ALARM_DISARMED),
vol.Optional(STATE_ALARM_TRIGGERED, default={}):
_state_schema(STATE_ALARM_TRIGGERED),
}, _state_validator))
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the manual alarm platform."""
add_devices([ManualAlarm(
hass,
config[CONF_NAME],
config.get(CONF_CODE),
config.get(CONF_CODE_TEMPLATE),
config.get(CONF_DISARM_AFTER_TRIGGER, DEFAULT_DISARM_AFTER_TRIGGER),
config
)])
class ManualAlarm(alarm.AlarmControlPanel):
"""
Representation of an alarm status.
When armed, will be pending for 'pending_time', after that armed.
When triggered, will be pending for the triggering state's 'delay_time'
plus the triggered state's 'pending_time'.
After that will be triggered for 'trigger_time', after that we return to
the previous state or disarm if `disarm_after_trigger` is true.
A trigger_time of zero disables the alarm_trigger service.
"""
def __init__(self, hass, name, code, code_template,
disarm_after_trigger, config):
"""Init the manual alarm panel."""
self._state = STATE_ALARM_DISARMED
self._hass = hass
self._name = name
if code_template:
self._code = code_template
self._code.hass = hass
else:
self._code = code or None
self._disarm_after_trigger = disarm_after_trigger
self._previous_state = self._state
self._state_ts = None
self._delay_time_by_state = {
state: config[state][CONF_DELAY_TIME]
for state in SUPPORTED_PRETRIGGER_STATES}
self._trigger_time_by_state = {
state: config[state][CONF_TRIGGER_TIME]
for state in SUPPORTED_PRETRIGGER_STATES}
self._pending_time_by_state = {
state: config[state][CONF_PENDING_TIME]
for state in SUPPORTED_PENDING_STATES}
@property
def should_poll(self):
"""Return the plling state."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._state == STATE_ALARM_TRIGGERED:
if self._within_pending_time(self._state):
return STATE_ALARM_PENDING
trigger_time = self._trigger_time_by_state[self._previous_state]
if (self._state_ts + self._pending_time(self._state) +
trigger_time) < dt_util.utcnow():
if self._disarm_after_trigger:
return STATE_ALARM_DISARMED
else:
self._state = self._previous_state
return self._state
if self._state in SUPPORTED_PENDING_STATES and \
self._within_pending_time(self._state):
return STATE_ALARM_PENDING
return self._state
@property
def _active_state(self):
if self.state == STATE_ALARM_PENDING:
return self._previous_state
else:
return self._state
def _pending_time(self, state):
pending_time = self._pending_time_by_state[state]
if state == STATE_ALARM_TRIGGERED:
pending_time += self._delay_time_by_state[self._previous_state]
return pending_time
def _within_pending_time(self, state):
return self._state_ts + self._pending_time(state) > dt_util.utcnow()
@property
def code_format(self):
"""One or more characters."""
return None if self._code is None else '.+'
def alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, STATE_ALARM_DISARMED):
return
self._state = STATE_ALARM_DISARMED
self._state_ts = dt_util.utcnow()
self.schedule_update_ha_state()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._validate_code(code, STATE_ALARM_ARMED_HOME):
return
self._update_state(STATE_ALARM_ARMED_HOME)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._validate_code(code, STATE_ALARM_ARMED_AWAY):
return
self._update_state(STATE_ALARM_ARMED_AWAY)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
if not self._validate_code(code, STATE_ALARM_ARMED_NIGHT):
return
self._update_state(STATE_ALARM_ARMED_NIGHT)
def alarm_arm_custom_bypass(self, code=None):
"""Send arm custom bypass command."""
if not self._validate_code(code, STATE_ALARM_ARMED_CUSTOM_BYPASS):
return
self._update_state(STATE_ALARM_ARMED_CUSTOM_BYPASS)
def alarm_trigger(self, code=None):
"""
Send alarm trigger command.
No code needed, a trigger time of zero for the current state
disables the alarm.
"""
if not self._trigger_time_by_state[self._active_state]:
return
self._update_state(STATE_ALARM_TRIGGERED)
def _update_state(self, state):
if self._state == state:
return
self._previous_state = self._state
self._state = state
self._state_ts = dt_util.utcnow()
self.schedule_update_ha_state()
pending_time = self._pending_time(state)
if state == STATE_ALARM_TRIGGERED:
track_point_in_time(
self._hass, self.async_update_ha_state,
self._state_ts + pending_time)
trigger_time = self._trigger_time_by_state[self._previous_state]
track_point_in_time(
self._hass, self.async_update_ha_state,
self._state_ts + pending_time + trigger_time)
elif state in SUPPORTED_PENDING_STATES and pending_time:
track_point_in_time(
self._hass, self.async_update_ha_state,
self._state_ts + pending_time)
def _validate_code(self, code, state):
"""Validate given code."""
if self._code is None:
return True
if isinstance(self._code, str):
alarm_code = self._code
else:
alarm_code = self._code.render(from_state=self._state,
to_state=state)
check = not alarm_code or code == alarm_code
if not check:
_LOGGER.warning("Invalid code given for %s", state)
return check
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attr = {}
if self.state == STATE_ALARM_PENDING:
state_attr[ATTR_PRE_PENDING_STATE] = self._previous_state
state_attr[ATTR_POST_PENDING_STATE] = self._state
return state_attr
|
ewandor/home-assistant
|
homeassistant/components/alarm_control_panel/manual.py
|
Python
|
apache-2.0
| 10,887 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Lunch Orders',
'author': 'OpenERP SA',
'version': '0.2',
'depends': ['base', 'report'],
'category' : 'Tools',
'summary': 'Lunch Order, Meal, Food',
'description': """
The base module to manage lunch.
================================
Many companies order sandwiches, pizzas and other food from their usual suppliers for their employees, to offer them more facilities.
However, lunch management within the company requires proper administration, especially when the number of employees or suppliers is large.
The “Lunch Order” module has been developed to make this management easier and to offer employees more tools and better usability.
In addition to full meal and supplier management, this module can display warnings and provides quick order selection based on each employee’s preferences.
If you want to save your employees' time and spare them from always having to carry coins in their pockets, this module is essential.
""",
'data': [
'security/lunch_security.xml',
'lunch_view.xml',
'wizard/lunch_order_view.xml',
'wizard/lunch_validation_view.xml',
'wizard/lunch_cancel_view.xml',
'lunch_report.xml',
'report/report_lunch_order_view.xml',
'security/ir.model.access.csv',
'views/report_lunchorder.xml',
'views/lunch.xml',
],
'images': ['images/new_order.jpeg','images/lunch_account.jpeg','images/order_by_supplier_analysis.jpeg','images/alert.jpeg'],
'demo': ['lunch_demo.xml',],
'installable': True,
'application' : True,
'certificate' : '001292377792581874189',
'images': [],
}
|
3dfxsoftware/cbss-addons
|
lunch/__openerp__.py
|
Python
|
gpl-2.0
| 2,636 | 0.006084 |
from django import forms, template
from django.core.cache import cache
from repertoire_telephonique.models import Phone
register = template.Library()
@register.simple_tag
def simple_add(a, b):
return a + b
@register.inclusion_tag('vcard/tags/form_phone.html')
def get_form_phone(contact_id):
# get from cache
cache_key = 'phone_choices_%s' % contact_id
choices = cache.get(cache_key)
# not in cache generate choices
if not choices:
choices = [(_p.id, '%s %s' % (_p.prefix, _p.value))
for _p in Phone.objects.filter(contact_id=contact_id)]
# cache update
cache.set(cache_key, choices)
# dynamic form to manage dynamic choices
class PhoneForm(forms.Form):
phone = forms.MultipleChoiceField(choices=choices)
return {
'contact_id': contact_id,
'form': PhoneForm()
}
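# Hedged template usage sketch (assumed, not part of this file): after loading the tag
# library, the inclusion tag renders vcard/tags/form_phone.html with the cached phone
# choices for the given contact. The context variable name is illustrative.
#
# {% load vcard %}
# {% simple_add 1 2 %}
# {% get_form_phone contact.id %}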
|
toopy/django-mon-premier-projet
|
src/mon_premier_projet/apps/vcard/templatetags/vcard.py
|
Python
|
mit
| 901 | 0.006659 |
# -*- coding: utf-8 -*-
"""
Integration tests for submitting problem responses and getting grades.
"""
import json
import os
from textwrap import dedent
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import (
OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
CodeResponseXMLFactory,
)
from courseware import grades
from courseware.models import StudentModule, StudentModuleHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from student.tests.factories import UserFactory
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.credit.api import (
set_credit_requirements, get_credit_requirement_status
)
from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from openedx.core.djangoapps.grading_policy.utils import MaxScoresCache
class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that a course gets graded properly.
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
def setUp(self):
super(TestSubmittingProblems, self).setUp(create_user=False)
# Create course
self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
assert self.course, "Couldn't load course %r" % self.COURSE_NAME
# create a test student
self.student = 'view@test.com'
self.password = 'foo'
self.create_account('u1', self.student, self.password)
self.activate_user(self.student)
self.enroll(self.course)
self.student_user = User.objects.get(email=self.student)
self.factory = RequestFactory()
def refresh_course(self):
"""
Re-fetch the course from the database so that the object being dealt with has everything added to it.
"""
self.course = self.store.get_course(self.course.id)
def problem_location(self, problem_url_name):
"""
Returns the url of the problem given the problem's name
"""
return self.course.id.make_usage_key('problem', problem_url_name)
def modx_url(self, problem_location, dispatch):
"""
Return the url needed for the desired action.
problem_location: location of the problem on which we want some action
        dispatch: the action string that gets passed to the view as a kwarg
        example: 'problem_check' for having responses processed
"""
return reverse(
'xblock_handler',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(problem_location.to_deprecated_string()),
'handler': 'xmodule_handler',
'suffix': dispatch,
}
)
def submit_question_answer(self, problem_url_name, responses):
"""
Submit answers to a question.
Responses is a dict mapping problem ids to answers:
{'2_1': 'Correct', '2_2': 'Incorrect'}
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_check')
answer_key_prefix = 'input_{}_'.format(problem_location.html_id())
# format the response dictionary to be sent in the post request by adding the above prefix to each key
response_dict = {(answer_key_prefix + k): v for k, v in responses.items()}
resp = self.client.post(modx_url, response_dict)
return resp
def look_at_question(self, problem_url_name):
"""
Create state for a problem, but don't answer it
"""
location = self.problem_location(problem_url_name)
modx_url = self.modx_url(location, "problem_get")
resp = self.client.get(modx_url)
return resp
def reset_question_answer(self, problem_url_name):
"""
Reset specified problem for current user.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_reset')
resp = self.client.post(modx_url)
return resp
def show_question_answer(self, problem_url_name):
"""
Shows the answer to the current student.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_show')
resp = self.client.post(modx_url)
return resp
def add_dropdown_to_section(self, section_location, name, num_inputs=2):
"""
Create and return a dropdown problem.
section_location: location object of section in which to create the problem
(problems must live in a section to be graded properly)
name: string name of the problem
num_input: the number of input fields to create in the problem
"""
prob_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=num_inputs,
weight=num_inputs,
options=['Correct', 'Incorrect', u'ⓤⓝⓘⓒⓞⓓⓔ'],
correct_option='Correct'
)
problem = ItemFactory.create(
parent_location=section_location,
category='problem',
data=prob_xml,
metadata={'rerandomize': 'always'},
display_name=name
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
return problem
def add_graded_section_to_course(self, name, section_format='Homework', late=False, reset=False, showanswer=False):
"""
Creates a graded homework section within a chapter and returns the section.
"""
# if we don't already have a chapter create a new one
if not(hasattr(self, 'chapter')):
self.chapter = ItemFactory.create(
parent_location=self.course.location,
category='chapter'
)
if late:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format, 'due': '2013-05-20T23:30'}
)
elif reset:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
rerandomize='always',
metadata={
'graded': True,
'format': section_format,
}
)
elif showanswer:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
showanswer='never',
metadata={
'graded': True,
'format': section_format,
}
)
else:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format}
)
# now that we've added the problem and section to the course
# we fetch the course from the database so the object we are
# dealing with has these additions
self.refresh_course()
return section
def add_grading_policy(self, grading_policy):
"""
Add a grading policy to the course.
"""
self.course.grading_policy = grading_policy
self.update_course(self.course, self.student_user.id)
self.refresh_course()
def get_grade_summary(self):
"""
calls grades.grade for current user and course.
the keywords for the returned object are
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
"""
fake_request = self.factory.get(
reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
)
fake_request.user = self.student_user
return grades.grade(self.student_user, fake_request, self.course)
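    # Illustrative shape of the returned summary (an assumption beyond the keys named
    # in the docstring and in earned_hw_scores below; the values are made up):
    # {'grade': 'B', 'percent': 0.67,
    #  'section_breakdown': [...], 'grade_breakdown': [...],
    #  'totaled_scores': {'Homework': [...]}}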
def get_progress_summary(self):
"""
Return progress summary structure for current user and course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
"""
fake_request = self.factory.get(
reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
)
progress_summary = grades.progress_summary(
self.student_user, fake_request, self.course
)
return progress_summary
def check_grade_percent(self, percent):
"""
Assert that percent grade is as expected.
"""
grade_summary = self.get_grade_summary()
self.assertEqual(grade_summary['percent'], percent)
def earned_hw_scores(self):
"""
Global scores, each Score is a Problem Set.
Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
"""
return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
def score_for_hw(self, hw_url_name):
"""
        Returns the list of scores for the homework with the given url_name:
[<points on problem_1>, <points on problem_2>, ..., <points on problem_n>]
"""
# list of grade summaries for each section
sections_list = []
for chapter in self.get_progress_summary():
sections_list.extend(chapter['sections'])
# get the first section that matches the url (there should only be one)
hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
return [s.earned for s in hw_section['scores']]
@attr('shard_1')
class TestCourseGrader(TestSubmittingProblems):
"""
Suite of tests for the course grader.
"""
def basic_setup(self, late=False, reset=False, showanswer=False):
"""
Set up a simple course for testing basic grading functionality.
"""
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}],
"GRADE_CUTOFFS": {
'A': .9,
'B': .33
}
}
self.add_grading_policy(grading_policy)
# set up a simple course with four problems
self.homework = self.add_graded_section_to_course('homework', late=late, reset=reset, showanswer=showanswer)
self.add_dropdown_to_section(self.homework.location, 'p1', 1)
self.add_dropdown_to_section(self.homework.location, 'p2', 1)
self.add_dropdown_to_section(self.homework.location, 'p3', 1)
self.refresh_course()
def weighted_setup(self):
"""
Set up a simple course for testing weighted grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 0.25
}, {
"type": "Final",
"name": "Final Section",
"short_label": "Final",
"weight": 0.75
}
]
}
self.add_grading_policy(grading_policy)
# set up a structure of 1 homework and 1 final
self.homework = self.add_graded_section_to_course('homework')
self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')
self.final = self.add_graded_section_to_course('Final Section', 'Final')
self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')
def dropping_setup(self):
"""
Set up a simple course for testing the dropping grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 3,
"drop_count": 1,
"short_label": "HW",
"weight": 1
}
]
}
self.add_grading_policy(grading_policy)
# Set up a course structure that just consists of 3 homeworks.
# Since the grading policy drops 1 entire homework, each problem is worth 25%
        # names for the problems in the homeworks
self.hw1_names = ['h1p1', 'h1p2']
self.hw2_names = ['h2p1', 'h2p2']
self.hw3_names = ['h3p1', 'h3p2']
self.homework1 = self.add_graded_section_to_course('homework1')
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)
self.homework2 = self.add_graded_section_to_course('homework2')
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)
self.homework3 = self.add_graded_section_to_course('homework3')
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)
def test_submission_late(self):
"""Test problem for due date in the past"""
self.basic_setup(late=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_reset(self):
"""Test problem ProcessingErrors due to resets"""
self.basic_setup(reset=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
# submit a second time to draw NotFoundError
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_show_answer(self):
"""Test problem for ProcessingErrors due to showing answer"""
self.basic_setup(showanswer=True)
resp = self.show_question_answer('p1')
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_show_answer_doesnt_write_to_csm(self):
self.basic_setup()
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the state entry for that problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
# count how many state history entries there are
baseline = StudentModuleHistory.objects.filter(
student_module=student_module
)
baseline_count = baseline.count()
self.assertEqual(baseline_count, 3)
# now click "show answer"
self.show_question_answer('p1')
# check that we don't have more state history entries
csmh = StudentModuleHistory.objects.filter(
student_module=student_module
)
current_count = csmh.count()
self.assertEqual(current_count, 3)
def test_grade_with_max_score_cache(self):
"""
Tests that the max score cache is populated after a grading run
and that the results of grading runs before and after the cache
warms are the same.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.look_at_question('p2')
self.assertTrue(
StudentModule.objects.filter(
module_state_key=self.problem_location('p2')
).exists()
)
location_to_cache = unicode(self.problem_location('p2'))
max_scores_cache = MaxScoresCache.create_for_course(self.course)
# problem isn't in the cache
max_scores_cache.fetch_from_remote([location_to_cache])
self.assertIsNone(max_scores_cache.get(location_to_cache))
self.check_grade_percent(0.33)
# problem is in the cache
max_scores_cache.fetch_from_remote([location_to_cache])
self.assertIsNotNone(max_scores_cache.get(location_to_cache))
self.check_grade_percent(0.33)
def test_none_grade(self):
"""
Check grade is 0 to begin with.
"""
self.basic_setup()
self.check_grade_percent(0)
self.assertEqual(self.get_grade_summary()['grade'], None)
def test_b_grade_exact(self):
"""
Check that at exactly the cutoff, the grade is B.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.check_grade_percent(0.33)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_MAX_SCORE_CACHE": False})
def test_grade_no_max_score_cache(self):
"""
Tests grading when the max score cache is disabled
"""
self.test_b_grade_exact()
def test_b_grade_above(self):
"""
Check grade between cutoffs.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
def test_a_grade(self):
"""
Check that 100 percent completion gets an A
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
def test_wrong_answers(self):
"""
Check that answering incorrectly is graded properly.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
def test_submissions_api_overrides_scores(self):
"""
Check that answering incorrectly is graded properly.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
# But now we mock out a get_scores call, and watch as it overrides the
# score read from StudentModule and our student gets an A instead.
with patch('submissions.api.get_scores') as mock_get_scores:
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
def test_submissions_api_anonymous_student_id(self):
"""
Check that the submissions API is sent an anonymous student ID.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
with patch('submissions.api.get_scores') as mock_get_scores:
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.get_grade_summary()
# Verify that the submissions API was sent an anonymized student ID
mock_get_scores.assert_called_with(
self.course.id.to_deprecated_string(),
anonymous_id_for_user(self.student_user, self.course.id)
)
def test_weighted_homework(self):
"""
Test that the homework section has proper weight.
"""
self.weighted_setup()
# Get both parts correct
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25)
self.assertEqual(self.earned_hw_scores(), [2.0]) # Order matters
self.assertEqual(self.score_for_hw('homework'), [2.0])
def test_weighted_exam(self):
"""
Test that the exam section has the proper weight.
"""
self.weighted_setup()
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.75)
def test_weighted_total(self):
"""
Test that the weighted total adds to 100.
"""
self.weighted_setup()
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(1.0)
def dropping_homework_stage1(self):
"""
Get half the first homework correct and all of the second
"""
self.submit_question_answer(self.hw1_names[0], {'2_1': 'Correct'})
self.submit_question_answer(self.hw1_names[1], {'2_1': 'Incorrect'})
for name in self.hw2_names:
self.submit_question_answer(name, {'2_1': 'Correct'})
def test_dropping_grades_normally(self):
"""
Test that the dropping policy does not change things before it should.
"""
self.dropping_setup()
self.dropping_homework_stage1()
self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0]) # Order matters
self.check_grade_percent(0.75)
def test_dropping_nochange(self):
"""
Tests that grade does not change when making the global homework grade minimum not unique.
"""
self.dropping_setup()
self.dropping_homework_stage1()
self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0]) # Order matters
self.check_grade_percent(0.75)
def test_dropping_all_correct(self):
"""
Test that the lowest is dropped for a perfect score.
"""
self.dropping_setup()
self.dropping_homework_stage1()
for name in self.hw3_names:
self.submit_question_answer(name, {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 2.0]) # Order matters
self.assertEqual(self.score_for_hw('homework3'), [1.0, 1.0])
def test_min_grade_credit_requirements_status(self):
"""
Test for credit course. If user passes minimum grade requirement then
status will be updated as satisfied in requirement status table.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
# Enable the course for credit
credit_course = CreditCourse.objects.create(
course_key=self.course.id,
enabled=True,
)
# Configure a credit provider for the course
CreditProvider.objects.create(
provider_id="ASU",
enable_integration=True,
provider_url="https://credit.example.com/request",
)
requirements = [{
"namespace": "grade",
"name": "grade",
"display_name": "Grade",
"criteria": {"min_grade": 0.52},
}]
# Add a single credit requirement (final grade)
set_credit_requirements(self.course.id, requirements)
self.get_grade_summary()
req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
self.assertEqual(req_status[0]["status"], 'satisfied')
@attr('shard_1')
class ProblemWithUploadedFilesTest(TestSubmittingProblems):
"""Tests of problems with uploaded files."""
def setUp(self):
super(ProblemWithUploadedFilesTest, self).setUp()
self.section = self.add_graded_section_to_course('section')
def problem_setup(self, name, files):
"""
Create a CodeResponse problem with files to upload.
"""
xmldata = CodeResponseXMLFactory().build_xml(
allowed_files=files, required_files=files,
)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
display_name=name,
data=xmldata
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def test_three_files(self):
# Open the test files, and arrange to close them later.
filenames = "prog1.py prog2.py prog3.py"
fileobjs = [
open(os.path.join(settings.COMMON_TEST_DATA_ROOT, "capa", filename))
for filename in filenames.split()
]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
self.problem_setup("the_problem", filenames)
with patch('courseware.module_render.XQUEUE_INTERFACE.session') as mock_session:
resp = self.submit_question_answer("the_problem", {'2_1': fileobjs})
self.assertEqual(resp.status_code, 200)
json_resp = json.loads(resp.content)
self.assertEqual(json_resp['success'], "incorrect")
# See how post got called.
name, args, kwargs = mock_session.mock_calls[0]
self.assertEqual(name, "post")
self.assertEqual(len(args), 1)
self.assertTrue(args[0].endswith("/submit/"))
self.assertItemsEqual(kwargs.keys(), ["files", "data"])
self.assertItemsEqual(kwargs['files'].keys(), filenames.split())
@attr('shard_1')
class TestPythonGradedResponse(TestSubmittingProblems):
"""
Check that we can submit a schematic and custom response, and it answers properly.
"""
SCHEMATIC_SCRIPT = dedent("""
# for a schematic response, submission[i] is the json representation
# of the diagram and analysis results for the i-th schematic tag
def get_tran(json,signal):
for element in json:
if element[0] == 'transient':
return element[1].get(signal,[])
return []
def get_value(at,output):
for (t,v) in output:
if at == t: return v
return None
output = get_tran(submission[0],'Z')
okay = True
# output should be 1, 1, 1, 1, 1, 0, 0, 0
if get_value(0.0000004, output) < 2.7: okay = False;
if get_value(0.0000009, output) < 2.7: okay = False;
if get_value(0.0000014, output) < 2.7: okay = False;
if get_value(0.0000019, output) < 2.7: okay = False;
if get_value(0.0000024, output) < 2.7: okay = False;
if get_value(0.0000029, output) > 0.25: okay = False;
if get_value(0.0000034, output) > 0.25: okay = False;
if get_value(0.0000039, output) > 0.25: okay = False;
correct = ['correct' if okay else 'incorrect']""").strip()
SCHEMATIC_CORRECT = json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 2.8],
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
SCHEMATIC_INCORRECT = json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 0.0], # wrong.
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
CUSTOM_RESPONSE_SCRIPT = dedent("""
def test_csv(expect, ans):
# Take out all spaces in expected answer
expect = [i.strip(' ') for i in str(expect).split(',')]
# Take out all spaces in student solution
ans = [i.strip(' ') for i in str(ans).split(',')]
def strip_q(x):
# Strip quotes around strings if students have entered them
stripped_ans = []
for item in x:
if item[0] == "'" and item[-1]=="'":
item = item.strip("'")
elif item[0] == '"' and item[-1] == '"':
item = item.strip('"')
stripped_ans.append(item)
return stripped_ans
return strip_q(expect) == strip_q(ans)""").strip()
CUSTOM_RESPONSE_CORRECT = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
CUSTOM_RESPONSE_INCORRECT = "Reading my code I see. I hope you like it :)"
COMPUTED_ANSWER_SCRIPT = dedent("""
if submission[0] == "a shout in the street":
correct = ['correct']
else:
correct = ['incorrect']""").strip()
COMPUTED_ANSWER_CORRECT = "a shout in the street"
COMPUTED_ANSWER_INCORRECT = "because we never let them in"
def setUp(self):
super(TestPythonGradedResponse, self).setUp()
self.section = self.add_graded_section_to_course('section')
self.correct_responses = {}
self.incorrect_responses = {}
def schematic_setup(self, name):
"""
set up an example Circuit_Schematic_Builder problem
"""
script = self.SCHEMATIC_SCRIPT
xmldata = SchematicResponseXMLFactory().build_xml(answer=script)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='circuitschematic.yaml',
display_name=name,
data=xmldata
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = self.SCHEMATIC_CORRECT
self.incorrect_responses[name] = self.SCHEMATIC_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def custom_response_setup(self, name):
"""
set up an example custom response problem using a check function
"""
test_csv = self.CUSTOM_RESPONSE_SCRIPT
expect = self.CUSTOM_RESPONSE_CORRECT
cfn_problem_xml = CustomResponseXMLFactory().build_xml(script=test_csv, cfn='test_csv', expect=expect)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='customgrader.yaml',
data=cfn_problem_xml,
display_name=name
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = expect
self.incorrect_responses[name] = self.CUSTOM_RESPONSE_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def computed_answer_setup(self, name):
"""
        set up an example problem using an answer script
"""
script = self.COMPUTED_ANSWER_SCRIPT
computed_xml = CustomResponseXMLFactory().build_xml(answer=script)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='customgrader.yaml',
data=computed_xml,
display_name=name
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT
self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def _check_correct(self, name):
"""
        check that the problem named "name" evaluates a correct answer as correct
"""
resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
def _check_incorrect(self, name):
"""
        check that the problem named "name" evaluates an incorrect answer as incorrect
"""
resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'incorrect')
def _check_ireset(self, name):
"""
Check that the problem can be reset
"""
# first, get the question wrong
resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
# reset the question
self.reset_question_answer(name)
# then get it right
resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
def test_schematic_correct(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_correct(name)
def test_schematic_incorrect(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_incorrect(name)
def test_schematic_reset(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_ireset(name)
def test_check_function_correct(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_correct(name)
def test_check_function_incorrect(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_incorrect(name)
def test_check_function_reset(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_ireset(name)
def test_computed_correct(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_correct(name)
def test_computed_incorrect(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_incorrect(name)
def test_computed_reset(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_ireset(name)
@attr('shard_1')
class TestAnswerDistributions(TestSubmittingProblems):
"""Check that we can pull answer distributions for problems."""
def setUp(self):
"""Set up a simple course with four problems."""
super(TestAnswerDistributions, self).setUp()
self.homework = self.add_graded_section_to_course('homework')
self.p1_html_id = self.add_dropdown_to_section(self.homework.location, 'p1', 1).location.html_id()
self.p2_html_id = self.add_dropdown_to_section(self.homework.location, 'p2', 1).location.html_id()
self.p3_html_id = self.add_dropdown_to_section(self.homework.location, 'p3', 1).location.html_id()
self.refresh_course()
def test_empty(self):
# Just make sure we can process this without errors.
empty_distribution = grades.answer_distributions(self.course.id)
self.assertFalse(empty_distribution) # should be empty
def test_one_student(self):
# Basic test to make sure we have simple behavior right for a student
# Throw in a non-ASCII answer
self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
distributions = grades.answer_distributions(self.course.id)
self.assertEqual(
distributions,
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
u'ⓤⓝⓘⓒⓞⓓⓔ': 1
},
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Correct': 1
}
}
)
def test_multiple_students(self):
# Our test class is based around making requests for a particular user,
# so we're going to cheat by creating another user and copying and
# modifying StudentModule entries to make them from other users. It's
# a little hacky, but it seemed the simpler way to do this.
self.submit_question_answer('p1', {'2_1': u'Correct'})
self.submit_question_answer('p2', {'2_1': u'Incorrect'})
self.submit_question_answer('p3', {'2_1': u'Correct'})
# Make the above submissions owned by user2
user2 = UserFactory.create()
problems = StudentModule.objects.filter(
course_id=self.course.id,
student=self.student_user
)
for problem in problems:
problem.student_id = user2.id
problem.save()
# Now make more submissions by our original user
self.submit_question_answer('p1', {'2_1': u'Correct'})
self.submit_question_answer('p2', {'2_1': u'Correct'})
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
'Correct': 2
},
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Correct': 1,
'Incorrect': 1
},
('p3', 'p3', '{}_2_1'.format(self.p3_html_id)): {
'Correct': 1
}
}
)
def test_other_data_types(self):
# We'll submit one problem, and then muck with the student_answers
# dict inside its state to try different data types (str, int, float,
# none)
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the state entry for that problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
state = json.loads(student_module.state)
state["student_answers"]['{}_2_1'.format(self.p1_html_id)] = val
student_module.state = json.dumps(state)
student_module.save()
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
str(val): 1
},
}
)
def test_missing_content(self):
# If there's a StudentModule entry for content that no longer exists,
# we just quietly ignore it (because we can't display a meaningful url
# or name for it).
self.submit_question_answer('p1', {'2_1': 'Incorrect'})
# Now fetch the state entry for that problem and alter it so it points
# to a non-existent problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
student_module.module_state_key = student_module.module_state_key.replace(
name=student_module.module_state_key.name + "_fake"
)
student_module.save()
# It should be empty (ignored)
empty_distribution = grades.answer_distributions(self.course.id)
self.assertFalse(empty_distribution) # should be empty
def test_broken_state(self):
# Missing or broken state for a problem should be skipped without
# causing the whole answer_distribution call to explode.
# Submit p1
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the StudentModule entry for p1 so we can corrupt its state
prb1 = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
# Submit p2
self.submit_question_answer('p2', {'2_1': u'Incorrect'})
for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
prb1.state = new_p1_state
prb1.save()
# p1 won't show up, but p2 should still work
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Incorrect': 1
},
}
)
@attr('shard_1')
class TestConditionalContent(TestSubmittingProblems):
"""
Check that conditional content works correctly with grading.
"""
def setUp(self):
"""
Set up a simple course with a grading policy, a UserPartition, and 2 sections, both graded as "homework".
One section is pre-populated with a problem (with 2 inputs), visible to all students.
The second section is empty. Test cases should add conditional content to it.
"""
super(TestConditionalContent, self).setUp()
self.user_partition_group_0 = 0
self.user_partition_group_1 = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_0, 'alpha'),
Group(self.user_partition_group_1, 'beta')
]
)
self.course = CourseFactory.create(
display_name=self.COURSE_NAME,
number=self.COURSE_SLUG,
user_partitions=[self.partition]
)
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 2,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}]
}
self.add_grading_policy(grading_policy)
self.homework_all = self.add_graded_section_to_course('homework1')
self.p1_all_html_id = self.add_dropdown_to_section(self.homework_all.location, 'H1P1', 2).location.html_id()
self.homework_conditional = self.add_graded_section_to_course('homework2')
def split_setup(self, user_partition_group):
"""
Setup for tests using split_test module. Creates a split_test instance as a child of self.homework_conditional
with 2 verticals in it, and assigns self.student_user to the specified user_partition_group.
The verticals are returned.
"""
vertical_0_url = self.course.id.make_usage_key("vertical", "split_test_vertical_0")
vertical_1_url = self.course.id.make_usage_key("vertical", "split_test_vertical_1")
group_id_to_child = {}
for index, url in enumerate([vertical_0_url, vertical_1_url]):
group_id_to_child[str(index)] = url
split_test = ItemFactory.create(
parent_location=self.homework_conditional.location,
category="split_test",
display_name="Split test",
user_partition_id='0',
group_id_to_child=group_id_to_child,
)
vertical_0 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 0 vertical",
location=vertical_0_url,
)
vertical_1 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 1 vertical",
location=vertical_1_url,
)
# Now add the student to the specified group.
UserCourseTagFactory(
user=self.student_user,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(user_partition_group)
)
return vertical_0, vertical_1
def split_different_problems_setup(self, user_partition_group):
"""
Setup for the case where the split test instance contains problems for each group
(so both groups do have graded content, though it is different).
Group 0 has 2 problems, worth 1 and 3 points respectively.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 1 point out of a possible 2 points in the section.
"""
vertical_0, vertical_1 = self.split_setup(user_partition_group)
# Group 0 will have 2 problems in the section, worth a total of 4 points.
self.add_dropdown_to_section(vertical_0.location, 'H2P1_GROUP0', 1).location.html_id()
self.add_dropdown_to_section(vertical_0.location, 'H2P2_GROUP0', 3).location.html_id()
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
# Submit answers for problem in Section 1, which is visible to all students.
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
def test_split_different_problems_group_0(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 0.
"""
self.split_different_problems_setup(self.user_partition_group_0)
self.submit_question_answer('H2P1_GROUP0', {'2_1': 'Correct'})
self.submit_question_answer('H2P2_GROUP0', {'2_1': 'Correct', '2_2': 'Incorrect', '2_3': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 2.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 3.0])
# Grade percent is .63. Here is the calculation
homework_1_score = 1.0 / 2
homework_2_score = (1.0 + 2.0) / 4
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_different_problems_group_1(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 1.
"""
self.split_different_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def split_one_group_no_problems_setup(self, user_partition_group):
"""
        Setup for the case where the split test instance contains problems for only one group.
Group 0 has no problems.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
        The submitted answers give the student 1 point out of a possible 2 points in the section.
"""
[_, vertical_1] = self.split_setup(user_partition_group)
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
self.submit_question_answer('H1P1', {'2_1': 'Correct'})
def test_split_one_group_no_problems_group_0(self):
"""
Tests what happens when a given group has no problems in it (students receive 0 for that section).
"""
self.split_one_group_no_problems_setup(self.user_partition_group_0)
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [])
self.assertEqual(self.earned_hw_scores(), [1.0, 0.0])
# Grade percent is .25. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 0.0
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_one_group_no_problems_group_1(self):
"""
Verifies students in the group that DOES have a problem receive a score for their problem.
"""
self.split_one_group_no_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
|
martynovp/edx-platform
|
lms/djangoapps/courseware/tests/test_submitting_problems.py
|
Python
|
agpl-3.0
| 51,827 | 0.001777 |
# -*- coding: utf-8 -*-
"""
logbook.testsuite
~~~~~~~~~~~~~~~~~
The logbook testsuite.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
import logbook
_skipped_modules = []
_missing = object()
_func_ident = lambda f: f
_func_none = lambda f: None
class LogbookTestSuite(unittest.TestSuite):
def run(self, result):
try:
return unittest.TestSuite.run(self, result)
finally:
sys.stderr.write('\n')
for mod in _skipped_modules:
msg = '*** Failed to import %s, tests skipped.\n' % mod
sys.stderr.write(msg)
class LogbookTestCase(unittest.TestCase):
def setUp(self):
self.log = logbook.Logger('testlogger')
# silence deprecation warning displayed on Py 3.2
LogbookTestCase.assert_ = LogbookTestCase.assertTrue
def make_fake_mail_handler(**kwargs):
class FakeMailHandler(logbook.MailHandler):
mails = []
def get_connection(self):
return self
def close_connection(self, con):
pass
def sendmail(self, fromaddr, recipients, mail):
self.mails.append((fromaddr, recipients, mail))
kwargs.setdefault('level', logbook.ERROR)
return FakeMailHandler('foo@example.com', ['bar@example.com'], **kwargs)
def skip_if(condition):
if condition:
return _func_ident
else:
return _func_none
def require(name):
if name in _skipped_modules:
return _func_none
try:
__import__(name)
except ImportError:
_skipped_modules.append(name)
return _func_none
return _func_ident
def missing(name):
def decorate(f):
def wrapper(*args, **kwargs):
old = sys.modules.get(name, _missing)
sys.modules[name] = None
try:
f(*args, **kwargs)
finally:
if old is _missing:
del sys.modules[name]
else:
sys.modules[name] = old
return wrapper
return decorate
def suite():
loader = unittest.TestLoader()
suite = LogbookTestSuite()
suite.addTests(loader.loadTestsFromName('logbook.testsuite.test_regular'))
if sys.version_info >= (2, 5):
suite.addTests(loader.loadTestsFromName
('logbook.testsuite.test_contextmanager'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
chiehwen/logbook
|
logbook/testsuite/__init__.py
|
Python
|
bsd-3-clause
| 2,534 | 0.001579 |
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
class TestStataMerge(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
"""
self.df_master = pd.DataFrame(
{'educ': {0: 'secondary', 1: 'bachelor', 2: 'primary', 3: 'higher', 4: 'bachelor', 5: 'secondary',
6: 'higher', 7: 'higher', 8: 'primary', 9: 'primary'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4, 8: 4, 9: 4},
'id': {0: 1, 1: 2, 2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 3, 9: 4},
'has_car': {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1},
'weighthh': {0: 2, 1: 2, 2: 2, 3: 3, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 3},
'house_rooms': {0: 3, 1: 3, 2: 3, 3: 2, 4: 1, 5: 1, 6: 3, 7: 3, 8: 3, 9: 3},
'prov': {0: 'BC', 1: 'BC', 2: 'BC', 3: 'Alberta', 4: 'BC', 5: 'BC', 6: 'Alberta',
7: 'Alberta', 8: 'Alberta', 9: 'Alberta'},
'age': {0: 44, 1: 43, 2: 13, 3: 70, 4: 23, 5: 20, 6: 37, 7: 35, 8: 8, 9: 15},
'fridge': {0: 'yes', 1: 'yes', 2: 'yes', 3: 'no', 4: 'yes', 5: 'yes', 6: 'no',
7: 'no', 8: 'no', 9: 'no'},
'male': {0: 1, 1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 0, 9: 0}})
self.df_using_hh = pd.DataFrame(
{'hh': {0: 2, 1: 4, 2: 5, 3: 6, 4: 7},
'has_fence': {0: 1, 1: 0, 2: 1, 3: 1, 4: 0}
})
self.df_using_ind = pd.DataFrame(
{'empl': {0: 'not employed', 1: 'full-time', 2: 'part-time', 3: 'part-time', 4: 'full-time', 5: 'part-time',
6: 'self-employed', 7: 'full-time', 8: 'self-employed'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 5, 5: 5, 6: 4, 7: 4, 8: 4},
'id': {0: 1, 1: 2, 2: 4, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 5}
})
# @unittest.skip("demonstrating skipping")
def test_new_columns_added_merging_hh_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_hh = hhkit(self.df_using_hh)
# myhhkit_using_hh.from_dict(self.df_using_hh)
myhhkit.statamerge(myhhkit_using_hh, on=['hh'])
list_of_columns = myhhkit.df.columns.values.tolist()
self.assertIn('has_fence',list_of_columns)
# also check that the values are correct
correct_values = pd.Series([np.nan, np.nan, np.nan, 1, np.nan, np.nan, 0, 0, 0, 0, 1, 1, 0])
assert_series_equal(correct_values, myhhkit.df['has_fence'])
# @unittest.skip("demonstrating skipping")
def test_new_columns_added_merging_ind_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_ind = hhkit(self.df_using_ind)
# myhhkit_using_ind.from_dict(self.df_using_ind)
myhhkit.statamerge(myhhkit_using_ind, on=['hh','id'])
list_of_columns = myhhkit.df.columns.values.tolist()
self.assertIn('empl',list_of_columns)
# also check that the values are correct
correct_values = pd.Series(['not employed', 'full-time', np.nan, 'part-time', np.nan, np.nan,
'self-employed', 'full-time', np.nan, np.nan, 'part-time', 'full-time', 'part-time', 'self-employed'])
assert_series_equal(correct_values, myhhkit.df['empl'])
# @unittest.skip("demonstrating skipping")
def test_check_proper_merged_variable_created_and_is_correct_hh_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_hh = hhkit(self.df_using_hh)
# myhhkit_using_hh.from_dict(self.df_using_hh)
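        # In Stata, a _merge value of 1 marks master-only rows, 2 marks using-only
        # rows and 3 marks matched rows; the expected values below appear to follow
        # that convention.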
correct_values = pd.Series([1, 1, 1, 3, 1, 1, 3, 3, 3, 3, 2, 2, 2])
myhhkit.statamerge(myhhkit_using_hh, on=['hh'], mergevarname='_merge_hh')
assert_series_equal(correct_values, myhhkit.df['_merge_hh'])
def test_check_proper_merged_variable_created_and_is_correct_ind_level(self):
myhhkit = hhkit(self.df_master)
# myhhkit.from_dict(self.df_master)
myhhkit_using_ind = hhkit(self.df_using_ind)
# myhhkit_using_ind.from_dict(self.df_using_ind)
correct_values = pd.Series([3, 3, 1, 3, 1, 1, 3, 3, 1, 1, 2, 2, 2, 2])
myhhkit.statamerge(myhhkit_using_ind, on=['hh','id'], mergevarname='_merge_hh')
assert_series_equal(correct_values, myhhkit.df['_merge_hh'])
if __name__ == '__main__':
unittest.main()
|
shafiquejamal/easyframes
|
easyframes/test/test_statamerge.py
|
Python
|
apache-2.0
| 4,184 | 0.032744 |
# Copyright 2021 Red Hat, Inc. Jake Hunsaker <jhunsake@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.cleaner.archives import SoSObfuscationArchive
import tarfile
class InsightsArchive(SoSObfuscationArchive):
"""This class represents archives generated by the insights-client utility
for RHEL systems.
"""
type_name = 'insights'
description = 'insights-client archive'
prep_files = {
'hostname': 'data/insights_commands/hostname_-f',
'ip': 'data/insights_commands/ip_addr',
'mac': 'data/insights_commands/ip_addr'
}
@classmethod
def check_is_type(cls, arc_path):
try:
return tarfile.is_tarfile(arc_path) and 'insights-' in arc_path
except Exception:
return False
def get_archive_root(self):
top = self.archive_path.split('/')[-1].split('.tar')[0]
if self.tarobj.firstmember.name == '.':
top = './' + top
return top
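# Illustrative behaviour (example path made up, not from the repository): for an
# archive at /var/tmp/insights-host01-20210601.tar.gz, get_archive_root() returns
# 'insights-host01-20210601', prefixed with './' when the tar's first member is '.'.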
|
slashdd/sos
|
sos/cleaner/archives/insights.py
|
Python
|
gpl-2.0
| 1,284 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tools for imr datasets
@author: Chris Mantas
@contact: the1pro@gmail.com
@since: Created on 2016-02-12
@todo: custom formats, break up big lines
@license: http://www.apache.org/licenses/LICENSE-2.0 Apache License
"""
from ast import literal_eval
from collections import defaultdict
def create_label_encoder(labels):
"""
Creates a label encoder from a list of labels
:param labels: a list of integers
:return: a LabelEncoder object
"""
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
encoder.fit(labels)
return encoder
def get_features_from_line(line):
"""
Given a text line it returns
a) only the last element of the tuple if the line is a tuple.
That element we assume to be a list of features.
b) the line's elements if the line is not a tuple
:param line:
:return:
"""
from ast import literal_eval
entry = literal_eval(line)
return entry[-1] if isinstance(entry, tuple) else entry
def parse_line(line):
"""
Parses a string line to a tuple
:param line:
:return:
"""
from ast import literal_eval
try:
entry = literal_eval(line)
if not isinstance(entry, tuple):
raise Exception("Input parsed, but is not a tuple")
except:
raise Exception("Could not evaluate (parse) input into an object")
return entry
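# Illustrative usage (not part of the original module):
#
#     >>> parse_line("(3, 2, 1, [0.5, 1.25, 0.0])")
#     (3, 2, 1, [0.5, 1.25, 0.0])
#     >>> get_features_from_line("[0.5, 1.25, 0.0]")
#     [0.5, 1.25, 0.0]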
def tuple_to_labeled_point(entry, category, l_encoder=None):
"""
Creates a label point from a text line that is formated as a tuple
:param entry: a tuple of format (3, 2, 1, [3,4,4 ..]), where the first
entries in the tuple are labels, and the last entry is
a list of features
:param category: which one of the labels in the tuple to keep for the
labeled point (0 to 2 for imr dataset)
:param l_encoder: the label encoder to encode the label (if any)
:return: a LabeledPoint
"""
from pyspark.mllib.classification import LabeledPoint
label = entry[category]
if l_encoder:
label = l_encoder.transform(label)
features = entry[-1]
return LabeledPoint(label, features) # return a new labelPoint
def classify_line(features, model, l_encoder=None):
"""
Classifies the features based on the given model.
If a label encoder is specified, it reverses the encoding of the label
:param features: a vector of features
:param model: a Classification Model
:param l_encoder: a LabelEncoder
:return: a tuple of: label, [feat1, feat2 ... featN]
"""
encoded_prediction = model.predict(features)
prediction = l_encoder.inverse_transform(encoded_prediction) \
if l_encoder else encoded_prediction
return prediction, features
def label_encoders_from_json_file(labels_json_file, category=None):
"""
Loads a mapping of categories->available_labels from a json file.
If category is specified it returns the LabelEncoder for this category.
If not, it returns a dict of category->LabelEncoder
:param labels_json_file:
:param category:
:return:
"""
from json import load
from sklearn.preprocessing import LabelEncoder
with open(labels_json_file) as infile:
all_labels = load(infile)
label_dict = dict(map(
lambda (k, v): (int(k), LabelEncoder().fit(v)),
all_labels.iteritems()
))
return label_dict[category] if category else label_dict
def labels_from_csv_file(csv_file, label_range):
"""
Parses a csv dataset and keeps a set of all the labels in 'label_range'.
Preserves the order in which it sees labels - does not contain duplicates.
:param csv_file:
:param label_range:
:return:
"""
labels = defaultdict(list)
label_sets = defaultdict(set)
with open(csv_file) as infile:
for line in infile:
line_tokens = line.split(';')
for i in range(label_range[0], label_range[1]+1):
label = int(line_tokens[i])
if label not in label_sets[i]:
label_sets[i].add(label)
labels[i].append(label)
# convert to regular dict of lists
return dict(labels.iteritems())
# ======================= MAIN ========================= #
if __name__ == "__main__":
from argparse import ArgumentParser
from json import dump
cli_parser = ArgumentParser(description='tools for imr datasets')
cli_parser.add_argument("operation",
help="the operation to run: 'train' or 'classify'")
cli_parser.add_argument("input",
help="the input dataset (formatted as a csv file"
"separated with ';' character")
cli_parser.add_argument("output", help="the output file")
cli_parser.add_argument("-rs", '--range-start', type=int, default=1,
help="the start of the range of labels")
cli_parser.add_argument("-re", '--range-end', type=int, default=3,
help="the end of the range of labels (inclusive)")
args = cli_parser.parse_args()
if args.operation == "storelabels":
from collections import defaultdict
# get a dict of labels from a csv dataset
labels_dict = labels_from_csv_file(args.input,
(args.range_start, args.range_end))
# dump it to the output file
with open(args.output, 'w+') as outfile:
dump(labels_dict, outfile)
else:
print("I do not know operation:", args.operation)
|
project-asap/IReS-Platform
|
asap-tools/imr-code/imr_workflow_spark/operators/imr_tools.py
|
Python
|
apache-2.0
| 5,668 | 0.000706 |
#!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
"""Support for simple JSON templates.
A JSON template is a dictionary of JSON data in which string values
may be simple templates in string.Template format (i.e.,
$dollarSignEscaping). By default, the template is expanded against
its own data, optionally updated with additional context.
"""
import json
from string import Template
import sys
__author__ = 'smulloni@google.com (Jacob Smullyan)'
def ExpandJsonTemplate(json_data, extra_context=None, use_self=True):
"""Recursively template-expand a json dict against itself or other context.
The context for string expansion is the json dict itself by default, updated
by extra_context, if supplied.
Args:
json_data: (dict) A JSON object where string values may be templates.
extra_context: (dict) Additional context for template expansion.
use_self: (bool) Whether to expand the template against itself, or only use
extra_context.
Returns:
A dict where string template values have been expanded against
the context.
"""
if use_self:
context = dict(json_data)
else:
context = {}
if extra_context:
context.update(extra_context)
def RecursiveExpand(obj):
if isinstance(obj, list):
return [RecursiveExpand(x) for x in obj]
elif isinstance(obj, dict):
return dict((k, RecursiveExpand(v)) for k, v in obj.iteritems())
elif isinstance(obj, (str, unicode)):
return Template(obj).safe_substitute(context)
else:
return obj
return RecursiveExpand(json_data)
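# Illustrative example (not part of the original module): expanding a small
# template against its own data,
#
#     >>> ExpandJsonTemplate({'name': 'world', 'greeting': 'Hello, $name!'})
#     {'name': 'world', 'greeting': 'Hello, world!'}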
if __name__ == '__main__':
if len(sys.argv) > 1:
json_in = open(sys.argv[1])
else:
json_in = sys.stdin
data = json.load(json_in)
expanded = ExpandJsonTemplate(data)
json.dump(expanded, sys.stdout, indent=2)
|
dougbeal/google-apis-client-generator
|
src/googleapis/codegen/utilities/json_expander.py
|
Python
|
apache-2.0
| 1,809 | 0.008292 |
"""
raven.transport.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
class InvalidScheme(ValueError):
"""
Raised when a transport is constructed using a URI which is not
handled by the transport
"""
class DuplicateScheme(StandardError):
"""
Raised when registering a handler for a particular scheme which
is already registered
"""
|
lptorres/noah-inasafe
|
web_api/third_party/raven/transport/exceptions.py
|
Python
|
gpl-3.0
| 498 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 07 13:58:49 2015
@author: Wasit
"""
import serial
import re
import datetime
#ser = serial.Serial('/dev/tty.usbserial', 9600)
#ser = serial.Serial('COM7', 9600)
#ser = serial.Serial(0) # open first serial port
ser=None
for i in xrange(10):
try:
ser = serial.Serial(i)
break
except:
print "port COM%d is disabled"%(i+1)
print "Connecting to port: "+ser.name
endTime = datetime.datetime.now() + datetime.timedelta(seconds=5)
while True:
if datetime.datetime.now() >= endTime:
break
record=re.split(',',ser.readline())
record = map(int, record)
print record
ser.close()
|
wasit7/tutorials
|
arduino_python/02_python_serial/readUno.py
|
Python
|
mit
| 669 | 0.019432 |
from datetime import datetime, timedelta
from manager_rest.test.base_test import BaseServerTestCase
from cloudify_rest_client.exceptions import CloudifyClientError
class ExecutionSchedulesTestCase(BaseServerTestCase):
DEPLOYMENT_ID = 'deployment'
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
an_hour_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=1)
two_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=2)
three_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=3)
three_weeks_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(weeks=3)
deployment_id = None
def setUp(self):
super(ExecutionSchedulesTestCase, self).setUp()
_, self.deployment_id, _, _ = self.put_deployment(self.DEPLOYMENT_ID)
def test_schedule_create(self):
schedule_id = 'sched-1'
workflow_id = 'install'
schedule = self.client.execution_schedules.create(
schedule_id, self.deployment_id, workflow_id,
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(schedule.id, schedule_id)
self.assertEqual(schedule.deployment_id, self.deployment_id)
self.assertEqual(schedule.workflow_id, workflow_id)
self.assertEqual(datetime.strptime(schedule.since, self.fmt),
self.an_hour_from_now)
self.assertEqual(len(schedule['all_next_occurrences']), 5)
self.assertEqual(
datetime.strptime(schedule['next_occurrence'], self.fmt),
self.an_hour_from_now)
self.assertEqual(schedule['slip'], 0)
self.assertEqual(schedule['stop_on_fail'], False)
def test_schedule_create_weekdays(self):
schedule = self.client.execution_schedules.create(
'sched-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.three_weeks_from_now,
recurrence='1 days', weekdays=['mo', 'tu', 'we', 'th'])
self.assertEqual(len(schedule['all_next_occurrences']), 12) # 3w * 4d
def test_schedules_list(self):
schedule_ids = ['sched-1', 'sched-2']
for schedule_id in schedule_ids:
self.client.execution_schedules.create(
schedule_id, self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
schedules = self.client.execution_schedules.list()
self.assertEqual(len(schedules), 2)
self.assertSetEqual({s.id for s in schedules}, set(schedule_ids))
def test_schedule_delete(self):
self.client.execution_schedules.create(
'delete-me', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(len(self.client.execution_schedules.list()), 1)
self.client.execution_schedules.delete('delete-me', self.deployment_id)
self.assertEqual(len(self.client.execution_schedules.list()), 0)
def test_schedule_update(self):
schedule = self.client.execution_schedules.create(
'update-me', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.two_hours_from_now,
recurrence='1 minutes')
# `until` is inclusive
self.assertEqual(len(schedule['all_next_occurrences']), 61)
self.assertEqual(schedule['rule']['recurrence'], '1 minutes')
self.assertEqual(schedule['slip'], 0)
self.client.execution_schedules.update(
'update-me', self.deployment_id, recurrence='5 minutes', slip=30)
# get the schedule from the DB and not directly from .update endpoint
schedule = self.client.execution_schedules.get('update-me',
self.deployment_id)
self.assertEqual(len(schedule['all_next_occurrences']), 13) # 60/5+1
self.assertEqual(schedule['rule']['recurrence'], '5 minutes')
self.assertEqual(schedule['slip'], 30)
self.client.execution_schedules.update(
'update-me', self.deployment_id, until=self.three_hours_from_now)
schedule = self.client.execution_schedules.get('update-me',
self.deployment_id)
self.assertEqual(len(schedule['all_next_occurrences']), 25) # 2*60/5+1
def test_schedule_get_invalid_id(self):
self.assertRaisesRegex(
CloudifyClientError,
'404: Requested `ExecutionSchedule` .* was not found',
self.client.execution_schedules.get,
'nonsuch',
self.deployment_id
)
def test_schedule_create_no_since(self):
self.assertRaises(
AssertionError,
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'some_workflow',
recurrence='1 minutes', count=5
)
def test_schedule_create_invalid_time_format(self):
self.assertRaisesRegex(
AttributeError,
"'str' object has no attribute 'isoformat'",
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'install',
since='long ago', recurrence='1 minutes', count=5
)
def test_schedule_create_invalid_workflow(self):
self.assertRaisesRegex(
CloudifyClientError,
'400: Workflow some_workflow does not exist',
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'some_workflow',
since=self.an_hour_from_now, recurrence='1 minutes', count=5,
)
def test_schedule_invalid_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.create,
'bad-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['oneday', 'someday']
)
self.client.execution_schedules.create(
'good-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours', count=6,
weekdays=['mo', 'tu']
)
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.update,
'good-weekdays', self.deployment_id, weekdays=['oneday', 'someday']
)
def test_schedule_create_invalid_complex_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.create,
'bad-complex-wd', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['5tu']
)
def test_schedule_create_invalid_recurrence_with_complex_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* complex weekday expression',
self.client.execution_schedules.create,
'bad-complex-wd', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['2mo', 'l-tu']
)
def test_schedule_invalid_repetition_without_recurrence(self):
recurrence_error = \
'400: recurrence must be specified for execution count ' \
'larger than 1'
self.assertRaisesRegex(
CloudifyClientError,
recurrence_error,
self.client.execution_schedules.create,
'no-recurrence-no-count', self.deployment_id, 'uninstall',
since=self.an_hour_from_now, weekdays=['su', 'mo', 'tu'],
)
self.client.execution_schedules.create(
'no-recurrence-count-1', self.deployment_id, 'install',
since=self.an_hour_from_now, count=1,
)
self.assertRaisesRegex(
CloudifyClientError,
recurrence_error,
self.client.execution_schedules.update,
'no-recurrence-count-1', self.deployment_id, count=2
)
def test_schedule_create_invalid_recurrence(self):
self.assertRaisesRegex(
CloudifyClientError,
'400: `10 doboshes` is not a legal recurrence expression.',
self.client.execution_schedules.create,
'bad-freq', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='10 doboshes'
)
|
cloudify-cosmo/cloudify-manager
|
rest-service/manager_rest/test/endpoints/test_execution_schedules.py
|
Python
|
apache-2.0
| 8,636 | 0 |
#!/usr/bin/env python
from sys import argv, stderr
usage = \
"""
Usage: {program} <sample rate> <A4 freq.> [octaves=8]
e.g.: {program} 64000 442.0 5
""".format(program=argv[0])
if len(argv) < 3 or len(argv) > 4 :
print(usage, file = stderr)
exit(1)
A4 = 0
sample_rate = 0
octaves = 8
try:
A4 = float(argv[2])
except:
print("Error, invalid argument: Freq. must be a number!", file = stderr)
print(usage, file = stderr)
exit(1)
try:
sample_rate = int(argv[1])
except:
print("Error, invalid argument: Sample rate must be an integer!", \
file = stderr)
print(usage, file = stderr)
exit(1)
if len(argv) == 4 :
try:
octaves = int(argv[3])
except:
print("Error, invalid argument: Octaves must be an integer!", \
file = stderr)
print(usage, file = stderr)
exit(1)
freq_ratio = 2**(1/12)
base_freq = A4/(freq_ratio**57)
periods = [round(sample_rate/(2*base_freq*freq_ratio**t)) \
for t in range(0, 12*octaves)]
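# Worked example (illustrative): index 57 corresponds to A4, so with
# sample_rate=64000 and A4=442.0 the base frequency is 442/2**(57/12) ~= 16.43 Hz
# (C0) and the A4 half-period is round(64000/(2*442)) = 72 samples.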
print("uint16_t tone_periods[{ntones}] = {{".format(ntones=12*octaves))
for o in range(0, octaves):
print('\t', end='')
for i in range(0, 12):
print("{period}, ".format(period=periods[12*o+i]), end='')
print('')
print("};")
|
bardes/sonitus
|
tools/tone_gen.py
|
Python
|
mit
| 1,271 | 0.02203 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-07-27 15:04
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('press', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='press',
name='password_length',
field=models.PositiveIntegerField(default=12, validators=[django.core.validators.MinValueValidator(9)]),
),
migrations.AddField(
model_name='press',
name='password_number',
field=models.BooleanField(default=False, help_text='If set, passwords must include one number.'),
),
migrations.AddField(
model_name='press',
name='password_upper',
field=models.BooleanField(default=False, help_text='If set, passwords must include one upper case.'),
),
]
|
BirkbeckCTP/janeway
|
src/press/migrations/0002_auto_20170727_1504.py
|
Python
|
agpl-3.0
| 979 | 0.003064 |
# -*- coding: utf8 -*-
"CountColumn filter"
from .abstract import AbstractFilter
class CountColumn(AbstractFilter):
"Count a flux's column and put the result in a variable"
name = 'Compter colonnes'
description = "Compte le nombre de colonnes d'un flux et met le résultat dans une variable"
node_in = ['cible']
parameters = [
{
'name': 'Variable',
'key': 'target',
'type': 'integer'
}
]
def run(self):
"Execute the filter"
target = self._model.config('target')
value = len(self._flux_in['cible']['headers'])
self._registery.set(target, value)
|
Exanis/cannelloni
|
backend/filters/count_column.py
|
Python
|
mit
| 662 | 0.003026 |
import bitcoin
import struct
import serialize
class BlockHeader:
def __init__(self):
self.height = None
@classmethod
def deserialize(cls, raw):
assert len(raw) == 80
self = cls()
self.version = struct.unpack('<I', raw[:4])[0]
self.previous_block_hash = raw[4:36][::-1]
assert len(self.previous_block_hash) == 32
self.merkle = raw[36:68][::-1]
assert len(self.merkle) == 32
self.timestamp, self.bits, self.nonce = struct.unpack('<III', raw[68:])
return self
@property
def hash(self):
data = struct.pack('<I', self.version)
data += self.previous_block_hash[::-1]
data += self.merkle[::-1]
data += struct.pack('<III', self.timestamp, self.bits, self.nonce)
return bitcoin.Hash(data)[::-1]
def __repr__(self):
return '<BlockHeader %s>' % (self.hash.encode("hex"),)
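# Illustrative check (not part of the original module): deserializing the 80-byte
# Bitcoin genesis block header yields version=1, an all-zero previous_block_hash,
# timestamp=1231006505, bits=0x1d00ffff, nonce=2083236893, and a .hash whose
# big-endian hex form starts with 000000000019d668.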
class OutPoint(object):
def __init__(self):
self.hash = None
self.index = None
def is_null(self):
return (len(self.hash) == 0) and (self.index == 0xffffffff)
def __repr__(self):
return "OutPoint(hash=%s, index=%i)" % (self.hash.encode("hex"), self.index)
def serialize(self):
return serialize.ser_output_point(self)
@staticmethod
def deserialize(bytes):
return serialize.deser_output_point(bytes)
class TxOut(object):
def __init__(self):
self.value = None
self.script = ""
def __repr__(self):
return "TxOut(value=%i.%08i script=%s)" % (self.value // 100000000, self.value % 100000000, self.script.encode("hex"))
def serialize(self):
return serialize.ser_txout(self)
@staticmethod
def deserialize(bytes):
return serialize.deser_txout(bytes)
class TxIn(object):
def __init__(self):
self.previous_output = OutPoint()
self.script = ""
self.sequence = 0xffffffff
def is_final(self):
return self.sequence == 0xffffffff
def __repr__(self):
return "TxIn(previous_output=%s script=%s sequence=%i)" % (repr(self.previous_output), self.script.encode("hex"), self.sequence)
def serialize(self):
return serialize.ser_txin(self)
@staticmethod
def deserialize(bytes):
return serialize.deser_txin(bytes)
class Transaction:
def __init__(self):
self.version = 1
self.locktime = 0
self.inputs = []
self.outputs = []
    def is_final(self):
        for tin in self.inputs:
            if not tin.is_final():
                return False
        return True
    def is_coinbase(self):
        return len(self.inputs) == 1 and self.inputs[0].previous_output.is_null()
def __repr__(self):
return "Transaction(version=%i inputs=%s outputs=%s locktime=%i)" % (self.version, repr(self.inputs), repr(self.outputs), self.locktime)
def serialize(self):
return serialize.ser_tx(self)
@staticmethod
def deserialize(bytes):
return serialize.deser_tx(bytes)
|
BWallet/sx
|
src/obelisk/models.py
|
Python
|
agpl-3.0
| 3,035 | 0.003295 |
__author__ = 'sondredyvik'
class ConstraintNet:
def __init__(self):
self.constraints = {}
def add_constraint(self,key,constraint):
if key in self.constraints:
self.constraints[key].append(constraint)
else:
self.constraints[key] = [constraint]
|
pmitche/it3105-aiprogramming
|
project1/common/constraintnet.py
|
Python
|
mit
| 304 | 0.009868 |
#
# Graphene schema for exposing ProcessClassification model
#
import graphene
from valuenetwork.valueaccounting.models import ProcessType
from valuenetwork.api.types.Process import ProcessClassification
from valuenetwork.api.types.EconomicEvent import Action
from django.db.models import Q
class Query(object): #graphene.AbstractType):
process_classification = graphene.Field(ProcessClassification,
id=graphene.Int())
all_process_classifications = graphene.List(ProcessClassification)
def resolve_process_classification(self, args, *rargs):
id = args.get('id')
if id is not None:
pt = ProcessType.objects.get(pk=id)
if pt:
return pt
return None
def resolve_all_process_classifications(self, args, context, info):
return ProcessType.objects.all()
|
FreedomCoop/valuenetwork
|
valuenetwork/api/schemas/ProcessClassification.py
|
Python
|
agpl-3.0
| 887 | 0.002255 |
"""
This module implements a filesystem storage adapter.
"""
from __future__ import unicode_literals
import errno
import logging
import os
from flask import current_app
from .interface import ImagineAdapterInterface
from PIL import Image
LOGGER = logging.getLogger(__name__)
class ImagineFilesystemAdapter(ImagineAdapterInterface):
"""
Filesystem storage adapter
"""
source_folder = None
cache_folder = None
def __init__(self, **kwargs):
"""
        Init adapter
:param kwargs: parameters
:return:
"""
self.source_folder = kwargs.get('source_folder', '').strip('/')
self.cache_folder = kwargs.get('cache_folder', 'cache').strip('/')
def get_item(self, path):
"""
Get resource item
:param path: string
:return: PIL.Image
"""
if self.source_folder:
item_path = '%s/%s/%s' % (
current_app.static_folder,
self.source_folder,
path.strip('/')
)
else:
item_path = '%s/%s' % (
current_app.static_folder,
path.strip('/')
)
if os.path.isfile(item_path):
try:
return Image.open(item_path)
except IOError as err:
LOGGER.warning('File not found on path "%s" with error: %s' % (item_path, str(err)))
return False
else:
return False
def create_cached_item(self, path, content):
"""
Create cached resource item
:param path: str
:param content: Image
:return: str
"""
if isinstance(content, Image.Image):
item_path = '%s/%s/%s' % (
current_app.static_folder,
self.cache_folder,
path.strip('/')
)
self.make_dirs(item_path)
content.save(item_path)
if os.path.isfile(item_path):
return '%s/%s/%s' % (current_app.static_url_path, self.cache_folder, path.strip('/'))
else: # pragma: no cover
LOGGER.warning('File is not created on path: %s' % item_path)
return False
else:
return False
def get_cached_item(self, path):
"""
Get cached resource item
:param path: str
:return: PIL.Image
"""
item_path = '%s/%s/%s' % (
current_app.static_folder,
self.cache_folder,
path.strip('/')
)
if os.path.isfile(item_path):
try:
return Image.open(item_path)
except IOError as err: # pragma: no cover
LOGGER.warning('Cached file not found on path "%s" with error: %s' % (item_path, str(err)))
return False
else:
return False
def check_cached_item(self, path):
"""
Check for cached resource item exists
:param path: str
:return: bool
"""
item_path = '%s/%s/%s' % (
current_app.static_folder,
self.cache_folder,
path.strip('/')
)
if os.path.isfile(item_path):
return '%s/%s/%s' % (current_app.static_url_path, self.cache_folder, path.strip('/'))
else:
return False
def remove_cached_item(self, path):
"""
Remove cached resource item
:param path: str
:return: bool
"""
item_path = '%s/%s/%s' % (
current_app.static_folder,
self.cache_folder,
path.strip('/')
)
if os.path.isfile(item_path):
os.remove(item_path)
return True
@staticmethod
def make_dirs(path):
"""
Create directories if not exist
:param path: string
:return:
"""
try:
os.makedirs(os.path.dirname(path))
except OSError as err:
if err.errno != errno.EEXIST:
LOGGER.error('Failed to create directory %s with error: %s' % (path, str(err)))
raise
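# Illustrative usage (names made up, not from the repository): inside a Flask
# application context the adapter resolves paths under app.static_folder, e.g.
#
#     adapter = ImagineFilesystemAdapter(source_folder='uploads', cache_folder='cache')
#     img = adapter.get_item('photos/cat.jpg')   # returns a PIL Image or False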
|
FlaskGuys/Flask-Imagine
|
flask_imagine/adapters/filesystem.py
|
Python
|
mit
| 4,244 | 0.001178 |
"""
Flask routing
"""
from flask import Flask, request, session, send_from_directory, render_template
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__, static_path="/")
app.wsgi_app = ProxyFix(app.wsgi_app)
import api
import json
import mimetypes
import os.path
from datetime import datetime
from api.common import WebSuccess, WebError
from api.annotations import api_wrapper, require_login, require_teacher, require_admin, check_csrf
from api.annotations import block_before_competition, block_after_competition
from api.annotations import log_action
import api.routes.autogen
import api.routes.user
import api.routes.team
import api.routes.stats
import api.routes.admin
import api.routes.group
import api.routes.problem
import api.routes.achievements
log = api.logger.use(__name__)
session_cookie_domain = "127.0.0.1"
session_cookie_path = "/"
session_cookie_name = "flask"
secret_key = ""
def config_app(*args, **kwargs):
"""
Return the app object configured correctly.
This needed to be done for gunicorn.
"""
app.secret_key = secret_key
app.config["SESSION_COOKIE_DOMAIN"] = session_cookie_domain
app.config["SESSION_COOKIE_PATH"] = session_cookie_path
app.config["SESSION_COOKIE_NAME"] = session_cookie_name
app.register_blueprint(api.routes.autogen.blueprint, url_prefix="/api/autogen")
app.register_blueprint(api.routes.user.blueprint, url_prefix="/api/user")
app.register_blueprint(api.routes.team.blueprint, url_prefix="/api/team")
app.register_blueprint(api.routes.stats.blueprint, url_prefix="/api/stats")
app.register_blueprint(api.routes.admin.blueprint, url_prefix="/api/admin")
app.register_blueprint(api.routes.group.blueprint, url_prefix="/api/group")
app.register_blueprint(api.routes.problem.blueprint, url_prefix="/api/problems")
app.register_blueprint(api.routes.achievements.blueprint, url_prefix="/api/achievements")
api.logger.setup_logs({"verbose": 2})
return app
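# Illustrative deployment (assumed, not taken from the repository): gunicorn can
# invoke the factory above directly, e.g.
#     gunicorn 'api.app:config_app()' --bind 127.0.0.1:8000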
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Methods', 'GET, POST')
response.headers.add('Access-Control-Allow-Credentials', 'true')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type, *')
response.headers.add('Cache-Control', 'no-cache')
response.headers.add('Cache-Control', 'no-store')
if api.auth.is_logged_in():
if 'token' in session:
response.set_cookie('token', session['token'])
else:
csrf_token = api.common.token()
session['token'] = csrf_token
response.set_cookie('token', csrf_token)
# JB: This is a hack. We need a better solution
if request.path[0:19] != "/api/autogen/serve/":
        response.mimetype = 'application/json'
return response
@app.route('/api/time', methods=['GET'])
@api_wrapper
def get_time():
return WebSuccess(data=int(datetime.utcnow().timestamp()))
|
picoCTF/picoCTF-Platform-2
|
api/api/app.py
|
Python
|
mit
| 2,943 | 0.008155 |
# Django settings for sensible_data_platform project.
import os
import LOCAL_SETTINGS
from utils import SECURE_platform_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MAINTENANCE_MODE = False
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
BASE_DIR = LOCAL_SETTINGS.BASE_DIR
ROOT_DIR = LOCAL_SETTINGS.ROOT_DIR
ROOT_URL = LOCAL_SETTINGS.ROOT_URL
DATABASES = LOCAL_SETTINGS.DATABASES
BASE_URL = LOCAL_SETTINGS.BASE_URL
TRUST_ROOTS = LOCAL_SETTINGS.TRUST_ROOTS
PLATFORM_NAME = LOCAL_SETTINGS.PLATFORM_NAME
SUPPORT_EMAIL = LOCAL_SETTINGS.SUPPORT_EMAIL
EMAIL_HOST = LOCAL_SETTINGS.EMAIL_HOST
EMAIL_PORT = LOCAL_SETTINGS.EMAIL_PORT
EMAIL_HOST_USER = SECURE_platform_config.EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = SECURE_platform_config.EMAIL_HOST_PASSWORD
DEFAULT_FROM_EMAIL = LOCAL_SETTINGS.DEFAULT_FROM_EMAIL
EMAIL_USE_TLS = LOCAL_SETTINGS.EMAIL_USE_TLS
MAINTENANCE_IGNORE_URLS = (
r'^.*/admin/$',
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
LOGIN_URL = ROOT_URL + 'accounts/login/'
LOGIN_REDIRECT_URL = ROOT_URL
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'da'
LANGUAGES = (
('da', 'Danish'),
('en', 'English'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ROOT_DIR+'static_root/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = ROOT_URL+'static/'
# Additional locations of static files
STATICFILES_DIRS = (
ROOT_DIR+'static/',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = LOCAL_SETTINGS.SECRET_KEY
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'maintenancemode.middleware.MaintenanceModeMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.static',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth',
'sensible_data_platform.context_processors.platform',
)
ROOT_URLCONF = 'sensible_data_platform.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sensible_data_platform.wsgi.application'
TEMPLATE_DIRS = (
ROOT_DIR+'templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'openid_provider',
'accounts',
'render',
'identity_providers',
'utils',
'oauth2app',
'oauth2_authorization_server',
'uni_form',
'service_manager',
'south',
'sensible_platform_documents',
'password_reset',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
import hashlib
SESSION_COOKIE_NAME = str(hashlib.sha1(SECRET_KEY).hexdigest())
LOCALE_PATHS = (
'/home/arks/sensibledtu_DEVEL/SensibleData-Platform/sensible_data_platform/locale',
)
|
MIT-Model-Open-Data-and-Identity-System/SensibleData-Platform
|
sensible_data_platform/sensible_data_platform/settings.py
|
Python
|
mit
| 6,429 | 0.004977 |
from __future__ import print_function
from builtins import object
import psycopg2
import psycopg2.extras
from gevent.lock import BoundedSemaphore as Semaphore
from gevent.local import local as gevent_local
from config import PG_DSN,DONESTATES
from gevent import sleep
# migration stuff
import json,re
from datetime import datetime,date
import decimal
class ConnectionPool(object):
def __init__(self, dsn, max_con=12, max_idle=3,
connection_factory=psycopg2.extras.RealDictConnection):
self.dsn = dsn
self.max_con = max_con
self.max_idle = max_idle
self.connection_factory = connection_factory
self._sem = Semaphore(max_con)
self._free = []
self._local = gevent_local()
def __enter__(self):
self._sem.acquire()
try:
if getattr(self._local, 'con', None) is not None:
con = self._local.con
print('WARNING: returning existing connection (re-entered connection pool)!')
if self._free:
con = self._free.pop()
else:
con = psycopg2.connect(
dsn=self.dsn, connection_factory=self.connection_factory)
self._local.con = con
return con
except: # StandardError:
#print('releasing')
self._sem.release()
#print('released')
raise
def __exit__(self, exc_type, exc_value, traceback):
try:
if self._local.con is None:
raise RuntimeError("Exit connection pool with no connection?")
if exc_type is not None:
self.rollback()
else:
self.commit()
if len(self._free) < self.max_idle:
self._free.append(self._local.con)
self._local.con = None
finally:
self._sem.release()
#print('released')
def commit(self):
self._local.con.commit()
def rollback(self):
self._local.con.rollback()
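# Illustrative usage (not part of the original module), mirroring how the pool is
# used further down in this file:
#
#     P = ConnectionPool(PG_DSN, max_con=4)
#     with P as p:
#         C = p.cursor()
#         C.execute("select 1")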
def connect():
#raise Exception('from where')
pg = psycopg2.connect(PG_DSN)
pgc = pg.cursor(cursor_factory=psycopg2.extras.DictCursor)
pg.set_client_encoding('utf-8')
return pg,pgc
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
elif isinstance(obj,decimal.Decimal):
serial = float(obj)
elif isinstance(obj,set):
serial = list(obj)
elif isinstance(obj,date):
serial = obj.isoformat()
    else:
        raise TypeError("Type %s not serializable" % type(obj))
    return serial
def get_journals(P,C,assignee=None,metastate_group='merge',archive=False):
qry = "select * from tasks where 1=1" #journal_entries where 1=1"
args=[]
if assignee=='all': assignee=None
if assignee:
qry+=" and contents->>'assignee'=%s"
args.append(assignee)
if metastate_group:
if metastate_group!='production':
if not archive: #and t['status'] in cfg.DONESTATES: continue
qry+=" and contents->>'status' not in %s"
args.append(tuple(DONESTATES))
elif archive: #and t['status'] not in cfg.DONESTATES: continue
qry+=" and contents->>'status' in %s"
args.append(tuple(DONESTATES))
else:
raise Exception('wtf')
args = tuple(args) ;
C.execute(qry,args)
rt=[]
for r in C.fetchall():
rt.append(r)
return rt
def journal_digest(j):
"""prepare a digest of the journal's most recent state."""
rt={}
for i in j:
cat = i['created_at']
jc = i['content']
ja = i['creator']
for k,v in list(i['attrs'].items()):
if type(cat)!=str:
cat = cat.strftime('%Y-%m-%dT%H:%I:%S')
if k not in rt: rt[k]={'created_at':cat}
#print('about to compare',type(rt[k]['created_at']),'with',type(cat))
if rt[k]['created_at']<=cat:
rt[k]['created_at']=cat
rt[k]['value']=v
return rt
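# Illustrative example (not part of the original module): two entries touching the
# same attribute keep only the most recent value,
#
#     journal_digest([
#         {'created_at': '2020-01-01T00:00:00', 'content': '', 'creator': 'a',
#          'attrs': {'status': 'open'}},
#         {'created_at': '2020-02-01T00:00:00', 'content': '', 'creator': 'b',
#          'attrs': {'status': 'closed'}},
#     ])
#     => {'status': {'created_at': '2020-02-01T00:00:00', 'value': 'closed'}}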
def validate_save(C,tid,fetch_stamp,exc=True):
C.execute("select changed_at,changed_by from tasks where id=%s",(tid,))
res = C.fetchone()
if res and fetch_stamp and res['changed_at'] and res.get('changed_by')!='notify-trigger':
eq = res['changed_at']==fetch_stamp
if exc:
assert eq,"task %s: fetch stamp!=changed_at by %s (%s , %s)"%(tid,res.get('changed_by'),fetch_stamp,res and res['changed_at']) or None
else:
return eq,res['changed_at'],res['changed_by']
return True,res and res.get('changed_at') or None,res and res.get('changed_by') or None
def migrate_one(t,pgc,fetch_stamp=None,user=None):
td={}
tid = t._id
parid = "/".join(tid.split("/")[0:-1])
if not parid: parid=None
for k in t.__dict__:
if k not in ['_dynamic_properties','_doc']:
if t.__dict__[k] is not None:
assert k not in td,"%s already exists with value %s (!= %s) for %s"%(k,td[k],t.__dict__[k],t._id)
td[k]=t.__dict__[k]
if 'journal' in td and len(td['journal']):
td['journal_digest']=journal_digest(td['journal'])
tdj = json.dumps(td,default=json_serial)
pgc.execute("select * from tasks where id=%s",(tid,))
res = pgc.fetchone()
if not res:
op='ins'
qry = "insert into tasks (contents,parent_id,changed_at,changed_by,id) values(%s,%s,%s,%s,%s)"
chat=datetime.now() ; chatf='now'
suser = user
else:
excont = res['contents']
nwcont = json.loads(tdj)
# exf = open('/tmp/ex.json','w') ; exf.write(json.dumps(excont)) ; exf.close()
# nwf = open('/tmp/nw.json','w') ; nwf.write(json.dumps(nwcont)) ; nwf.close()
if nwcont==excont and user not in ['notify-trigger']:
chat = res['changed_at'] ; chatf='existing'
suser = res['changed_by']
else:
chatf='now'
chat = datetime.now() ;
suser = user
#raise Exception(type(nwcont),type(excont),len(nwcont),len(excont),nwcont==excont)
op='upd'
qry = "update tasks set contents=%s,parent_id=%s,changed_at=%s,changed_by=%s where id=%s"
data = (tdj,parid,chat,suser,t._id)
#print qry,data
print((op,t._id,parid))
pgc.execute(qry,data)
# -- create table tasks (id varchar primary key, parent_id varchar references tasks(id) , contents json);
def get_repos(C):
C.execute("select name from repos")
res = C.fetchall()
return [r['name'] for r in res]
def get_usernames(C):
C.execute("select username from participants where active=true order by username")
res = C.fetchall()
return [r['username'] for r in res]
def hasperm_db(C,perm,user):
qry = "select count(*) cnt from participants where username=%s and %s=any(perms) and active=true"
C.execute(qry,(perm,user))
o = C.fetchone()
rt = o['cnt'] and True or False
return rt
def hasperm(perms,perm):
rt = perm in perms
#print(qry,(user,perm),'=>',rt)
return rt
def get_participants(C,sort=True,disabled=False):
qry = "select * from participants "
if not disabled: qry+=" where active=true "
if sort: qry+=" order by username"
C.execute(qry)
rt = {}
for r in C.fetchall():
if r['username'] not in rt: rt[r['username']]={}
for k in list(r.keys()):
rt[r['username']][k]=r[k]
return rt #dict([(r['username'],dict([(k,r[k]) for k in r.keys()])) for r in C.fetchall()])
def get_all_journals(C,day=None,creator=None):
qry = "select * from journal_entries where 1=1 "
cnd=[]
if day:
qry+=" and created_at::date between %s and %s"
cnd.append(day[0]) ; cnd.append(day[1])
if creator:
qry+=" and creator=%s"
cnd.append(creator)
C.execute(qry,cnd)
jes = C.fetchall()
return [{'creator':je['creator'],
'content':je['cnt'],
'attrs':je['attrs'],
'created_at':je['created_at'],
'tid':je['tid']} for je in jes]
# parents retrieval:
# with recursive allparents as (select id,parent_id from tasks t where id='832/408/8/1' union all select t.id,t.parent_id from tasks t join allparents on allparents.parent_id=t.id) select * from allparents order by id
# children retrieval:
def get_children(C,tid):
from couchdb import Task
qry="select t.* from task_hierarchy th,tasks t where %s=any(th.path_info) and th.id<>%s and t.id=th.id"
opts=(tid,tid)
C.execute(qry,opts)
rows = [t['contents'] for t in C.fetchall()]
rt=[]
for r in rows:
r['created_at']=datetime.strptime( r['created_at'].split('.')[0].split('Z')[0], "%Y-%m-%dT%H:%M:%S" )
t = Task(**r)
rt.append(t)
return rt
def get_cross_links(C,tid):
C.execute("select * from cross_links where id=%s or clid=%s",(tid,tid))
rt = C.fetchall()
rt2=[]
for r in rt:
rt2.append(r['clid'])
rt2.append(r['id'])
rt2 = [r for r in rt2 if r!=tid]
return rt2
def get_new_idx(C,parent=None):
if parent==None:
qry = "select max((regexp_split_to_array(id,'/'))[1]::integer)+1 new_idx from tasks"
conds=()
else:
pars = str(parent).split("/")
parlen=len(pars)+1
like=str(parent)+'/%'
qry = "select max((regexp_split_to_array(id,'/'))["+str(parlen)+"]::integer)+1 new_idx from tasks where id like %s"
conds=(like,)
#print('QUERYING',qry,conds)
C.execute(qry,conds)
nid = C.fetchall()[0]['new_idx']
if nid==None:
nid='1'
if parent:
nid=str(parent)+'/'+str(nid)
rt= str(nid)
#if parent: raise Exception(parent,nid,'=>',rt)
assert re.compile('^([0-9]+)([0-9\/]*)$').search(rt),"%s does not match"%rt
return rt
def get_task(C,tid):
from couchdb import Task
C.execute("select contents from tasks where id=%s",(tid,))
c = C.fetchall()[0]['contents']
return Task(**c)
def get_tags(C):
C.execute("select tag,count(*) from task_tags group by tag")
res = C.fetchall()
return dict([(r['tag'],r['count']) for r in res])
def revfmt(lower,upper):
k=(lower and str(lower) or '')+'_'+(upper and str(upper) or '')
return k
def get_revisions(C,tid,limit=None):
qry = "select * from tasks where id=%s union select * from tasks_history where id=%s order by sys_period desc"
args = [tid,tid]
if limit:
qry+=" limit %s"
args.append(limit)
C.execute(qry,args)
res = C.fetchall()
rt = {}
for r in res:
bnd=sorted(filter(lambda x:x,[r['sys_period'].lower,r['sys_period'].upper]))
k = revfmt(bnd[0],len(bnd)>1 and bnd[1] or None)
rt[k]=r
return rt
def last_change(C,tid):
return get_revisions(C,tid,limit=2)
def parse_last_change(C,tid):
r1,r2 = last_change(C,tid)
# this section of code is for debugging and repairing journal digests. structurization needed.
if __name__=='__main__':
import sys,jsonpatch,re
if sys.argv[1]=='digest':
tid = len(sys.argv)>2 and re.compile('^([0-9\/]+)$').search(sys.argv[2]) and sys.argv[2] or None
fix = 'fix' in sys.argv[1:]
from couchdb import init_conn,Task
P = init_conn()
with P as p:
C = p.cursor()
qry = "select id,contents from tasks"
args=[]
if tid:
qry+=" where id=%s"
args.append(tid)
C.execute(qry,args)
ts = C.fetchall()
for t in ts:
tid=t['id']
tc = t['contents']
if 'journal' not in tc:
print(tid,'no journal')
continue
jd = journal_digest(t['contents']['journal'])
if 'journal_digest' not in tc:
pjd={}
else:
pjd = t['contents']['journal_digest']
eq = jd == pjd
dfs = [d for d in jsonpatch.JsonPatch.from_diff(jd,pjd) if not d['path'].endswith('/created_at')]
if not eq and not len(dfs): eq='negligible'
if eq:
print(tid,eq,dfs)
else:
print(tid,eq,dfs,pjd,'=>',jd)
if fix:
print('FIXING',tid)
tc['journal_digest'] = jd
qry = "update tasks set contents=%s where id=%s"
args = [json.dumps(tc),tid]
C.execute(qry,args)
|
SandStormHoldings/ScratchDocs
|
pg.py
|
Python
|
mit
| 12,792 | 0.021029 |
import sys
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_dna
from Bio import SeqIO
import numpy
import math
import itertools as it
MATERNALFASTAFILENAMELISTFILENAME = sys.argv[1]
PATERNALFASTAFILENAMELISTFILENAME = sys.argv[2]
PEAKHEIGHTFILENAME = sys.argv[3]
VALUEFILENAME = sys.argv[4]
OUTPUTFILENAME = sys.argv[5]
K = int(sys.argv[6])
WINDOWSIZE = int(sys.argv[7])
WINDOWSTRIDE = int(sys.argv[8])
MOTIFLISTFILENAME = sys.argv[9]
def makeSequenceList():
# Make a list of groups of 4 bases
baseList = ["A", "C", "G", "T"]
sequenceList = []
allSameList = []
for baseOne in baseList:
# Iterate through the first base options
for baseTwo in baseList:
# Iterate through the second base options
for baseThree in baseList:
# Iterate through the third base options
for baseFour in baseList:
# Iterate through the fourth base options
currentSequence = baseOne + baseTwo + baseThree + baseFour
sequenceList.append(currentSequence)
if ((baseOne + baseTwo == baseThree + baseFour) or (baseOne + baseTwo == baseFour + baseThree)) or ((baseTwo + baseOne == baseThree + baseFour) or (baseTwo + baseOne == baseFour + baseThree)):
# The individuals are the same at the current base
allSameList.append(1)
else:
# The individuals are not the same at the current base
allSameList.append(0)
return [sequenceList, allSameList]
def filterSequenceList(sequenceList, allSameList, base):
# Filter sequences to include only those that contain the base
sequenceListFiltered = []
allSameListFiltered = []
for i in range(len(sequenceList)):
# Iterate through the sequences and choose only those that contain the base
if base in sequenceList[i]:
# The base is in the current sequence, so use it
sequenceListFiltered.append(sequenceList[i])
allSameListFiltered.append(allSameList[i])
return [sequenceListFiltered, allSameListFiltered]
def makeKMerDictFromMotifFile():
# Make a dictionary that maps K-mers from a motif file to numbers
# MAP ONLY VARIABLE K-mers
motifListFile = open(MOTIFLISTFILENAME)
kMerDict = {}
count = 0
[sequenceList, allSameList] = makeSequenceList()
for line in motifListFile:
# Iterate through the motifs and make an entry in the dictionary for each 4 x K combination of each motif
motif = line.strip()
sequences = []
allSame = []
for base in motif:
# Iterate through the motif bases and select 4-base sequences that contain the base in the motif
[sequenceListFiltered, allSameListFiltered] = filterSequenceList(sequenceList, allSameList, base)
if len(sequences) == 0:
# At the first base, so add each sequence
sequences = sequenceListFiltered
allSame = allSameListFiltered
else:
# At a later base, so find all combinations of it and earlier bases
lastSequences = sequences
lastAllSameList = allSame
sequences = []
allSame = []
for i in range(len(lastSequences)):
# Iterate through the existing sequences and add each new one to it
for j in range(len(sequenceListFiltered)):
# Iterate through the new sequences and append each
sequences.append(lastSequences[i] + sequenceListFiltered[j])
allSame.append(lastAllSameList[i] * allSameListFiltered[j])
for i in range(len(sequences)):
# Iterate through the sequences and add each new one that is not all the same to the dictionary
if (allSame[i] == 0) and (sequences[i] not in kMerDict.keys()):
# The sequence is new and not all the same, so add it to the dictionary
kMerDict[sequences[i]] = count
count = count + 1
motifListFile.close()
return kMerDict
def getFastaList(fastaFileList):
# Get the next fasta from each file in a list
fastaList = []
for fastaFile in fastaFileList:
# Iterate through the fasta files and begin parsing each
fastaName = fastaFile.readline().strip()
if fastaName != "":
# Not at the end of the fasta file
DNASequenceRecord = SeqRecord(Seq(fastaFile.readline().strip(), generic_dna), name = fastaName)
fastaList.append(DNASequenceRecord)
else:
# At the end of the fasta file, so stop
break
return fastaList
def makeValueFileEntries(valueFile, peakHeightLineElements, peakHeightColA, peakHeightColB):
# Make the value entries
# The 1st 8 are the fold change from individual A to individual B, and the next 8 are the fold change from individual B to individual A
FCA = int(peakHeightLineElements[peakHeightColA]) - int(peakHeightLineElements[peakHeightColB])
for i in range(8):
# Record the fold change from individual A to individual B 8 times for the 8 examples
valueFile.write(str(FCA) + "\n")
FCB = int(peakHeightLineElements[peakHeightColB]) - int(peakHeightLineElements[peakHeightColA])
for i in range(8):
# Record the fold change from individual B to individual A 8 times for the 8 examples
valueFile.write(str(FCB) + "\n")
def makeReverseComplements(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB):
# Make the reverse complements of all of the sequences
seqReverseMaternalA = seqRecordMaternalA.seq.reverse_complement().upper()
seqReversePaternalA = seqRecordPaternalA.seq.reverse_complement().upper()
seqReverseMaternalB = seqRecordMaternalB.seq.reverse_complement().upper()
seqReversePaternalB = seqRecordPaternalB.seq.reverse_complement().upper()
return [seqReverseMaternalA, seqReversePaternalA, seqReverseMaternalB, seqReversePaternalB]
def recordKMerCounts(kMerCounts, outputFile):
# Record the k-mer counts in the windows of the sequence to the output file
for count in kMerCounts:
# Iterate through the k-mer counts and record each in the output file
outputFile.write(str(count) + "\t")
outputFile.write("\n")
def getKMerFeatures(sequenceOneA, sequenceTwoA, sequenceOneB, sequenceTwoB, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE):
# Convert the fasta sequence pair to the (4 x 4) x (4 x 4) alphabet and record its k-mer counts
# ASSUMES THAT sequenceOneA sequenceTwoA, sequenceOneB, and sequenceTwoB ARE THE SAME LENGTH (no indels)
# THE END OF THE SEQUENCE WILL GET CUT OFF IF THE WINDOWS DO NOT EXACTLY ENCOMPASS THE SEQUENCE
numWindows = math.trunc(float(len(sequenceOneA) - WINDOWSIZE)/float(WINDOWSTRIDE)) + 1
    kMerCounts = numpy.zeros(numWindows * len(kMerDict))
for l in range(numWindows):
# Iterate through the windows and get the k-mer counts in each
windowStart = l * WINDOWSTRIDE
windowEnd = windowStart + WINDOWSIZE
for i in range(windowStart, windowEnd - K + 1):
# Iterate through the k-mers in the current window mark a 1 in the appropriate k-mer
sequenceToLookUp = ""
for j in range(K):
# Iterate through the bases in the k-mer and make the sequence combination that represents it
sequenceToLookUp = sequenceOneA[i + j] + sequenceTwoA[i + j] + sequenceOneB[i + j] + sequenceTwoB[i + j]
if sequenceToLookUp not in kMerDict.keys():
# The sequence has a wild card or is not in a motif, so skip it
continue
            kMerCounts[(l * len(kMerDict)) + kMerDict[sequenceToLookUp]] = kMerCounts[(l * len(kMerDict)) + kMerDict[sequenceToLookUp]] + 1
recordKMerCounts(kMerCounts, outputFile)
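# Worked example (illustrative): for a 1050-base sequence with WINDOWSIZE=300 and
# WINDOWSTRIDE=100, numWindows = trunc((1050-300)/100) + 1 = 8, windows start at
# 0, 100, ..., 700, and the final 50 bases are never counted.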
def makeFourFeatureCounts(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE):
# Make four sets of sequence features for two sequence pairs
# Sequence set 1: (MaternalA, PaternalA), (MaternalB, PaternalB)
# Sequence set 2: (MaternalA, PaternalA), (PaternalB, MaternalB)
# Sequence set 3: (PaternalA, MaternalA), (MaternalB, PaternalB)
# Sequence set 4: (PaternalA, MaternalA), (PaternalB, MaternalB)
getKMerFeatures(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
getKMerFeatures(seqRecordMaternalA, seqRecordPaternalA, seqRecordPaternalB, seqRecordMaternalB, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
getKMerFeatures(seqRecordPaternalA, seqRecordMaternalA, seqRecordMaternalB, seqRecordPaternalB, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
getKMerFeatures(seqRecordPaternalA, seqRecordMaternalA, seqRecordPaternalB, seqRecordMaternalB,outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
def makeSingleSequenceInputsKMerCounts(((seqRecordMaternalA, seqRecordPaternalA), (seqRecordMaternalB, seqRecordPaternalB)), valueFile, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE, peakHeightLineElements, peakHeightColA, peakHeightColB):
# Make all of the sequence features for a fasta file, and make the corresponding values
makeValueFileEntries(valueFile, peakHeightLineElements, peakHeightColA, peakHeightColB)
[seqReverseMaternalA, seqReversePaternalA, seqReverseMaternalB, seqReversePaternalB] = makeReverseComplements(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB)
makeFourFeatureCounts(seqRecordMaternalA.seq.upper(), seqRecordPaternalA.seq.upper(), seqRecordMaternalB.seq.upper(), seqRecordPaternalB.seq.upper(), outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
makeFourFeatureCounts(seqReverseMaternalA, seqReversePaternalA, seqReverseMaternalB, seqReversePaternalB, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
makeFourFeatureCounts(seqRecordMaternalB.seq.upper(), seqRecordPaternalB.seq.upper(), seqRecordMaternalA.seq.upper(), seqRecordPaternalA.seq.upper(), outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
makeFourFeatureCounts(seqReverseMaternalB, seqReversePaternalB, seqReverseMaternalA, seqReversePaternalA, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE)
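# Note: makeSingleSequenceInputsKMerCounts above therefore writes
# 4 parental orderings x 2 strands x 2 pair orders = 16 rows of windowed k-mer counts per sequence pair.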
def makeSequenceInputsKMerCountsAllSeqIter(kMerDict):
# Make sequence inputs for all of the pairs of individuals
# ASSUMES THAT THE INDIVIDUALS LISTED IN maternalFastaFileNameListFile and paternalFastaFileNameListFile ARE IN THE SAME ORDER
maternalFastaFileNameListFile = open(MATERNALFASTAFILENAMELISTFILENAME)
paternalFastaFileNameListFile = open(PATERNALFASTAFILENAMELISTFILENAME)
maternalFastaFileList = []
paternalFastaFileList = []
for line in maternalFastaFileNameListFile:
# Iterate through the fasta files and make a list of the maternal and paternal ones
maternalFastaFileList.append(open(line.strip()))
paternalFastaFileList.append(open(paternalFastaFileNameListFile.readline().strip()))
maternalFastaFileNameListFile.close()
paternalFastaFileNameListFile.close()
valueFile = open(VALUEFILENAME, 'w+')
outputFile = open(OUTPUTFILENAME, 'w+')
peakHeightFile = open(PEAKHEIGHTFILENAME)
print "Starting iterations!"
maternalFastas = getFastaList(maternalFastaFileList)
paternalFastas = getFastaList(paternalFastaFileList)
while len(maternalFastas) > 0:
# Iterate through all of the sequences and make all of the images for each combination
fastaPairs = it.izip(maternalFastas, paternalFastas)
fastaCombinations = it.combinations(fastaPairs, 2)
peakHeightLineElements = peakHeightFile.readline().strip().split("\t")
peakHeightColA = 0
peakHeightColB = 1
for comb in fastaCombinations:
# Iterate through the combinations of sequences and make the images for each
            makeSingleSequenceInputsKMerCounts(comb, valueFile, outputFile, kMerDict, WINDOWSIZE, WINDOWSTRIDE, peakHeightLineElements, peakHeightColA, peakHeightColB)
peakHeightColB = peakHeightColB + 1
if peakHeightColB >= len(maternalFastas):
# The sequence inputs for all of the pairs for the first individual in the current pair have been made
peakHeightColA = peakHeightColA + 1
peakHeightColB = peakHeightColA + 1
maternalFastas = getFastaList(maternalFastaFileList)
paternalFastas = getFastaList(paternalFastaFileList)
def main():
# NOT DEBUGGED
kMerDict = makeKMerDictFromMotifFile()
makeSequenceInputsKMerCountsAllSeqIter(kMerDict)
if __name__=="__main__":
main()
|
imk1/IMKTFBindingCode
|
makeSequenceInputsKMerMotifCounts.py
|
Python
|
mit
| 12,089 | 0.020928 |
# Copyright (c) 2020, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/spherical_functions/blob/master/LICENSE>
### NOTE: The functions in this file are intended purely for inclusion in the Grid class. In
### particular, they assume that the first argument, `self`, is an instance of Grid. They should
### probably not be used outside of that class.
def modes(self, ell_max=None, **kwargs):
"""Return mode weights of function decomposed into SWSHs
This method uses `spinsfast` to convert values on an equiangular grid to mode weights.
The output array has one less dimension than this object; rather than the last two axes giving
the values on the two-dimensional grid, the last axis gives the mode weights.
Parameters
==========
ell_max: None or int [defaults to None]
Maximum ell value in the output. If None, the result will have enough ell values to express
the data on the grid without aliasing: (max(n_phi, n_theta) - 1) // 2.
**kwargs: any types
Additional keyword arguments are passed through to the Modes constructor on output
"""
import copy
import numpy as np
import spinsfast
from .. import Modes
    ell_max = ell_max or (max(self.n_phi, self.n_theta) - 1) // 2
    # Pass any extra keyword arguments through to the Modes constructor
    metadata = copy.copy(kwargs)
return Modes(spinsfast.map2salm(self.view(np.ndarray), self.s, ell_max),
spin_weight=self.s, ell_min=0, ell_max=ell_max, **metadata)
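# A minimal usage sketch (illustrative only; `grid` is an assumed Grid instance of spin weight s
# sampled on an equiangular (n_theta, n_phi) grid):
#
#     modes = grid.modes()           # band limit defaults to (max(n_phi, n_theta) - 1) // 2
#     modes = grid.modes(ell_max=8)  # or request an explicit ell_max
#
# The returned Modes object keeps the leading (non-grid) axes of `grid` and replaces the two
# grid axes with a single axis of mode weights.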
def _check_broadcasting(self, array, reverse=False):
"""Test whether or not the given array can broadcast against this object"""
import numpy as np
if isinstance(array, type(self)):
try:
if reverse:
np.broadcast(array, self)
else:
np.broadcast(self, array)
except ValueError:
return False
else:
return True
else:
if np.ndim(array) > np.ndim(self)-2:
raise ValueError(f"Cannot broadcast array of {np.ndim(array)} dimensions against {type(self).__name__} "
f"object of fewer ({np.ndim(self)-2}) non-grid dimensions.\n"
"This is to ensure that scalars do not operate on individual "
"grid values; they must operate on all simultaneously.\n"
"If that is the case and you still want to broadcast, add more "
"dimensions before this object's first dimension.")
try:
if reverse:
np.broadcast(array, self[..., 0, 0])
else:
np.broadcast(self[..., 0, 0], array)
except ValueError:
return False
else:
return True
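# Illustrative broadcasting examples (shapes are assumed, not from the original file): for a Grid
# of shape (3, n_theta, n_phi), a scalar or a length-3 array broadcasts and _check_broadcasting
# returns True, whereas an (n_theta, n_phi)-shaped array raises ValueError because it has more
# dimensions than the single non-grid axis.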
|
moble/spherical_functions
|
spherical_functions/SWSH_grids/utilities.py
|
Python
|
mit
| 2,746 | 0.006191 |
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
class MetricsConfig(AppConfig):
name = "weblate.metrics"
label = "metrics"
verbose_name = "Metrics"
|
nijel/weblate
|
weblate/metrics/apps.py
|
Python
|
gpl-3.0
| 906 | 0 |
# -*- coding: utf-8 -*-
import sys
#Generate the candidate set C1
#return: a dict whose key is the item and whose value is the number of times the item occurs
def getC1(srcdata):
c1 = {}
for transaction in srcdata:
for item in transaction:
            key = frozenset([item]) #only a frozenset can be used as a dict key
            #count the item
if key in c1:
c1[key] = c1[key] + 1
else:
c1[key] = 1
return c1
#return: the candidate itemsets that satisfy the minimum support
def getL(c, supct):
    # remove items whose count is below the minimum support
for key in [item for item in c if c[item] < supct]:
del c[key]
#if c != {}:
#print c
return c
#Generate the candidate set C from the previous L
#by scanning the source data and counting each candidate itemset
def getnextcandi(preL, srcdata):
c = {}
for key1 in preL:
for key2 in preL:
if key1 != key2:
                # take the union of key1 and key2 (Cartesian product of preL with itself)
key = key1.union(key2)
c[key] = 0
    #count each candidate itemset in the source data
for i in srcdata:
for item in c:
if item.issubset(i):
c[item] = c[item] + 1
return c
# The Apriori algorithm
def Apriori(filename, supct):
    #Read the data file
    #File format: one transaction per line, with the items of a transaction separated by single spaces
    srcdata = [line.strip("\n").split(" ") for line in open(filename)]
c = getC1(srcdata)
L = getL(c, supct)
c = getnextcandi(L, srcdata)
return c
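# Illustrative sketch with assumed data (not from the original repository): given an input file
# whose space-separated transaction lines are
#
#     bread milk
#     bread diaper beer
#     milk diaper beer
#
# getC1 counts the single items ({frozenset(['bread']): 2, frozenset(['milk']): 2, ...}),
# getL(c, 2) keeps only the itemsets occurring at least twice, and getnextcandi then counts the
# unioned candidates, e.g. frozenset(['diaper', 'beer']) occurs 2 times.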
if __name__ == "__main__":
if len(sys.argv) == 3:
        #Usage: apri.py filename support
items = Apriori(sys.argv[1], int(sys.argv[2]))
for key in [item for item in items if items[item] < int(sys.argv[2])]:
del items[key]
ap = {}
for itor in items:
#print items[itor]
#print itor
strword = ''
for word in itor:
strword += word + " "
ap[strword.strip(' ')] = items[itor]
linelst = sorted(ap.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)
for i in range(len(linelst)):
print "#" + str(linelst[i][1]) + " " + linelst[i][0]
#for (k, v) in ap.items():
#print "#" + str(v) + " " + k
else:
        #wrong number of arguments; expected: apri.py filename support
print "err args"
|
RayleighChen/Improve
|
Project/python/PyLDA-Apriori/m3.py
|
Python
|
mit
| 2,169 | 0.033195 |
#!/usr/bin/env python
# encoding: utf-8
import urllib
from config import USERNAME, EXTENSION, PASSWORD, APP_KEY, APP_SECRET, SERVER, MOBILE
from ringcentral import SDK
def main():
sdk = SDK(APP_KEY, APP_SECRET, SERVER)
platform = sdk.platform()
platform.login(USERNAME, EXTENSION, PASSWORD)
    to_number = "1234567890"
    params = {'from': {'phoneNumber': USERNAME}, 'to': [{'phoneNumber': to_number}], 'text': "SMS message"}
response = platform.post('/restapi/v1.0/account/~/extension/~/sms', params)
print 'Sent SMS: ' + response.json().uri
if __name__ == '__main__':
main()
|
ringcentral/python-sdk
|
demo_sms.py
|
Python
|
mit
| 609 | 0.00821 |
# encoding: utf-8
'''
@author: Jose Emilio Romero Lopez
@copyright: Copyright 2013-2014, Jose Emilio Romero Lopez.
@license: GPL
@contact: jemromerol@gmail.com
This file is part of APASVO.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PySide import QtGui
from apasvo.gui.views.generated import ui_loaddialog
from apasvo.utils.formats import rawfile
FORMATS = {'Autodetect': None,
'Binary': rawfile.format_binary,
'Text': rawfile.format_text,
}
DEFAULT_FORMAT = 'Autodetect'
DTYPES = (rawfile.datatype_int16,
rawfile.datatype_int32,
rawfile.datatype_int64,
rawfile.datatype_float16,
rawfile.datatype_float32,
rawfile.datatype_float64, )
DTYPES_LABELS = ('16 bits, PCM',
'32 bits, PCM',
'64 bits, PCM',
'16 bits, float',
'32 bits, float',
'64 bits, float', )
BYTEORDERS = (rawfile.byteorder_little_endian,
rawfile.byteorder_big_endian)
class LoadDialog(QtGui.QDialog, ui_loaddialog.Ui_LoadDialog):
"""A dialog window to load seismic data stored in a binary or text file.
Allows the user to choose several settings in order to load a seismic
signal, i.e.:
Format: Binary or text format.
        Data-type: 16, 32 or 64 bits, integer (PCM) or float.
Endianness: Little-endian or big-endian.
Sample rate.
The class also infers the right parameters for the chosen file and shows
a preview of the loaded data for the selected parameters.
Attributes:
filename: Name of the opened file.
"""
def __init__(self, parent, filename):
super(LoadDialog, self).__init__(parent)
self.setupUi(self)
self.FileFormatComboBox.currentIndexChanged.connect(self.on_format_change)
self.FileFormatComboBox.currentIndexChanged.connect(self.load_preview)
self.DataTypeComboBox.currentIndexChanged.connect(self.load_preview)
self.ByteOrderComboBox.currentIndexChanged.connect(self.load_preview)
# init file format combobox
self.FileFormatComboBox.addItems(FORMATS.keys())
self.FileFormatComboBox.setCurrentIndex(FORMATS.keys().index(DEFAULT_FORMAT))
# init datatype combobox
self.DataTypeComboBox.addItems(DTYPES_LABELS)
self.DataTypeComboBox.setCurrentIndex(DTYPES.index(rawfile.datatype_float64))
self.filename = filename
self.load_preview()
def on_format_change(self, idx):
"""Updates UI after toggling the format value."""
fmt = FORMATS[self.FileFormatComboBox.currentText()]
if fmt == rawfile.format_binary:
self.DataTypeComboBox.setVisible(True)
self.DataTypeLabel.setVisible(True)
self.ByteOrderComboBox.setVisible(True)
self.ByteOrderLabel.setVisible(True)
self.groupBox_2.setVisible(True)
self.SampleFrequencySpinBox.setVisible(True)
self.SampleFrequencyLabel.setVisible(True)
elif fmt == rawfile.format_text:
self.DataTypeComboBox.setVisible(False)
self.DataTypeLabel.setVisible(False)
self.ByteOrderComboBox.setVisible(False)
self.ByteOrderLabel.setVisible(False)
self.groupBox_2.setVisible(True)
self.SampleFrequencySpinBox.setVisible(True)
self.SampleFrequencyLabel.setVisible(True)
else:
self.DataTypeComboBox.setVisible(False)
self.DataTypeLabel.setVisible(False)
self.ByteOrderComboBox.setVisible(False)
self.ByteOrderLabel.setVisible(False)
self.groupBox_2.setVisible(False)
self.SampleFrequencySpinBox.setVisible(False)
self.SampleFrequencyLabel.setVisible(False)
self.groupBox.adjustSize()
self.adjustSize()
def load_preview(self):
"""Shows a preview of loaded data using the selected parameters."""
# Load parameters
values = self.get_values()
try:
# Set up a file handler according to the type of raw data (binary or text)
fhandler = rawfile.get_file_handler(self.filename, **values)
# Print data preview
array = fhandler.read_in_blocks().next()
data = ''
for x in array:
data += ("%g\n" % x)
except:
data = '*** There was a problem reading the file content ***'
self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
else:
self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(True)
self.PreviewTextEdit.clear()
self.PreviewTextEdit.setText(data)
def get_values(self):
"""Gets selected parameters."""
return {'fmt': FORMATS[self.FileFormatComboBox.currentText()],
'dtype': DTYPES[self.DataTypeComboBox.currentIndex()],
'byteorder': BYTEORDERS[self.ByteOrderComboBox.currentIndex()],
'fs': float(self.SampleFrequencySpinBox.value())}
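# A minimal usage sketch (the parent window and file name are assumed, not from the original file):
#
#     dialog = LoadDialog(main_window, 'signal.bin')
#     if dialog.exec_() == QtGui.QDialog.Accepted:
#         params = dialog.get_values()
#         # params -> {'fmt': ..., 'dtype': ..., 'byteorder': ..., 'fs': ...}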
|
jemromerol/apasvo
|
apasvo/gui/views/loaddialog.py
|
Python
|
gpl-3.0
| 5,689 | 0.000879 |
from fibonacci import Fibonacci
def ans():
return Fibonacci.index(Fibonacci.after(int('9' * 999)))
if __name__ == '__main__':
print(ans())
|
mackorone/euler
|
src/025.py
|
Python
|
mit
| 155 | 0.006452 |
# -*- coding: utf-8 -*-
import json
import re
import unicodedata
import string
from urllib import urlencode
from requests import get
languages = {'de', 'en', 'es', 'fr', 'hu', 'it', 'nl', 'jp'}
url_template = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&{query}&props=labels%7Cdatatype%7Cclaims%7Caliases&languages=' + '|'.join(languages)
url_wmflabs_template = 'http://wdq.wmflabs.org/api?q='
url_wikidata_search_template='http://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
wmflabs_queries = [
    'CLAIM[31:8142]', # all currencies (instances of Q8142)
]
db = {
'iso4217' : {
},
'names' : {
}
}
def remove_accents(data):
return unicodedata.normalize('NFKD', data).lower()
def normalize_name(name):
return re.sub(' +',' ', remove_accents(name.lower()).replace('-', ' '))
def add_currency_name(name, iso4217):
global db
db_names = db['names']
if not isinstance(iso4217, basestring):
print "problem", name, iso4217
return
name = normalize_name(name)
if name == '':
print "name empty", iso4217
return
iso4217_set = db_names.get(name, None)
    if iso4217_set is None:
        db_names[name] = [iso4217]
    elif iso4217 not in iso4217_set:
        db_names[name].append(iso4217)
def add_currency_label(label, iso4217, language):
global db
db['iso4217'][iso4217] = db['iso4217'].get(iso4217, {})
db['iso4217'][iso4217][language] = label
def get_property_value(data, name):
prop = data.get('claims', {}).get(name, {})
if len(prop) == 0:
return None
value = prop[0].get('mainsnak', {}).get('datavalue', {}).get('value', '')
if value == '':
return None
return value
def parse_currency(data):
iso4217 = get_property_value(data, 'P498')
if iso4217 is not None:
unit = get_property_value(data, 'P558')
if unit is not None:
add_currency_name(unit, iso4217)
labels = data.get('labels', {})
for language in languages:
name = labels.get(language, {}).get('value', None)
if name != None:
add_currency_name(name, iso4217)
add_currency_label(name, iso4217, language)
aliases = data.get('aliases', {})
for language in aliases:
for i in range(0, len(aliases[language])):
alias = aliases[language][i].get('value', None)
add_currency_name(alias, iso4217)
def fetch_data(wikidata_ids):
url = url_template.format(query=urlencode({'ids' : '|'.join(wikidata_ids)}))
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
entities = jsonresponse.get('entities', {})
for pname in entities:
pvalue = entities.get(pname)
parse_currency(pvalue)
def add_q(i):
return "Q" + str(i)
def fetch_data_batch(wikidata_ids):
while len(wikidata_ids) > 0:
if len(wikidata_ids) > 50:
fetch_data(wikidata_ids[0:49])
wikidata_ids = wikidata_ids[50:]
else:
fetch_data(wikidata_ids)
wikidata_ids = []
def wdq_query(query):
url = url_wmflabs_template + query
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
qlist = map(add_q, jsonresponse.get('items', {}))
error = jsonresponse.get('status', {}).get('error', None)
if error != None and error != 'OK':
print "error for query '" + query + "' :" + error
fetch_data_batch(qlist)
def wd_query(query, offset=0):
qlist = []
url = url_wikidata_search_template.format(query=urlencode({'srsearch': query, 'srlimit': 50, 'sroffset': offset}))
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
for r in jsonresponse.get('query', {}).get('search', {}):
qlist.append(r.get('title', ''))
fetch_data_batch(qlist)
## fetch ##
for q in wmflabs_queries:
wdq_query(q)
# static
add_currency_name(u"euro", 'EUR')
add_currency_name(u"euros", 'EUR')
add_currency_name(u"dollar", 'USD')
add_currency_name(u"dollars", 'USD')
add_currency_name(u"peso", 'MXN')
add_currency_name(u"pesos", 'MXN')
# write
f = open("currencies.json", "wb")
json.dump(db, f, indent=4, encoding="utf-8")
f.close()
|
kdani3/searx
|
utils/fetch_currencies.py
|
Python
|
agpl-3.0
| 4,394 | 0.009558 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ListVariable canned Variable type.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
SConstruct_path = test.workpath('SConstruct')
def check(expect):
result = test.stdout().split('\n')
r = result[1:len(expect)+1]
assert r == expect, (r, expect)
test.write(SConstruct_path, """\
from SCons.Variables.ListVariable import ListVariable
LV = ListVariable
from SCons.Variables import ListVariable
list_of_libs = Split('x11 gl qt ical')
optsfile = 'scons.variables'
opts = Variables(optsfile, args=ARGUMENTS)
opts.AddVariables(
ListVariable('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs,
map = {'GL':'gl', 'QT':'qt'}),
LV('listvariable', 'listvariable help', 'all', names=['l1', 'l2', 'l3'])
)
env = Environment(variables=opts)
opts.Save(optsfile, env)
Help(opts.GenerateHelpText(env))
print(env['shared'])
if 'ical' in env['shared']:
print('1')
else:
print('0')
print(" ".join(env['shared']))
print(env.subst('$shared'))
# Test subst_path() because it's used in $CPPDEFINES expansions.
print(env.subst_path('$shared'))
Default(env.Alias('dummy', None))
""")
test.run()
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
expect = "shared = 'all'"+os.linesep+"listvariable = 'all'"+os.linesep
test.must_match(test.workpath('scons.variables'), expect)
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
test.run(arguments='shared=none')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=x11,ical')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=x11,,ical,,')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=GL')
check(['gl', '0', 'gl', 'gl'])
test.run(arguments='shared=QT,GL')
check(['gl,qt', '0', 'gl qt', 'gl qt', "['gl qt']"])
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo', stderr=expect_stderr, status=2)
# be paranoid in testing some more combinations
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,ical', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo,x11', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo,bar
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,x11,,,bar', stderr=expect_stderr, status=2)
test.write('SConstruct', """
from SCons.Variables import ListVariable
opts = Variables(args=ARGUMENTS)
opts.AddVariables(
ListVariable('gpib',
'comment',
['ENET', 'GPIB'],
names = ['ENET', 'GPIB', 'LINUX_GPIB', 'NO_GPIB']),
)
env = Environment(variables=opts)
Help(opts.GenerateHelpText(env))
print(env['gpib'])
Default(env.Alias('dummy', None))
""")
test.run(stdout=test.wrap_stdout(read_str="ENET,GPIB\n", build_str="""\
scons: Nothing to be done for `dummy'.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
timj/scons
|
test/Variables/ListVariable.py
|
Python
|
mit
| 5,013 | 0.000798 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from cabot.cabotapp.views import (
run_status_check, graphite_api_data, checks_run_recently,
duplicate_icmp_check, duplicate_graphite_check, duplicate_http_check, duplicate_jenkins_check,
duplicate_instance, acknowledge_alert, remove_acknowledgement,
GraphiteCheckCreateView, GraphiteCheckUpdateView,
HttpCheckCreateView, HttpCheckUpdateView,
ICMPCheckCreateView, ICMPCheckUpdateView,
JenkinsCheckCreateView, JenkinsCheckUpdateView,
StatusCheckDeleteView, StatusCheckListView, StatusCheckDetailView,
StatusCheckResultDetailView, StatusCheckReportView, UserProfileUpdateAlert)
from cabot.cabotapp.views import (InstanceListView, InstanceDetailView,
InstanceUpdateView, InstanceCreateView, InstanceDeleteView,
ServiceListView, ServiceDetailView,
ServiceUpdateView, ServiceCreateView, ServiceDeleteView,
UserProfileUpdateView, ShiftListView, subscriptions)
from cabot import rest_urls
from django.contrib import admin
from django.views.generic.base import RedirectView
from django.contrib.auth.views import login, logout, password_reset, password_reset_done, password_reset_confirm
admin.autodiscover()
from importlib import import_module
import logging
logger = logging.getLogger(__name__)
urlpatterns = patterns('',
url(r'^$', view=RedirectView.as_view(url='services/', permanent=False),
name='dashboard'),
url(r'^subscriptions/', view=subscriptions,
name='subscriptions'),
url(r'^accounts/login/', view=login, name='login'),
url(r'^accounts/logout/', view=logout, name='logout'),
url(r'^accounts/password-reset/',
view=password_reset, name='password-reset'),
url(r'^accounts/password-reset-done/',
view=password_reset_done, name='password-reset-done'),
url(r'^accounts/password-reset-confirm/',
view=password_reset_confirm, name='password-reset-confirm'),
url(r'^status/', view=checks_run_recently,
name='system-status'),
url(r'^services/', view=ServiceListView.as_view(),
name='services'),
url(r'^service/create/', view=ServiceCreateView.as_view(),
name='create-service'),
url(r'^service/update/(?P<pk>\d+)/',
view=ServiceUpdateView.as_view(
), name='update-service'),
url(r'^service/delete/(?P<pk>\d+)/',
view=ServiceDeleteView.as_view(
), name='delete-service'),
url(r'^service/(?P<pk>\d+)/',
view=ServiceDetailView.as_view(), name='service'),
url(r'^service/acknowledge_alert/(?P<pk>\d+)/',
view=acknowledge_alert, name='acknowledge-alert'),
url(r'^service/remove_acknowledgement/(?P<pk>\d+)/',
view=remove_acknowledgement, name='remove-acknowledgement'),
url(r'^instances/', view=InstanceListView.as_view(),
name='instances'),
url(r'^instance/create/', view=InstanceCreateView.as_view(),
name='create-instance'),
url(r'^instance/update/(?P<pk>\d+)/',
view=InstanceUpdateView.as_view(
), name='update-instance'),
url(r'^instance/duplicate/(?P<pk>\d+)/',
view=duplicate_instance, name='duplicate-instance'),
url(r'^instance/delete/(?P<pk>\d+)/',
view=InstanceDeleteView.as_view(
), name='delete-instance'),
url(r'^instance/(?P<pk>\d+)/',
view=InstanceDetailView.as_view(), name='instance'),
url(r'^checks/$', view=StatusCheckListView.as_view(),
name='checks'),
url(r'^check/run/(?P<pk>\d+)/',
view=run_status_check, name='run-check'),
url(r'^check/delete/(?P<pk>\d+)/',
view=StatusCheckDeleteView.as_view(
), name='delete-check'),
url(r'^check/(?P<pk>\d+)/',
view=StatusCheckDetailView.as_view(), name='check'),
url(r'^checks/report/$',
view=StatusCheckReportView.as_view(), name='checks-report'),
url(r'^icmpcheck/create/', view=ICMPCheckCreateView.as_view(),
name='create-icmp-check'),
url(r'^icmpcheck/update/(?P<pk>\d+)/',
view=ICMPCheckUpdateView.as_view(
), name='update-icmp-check'),
url(r'^icmpcheck/duplicate/(?P<pk>\d+)/',
view=duplicate_icmp_check, name='duplicate-icmp-check'),
url(r'^graphitecheck/create/',
view=GraphiteCheckCreateView.as_view(
), name='create-graphite-check'),
url(r'^graphitecheck/update/(?P<pk>\d+)/',
view=GraphiteCheckUpdateView.as_view(
), name='update-graphite-check'),
url(r'^graphitecheck/duplicate/(?P<pk>\d+)/',
view=duplicate_graphite_check, name='duplicate-graphite-check'),
url(r'^httpcheck/create/', view=HttpCheckCreateView.as_view(),
name='create-http-check'),
url(r'^httpcheck/update/(?P<pk>\d+)/',
view=HttpCheckUpdateView.as_view(
), name='update-http-check'),
url(r'^httpcheck/duplicate/(?P<pk>\d+)/',
view=duplicate_http_check, name='duplicate-http-check'),
url(r'^jenkins_check/create/', view=JenkinsCheckCreateView.as_view(),
name='create-jenkins-check'),
url(r'^jenkins_check/update/(?P<pk>\d+)/',
view=JenkinsCheckUpdateView.as_view(
), name='update-jenkins-check'),
url(r'^jenkins_check/duplicate/(?P<pk>\d+)/',
view=duplicate_jenkins_check, name='duplicate-jenkins-check'),
url(r'^result/(?P<pk>\d+)/',
view=StatusCheckResultDetailView.as_view(
), name='result'),
url(r'^shifts/', view=ShiftListView.as_view(),
name='shifts'),
url(r'^graphite/', view=graphite_api_data,
name='graphite-data'),
url(r'^user/(?P<pk>\d+)/profile/$',
view=UserProfileUpdateView.as_view(), name='user-profile'),
url(r'^user/(?P<pk>\d+)/profile/(?P<alerttype>.+)',
view=UserProfileUpdateAlert.as_view(
), name='update-alert-user-data'),
url(r'^admin/', include(admin.site.urls)),
# Comment below line to disable browsable rest api
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/', include(rest_urls.router.urls)),
)
def append_plugin_urls():
"""
Appends plugin specific URLs to the urlpatterns variable.
"""
global urlpatterns
for plugin in settings.CABOT_PLUGINS_ENABLED_PARSED:
try:
_module = import_module('%s.urls' % plugin)
except Exception as e:
pass
else:
urlpatterns += patterns('',
url(r'^plugins/%s/' % plugin, include('%s.urls' % plugin))
)
append_plugin_urls()
|
mcansky/cabotapp
|
cabot/urls.py
|
Python
|
mit
| 6,860 | 0.008017 |
"""Testing for ORM"""
from unittest import TestCase
import nose
from nose.tools import eq_
from sets import Set
from mdcorpus.orm import *
class ORMTestCase(TestCase):
def setUp(self):
self.store = Store(create_database("sqlite:"))
self.store.execute(MovieTitlesMetadata.CREATE_SQL)
self.store.execute(MovieCharactersMetadata.CREATE_SQL)
self.store.execute(RawScriptUrl.CREATE_SQL)
self.store.execute(MovieConversation.CREATE_SQL)
self.store.execute(MovieLine.CREATE_SQL)
movie = self.store.add(MovieTitlesMetadata(0,
u"10 things i hate about you",
1999,
6.90,
62847))
bianca = self.store.add(MovieCharactersMetadata(0,
"BIANCA",
"f",
4))
bruce = self.store.add(MovieCharactersMetadata(1,
"BRUCE",
"?",
"?"))
cameron = self.store.add(MovieCharactersMetadata(2,
"CAMERON",
"m",
"3"))
url = self.store.add(RawScriptUrl("http://www.dailyscript.com/scripts/10Things.html"))
conversation = self.store.add(MovieConversation(0, 2, 0))
line194 = self.store.add(MovieLine(
194, "Can we make this quick? Roxanne Korrine and Andrew Barrett are having an incredibly horrendous public break- up on the quad. Again."))
line195 = self.store.add(MovieLine(
195, "Well, I thought we'd start with pronunciation, if that's okay with you."))
line196 = self.store.add(MovieLine(
196, "Not the hacking and gagging and spitting part. Please."))
line197 = self.store.add(MovieLine(
197, "Okay... then how 'bout we try out some French cuisine. Saturday? Night?"))
self.store.flush()
movie.characters.add(bianca)
movie.characters.add(bruce)
movie.characters.add(cameron)
url.movie = movie
line_id_list = [194, 195, 196, 197]
for (i, line_id) in enumerate(line_id_list):
line = self.store.find(MovieLine, MovieLine.id == line_id).one()
line.number = i + 1
conversation.lines.add(line)
self.store.commit()
def tearDown(self):
print "done"
class MovieTitlesMetadataTestCase(ORMTestCase):
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_url(self):
movie = self.store.find(MovieTitlesMetadata, MovieTitlesMetadata.id == 0).one()
eq_(movie.url(), "http://www.dailyscript.com/scripts/10Things.html")
class MovieCharactersMetadataTestCase(ORMTestCase):
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_gender(self):
bianca = self.store.find(MovieCharactersMetadata, MovieCharactersMetadata.id == 0).one()
bruce = self.store.find(MovieCharactersMetadata, MovieCharactersMetadata.id == 1).one()
cameron = self.store.find(MovieCharactersMetadata, MovieCharactersMetadata.id == 2).one()
eq_(bianca.gender(), "f")
eq_(bruce.gender(), "?")
eq_(cameron.gender(), "m")
class MovieConversationTestCase(ORMTestCase):
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_consistency(self):
conversation = self.store.find(MovieConversation, MovieConversation.id == 1).one()
eq_(conversation.first_character.movie.title, conversation.movie.title)
eq_(conversation.second_character.movie.title, conversation.movie.title)
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_line_list(self):
conversation = self.store.find(MovieConversation, MovieConversation.id == 1).one()
line_ids = [line.id for line in conversation.line_list()]
eq_(line_ids, [194, 195, 196, 197])
|
sosuke-k/cornel-movie-dialogs-corpus-storm
|
mdcorpus/tests/test_orm.py
|
Python
|
mit
| 4,351 | 0.002758 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Core rules for Pants to operate correctly.
These are always activated and cannot be disabled.
"""
from pants.core.goals import check, fmt, lint, package, publish, repl, run, tailor, test
from pants.core.target_types import (
ArchiveTarget,
FilesGeneratorTarget,
FileTarget,
GenericTarget,
RelocatedFiles,
ResourcesGeneratorTarget,
ResourceTarget,
)
from pants.core.target_types import rules as target_type_rules
from pants.core.util_rules import (
archive,
config_files,
distdir,
external_tool,
filter_empty_sources,
pants_bin,
source_files,
stripped_source_files,
subprocess_environment,
)
from pants.goal import anonymous_telemetry, stats_aggregator
from pants.source import source_root
def rules():
return [
# goals
*check.rules(),
*fmt.rules(),
*lint.rules(),
*package.rules(),
*publish.rules(),
*repl.rules(),
*run.rules(),
*tailor.rules(),
*test.rules(),
# util_rules
*anonymous_telemetry.rules(),
*archive.rules(),
*config_files.rules(),
*distdir.rules(),
*external_tool.rules(),
*filter_empty_sources.rules(),
*pants_bin.rules(),
*source_files.rules(),
*source_root.rules(),
*stats_aggregator.rules(),
*stripped_source_files.rules(),
*subprocess_environment.rules(),
*target_type_rules(),
]
def target_types():
return [
ArchiveTarget,
FileTarget,
FilesGeneratorTarget,
GenericTarget,
ResourceTarget,
ResourcesGeneratorTarget,
RelocatedFiles,
]
|
patricklaw/pants
|
src/python/pants/core/register.py
|
Python
|
apache-2.0
| 1,821 | 0.000549 |
from django.db import models
class MaternalArvPostModManager(models.Manager):
def get_by_natural_key(
self, arv_code, report_datetime, visit_instance, appt_status,
visit_definition_code, subject_identifier_as_pk):
MaternalVisit = models.get_model('mb_maternal', 'MaternalVisit')
MaternalArvPost = models.get_model('mb_maternal', 'MaternalArvPost')
maternal_visit = MaternalVisit.objects.get_by_natural_key(
report_datetime, visit_instance, appt_status, visit_definition_code, subject_identifier_as_pk)
maternal_arv_post = MaternalArvPost.objects.get(maternal_visit=maternal_visit)
return self.get(arv_code=arv_code, maternal_arv_post=maternal_arv_post)
|
botswana-harvard/microbiome
|
microbiome/apps/mb_maternal/managers/maternal_arv_post_mod_manager.py
|
Python
|
gpl-2.0
| 736 | 0.002717 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import sys
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hcat():
import params
XmlConfig("hive-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['hive-site'],
owner=params.hive_user,
configuration_attributes=params.config['configuration_attributes']['hive-site']
)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hcat():
import params
Directory(params.hive_conf_dir,
create_parents = True,
owner=params.hcat_user,
group=params.user_group,
)
Directory(params.hcat_conf_dir,
create_parents = True,
owner=params.hcat_user,
group=params.user_group,
)
Directory(params.hcat_pid_dir,
owner=params.webhcat_user,
create_parents = True
)
XmlConfig("hive-site.xml",
conf_dir=params.hive_client_conf_dir,
configurations=params.hive_site_config,
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
File(format("{hcat_conf_dir}/hcat-env.sh"),
owner=params.hcat_user,
group=params.user_group,
content=InlineTemplate(params.hcat_env_sh_template)
)
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hcat.py
|
Python
|
apache-2.0
| 2,284 | 0.011384 |
from django.test import TestCase
from app_forum.models import Forum, Comment
from app_forum.forms import CommentForm, ThreadForm
# test for forms
class CommentFormTest(TestCase):
def test_comment_forms(self):
form_data = {
'comment_content' : 'comment'
}
form = CommentForm(data=form_data)
self.assertTrue(form.is_valid())
class ThreadFormTest(TestCase):
def test_thread_forms(self):
thread_data = {
'forum_title' : 'title',
'forum_category' : 'category',
'forum_content' : 'content'
}
thread = ThreadForm(data=thread_data)
self.assertFalse(thread.is_valid())
|
django-id/website
|
app_forum/tests/test_forms.py
|
Python
|
mit
| 685 | 0.008759 |
#!/usr/bin/env python
# coding: UTF-8
from __future__ import division
import numpy as np
def left_multiplication(g, x):
"""
Multiplication action of a group and a vector.
"""
return np.dot(g, x)
def trans_adjoint(g, x):
return np.dot(np.dot(g,x),g.T)
class RungeKutta(object):
def __init__(self, method):
self.method = method
self.movement = self.method.movement
self.nb_stages = len(self.method.edges) + 1
def compute_vectors(self, movement_field, stages):
"""
Compute the Lie algebra elements for the stages.
"""
return np.array([movement_field(stage) for stage in stages])
def get_iterate(self, movement_field, action):
def evol(stages):
new_stages = stages.copy()
for (i,j, transition) in self.method.edges:
# inefficient as a) only some vectors are needed b) recomputed for each edge
vects = self.compute_vectors(movement_field, new_stages)
# the order of the edges matters; the goal is that explicit method need only one iteration
new_stages[i] = action(self.movement(transition(vects)), new_stages[j])
return new_stages
return evol
@classmethod
def fix(self, iterate, z):
"""
Find a fixed point to the iterating function `iterate`.
"""
for i in range(30):
new_z = iterate(z)
if np.allclose(z, new_z, atol=1e-10, rtol=1e-16):
break
z = new_z
else:
raise Exception("No convergence after {} steps".format(i))
return z, i
def step(self, movement_field, x0, action=None):
if action is None:
action = left_multiplication
iterate = self.get_iterate(movement_field, action)
z0 = np.array([x0]*self.nb_stages) # initial guess
z, i = self.fix(iterate, z0)
return z[-1]
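# A minimal usage sketch (the `method` object is assumed to provide the `movement` callable and
# `edges` list that RungeKutta expects; names are illustrative):
#
#     rk = RungeKutta(method)
#     x1 = rk.step(movement_field, x0)                        # group acts by left multiplication
#     P1 = rk.step(movement_field, P0, action=trans_adjoint)  # or by the transposed adjoint action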
|
olivierverdier/homogint
|
homogint/homogint.py
|
Python
|
mit
| 1,957 | 0.00511 |
from ete3 import Tree, TreeStyle, TextFace
t = Tree('tagfrog.phy')
for node in t.traverse():
node.img_style['size'] = 3
if node.is_leaf():
        # Attach the leaf name as a text face so it is drawn next to the node
        node.add_face(TextFace(node.name), column=0)
ts = TreeStyle()
ts.show_scale = True
t.render('tagfrog.pdf', tree_style=ts)
|
taejoonlab/taejoonlab-toolbox
|
PopGen/phy2bmp.py
|
Python
|
gpl-3.0
| 248 | 0.008065 |