# ---------------------------------------------------------------------------
# vinc456/coala :: coalib/processes/Processing.py (license: agpl-3.0)
# ---------------------------------------------------------------------------
import multiprocessing
import os
import platform
import queue
import subprocess
from itertools import chain
from coalib.collecting import Dependencies
from coalib.collecting.Collectors import collect_files
from coalib.misc.StringConverter import StringConverter
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
from coalib.processes.BearRunning import run
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.processes.LogPrinterThread import LogPrinterThread
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.results.result_actions.PrintDebugMessageAction import (
PrintDebugMessageAction)
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.SourceRange import SourceRange
from coalib.settings.Setting import glob_list
from coalib.parsing.Globbing import fnmatch
ACTIONS = [ApplyPatchAction,
PrintDebugMessageAction,
ShowPatchAction]
def get_cpu_count():
try:
return multiprocessing.cpu_count()
# cpu_count is not implemented for some CPU architectures/OSes
except NotImplementedError: # pragma: no cover
return 2
def fill_queue(queue_fill, any_list):
"""
Takes element from a list and populates a queue with those elements.
:param queue_fill: The queue to be filled.
:param any_list: List containing the elements.
"""
for elem in any_list:
queue_fill.put(elem)
def get_running_processes(processes):
return sum((1 if process.is_alive() else 0) for process in processes)
def create_process_group(command_array, **kwargs):
if platform.system() == "Windows": # pragma: no cover
proc = subprocess.Popen(
command_array,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
**kwargs)
else:
proc = subprocess.Popen(command_array,
preexec_fn=os.setsid,
**kwargs)
return proc
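# Illustrative sketch, not part of the original module: a process group
# created by create_process_group() is terminated with the matching
# platform-specific call -- os.killpg() on POSIX, CTRL_BREAK_EVENT on
# Windows. The helper name below is hypothetical.
def _kill_process_group_sketch(proc):
    import signal
    if platform.system() == "Windows":  # pragma: no cover
        # Reaches every process in the group created with
        # CREATE_NEW_PROCESS_GROUP.
        proc.send_signal(signal.CTRL_BREAK_EVENT)
    else:
        # os.setsid made the child a process group leader, so the whole
        # group can be signalled via its group id.
        os.killpg(os.getpgid(proc.pid), signal.SIGTERM)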
def get_default_actions(section):
"""
Parses the key ``default_actions`` in the given section.
:param section: The section where to parse from.
    :return:             A tuple of two dicts: the first maps bear names
                         to their default action classes, the second maps
                         bear names to invalid action names.
"""
try:
default_actions = dict(section["default_actions"])
except IndexError:
return {}, {}
action_dict = {action.get_metadata().name: action for action in ACTIONS}
invalid_action_set = default_actions.values() - action_dict.keys()
invalid_actions = {}
if len(invalid_action_set) != 0:
invalid_actions = {
bear: action
for bear, action in default_actions.items()
if action in invalid_action_set}
for invalid in invalid_actions.keys():
del default_actions[invalid]
actions = {bearname: action_dict[action_name]
for bearname, action_name in default_actions.items()}
return actions, invalid_actions
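# Illustrative sketch, not part of the original module: with a section
# configured roughly like
#
#   [python]
#   default_actions = PEP8Bear: ApplyPatchAction, Foo*: ShowPatchAction
#
# get_default_actions(section) would return something like
#
#   ({'PEP8Bear': ApplyPatchAction, 'Foo*': ShowPatchAction}, {})
#
# whereas an unknown action name ends up in the second dict and is merely
# warned about by autoapply_actions() below. Bear names may be globs,
# matched against result origins via fnmatch().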
def autoapply_actions(results,
file_dict,
file_diff_dict,
section,
log_printer):
"""
    Auto-applies actions as defined in the given section.
:param results: A list of results.
    :param file_dict:      A dictionary mapping file names to their
                           contents.
:param file_diff_dict: A dictionary that contains filenames as keys and
diff objects as values.
:param section: The section.
:param log_printer: A log printer instance to log messages on.
:return: A list of unprocessed results.
"""
default_actions, invalid_actions = get_default_actions(section)
for bearname, actionname in invalid_actions.items():
log_printer.warn("Selected default action {!r} for bear {!r} does "
"not exist. Ignoring action.".format(actionname,
bearname))
if len(default_actions) == 0:
# There's nothing to auto-apply.
return results
not_processed_results = []
for result in results:
try:
# Match full bear names deterministically, prioritized!
action = default_actions[result.origin]
except KeyError:
for bear_glob in default_actions:
if fnmatch(result.origin, bear_glob):
action = default_actions[bear_glob]
break
else:
not_processed_results.append(result)
continue
if not action.is_applicable(result, file_dict, file_diff_dict):
log_printer.warn("Selected default action {!r} for bear {!r} is "
"not applicable. Action not applied.".format(
action.get_metadata().name, result.origin))
not_processed_results.append(result)
continue
try:
action().apply_from_section(result,
file_dict,
file_diff_dict,
section)
log_printer.info("Applied {!r} on {} from {!r}.".format(
action.get_metadata().name,
result.location_repr(),
result.origin))
except Exception as ex:
not_processed_results.append(result)
log_printer.log_exception(
"Failed to execute action {!r} with error: {}.".format(
action.get_metadata().name, ex),
ex)
log_printer.debug("-> for result " + repr(result) + ".")
return not_processed_results
def check_result_ignore(result, ignore_ranges):
"""
Determines if the result has to be ignored.
:param result: The result that needs to be checked.
:param ignore_ranges: A list of tuples, each containing a list of lower
cased affected bearnames and a SourceRange to
ignore. If any of the bearname lists is empty, it
is considered an ignore range for all bears.
This may be a list of globbed bear wildcards.
:return: True if the result has to be ignored.
"""
for bears, range in ignore_ranges:
orig = result.origin.lower()
if (result.overlaps(range) and
(len(bears) == 0 or orig in bears or fnmatch(orig, bears))):
return True
return False
def print_result(results,
file_dict,
retval,
print_results,
section,
log_printer,
file_diff_dict,
ignore_ranges):
"""
Takes the results produced by each bear and gives them to the print_results
method to present to the user.
:param results: A list of results.
:param file_dict: A dictionary containing the name of files and its
contents.
    :param retval:         True if any results were yielded before this
                           call. If True, the first element of the return
                           value will be True regardless; otherwise it
                           depends on whether this invocation yields
                           results.
:param print_results: A function that prints all given results appropriate
to the output medium.
:param file_diff_dict: A dictionary that contains filenames as keys and
diff objects as values.
    :param ignore_ranges:  A list of (bears, SourceRange) tuples. Results
                           that affect code in any of those ranges (for a
                           matching bear) will be ignored.
    :return:               A tuple of a bool that is True if this or any
                           earlier invocation yielded results, and the
                           list of processed results.
"""
min_severity_str = str(section.get('min_severity', 'INFO')).upper()
    # Fall back to the INFO severity value, not the string 'INFO', so the
    # integer comparison below cannot fail.
    min_severity = RESULT_SEVERITY.str_dict.get(min_severity_str,
                                                RESULT_SEVERITY.INFO)
results = list(filter(lambda result:
type(result) is Result and
result.severity >= min_severity and
not check_result_ignore(result, ignore_ranges),
results))
if bool(section.get('autoapply', 'true')):
patched_results = autoapply_actions(results,
file_dict,
file_diff_dict,
section,
log_printer)
else:
patched_results = results
print_results(log_printer,
section,
patched_results,
file_dict,
file_diff_dict)
return retval or len(results) > 0, patched_results
def get_file_dict(filename_list, log_printer):
"""
Reads all files into a dictionary.
    :param filename_list: List of paths to the files whose contents shall
                          be read.
:param log_printer: The logger which logs errors.
    :return:              A dictionary mapping each file name to a tuple
                          of its lines.
"""
file_dict = {}
for filename in filename_list:
try:
with open(filename, "r", encoding="utf-8") as _file:
file_dict[filename] = tuple(_file.readlines())
except UnicodeDecodeError:
log_printer.warn("Failed to read file '{}'. It seems to contain "
"non-unicode characters. Leaving it "
"out.".format(filename))
except OSError as exception: # pragma: no cover
log_printer.log_exception("Failed to read file '{}' because of "
"an unknown error. Leaving it "
"out.".format(filename),
exception,
log_level=LOG_LEVEL.WARNING)
log_printer.debug("Files that will be checked:\n" +
"\n".join(file_dict.keys()))
return file_dict
def filter_raising_callables(it, exception, *args, **kwargs):
"""
    Calls each callable item inside the given iterator and yields its
    result, filtering out the items that raise the given exceptions.
:param it: The iterator to filter.
:param exception: The (tuple of) exception(s) to filter for.
:param args: Positional arguments to pass to the callable.
:param kwargs: Keyword arguments to pass to the callable.
"""
for elem in it:
try:
yield elem(*args, **kwargs)
except exception:
pass
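# Illustrative sketch, not part of the original module: a minimal,
# self-contained demonstration of filter_raising_callables() with plain
# callables instead of bear classes.
def _filter_raising_callables_sketch():
    def good():
        return "instantiated"

    def bad():
        raise RuntimeError("bear failed to instantiate")

    # Only the return value of ``good`` survives; ``bad`` is skipped.
    return list(filter_raising_callables([good, bad], RuntimeError))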
def instantiate_bears(section,
local_bear_list,
global_bear_list,
file_dict,
message_queue):
"""
Instantiates each bear with the arguments it needs.
:param section: The section the bears belong to.
:param local_bear_list: List of local bear classes to instantiate.
:param global_bear_list: List of global bear classes to instantiate.
:param file_dict: Dictionary containing filenames and their
contents.
:param message_queue: Queue responsible to maintain the messages
delivered by the bears.
:return: The local and global bear instance lists.
"""
local_bear_list = [bear
for bear in filter_raising_callables(
local_bear_list,
RuntimeError,
section,
message_queue,
timeout=0.1)]
global_bear_list = [bear
for bear in filter_raising_callables(
global_bear_list,
RuntimeError,
file_dict,
section,
message_queue,
timeout=0.1)]
return local_bear_list, global_bear_list
def instantiate_processes(section,
local_bear_list,
global_bear_list,
job_count,
cache,
log_printer):
"""
    Instantiates the processes responsible for running bears in a
    multiprocessing environment.
:param section: The section the bears belong to.
:param local_bear_list: List of local bears belonging to the section.
:param global_bear_list: List of global bears belonging to the section.
:param job_count: Max number of processes to create.
:param cache: An instance of ``misc.Caching.FileCache`` to use as
a file cache buffer.
:param log_printer: The log printer to warn to.
:return: A tuple containing a list of processes,
and the arguments passed to each process which are
the same for each object.
"""
filename_list = collect_files(
glob_list(section.get('files', "")),
log_printer,
ignored_file_paths=glob_list(section.get('ignore', "")),
limit_file_paths=glob_list(section.get('limit_files', "")))
# This stores all matched files irrespective of whether coala is run
# only on changed files or not. Global bears require all the files
complete_filename_list = filename_list
# Start tracking all the files
if cache and section.get('changed_files', False):
cache.track_files(set(complete_filename_list))
changed_files = cache.get_uncached_files(
set(filename_list)) if cache else filename_list
# If caching is enabled then the local bears should process only the
# changed files.
# FIXME: Log this to the debug channel instead.
log_printer.info("coala is run only on changed files, bears' log "
"messages from previous runs may not appear. You may "
"use the `--flush-cache` flag to see them.")
filename_list = changed_files
# Note: the complete file dict is given as the file dict to bears and
# the whole project is accessible to every bear. However, local bears are
# run only for the changed files if caching is enabled.
file_dict = get_file_dict(filename_list, log_printer)
complete_file_dict = get_file_dict(complete_filename_list, log_printer)
manager = multiprocessing.Manager()
global_bear_queue = multiprocessing.Queue()
filename_queue = multiprocessing.Queue()
local_result_dict = manager.dict()
global_result_dict = manager.dict()
message_queue = multiprocessing.Queue()
control_queue = multiprocessing.Queue()
bear_runner_args = {"file_name_queue": filename_queue,
"local_bear_list": local_bear_list,
"global_bear_list": global_bear_list,
"global_bear_queue": global_bear_queue,
"file_dict": file_dict,
"local_result_dict": local_result_dict,
"global_result_dict": global_result_dict,
"message_queue": message_queue,
"control_queue": control_queue,
"timeout": 0.1}
local_bear_list[:], global_bear_list[:] = instantiate_bears(
section,
local_bear_list,
global_bear_list,
complete_file_dict,
message_queue)
fill_queue(filename_queue, file_dict.keys())
fill_queue(global_bear_queue, range(len(global_bear_list)))
return ([multiprocessing.Process(target=run, kwargs=bear_runner_args)
for i in range(job_count)],
bear_runner_args)
def get_ignore_scope(line, keyword):
"""
    Retrieves the bears to be ignored as defined in the given line.
:param line: The line containing the ignore declaration.
:param keyword: The keyword that was found. Everything after the rightmost
occurrence of it will be considered for the scope.
:return: A list of lower cased bearnames or an empty list (-> "all")
"""
toignore = line[line.rfind(keyword) + len(keyword):]
if toignore.startswith("all"):
return []
else:
return list(StringConverter(toignore, list_delimiters=', '))
def yield_ignore_ranges(file_dict):
"""
Yields tuples of affected bears and a SourceRange that shall be ignored for
those.
:param file_dict: The file dictionary.
"""
for filename, file in file_dict.items():
start = None
bears = []
stop_ignoring = False
for line_number, line in enumerate(file, start=1):
            # Before lowering each line, cheaply check (case-sensitively)
            # for 'gnor', the largest substring common to all keywords:
            # I*gnor*e, start i*gnor*ing.
if 'gnor' in line:
line = line.lower()
if "start ignoring " in line:
start = line_number
bears = get_ignore_scope(line, "start ignoring ")
elif "stop ignoring" in line:
stop_ignoring = True
if start:
yield (bears,
SourceRange.from_values(
filename,
start,
1,
line_number,
len(file[line_number-1])))
elif "ignore " in line:
yield (get_ignore_scope(line, "ignore "),
SourceRange.from_values(filename,
line_number,
1,
line_number+1,
len(file[line_number])))
if stop_ignoring is False and start is not None:
yield (bears,
SourceRange.from_values(filename,
start,
1,
len(file),
len(file[-1])))
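# Illustrative sketch, not part of the original module: the pragmas
# recognized above look like this inside a checked source file:
#
#   # start ignoring PEP8Bear, LineLengthBear
#   code = "not checked by the two bears named above"
#   # stop ignoring
#
#   other = "ignored for every bear"  # ignore all
#
# A "start ignoring" without a matching "stop ignoring" extends the
# ignore range to the end of the file (the final yield above).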
def get_file_list(results):
"""
Get the set of files that are affected in the given results.
:param results: A list of results from which the list of files is to be
extracted.
:return: A set of file paths containing the mentioned list of
files.
"""
return {code.file for result in results for code in result.affected_code}
def process_queues(processes,
control_queue,
local_result_dict,
global_result_dict,
file_dict,
print_results,
section,
cache,
log_printer):
"""
    Iterate the control queue and send the results received to the print_result
method so that they can be presented to the user.
:param processes: List of processes which can be used to run
Bears.
:param control_queue: Containing control elements that indicate
whether there is a result available and which
bear it belongs to.
:param local_result_dict: Dictionary containing results respective to
local bears. It is modified by the processes
i.e. results are added to it by multiple
processes.
:param global_result_dict: Dictionary containing results respective to
global bears. It is modified by the processes
i.e. results are added to it by multiple
processes.
:param file_dict: Dictionary containing file contents with
filename as keys.
:param print_results: Prints all given results appropriate to the
output medium.
:param cache: An instance of ``misc.Caching.FileCache`` to use
as a file cache buffer.
    :return:                   True if all bears executed successfully
                               and results were delivered to the user.
                               Else False.
"""
file_diff_dict = {}
retval = False
    # Number of processes working on local/global bears. They are counted down
# when the last queue element of that process is processed which may be
# *after* the process has ended!
local_processes = len(processes)
global_processes = len(processes)
global_result_buffer = []
result_files = set()
ignore_ranges = list(yield_ignore_ranges(file_dict))
# One process is the logger thread
while local_processes > 1:
try:
control_elem, index = control_queue.get(timeout=0.1)
if control_elem == CONTROL_ELEMENT.LOCAL_FINISHED:
local_processes -= 1
elif control_elem == CONTROL_ELEMENT.GLOBAL_FINISHED:
global_processes -= 1
elif control_elem == CONTROL_ELEMENT.LOCAL:
assert local_processes != 0
result_files.update(get_file_list(local_result_dict[index]))
retval, res = print_result(local_result_dict[index],
file_dict,
retval,
print_results,
section,
log_printer,
file_diff_dict,
ignore_ranges)
local_result_dict[index] = res
else:
assert control_elem == CONTROL_ELEMENT.GLOBAL
global_result_buffer.append(index)
except queue.Empty:
if get_running_processes(processes) < 2: # pragma: no cover
# Recover silently, those branches are only
# nondeterministically covered.
break
# Flush global result buffer
for elem in global_result_buffer:
result_files.update(get_file_list(global_result_dict[elem]))
retval, res = print_result(global_result_dict[elem],
file_dict,
retval,
print_results,
section,
log_printer,
file_diff_dict,
ignore_ranges)
global_result_dict[elem] = res
# One process is the logger thread
while global_processes > 1:
try:
control_elem, index = control_queue.get(timeout=0.1)
if control_elem == CONTROL_ELEMENT.GLOBAL:
result_files.update(get_file_list(global_result_dict[index]))
retval, res = print_result(global_result_dict[index],
file_dict,
retval,
print_results,
section,
log_printer,
file_diff_dict,
ignore_ranges)
global_result_dict[index] = res
else:
assert control_elem == CONTROL_ELEMENT.GLOBAL_FINISHED
global_processes -= 1
except queue.Empty:
if get_running_processes(processes) < 2: # pragma: no cover
# Recover silently, those branches are only
# nondeterministically covered.
break
if cache:
cache.untrack_files(result_files)
return retval
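# Illustrative sketch, not part of the original module: the control queue
# protocol consumed above. Each worker process emits, per result batch,
#
#   (CONTROL_ELEMENT.LOCAL, key)     # local results ready under ``key``
#   (CONTROL_ELEMENT.GLOBAL, key)    # global results ready under ``key``
#
# and one finish marker of each kind when it is done:
#
#   (CONTROL_ELEMENT.LOCAL_FINISHED, ...)
#   (CONTROL_ELEMENT.GLOBAL_FINISHED, ...)
#
# which is why the two loops above count processes down to the logger
# thread before draining the buffered global results.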
def simplify_section_result(section_result):
"""
Takes in a section's result from ``execute_section`` and simplifies it
for easy usage in other functions.
:param section_result: The result of a section which was executed.
:return: Tuple containing:
- bool - True if results were yielded
- bool - True if unfixed results were yielded
- list - Results from all bears (local and global)
"""
section_yielded_result = section_result[0]
results_for_section = []
for value in chain(section_result[1].values(),
section_result[2].values()):
if value is None:
continue
for result in value:
results_for_section.append(result)
section_yielded_unfixed_results = len(results_for_section) > 0
return (section_yielded_result,
section_yielded_unfixed_results,
results_for_section)
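# Illustrative sketch, not part of the original module: for a section
# result of the shape returned by execute_section(), e.g.
#
#   (True,                      # results were yielded
#    {...: [<Result a>]},       # local results dict
#    {...: [<Result b>]},       # global results dict
#    {...})                     # the file dictionary
#
# simplify_section_result() flattens this to
#
#   (True, True, [<Result a>, <Result b>]).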
def execute_section(section,
global_bear_list,
local_bear_list,
print_results,
cache,
log_printer):
"""
Executes the section with the given bears.
The execute_section method does the following things:
1. Prepare a Process
- Load files
- Create queues
2. Spawn up one or more Processes
3. Output results from the Processes
4. Join all processes
:param section: The section to execute.
:param global_bear_list: List of global bears belonging to the section.
:param local_bear_list: List of local bears belonging to the section.
:param print_results: Prints all given results appropriate to the
output medium.
:param cache: An instance of ``misc.Caching.FileCache`` to use as
a file cache buffer.
:param log_printer: The log_printer to warn to.
:return: Tuple containing a bool (True if results were
yielded, False otherwise), a Manager.dict
                             containing all local results (file names are
                             keys) and a Manager.dict containing all
                             global bear results (bear names are keys) as
                             well as the file dictionary.
"""
local_bear_list = Dependencies.resolve(local_bear_list)
global_bear_list = Dependencies.resolve(global_bear_list)
try:
running_processes = int(section['jobs'])
except ValueError:
log_printer.warn("Unable to convert setting 'jobs' into a number. "
"Falling back to CPU count.")
running_processes = get_cpu_count()
except IndexError:
running_processes = get_cpu_count()
processes, arg_dict = instantiate_processes(section,
local_bear_list,
global_bear_list,
running_processes,
cache,
log_printer)
logger_thread = LogPrinterThread(arg_dict["message_queue"],
log_printer)
# Start and join the logger thread along with the processes to run bears
processes.append(logger_thread)
for runner in processes:
runner.start()
try:
return (process_queues(processes,
arg_dict["control_queue"],
arg_dict["local_result_dict"],
arg_dict["global_result_dict"],
arg_dict["file_dict"],
print_results,
section,
cache,
log_printer),
arg_dict["local_result_dict"],
arg_dict["global_result_dict"],
arg_dict["file_dict"])
finally:
logger_thread.running = False
for runner in processes:
runner.join()
# ---------------------------------------------------------------------------
# KlubbAlfaRomeoNorge/members :: admin/__init__.py (license: gpl-2.0)
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Portello membership system
# Copyright (C) 2014 Klubb Alfa Romeo Norge
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# -------------------------------------------------------------------------
""" Common admin tasks. These aren't exposed in any way but can be invoked from
the (remote) console. Most of these things *will* break the app big time.
"""
import logging
from model import Member
from model import Status
from model import MemberType
from model import Country
from model import Car
from model import ConfigTuple
from model import ModelRange
from model import CarModel
from model import User
from model import MembershipDues
from google.appengine.api import search
class Admin(object):
"""Admin tasks and functions"""
def __init__(self):
pass
def purge_data(self):
"""Purge data from store (scary shit)"""
print 'Purging data'
for entity in [Member, Status, MemberType, Country, Car, ConfigTuple,
ModelRange, CarModel, User, MembershipDues]:
print 'Purging', entity.entity_type()
entities = entity.all().fetch(1000)
length = len(entities)
while length > 0:
print '...purging', length, 'entities'
for ent in entities:
ent.delete()
entities = entity.all().fetch(1000)
length = len(entities)
def purge_index(self):
"""Purge the index"""
index = search.Index(name='members')
# looping because get_range by default returns up to 100 documents at a time
while True:
# Get a list of documents populating only the doc_id field and extract the ids.
document_ids = [
document.doc_id for document in index.get_range(ids_only=True)]
if not document_ids:
break
# Delete the documents for the given ids from the Index.
index.delete(document_ids)
def create_index(self):
"""Create a new index"""
members = Member.all().fetch(20000)
docs = []
count = 0
for member in members:
try:
docs.append(member.create_document())
if len(docs) > 100:
index = search.Index(name='members')
index.put(docs)
docs = []
count = count + 1
except Exception as ex:
logging.warning('Got exception ex %s', ex)
if len(docs) > 0:
index = search.Index(name='members')
index.put(docs)
docs = []
logging.info('Reindexed %d members', count)
def rebuild_index(self):
"""Rebuild the index by purge and create a new"""
self.purge_index()
self.create_index()
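    # Illustrative sketch, not part of the original module: these tasks are
    # meant to be invoked from the (remote) console, e.g.
    #
    #   >>> from admin import Admin
    #   >>> Admin().rebuild_index()
    #
    # purge_index() and create_index() both work in batches of roughly 100
    # documents to stay within the search API's per-call limits.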
def nonone(self):
""" Mass update fields set to 'None'. Please don't ask. """
for member in Member.all().fetch(10000):
mod = False
if member.email == 'None':
member.email = None
mod = True
if member.phone == 'None':
member.phone = None
mod = True
if member.phone_work == 'None':
member.phone_work = None
mod = True
if member.phone_home == 'None':
member.phone_home = None
mod = True
if member.address == 'None':
member.address = None
mod = True
if mod:
member.put()
def help(self):
"""Show help"""
        print 'purge_data, purge_index, create_index, rebuild_index, nonone, index_verification'
def index_verification(self):
"""Verify the index"""
logging.info('Retrieving member list')
member_list = Member.all().fetch(10000)
logging.info('Found %d members', len(member_list))
index = search.Index(name='members')
for member in member_list:
try:
result = index.search(query=search.Query(
'number:' + member.number, options=search.QueryOptions(limit=10)))
if not result.results:
logging.warning(
'Found no entry for member with number %s. Adding to index', member.number)
member.update_index()
except Exception as ex:
logging.warning('Got exception ex %s', ex)
logging.info('Completed verification')
# ---------------------------------------------------------------------------
# richo/groundstation :: test/handlers/test_listallobjects_handler.py (license: mit)
# ---------------------------------------------------------------------------
from support.handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listallobjects
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.object_list_pb2 import ObjectList
class TestHandlerListAllObjects(StationHandlerTestCase):
def test_handle_listallobjects_returns_stream_for_few_objects(self):
# Make ourselves cached
self.station.station.mark_queried(self.station.origin)
oids = list()
for i in xrange(64):
oids.append(self.station.station.write("test_%i" % (i)))
handle_listallobjects(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
objects = ObjectList()
objects.ParseFromString(resp.payload)
self.assertEqual(len(objects.objectname), len(oids))
for i in objects.objectname:
self.assertIn(i, oids)
def test_follows_up_on_channels(self):
self.station.set_real_terminate(True)
self.station.set_real_id(True)
self.station.set_real_register(True)
handle_listallobjects(self.station)
req1 = self.station.stream.pop(0)
self.assertEqual(req1.verb, "LISTALLOBJECTS")
while self.station.stream:
resp = self.station.stream.pop(0)
if resp.verb == "TERMINATE":
break
self.assertEqual(resp.verb, "DESCRIBEOBJECTS")
self.assertEqual(len(self.station.stream), 0)
resp.stream = self.station.stream
handle_terminate(req1)
req2 = self.station.stream.pop(0)
self.assertEqual(req2.verb, "LISTALLCHANNELS")
class TestHandlerListAllObjectsCached(StationHandlerTestCase):
def test_has_cache(self):
handle_listallobjects(self.station)
req1 = self.station.stream.pop(0)
self.assertEqual(req1.verb, "LISTALLOBJECTS")
while self.station.stream:
resp = self.station.stream.pop()
self.assertEqual(resp.verb, "DESCRIBEOBJECTS")
handle_listallobjects(self.station)
resp = self.station.stream.pop(0)
self.assertIsInstance(resp, response.Response)
class TestHandlerQueuesDeferredRetry(StationHandlerTestCase):
def test_queues_retry(self):
self.station.set_real_terminate(True)
self.station.set_real_id(True)
self.station.set_real_register(True)
self.assertFalse(self.station.station.has_ready_deferreds())
self.assertEqual(len(self.station.station.deferreds), 0)
handle_listallobjects(self.station)
req1 = self.station.stream.pop(0)
handle_terminate(req1)
self.assertEqual(len(self.station.station.deferreds), 1)
# ---------------------------------------------------------------------------
# freeflightsim/ffs-app-engine :: freeflightsim.appspot.com/distlib/tipfy/__init__.py
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
tipfy
~~~~~
Minimalist WSGI application and utilities for App Engine.
:copyright: 2010 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
import logging
import os
from wsgiref.handlers import CGIHandler
# Werkzeug swiss knife.
# Need to import werkzeug first otherwise py_zipimport fails.
import werkzeug
from werkzeug import (cached_property, escape, import_string, redirect,
Request as WerkzeugRequest, Response as WerkzeugResponse, url_quote)
from werkzeug.exceptions import (abort, BadGateway, BadRequest, Forbidden,
Gone, HTTPException, InternalServerError, LengthRequired,
MethodNotAllowed, NotAcceptable, NotFound, NotImplemented,
PreconditionFailed, RequestEntityTooLarge, RequestTimeout,
RequestURITooLarge, ServiceUnavailable, Unauthorized,
UnsupportedMediaType)
from werkzeug.routing import (BaseConverter, EndpointPrefix, Map,
RequestRedirect, Rule as WerkzeugRule, RuleFactory, RuleTemplate,
Subdomain, Submount)
try:
# We declare the namespace to be used outside of App Engine, so that
# we can distribute and install separate extensions.
__import__('pkg_resources').declare_namespace(__name__)
except ImportError, e:
pass
__version__ = '0.6.3'
__version_info__ = tuple(int(n) for n in __version__.split('.'))
#: Default configuration values for this module. Keys are:
#:
#: apps_installed
#: A list of active app modules as a string. Default is an empty list.
#:
#: apps_entry_points
#: URL entry points for the installed apps, in case their URLs are mounted
#: using base paths.
#:
#: middleware
#: A list of middleware classes for the WSGIApplication. The classes can
#: be defined as strings. They define hooks that plug into the application
#: to initialize stuff when the app is built, at the start or end of a
#: request or to handle exceptions. Default is an empty list.
#:
#: server_name
#: A server name hint, used to calculate current subdomain.
#: If you plan to use dynamic subdomains, you must define the main domain
#: here so that the subdomain can be extracted and applied to URL rules.
#:
#: subdomain
#: Force this subdomain to be used instead of extracting
#: the subdomain from the current url.
#:
#: dev
#: True is this is the development server, False otherwise.
#: Default is the value of ``os.environ['SERVER_SOFTWARE']``.
#:
#: app_id
#: The application id. Default is the value of
#: ``os.environ['APPLICATION_ID']``.
#:
#: version_id
#: The current deployment version id. Default is the value
#: of ``os.environ['CURRENT_VERSION_ID']``.
default_config = {
'apps_installed': [],
'apps_entry_points': {},
'middleware': [],
'server_name': None,
'subdomain': None,
'dev': os.environ.get('SERVER_SOFTWARE', '').startswith('Dev'),
'app_id': os.environ.get('APPLICATION_ID', None),
'version_id': os.environ.get('CURRENT_VERSION_ID', '1'),
}
# Allowed request methods.
ALLOWED_METHODS = frozenset(['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT',
'TRACE'])
# Value used for required values.
REQUIRED_VALUE = object()
# Value used for missing default values.
DEFAULT_VALUE = object()
class RequestHandler(object):
"""Base class to handle requests. Implements the minimal interface
required by :class:`Tipfy`.
The dispatch method implements a middleware system to execute hooks before
and after processing a request and to handle exceptions.
"""
#: A list of middleware classes or callables. A middleware can implement
#: three methods that are called before and after the current request
#: method is executed, or if an exception occurs:
#:
#: pre_dispatch(handler)
#: Called before the requested method is
#: executed. If returns a response, stops the middleware chain and
#: uses that response, not calling the requested method.
#:
#: post_dispatch(handler, response)
#: Called after the requested method is executed. Must always return
#: a response. All *post_dispatch* middleware are always executed.
#:
#: handle_exception(exception, handler)
#: Called if an exception occurs.
middleware = []
def __init__(self, app, request):
"""Initializes the handler.
:param app:
A :class:`Tipfy` instance.
:param request:
A :class:`Request` instance.
"""
self.app = app
self.request = request
def __call__(self, _method, *args, **kwargs):
"""Executes a handler method. This is called by :class:`Tipfy` and
must return a :class:`Response` object.
:param _method:
The method to be dispatched, normally the request method in
lower case, e.g., 'get', 'post', 'head' or 'put'.
:param kwargs:
Keyword arguments from the matched :class:`Rule`.
:return:
A :class:`Response` instance.
"""
method = getattr(self, _method, None)
if method is None:
# 405 Method Not Allowed.
# The response MUST include an Allow header containing a
# list of valid methods for the requested resource.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.6
self.abort(405, valid_methods=get_valid_methods(self))
if not self.middleware:
# No middleware is set: just execute the method.
return method(*args, **kwargs)
# Get all middleware for this handler.
middleware = self.app.get_middleware(self, self.middleware)
# Execute pre_dispatch middleware.
for hook in middleware.get('pre_dispatch', []):
response = hook(self)
if response is not None:
break
else:
try:
# Execute the requested method.
response = method(*args, **kwargs)
except Exception, e:
# Execute handle_exception middleware.
for hook in middleware.get('handle_exception', []):
response = hook(e, self)
if response is not None:
break
else:
raise
# Make sure we have a response object.
response = self.app.make_response(self.request, response)
# Execute post_dispatch middleware.
for hook in middleware.get('post_dispatch', []):
response = hook(self, response)
# Done!
return response
def dispatch(self, _method, *args, **kwargs):
"""Deprecated method: a wrapper for :meth:`__call__`."""
return self(_method, *args, **kwargs)
def abort(self, code, *args, **kwargs):
"""Raises an :class:`HTTPException`. This stops code execution,
leaving the HTTP exception to be handled by an exception handler.
:param code:
HTTP status error code (e.g., 404).
:param args:
Positional arguments to be passed to the exception class.
:param kwargs:
Keyword arguments to be passed to the exception class.
"""
abort(code, *args, **kwargs)
def get_config(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module.
.. seealso:: :meth:`Config.get_or_load`.
"""
return self.app.config.get_or_load(module, key=key, default=default)
def handle_exception(self, exception=None, debug=False):
"""Handles an exception. The default behavior is to re-raise the
exception (no exception handling is implemented).
:param exception:
The exception that was thrown.
:param debug:
True if the exception should be handled in debug mode.
"""
raise
def redirect(self, location, code=302):
"""Issues an HTTP redirect to the given URL. This won't stop
code execution. You must return when calling this method::
return self.redirect('/some-path')
:param location:
An absolute URI.
:param code:
The HTTP status code for the redirect.
:return:
A :class:`Response` object with headers set for redirection.
"""
return redirect(location, code)
def redirect_to(self, _name, _code=302, **kwargs):
"""Convenience method mixing :meth:`redirect` and :methd:`url_for`:
redirects the client to a URL built using a named :class:`Rule`.
:param _name:
The rule name.
:param _code:
The HTTP status code for the redirect.
:param kwargs:
Keyword arguments to build the URL.
:return:
A :class:`Response` object with headers set for redirection.
"""
return self.redirect(self.url_for(_name, **kwargs), code=_code)
def url_for(self, _name, **kwargs):
"""Builds a URL for a named :class:`Rule`.
.. seealso:: :meth:`Request.url_for`.
"""
return self.request.url_for(_name, **kwargs)
class Request(WerkzeugRequest):
"""The :class:`Request` object contains all environment variables for the
current request: GET, POST, FILES, cookies and headers. Additionally
it stores the URL adapter bound to the request and information about the
matched URL rule.
"""
#: URL adapter bound to a request.
url_adapter = None
#: Matched URL rule for a request.
rule = None
#: Keyword arguments from the matched rule.
rule_args = None
#: Exception raised when matching URL rules, if any.
routing_exception = None
def __init__(self, environ):
"""Initializes the request. This also sets a context attribute to
hold variables valid for a single request.
"""
super(Request, self).__init__(environ)
# A registry for objects in use during a request.
self.registry = {}
# A context for template variables.
self.context = {}
def url_for(self, endpoint, _full=False, _method=None, _anchor=None,
**kwargs):
"""Builds and returns a URL for a named :class:`Rule`.
For example, if you have these rules registered in the application:
        .. code-block:: python

           Rule('/', endpoint='home/main', handler='handlers.MyHomeHandler')
           Rule('/wiki', endpoint='wiki/start', handler='handlers.WikiHandler')
Here are some examples of how to generate URLs for them:
>>> url = url_for('home/main')
>>> '/'
>>> url = url_for('home/main', _full=True)
>>> 'http://localhost:8080/'
>>> url = url_for('wiki/start')
>>> '/wiki'
>>> url = url_for('wiki/start', _full=True)
>>> 'http://localhost:8080/wiki'
>>> url = url_for('wiki/start', _full=True, _anchor='my-heading')
>>> 'http://localhost:8080/wiki#my-heading'
:param endpoint:
The rule endpoint.
:param _full:
If True, returns an absolute URL. Otherwise, returns a
relative one.
:param _method:
The rule request method, in case there are different rules
for different request methods.
:param _anchor:
An anchor to add to the end of the URL.
:param kwargs:
Keyword arguments to build the URL.
:return:
An absolute or relative URL.
"""
url = self.url_adapter.build(endpoint, force_external=_full,
method=_method, values=kwargs)
if _anchor:
url += '#' + url_quote(_anchor)
return url
class Response(WerkzeugResponse):
"""A response object with default mimetype set to ``text/html``."""
default_mimetype = 'text/html'
class Tipfy(object):
"""The WSGI application which centralizes URL dispatching, configuration
    and hooks for an App Engine app.
"""
#: Default class for requests.
request_class = Request
#: Default class for responses.
response_class = Response
#: The active :class:`Tipfy` instance.
app = None
#: The active :class:`Request` instance.
request = None
def __init__(self, config=None, rules='urls.get_rules', debug=False):
"""Initializes the application.
:param config:
Dictionary with configuration for the application modules.
:param rules:
URL rules definitions for the application. It can be a list of
:class:`Rule`, a callable or a string defining a callable that
returns the rules list. The callable is called passing the WSGI
application as parameter. Default is ``urls.get_rules``: import
``get_rules()`` from *urls.py* and calls it passing the app.
"""
# Set the currently active wsgi app instance.
self.set_wsgi_app()
# Load default config and update with values for this instance.
self.config = Config(config, {'tipfy': default_config}, ['tipfy'])
# Set up a context registry for this app.
self.registry = {}
# Set a shortcut to the development flag.
self.dev = self.config.get('tipfy', 'dev', False)
# Cache for loaded handler classes.
self.handlers = {}
# Middleware factory and registry.
self.middleware_factory = MiddlewareFactory()
# Store the app middleware dict.
self.middleware = self.get_middleware(self, self.config.get('tipfy',
'middleware'))
# Initialize the URL map.
self.url_map = self.get_url_map(rules)
def __call__(self, environ, start_response):
"""Shortcut for :meth:`Tipfy.wsgi_app`."""
return self.wsgi_app(environ, start_response)
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
:meth:`Tipfy.__call__` so that middlewares can be applied without
losing a reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
:param environ:
A WSGI environment.
:param start_response:
A callable accepting a status code, a list of headers and an
optional exception context to start the response.
"""
cleanup = True
try:
# Set the currently active wsgi app and request instances.
request = self.request_class(environ)
self.set_wsgi_app()
self.set_request(request)
# Make sure that the requested method is allowed in App Engine.
if request.method not in ALLOWED_METHODS:
abort(501)
# Match current URL and store routing exceptions if any.
self.match_url(request)
# Run pre_dispatch_handler middleware.
rv = self.pre_dispatch(request)
if rv is None:
# Dispatch the requested handler.
rv = self.dispatch(request)
# Run post_dispatch_handler middleware.
response = self.make_response(request, rv)
response = self.post_dispatch(request, response)
except RequestRedirect, e:
# Execute redirects raised by the routing system or the
# application.
response = e
except Exception, e:
# Handle HTTP and uncaught exceptions.
cleanup = not self.dev
response = self.handle_exception(request, e)
response = self.make_response(request, response)
finally:
# Do not clean request if we are in development mode and an
# exception happened. This allows the debugger to still access
# request and other variables in the interactive shell.
if cleanup:
self.cleanup()
# Call the response object as a WSGI application.
return response(environ, start_response)
def get_url_map(self, rules=None):
"""Returns a ``werkzeug.routing.Map`` instance with initial
:class:`Rule` definitions.
:param rules:
Initial list of :class:`Rule`, a callable or a string defining
a callable that returns the list of rules.
:return:
A ``werkzeug.routing.Map`` instance.
"""
if isinstance(rules, basestring):
try:
rules = import_string(rules)
except (AttributeError, ImportError), e:
logging.warning('Missing %s. No URL rules were loaded.' %
rules)
rules = None
if callable(rules):
try:
rules = rules(self)
except TypeError, e:
# Backwards compatibility:
# Previously get_rules() didn't receive the WSGI app.
rules = rules()
return Map(rules)
def add_url_rule(self, path, endpoint, handler, **kwargs):
"""Adds a rule to the URL map.
:param path:
The URL path.
:param endpoint:
The rule endpoint: an identifier for the rule.
:param handler:
A :class:`RequestHandler` class, or a module and class
specification as a string.
"""
rule = Rule(path, endpoint=endpoint, handler=handler, **kwargs)
self.url_map.add(rule)
def match_url(self, request):
"""Matches registered :class:`Rule` definitions against the request.
This will store the URL adapter, matched rule and rule arguments in
the :class: `Request` instance.
Three exceptions can occur when matching the rules: ``NotFound``,
``MethodNotAllowed`` or ``RequestRedirect``. If they are
raised, they are stored in the request for later use.
:param request:
A :class:`Request` instance.
:return:
None.
"""
# Bind url map to the current request location.
config = self.config.get('tipfy')
request.url_adapter = self.url_map.bind_to_environ(request.environ,
server_name=config.get('server_name'),
subdomain=config.get('subdomain'))
try:
# Match the path against registered rules.
request.rule, request.rule_args = request.url_adapter.match(
return_rule=True)
except HTTPException, e:
request.routing_exception = e
def pre_dispatch(self, request):
"""Executes pre_dispatch_handler middleware. If a middleware returns
        anything, the chain is stopped and that value is returned.
:param request:
A :class:`Request` instance.
:return:
The returned value from a middleware or None.
"""
for hook in self.middleware.get('pre_dispatch_handler', []):
rv = hook()
if rv is not None:
return rv
def dispatch(self, request):
"""Matches the current URL against registered rules and returns the
        result from the :class:`RequestHandler`.
:param request:
A :class:`Request` instance.
:return:
            The :class:`Response` returned by the matched handler.
"""
if request.routing_exception is not None:
raise request.routing_exception
handler = request.rule.handler
if isinstance(handler, basestring):
if handler not in self.handlers:
# Import handler set in matched rule.
self.handlers[handler] = import_string(handler)
handler = self.handlers[handler]
# Instantiate handler and dispatch requested method.
method = request.method.lower().replace('-', '_')
return handler(self, request)(method, **request.rule_args)
def post_dispatch(self, request, response):
"""Executes post_dispatch_handler middleware. All middleware are
executed and must return a response object.
:param request:
A :class:`Request` instance.
:param response:
The :class:`Response` returned from :meth:`Tipfy.pre_dispatch`
or :meth:`Tipfy.dispatch` and converted by
:meth:`Tipfy.make_response`.
:return:
A :class:`Response` instance.
"""
for hook in self.middleware.get('post_dispatch_handler', []):
response = hook(response)
return response
def make_response(self, request, rv):
"""Converts the return value from a handler to a real response
object that is an instance of :class:`Response`.
        The following types are allowed for ``rv``:
response_class
The object is returned unchanged.
str
A response object is created with the string as body.
unicode
A response object is created with the string encoded to
utf-8 as body.
tuple
The response object is created with the contents of the
tuple as arguments.
WSGI function
The function is called as WSGI application and
buffered as response object.
This method comes from `Flask <http://flask.pocoo.org/>`_.
:param request:
A :class:`Request` instance.
:param rv:
The return value from the handler.
:return:
A :class:`Response` instance.
"""
if isinstance(rv, self.response_class):
return rv
if isinstance(rv, basestring):
return self.response_class(rv)
if isinstance(rv, tuple):
return self.response_class(*rv)
if rv is None:
raise ValueError('Handler did not return a response.')
return self.response_class.force_type(rv, request.environ)
def handle_exception(self, request, e):
"""Handles HTTPException or uncaught exceptions raised by the WSGI
application, optionally applying exception middleware.
:param request:
A :class:`Request` instance.
:param e:
            The caught exception.
:return:
A :class:`Response` instance, if the exception is not raised.
"""
# Execute handle_exception middleware.
for hook in self.middleware.get('handle_exception', []):
response = hook(e)
if response is not None:
return response
if self.dev:
raise
logging.exception(e)
if isinstance(e, HTTPException):
return e
return InternalServerError()
def get_middleware(self, obj, classes):
"""Returns a dictionary of all middleware instance methods for a given
object.
:param obj:
The object to search for related middleware (:class:`Tipfy` or
:class:`RequestHandler` instance).
:param classes:
A list of middleware classes.
:return:
A dictionary with middleware instance methods.
"""
if not classes:
return {}
return self.middleware_factory.get_middleware(obj, classes)
def get_config(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module.
.. seealso:: :meth:`Config.get_or_load`.
"""
return self.config.get_or_load(module, key=key, default=default)
def set_wsgi_app(self):
"""Sets the currently active :class:`Tipfy` instance."""
Tipfy.app = self
def set_request(self, request):
"""Sets the currently active :class:`Request` instance.
:param request:
The currently active :class:`Request` instance.
"""
Tipfy.request = request
def cleanup(self):
"""Cleans :class:`Tipfy` variables at the end of a request."""
Tipfy.app = Tipfy.request = None
def get_test_client(self):
"""Creates a test client for this application.
:return:
A ``werkzeug.Client``, which is a :class:`Tipfy` wrapped
for tests.
"""
from werkzeug import Client
return Client(self, self.response_class, use_cookies=True)
def run(self):
"""Runs the app using ``CGIHandler``. This must be called inside a
``main()`` function in the file defined in *app.yaml* to run the
application::
# ...
app = Tipfy(rules=[
Rule('/', endpoint='home', handler=HelloWorldHandler),
])
def main():
app.run()
if __name__ == '__main__':
main()
"""
# Fix issue #772.
if self.dev:
fix_sys_path()
CGIHandler().run(self)
class Config(dict):
"""A simple configuration dictionary keyed by module name. This is a
dictionary of dictionaries. It requires all values to be dictionaries
and applies updates and default values to the inner dictionaries instead
of the first level one.
The configuration object is available as a ``config`` attribute of
    :class:`Tipfy`. It is instantiated and populated when the app is built::
config = {}
config['my.module'] = {
'foo': 'bar',
}
app = Tipfy(rules=[Rule('/', endpoint='home', handler=MyHandler)], config=config)
Then to read configuration values, use :meth:`RequestHandler.get_config`::
class MyHandler(RequestHandler):
def get(self):
foo = self.get_config('my.module', 'foo')
# ...
"""
#: Loaded module configurations.
loaded = None
def __init__(self, value=None, default=None, loaded=None):
"""Initializes the configuration object.
:param value:
A dictionary of configuration dictionaries for modules.
:param default:
A dictionary of configuration dictionaries for default values.
:param loaded:
A list of modules to be marked as loaded.
"""
self.loaded = loaded or []
if value is not None:
assert isinstance(value, dict)
for module in value.keys():
self.update(module, value[module])
if default is not None:
assert isinstance(default, dict)
for module in default.keys():
self.setdefault(module, default[module])
def __setitem__(self, module, value):
"""Sets a configuration for a module, requiring it to be a dictionary.
:param module:
A module name for the configuration, e.g.: `tipfy.ext.i18n`.
:param value:
A dictionary of configurations for the module.
"""
assert isinstance(value, dict)
super(Config, self).__setitem__(module, value)
def get(self, module, key=None, default=None):
"""Returns a configuration value for given key in a given module.
        >>> cfg = Config({'tipfy.ext.i18n': {'locale': 'pt_BR'}})
>>> cfg.get('tipfy.ext.i18n')
{'locale': 'pt_BR'}
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'invalid-key')
None
>>> cfg.get('tipfy.ext.i18n', 'invalid-key', 'default-value')
default-value
:param module:
The module to get a configuration from, e.g.: `tipfy.ext.i18n`.
:param key:
The key from the module configuration.
:param default:
A default value to return when the configuration for the given
key is not set. It is only returned if **key** is defined.
:returns:
The configuration value.
"""
if module not in self:
if key is None:
return None
return default
if key is None:
return self[module]
if key not in self[module]:
return default
return self[module][key]
def get_or_load(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module. If it is not already
set, loads a ``default_config`` variable from the given module,
updates the app configuration with those default values and returns
the value for the given key. If the key is still not available,
returns the provided default value or raises an exception if no
default was provided.
Every module that allows some kind of configuration sets a
``default_config`` global variable that is loaded by this function,
cached and used in case the requested configuration was not defined
by the user.
:param module:
The configured module.
:param key:
The config key.
:param default:
A default value to return in case the configuration for
the module/key is not set.
:returns:
A configuration value.
"""
if module not in self.loaded:
# Load default configuration and update config.
values = import_string(module + '.default_config', silent=True)
if values:
self.setdefault(module, values)
self.loaded.append(module)
value = self.get(module, key, default)
if value is not REQUIRED_VALUE and not (key is None and value is None):
return value
if key is None and value is None:
raise KeyError('Module %s is not configured.' % module)
raise KeyError('Module %s requires the config key "%s" to be '
'set.' % (module, key))
def setdefault(self, module, value):
"""Sets a default configuration dictionary for a module.
        >>> cfg = Config({'tipfy.ext.i18n': {'locale': 'pt_BR'}})
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'foo')
None
>>> cfg.setdefault('tipfy.ext.i18n', {'locale': 'en_US', 'foo': 'bar'})
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'foo')
bar
:param module:
The module to set default configuration, e.g.: `tipfy.ext.i18n`.
:param value:
A dictionary of configurations for the module.
:returns:
None.
"""
assert isinstance(value, dict)
if module not in self:
self[module] = {}
for key in value.keys():
self[module].setdefault(key, value[key])
def update(self, module, value):
"""Updates the configuration dictionary for a module.
        >>> cfg = Config({'tipfy.ext.i18n': {'locale': 'pt_BR'}})
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'foo')
None
>>> cfg.update('tipfy.ext.i18n', {'locale': 'en_US', 'foo': 'bar'})
>>> cfg.get('tipfy.ext.i18n', 'locale')
en_US
>>> cfg.get('tipfy.ext.i18n', 'foo')
bar
:param module:
The module to update the configuration, e.g.: `tipfy.ext.i18n`.
:param value:
A dictionary of configurations for the module.
:returns:
None.
"""
assert isinstance(value, dict)
if module not in self:
self[module] = {}
self[module].update(value)
class MiddlewareFactory(object):
"""A factory and registry for middleware instances in use."""
#: All middleware methods to look for.
names = (
'post_make_app',
'pre_dispatch_handler',
'post_dispatch_handler',
'pre_dispatch',
'post_dispatch',
'handle_exception',
)
#: Methods that must run in reverse order.
reverse_names = (
'post_dispatch_handler',
'post_dispatch',
'handle_exception',
)
def __init__(self):
# Instantiated middleware.
self.instances = {}
# Methods from instantiated middleware.
self.methods = {}
# Middleware methods for a given object.
self.obj_middleware = {}
def get_middleware(self, obj, classes):
"""Returns a dictionary of all middleware instance methods for a given
object.
:param obj:
The object to search for related middleware (the :class:`Tipfy` or
:class:`RequestHandler`).
:param classes:
A list of middleware classes.
:return:
A dictionary with middleware instance methods.
"""
id = obj.__module__ + '.' + obj.__class__.__name__
if id not in self.obj_middleware:
self.obj_middleware[id] = self.load_middleware(classes)
return self.obj_middleware[id]
def load_middleware(self, specs):
"""Returns a dictionary of middleware instance methods for a list of
middleware specifications.
:param specs:
A list of middleware classes, classes as strings or instances.
:return:
A dictionary with middleware instance methods.
"""
res = {}
for spec in specs:
# Middleware can be defined in 3 forms: strings, classes and
# instances.
is_str = isinstance(spec, basestring)
is_obj = not is_str and not isinstance(spec, type)
if is_obj:
# Instance.
spec_id = id(spec)
obj = spec
elif is_str:
spec_id = spec
else:
spec_id = spec.__module__ + '.' + spec.__name__
if spec_id not in self.methods:
if is_str:
spec = import_string(spec, silent=True)
if not spec:
                        # Log the spec id; ``spec`` itself is None here.
                        logging.warning('Missing %s. Middleware was not '
                                        'loaded.' % spec_id)
continue
if not is_obj:
obj = spec()
self.instances[spec_id] = obj
self.methods[spec_id] = [getattr(obj, n, None) for n in \
self.names]
for name, method in zip(self.names, self.methods[spec_id]):
if method:
res.setdefault(name, []).append(method)
for name in self.reverse_names:
if name in res:
res[name].reverse()
return res
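# Illustrative sketch, not part of the original module: a middleware class
# of the kind consumed by MiddlewareFactory. Every hook is optional; only
# the methods that actually exist are registered. The class below is
# hypothetical.
import time

class _TimerMiddlewareSketch(object):
    def pre_dispatch_handler(self):
        # Returning a response here would short-circuit dispatching.
        # (Stored on the shared instance purely for illustration.)
        self._start = time.time()

    def post_dispatch_handler(self, response):
        logging.debug('Request took %.3fs', time.time() - self._start)
        return response

    def handle_exception(self, e):
        # Return a response to swallow the exception, or None to fall
        # back to the application's default exception handling.
        return None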
class Rule(WerkzeugRule):
"""Extends Werkzeug routing to support a handler definition for each Rule.
Handler is a :class:`RequestHandler` module and class specification, and
endpoint is a friendly name used to build URL's. For example:
.. code-block:: python
Rule('/users', endpoint='user-list', handler='my_app:UsersHandler')
Access to the URL ``/users`` loads ``UsersHandler`` class from
``my_app`` module. To generate a URL to that page, use :func:`url_for`::
url = url_for('user-list')
"""
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop('handler', kwargs.get('endpoint', None))
super(Rule, self).__init__(*args, **kwargs)
def empty(self):
"""Returns an unbound copy of this rule. This can be useful if you
want to reuse an already bound URL for another map.
"""
defaults = None
if self.defaults is not None:
defaults = dict(self.defaults)
return Rule(self.rule, defaults, self.subdomain, self.methods,
self.build_only, self.endpoint, self.strict_slashes,
self.redirect_to, handler=self.handler)
class HandlerPrefix(RuleFactory):
"""Prefixes all handler values (which must be strings for this factory) of
nested rules with another string. For example, take these rules::
rules = [
HandlerPrefix('my_app.handlers.', [
Rule('/', endpoint='index', handler='IndexHandler'),
Rule('/entry/<entry_slug>', endpoint='show', handler='ShowHandler'),
]),
]
These are the same as::
rules = [
Rule('/', endpoint='index', handler='my_app.handlers.IndexHandler'),
Rule('/entry/<entry_slug>', endpoint='show', handler='my_app.handlers.ShowHandler'),
]
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.handler = self.prefix + rule.handler
yield rule
def get_config(module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module.
.. seealso:: :meth:`Config.get_or_load`.
"""
return Tipfy.app.config.get_or_load(module, key=key, default=default)
def get_valid_methods(handler):
"""Returns a list of HTTP methods supported by a handler.
:param handler:
A :class:`RequestHandler` instance.
:returns:
A list of HTTP methods supported by the handler.
"""
return [method for method in ALLOWED_METHODS if
getattr(handler, method.lower().replace('-', '_'), None)]
def url_for(endpoint, _full=False, _method=None, _anchor=None, **kwargs):
"""Builds and returns a URL for a named :class:`Rule`.
This is a shortcut to :meth:`Request.url_for`.
"""
# For backwards compatibility, check old keywords.
full = kwargs.pop('full', _full)
method = kwargs.pop('method', _method)
return Tipfy.request.url_for(endpoint, _full=full, _method=method,
_anchor=_anchor, **kwargs)
def redirect_to(endpoint, _method=None, _anchor=None, _code=302, **kwargs):
"""Convenience function mixing ``werkzeug.redirect`` and
:meth:`Request.url_for`: redirects the client to a URL built using a named
:class:`Rule`.
:param endpoint:
The rule endpoint.
:param _method:
The rule request method, in case there are different rules
for different request methods.
:param _anchor:
An anchor to add to the end of the URL.
:param _code:
The redirect status code.
:param kwargs:
Keyword arguments to build the URL.
:return:
A :class:`Response` object with headers set for redirection.
"""
# For backwards compatibility, check old keywords.
method = kwargs.pop('method', _method)
code = kwargs.pop('code', _code)
url = Tipfy.request.url_for(endpoint, _full=True, _method=method,
_anchor=_anchor, **kwargs)
return redirect(url, code=code)
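# Illustrative handler code (the 'user-list' endpoint and the page kwarg are
# hypothetical):
#
#   def get(self):
#       return redirect_to('user-list', page=2)  # 302 to the URL for 'user-list'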
def render_json_response(*args, **kwargs):
"""Renders a JSON response.
:param args:
Arguments to be passed to simplejson.dumps().
:param kwargs:
Keyword arguments to be passed to simplejson.dumps().
:return:
A :class:`Response` object with a JSON string in the body and
mimetype set to ``application/json``.
"""
from django.utils import simplejson
return Response(simplejson.dumps(*args, **kwargs),
mimetype='application/json')
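# Illustrative call:
#
#   return render_json_response({'status': 'ok'})
#   # -> Response with body '{"status": "ok"}' and mimetype 'application/json'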
def make_wsgi_app(config=None, **kwargs):
"""Returns a instance of :class:`Tipfy`.
:param config:
A dictionary of configuration values.
:param kwargs:
Additional keyword arguments to instantiate :class:`Tipfy`.
:return:
A :class:`Tipfy` instance.
"""
app = Tipfy(config=config, **kwargs)
if app.dev:
logging.getLogger().setLevel(logging.DEBUG)
# Execute post_make_app middleware.
for hook in app.middleware.get('post_make_app', []):
app = hook(app)
return app
def run_wsgi_app(app):
"""Executes the application, optionally wrapping it by middleware.
.. warning::
This is deprecated. Use app.run() instead.
:param app:
A :class:`Tipfy` instance.
:return:
None.
"""
app.run()
_ULTIMATE_SYS_PATH = None
def fix_sys_path():
"""A fix for issue 772. We must keep this here until it is fixed in the dev
server.
See: http://code.google.com/p/googleappengine/issues/detail?id=772
"""
global _ULTIMATE_SYS_PATH
import sys
if _ULTIMATE_SYS_PATH is None:
_ULTIMATE_SYS_PATH = list(sys.path)
elif sys.path != _ULTIMATE_SYS_PATH:
sys.path[:] = _ULTIMATE_SYS_PATH
__all__ = [
'Config',
'DEFAULT_VALUE',
'EndpointPrefix',
'HTTPException',
'InternalServerError',
'Map',
'REQUIRED_VALUE',
'Request',
'RequestHandler',
'RequestRedirect',
'Response',
'Rule',
'RuleTemplate',
'Subdomain',
'Submount',
'Tipfy',
'abort',
'cached_property',
'default_config',
'escape',
'get_config',
'import_string',
'make_wsgi_app',
'redirect',
'redirect_to',
'render_json_response',
'run_wsgi_app',
'url_for',
'url_quote',
]
|
gpl-2.0
| -5,686,333,359,332,392,000 | 32.356739 | 96 | 0.591754 | false |
tylerwmarrs/single-artist-lyric-analysis
|
lyricanalysis/utils.py
|
1
|
1576
|
import unicodedata
import sys
from nltk.stem import PorterStemmer
def split_sentences(text):
sentences = []
for sentence in text.split('\n'):
sentence = sentence.strip()
if sentence:
sentences.append(sentence)
return sentences
stemmer = PorterStemmer()
def stem_words(words):
return [stemmer.stem(w) for w in words]
punc_tbl = dict.fromkeys(i for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
def remove_punctuation(text):
return text.translate(punc_tbl)
def remove_stop_words(stop_words, words):
"""Remove stop words from a list of words."""
wl = []
for word in words:
word = word.lower()
if word not in stop_words:
wl.append(word)
return wl
def song_repetiveness(lyrics, rate=2):
# split song on sentence and find unique sentences
sentences = split_sentences(lyrics)
unique_sentences = set(sentences)
total_sentences = len(sentences)
total_unique_sentences = len(unique_sentences)
    # collect frequency of unique sentences and calculate repetitiveness
repetitive_rate = 0
frequency = 0
for usentence in unique_sentences:
for sentence in sentences:
if usentence == sentence:
frequency = frequency + 1
        # only calc. repetitiveness rate if frequency rate cutoff is met
if frequency >= rate:
repetitive_rate = repetitive_rate + (frequency / total_sentences)
frequency = 0
return repetitive_rate
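# Worked example (hypothetical lyrics, assuming Python 3 division): a chorus
# line repeated three times out of four total lines passes the default cutoff
# (rate=2) and contributes 3/4, while the single verse line is ignored:
#
#   song_repetiveness("la la\nla la\nla la\nverse")  # == 0.75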
|
mit
| -4,943,583,380,357,310 | 25.283333 | 77 | 0.645305 | false |
blueshed/blueshed-py
|
src/blueshed/fling/fling_rpc.py
|
1
|
1225
|
'''
Created on Nov 12, 2013
@author: peterb
'''
import logging
import time
from blueshed.fling.fling_client import FlingClient
from tornado.ioloop import IOLoop
class FlingRPC(FlingClient):
def __init__(self, target, url="ws://localhost:7777/ws", reconnect=1):
FlingClient.__init__(self, url=url)
self._target = target
self._reconnect = reconnect
def on_open(self):
for name in dir(self._target):
if name[0] != "_":
self.subscribe(name)
logging.info("subscribed to %s",name)
def requested(self, name, options=None, **kwargs):
logging.debug("requested %s[%s] %s",name,options,kwargs)
return getattr(self._target,name)(**kwargs)
def on_close(self, error=None):
logging.debug("closed %s %s", self.url, error)
if self._reconnect is not None:
IOLoop.instance().add_timeout(time.time()+self._reconnect, self._connect)
logging.debug("reconnecting...")
@classmethod
def serve(cls, target, url="ws://localhost:7777/ws"):
service = cls(target,url)
IOLoop.instance().start()
return service
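# Sketch of intended use (the Calculator target below is hypothetical):
#
#   class Calculator(object):
#       def add(self, a, b):
#           return a + b
#
#   FlingRPC.serve(Calculator())  # subscribes to 'add' and answers requests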
|
mit
| 6,750,943,200,508,859,000 | 25.652174 | 85 | 0.585306 | false |
veteman/thepython2blob
|
timei.py
|
1
|
6839
|
#Copyright 2015 B. Johan G. Svensson
#Licensed under the terms of the MIT license (see LICENSE).
def datetest(y,m,d,bc=False):
# datetest(y ,m ,d [,bc])
    if (type(y)!=type(1) and type(y)!=type(1L)) or \
       (type(m)!=type(1) and type(m)!=type(1L)) or \
       (type(d)!=type(1) and type(d)!=type(1L)):
print "Datatype?"
return(-1)
if y != abs(y) or m != abs(m) or d != abs(d):
return(-1)
if m>12 or y==0 or m==0 or d==0:
"**Warning: Month > 12"
return(-1)
if d<29:
return(0) # Year > 0, month 1-12 and day 1-29, always OK.
if bc:
y=y-1
if m==2:
        if d==29 and y%4==0 and (y%100!=0 or y%400==0): # Leap day?
return(0) # Yes
else:
print "**Warning: Day 29 and not leap year, or day > 29"
return(-1) # Day 29 and not leap year, or day > 29
if m>7:
m=m-5
if m>2:
m=m-2
if m%2==1 and d<32: # If Jan, Mar, May, Jul, Aug, Oct or Dec and day 29-31
return(0) # then OK
elif d<31: # else if day 29-30
return(0) # also OK
else:
print "**Warning: Day > 31 NOT OK"
return(-1) # Day > 31 NOT OK
return -2
def jday(y,m,d,bc=False):
# jday(y, m, d [, bc])
# Any valid gregorian date any year -inf to inf.
if datetest(y,m,d,bc)<0:
return(-1)
if bc:
y=1-y
if m<3:
y=y-1
m=m+12
m=m-2
jd = y*365 + y//4 - y//100 + y//400 + 28*m + (13*m - 1)//5 + d + 1721089
return jd
def jddate(jd):
# jddate(jd)
    # Any day -inf to inf. Generates a gregorian date
if type(jd)!=type(1) and type(jd)!=type(1L):
print "**Warning: Wrong type!"
return((-1,-1,-1,False))
z = jd - 1721119
h = 100*z-25
a = h//3652425
b = a - a//4
yr = (100*b+h)//36525
c = b+z-365*yr-yr//4
mn = (5*c+456)//153+(5*c+456<0)
dy = c-(153*mn-457)//5+(153*mn-457<0)
if mn>12:
yr = yr + 1
mn = mn - 12
if yr < 1:
return((1-yr,mn,dy,True))
else:
return((yr,mn,dy,False))
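# Quick sanity check (verified by hand, not part of the original module):
#   jday(2000, 1, 1)  # == 2451545, the Julian day number of 2000-01-01
#   jddate(2451545)   # == (2000, 1, 1, False), so the round trip holds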
def date(y,w,wd):
# jdayfrgweek(y,w,wd)
    # Any valid date from 1 Jan, year 1, in the gregorian calendar.
if type(y) != type(1) or type(w) != type(1) or type(wd) != type(1):
return((-1,-1,-1))
if y != abs(y):
return((-2,-1,-1))
if wd!=abs(wd) or wd < 1 or wd > 7:
return((-3,-1,-1))
if w!=abs(w) or w < 1 or w > 53:
return((-4,-1,-1))
day = (y-1)*365+(y-1)//4-(y-1)//100+(y-1)//400+1721426
ywd = day%7+1
if w == 53 and ywd !=4:
        if ywd !=3 or y%4!=0 or y%100==0 and y%400!=0:
return((-5,-1,-1))
if ywd > 4:
day = day + 8 - ywd
else:
day = day + 1 - ywd
day = day+w*7-8+wd
return day
def wday(jd):
wd = jd%7+1
z = jd - 1721119
h = 100*z-25
a = h//3652425
b = a - a//4
y = (100*b+h)//36525
day = y*365+y//4-y//100+y//400+1721119
yr = y
c = y//100
y = y%100
tmp1 = jd - day # Days since day before 1 Mar
tmp2 = (2-2*c+y+c//4+y//4)%7 # Weekday on the day before the latest 1 Mar. 0=Mon, 6=Sun
w = (tmp1+6-28+tmp2)//7+12 # Uncorrected weeknumber
###Correction
if (3-2*c+y+c//4+y//4)%7==1: # Latest 1 Mar is a Monday
if y%4==0 and not(y==0 and c%4!=0): # Leap year? If yes, year has 53 weeks
w = w+1
if w > 53:
w = w-53
yr = yr + 1
elif w > 52:
w = w-52
yr = yr + 1
elif (3-2*c+y+c//4+y//4)%7 == 0: # Latest 1 Mar is a Sunday. If yes, year has 53 weeks
if w > 53:
w = w-53
yr = yr + 1
elif w > 52: # Otherwise, year has 52 weeks
w = w-52
yr = yr + 1
if wd == 0:
wd = 7
if yr < 1:
return((1-yr,w,wd,True))
else:
return((yr,w,wd,False))
def week(y,m,d):
if datetest(y,m,d)<0:
return(-1)
if m<3:
y=y-1
m=m+12
m=m-2
yr = y
c = y//100
y = y%100
wd = (d+int(2.6*m-0.2)-2*c+y+y//4+c//4)%7
w = ((d+(m-1)*28+int(2.6*m-0.2)-3+((2-2*c+y+c//4+y//4)%7))//7)+9
if (3-2*c+y+c//4+y//4)%7==1:
if y%4==0 and not(y==0 and c%4!=0): # Leap year?
w = w+1
if w > 53:
w = w-53
yr = yr + 1
elif w > 52:
w = w-52
yr = yr + 1
elif (3-2*c+y+c//4+y//4)%7 == 0:
if w > 53:
w = w-53
yr = yr + 1
elif w > 52:
w = w-52
yr = yr + 1
if wd == 0:
wd = 7
return (yr,w,wd)
def easter(yr):
if yr!=abs(int(yr)): return (-1,-1)
#Lilius-Clavius algoritm
a=yr%19+1
b=yr//100+1
c=(3*b)//4-12
d=(8*b+5)//25-5
e=(yr*5)//4-10-c
    f=((11*a+20+d-c)%30+30)%30 # double %30 keeps f non-negative (redundant in Python, kept from the source algorithm)
if f==24 or f==25 and a>11: f=f+1
g=44-f
if g<21: g=g+30
s=g+7-(e+g)%7
emo=3+s//32
edy=1+(s-1)%31
if yr<1583:
emo=-1
edy=-1
return (emo,edy)
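# Example (checked against the known date: Easter 2000 fell on 23 April):
#   easter(2000)  # == (4, 23)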
def easter2(yr):
if yr!=abs(int(yr)): return (-1,-1)
#www.merlyn.demon.co.uk
aa=yr//100
bb=aa-aa//4
cc=yr%19
dd=(15+19*cc+bb-((aa+1-((aa+8)//25))//3))%30
ee=dd-((cc+11*dd)//319)
s=22+ee+(140004-(yr+(yr//4))%7+bb-ee)%7
emo=3+(s//32)
edy=1+(s-1)%31
return (emo,edy)
#Utils:
def unixtime(y,m,d,h,mi,s,epoch=210866803200):
day=jday(y,m,d)
return int(86400*day+h*3600+mi*60+s-epoch)
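# Consistency check for the default epoch constant (not part of the original
# module): the Unix epoch itself maps to zero.
#   unixtime(1970, 1, 1, 0, 0, 0)  # == 0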
def windowstime(y,m,d,h,mi,s):
return unixtime(y,m,d,h,mi,s,epoch=199222329600)
def unixdate(sec,epoch=210866803200):
weekdays=["Mon","Tue","Wed","Thu","Fri","Sat","Sun"]
months=["Jan.","Feb.","Mar.","Apr.","May.","Jun.","Jul.",\
"Aug.","Sep.","Oct.","Nov.","Dec."]
if sec!=int(sec):
return -1
day=int((sec+epoch)//86400)
date=jddate(day)
if date[0]==-1:return -2
sec=int(sec%86400)
hou=sec//3600
sec=sec%3600
mnu=sec//60
sec=sec%60
wee=week(int(date[0]),int(date[1]),int(date[2]))
#[date[0],date[1],date[2],wee[1],wee[2],hou,mnu,sec]
strng = weekdays[wee[2]-1]
strng+=". w."+str(wee[1])+", "+str(date[2])+" "
strng+=months[date[1]-1]
strng+=" "+str(date[0])+" "
if hou<10:strng+="0"
strng+=str(hou)+":"
if mnu<10:strng+="0"
strng+=str(mnu)+":"
if sec<10:strng+="0"
strng+=str(sec)+" UTC"
return strng
def windowsdate(sec):
return unixdate(sec,epoch=199222329600)
|
mit
| -7,946,349,581,877,763,000 | 26.144033 | 100 | 0.446118 | false |
sclooney94/example-avi-gaiadr1-magnitude-distribution
|
views.py
|
1
|
2732
|
"""
GAVIP Example AVIS: Simple AVI
@req: SOW-FUN-010
@req: SOW-FUN-040
@req: SOW-FUN-046
@req: SOW-INT-001
@comp: AVI Web System
This is a simple example AVI which demonstrates usage of the GAVIP AVI framework
Here in views.py, you can define any type of functions to handle
HTTP requests. Any of these functions can be used to create an
AVI query from your AVI interface.
"""
import os
import time
import json
import logging
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import redirect, get_object_or_404
from django.shortcuts import render
from django.core import serializers
from django.utils import formats
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.views.decorators.http import require_http_methods
from avi.models import DemoModel
from gavip_avi.decorators import require_gavip_role # use this to restrict access to views in an AVI
ROLES = settings.GAVIP_ROLES
logger = logging.getLogger(__name__)
@require_http_methods(["GET"])
def index(request):
"""
    This view is the first view that the user sees.
We send a dictionary called a context, which contains
'millis' and 'standalone' variables.
"""
context = {
"millis": int(round(time.time() * 1000)),
"show_welcome": request.session.get('show_welcome', True)
}
request.session['show_welcome'] = False
return render(request, 'avi/index.html', context)
@require_http_methods(["POST"])
def run_query(request):
"""
This is called when the user submits their job parameters in
their interface.
We pull the parameters from the request POST parameters.
    We use those parameters to create the DemoModel instance.
    DemoModel extends the AviJob class, which is required so that
    the framework can exercise the pipeline correctly.
    We return the job ID to the user so they can view progress.
"""
outfile = request.POST.get("outfile")
adql_query = request.POST.get("query")
job = DemoModel.objects.create(
query=adql_query,
outputFile=outfile
)
    # Return the job ID (the AviJob request id) so the client can poll
    # the job_result view for progress.
    return JsonResponse({'job_id': job.request_id})
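# Illustrative client call (the URL route is hypothetical; only the POSTed
# field names 'query' and 'outfile' are taken from the view above):
#
#   requests.post('http://avi-host/run_query/',
#                 data={'query': 'SELECT ...', 'outfile': 'result.out'})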
@require_http_methods(["GET"])
def job_result(request, job_id):
job = get_object_or_404(DemoModel, request_id=job_id)
file_path = os.path.join(settings.OUTPUT_PATH, job.outputFile)
with open(file_path, 'r') as outFile:
# job_data = json.load(outFile)
job_data = outFile.read()
return render(request, 'avi/job_result.html', {'job_id': job_id,
'job_data': job_data})
|
lgpl-3.0
| -8,593,449,538,297,468,000 | 29.032967 | 101 | 0.711933 | false |
g2p/systems
|
lib/systems/context.py
|
1
|
17949
|
# vim: set fileencoding=utf-8 sw=2 ts=2 et :
from __future__ import absolute_import
from __future__ import with_statement
from logging import getLogger
import networkx as NX
import yaml
from systems.collector import Aggregate, CResource
from systems.registry import get_registry
from systems.typesystem import EResource, Transition, ResourceRef
__all__ = ('Realizer', )
LOGGER = getLogger(__name__)
DESC_LIMIT = 64
def describe(thing):
return '%s' % str(thing)[:DESC_LIMIT]
class CycleError(Exception):
pass
class Node(object):
def __init__(self):
if type(self) == Node:
raise TypeError
def __repr__(self):
return '<%s>' % self
def __str__(self):
return type(self).__name__
class CheckPointNode(Node):
pass
class ExpandableNode(Node):
def __init__(self, res):
super(ExpandableNode, self).__init__()
if type(self) == ExpandableNode:
# Abstract class
raise TypeError
self._res = res
class BeforeExpandableNode(ExpandableNode):
def __str__(self):
return 'Before %s' % self._res
class AfterExpandableNode(ExpandableNode):
def __str__(self):
return 'After %s' % self._res
class GraphFirstNode(Node, yaml.YAMLObject):
yaml_tag = u'GraphFirstNode'
class GraphLastNode(Node, yaml.YAMLObject):
yaml_tag = u'GraphLastNode'
node_types = (CheckPointNode, BeforeExpandableNode, AfterExpandableNode,
GraphFirstNode, GraphLastNode,
Transition, Aggregate, CResource, EResource, ResourceRef)
class ResourceGraph(yaml.YAMLObject):
"""
A graph of resources and transitions linked by dependencies.
Resources are positioned as two sentinels in the transition graph.
Invariant: directed, acyclic.
"""
def __init__(self, top=None):
self._graph = NX.DiGraph()
self._first = GraphFirstNode()
self._last = GraphLastNode()
self._graph.add_edge(self._first, self._last)
# Contains CResource and EResource, despite the name.
# Used to enforce max one resource per id.
self.__expandables = {}
# Received references, by name.
self.__received_refs = {}
# What nodes were processed (meaning expanding or collecting)
self.__processed = set()
    # Pre-bound args passed by ref. Allow putting extra depends on them.
if top is not None:
if not isinstance(top, ResourceGraph):
raise TypeError(top, ResourceGraph)
self.__top = top
else:
self.__top = self
yaml_tag = u'!ResourceGraph'
@classmethod
def from_yaml(cls, loader, ynode):
rg = cls()
# Deep because of aliases and anchors, I think.
mp = loader.construct_mapping(ynode, deep=True)
pred_rels = mp['nodes']
for rel in pred_rels:
rg._add_node(rel['node'], depends=rel['depends'])
return rg
@classmethod
def to_yaml(cls, dumper, rg):
# This is incomplete.
pred_rels = [{'node': node, 'depends': list(depends), }
for (node, depends) in rg._iter_pred_rels()]
return dumper.represent_mapping(cls.yaml_tag, {
'nodes': pred_rels,
})
def _iter_node_preds(self, node0):
return (node
for node in self._graph.predecessors_iter(node0)
if node not in (self._first, self._last))
def _iter_pred_rels(self):
return ((node, self._iter_node_preds(node))
for node in self.sorted_nodes()
if node not in (self._first, self._last))
def sorted_nodes(self):
return NX.topological_sort(self._graph)
def sorted_transitions(self):
return [n for n in self.sorted_nodes()
if isinstance(n, Transition)]
def iter_uncollected_resources(self):
for nod in self._graph.nodes_iter():
if isinstance(nod, CResource):
if not nod in self.__processed:
yield nod
def iter_unexpanded_resources(self):
for nod in self._graph.nodes_iter():
if isinstance(nod, EResource):
if not nod in self.__processed:
yield nod
def iter_unexpanded_aggregates(self):
for agg in self._graph.nodes_iter():
if isinstance(agg, Aggregate):
if not agg in self.__processed:
yield agg
def iter_unprocessed(self):
for nod in self.iter_uncollected_resources():
yield nod
for nod in self.iter_unexpanded_resources():
yield nod
for nod in self.iter_unexpanded_aggregates():
yield nod
def has_unprocessed(self):
l = list(self.iter_unprocessed())
return bool(l) # Tests for non-emptiness
def require_acyclic(self):
if not NX.is_directed_acyclic_graph(self._graph):
# XXX NX doesn't have a 1-line method for listing those cycles
raise CycleError
def _add_node(self, node, depends=()):
if not isinstance(node, node_types):
raise TypeError(node, node_types)
self._graph.add_node(node)
self._graph.add_edge(self._first, node)
self._graph.add_edge(node, self._last)
for dep in depends:
depn = self._intern(dep)
self._add_node_dep(depn, node)
return node
def add_checkpoint(self, depends=()):
return self._add_node(CheckPointNode(), depends)
def add_transition(self, transition, depends=()):
if not isinstance(transition, Transition):
raise TypeError(transition, Transition)
return self._add_node(transition, depends)
def _add_aggregate(self, aggregate, depends=()):
if not isinstance(aggregate, Aggregate):
raise TypeError(aggregate, Aggregate)
return self._add_node(aggregate, depends)
def add_resource(self, resource, depends=()):
"""
Add a resource.
If an identical resource exists, it is returned.
"""
if not isinstance(resource, (CResource, EResource)):
raise TypeError(resource, (CResource, EResource))
if resource.identity in self.__expandables:
# We have this id already.
# Either it's the exact same resource, or a KeyError is thrown.
resource = self._intern(resource)
# XXX Need to bypass _intern for already expanded.
# XXX When we use add_to_top, we sometimes have to deal
# with a resource that's already been expanded.
# Those are not in the graph anymore. How do we refer to them?
else:
self.__expandables[resource.identity] = resource
# Even if already there, we need to add the depends.
resource = self._add_node(resource, depends)
    # If already there, note that we alias it.
return self.make_ref(resource)
def make_ref(self, res, depends=()):
res = self._intern(res)
if not isinstance(res, (CResource, EResource)):
raise TypeError(res, (CResource, EResource))
depends = list(depends)
depends.append(res)
return self._add_node(ResourceRef(res), depends)
def make_alias_ref(self, ref, depends=()):
ref = self._intern(ref)
if not isinstance(ref, ResourceRef):
raise TypeError(ref, ResourceRef)
depends = list(depends)
depends.append(ref)
return self._add_node(ResourceRef(ref.unref), depends)
def add_to_top(self, res):
"""
Add a resource to the top ResourceGraph.
Use it to put things that you don't necessarily
want to be after the outside dependencies the current graph has.
"""
ref = self.__top.add_resource(res)
return self._add_node(ref)
def _add_node_dep(self, node0, node1):
if not isinstance(node0, node_types):
raise TypeError(node0, node_types)
if not isinstance(node1, node_types):
raise TypeError(node1, node_types)
if not self._graph.has_node(node0):
raise KeyError(node0)
if not self._graph.has_node(node1):
raise KeyError(node1)
if self._graph.has_edge(node0, node1):
return False
if node0 == node1:
# Disallow self-loops to keep acyclic invariant.
# Also they don't make sense.
raise ValueError(node0)
# Invariant check
rev_path = NX.shortest_path(self._graph, node1, node0)
if rev_path is not False:
raise CycleError(rev_path)
self._graph.add_edge(node0, node1)
return True
def _intern(self, thing):
if not isinstance(thing, node_types):
raise TypeError
if thing not in self._graph:
raise KeyError(thing)
return thing
def add_dependency(self, elem0, elem1):
node0 = self._intern(elem0)
node1 = self._intern(elem1)
return self._add_node_dep(node0, node1)
def _is_direct_rconnect(self, r0, r1):
s0 = self._intern(r0)
s1 = self._intern(r1)
# shortest_path is also a test for connectedness.
return bool(NX.shortest_path(self._graph, s0, s1))
def resources_connected(self, r0, r1):
return self._is_direct_rconnect(r0, r1) \
or self._is_direct_rconnect(r1, r0)
def draw(self, fname):
return self.draw_agraph(fname)
def draw_agraph(self, fname):
# XXX pygraphviz has steep dependencies (x11 libs)
# and recommends (texlive) for a headless box.
# We duplicate the graph, otherwise networkx / pygraphviz
# would make a lossy conversion (sometimes refusing to convert), by adding
# nodes as their string representation. Madness, I know.
gr2 = NX.create_empty_copy(self._graph, False)
for node in self._graph.nodes_iter():
gr2.add_node(id(node))
for (n0, n1) in self._graph.edges_iter():
gr2.add_edge(id(n0), id(n1))
names = dict((id(node), { 'label': describe(node)})
for node in self._graph.nodes_iter())
gr2.delete_node(id(self._first))
gr2.delete_node(id(self._last))
g = NX.to_agraph(gr2, {
'graph': {
'nodesep': '0.2',
'rankdir': 'TB',
'ranksep': '0.5',
},
'node': {
'shape': 'box',
},
},
names)
g.write(fname + '.dot')
# Dot is good for DAGs.
g.layout(prog='dot')
g.draw(fname + '.svg')
with open(fname + '.yaml', 'w') as f:
yaml.dump(self, f)
# Fails with the expanded graph, due to instancemethod
#yaml.load(yaml.dump(self))
def draw_matplotlib(self, fname):
# Pyplot is stateful and awkward to use.
import matplotlib.pyplot as P
# Disable hold or it definitely won't work (probably a bug).
P.hold(False)
NX.draw(self._graph)
P.savefig(fname)
def collect_resources(self, r0s, r1):
"""
Replace an iterable of resources with one new resource.
May break the acyclic invariant, caveat emptor.
"""
# The invariant is kept iff the r0s don't have paths linking them.
# For our use case (collectors), we could allow paths provided they are
# internal to r0s. This introduces self-loops that we would then remove.
for r0 in r0s:
r0 = self._intern(r0)
if r0 in self.__processed:
raise RuntimeError
if r1 in self._graph:
raise ValueError(r1)
r1 = self._add_aggregate(r1)
for r0 in r0s:
r0 = self._intern(r0)
self._move_edges(r0, r1)
self.__processed.add(r0)
self.require_acyclic()
def _move_edges(self, n0, n1):
if n0 == n1:
raise RuntimeError
n0 = self._intern(n0)
n1 = self._intern(n1)
# list is used as a temporary
# add after delete in case of same.
for pred in list(self._graph.predecessors_iter(n0)):
self._graph.delete_edge(pred, n0)
self._graph.add_edge(pred, n1)
for succ in list(self._graph.successors_iter(n0)):
self._graph.delete_edge(n0, succ)
self._graph.add_edge(n1, succ)
self._graph.delete_node(n0)
# Can't undo. Invariant will stay broken.
def _split_node(self, res):
res = self._intern(res)
before = self._add_node(BeforeExpandableNode(res))
after = self._add_node(AfterExpandableNode(res))
self._graph.add_edge(before, after)
for pred in list(self._graph.predecessors_iter(res)):
self._graph.delete_edge(pred, res)
self._graph.add_edge(pred, before)
for succ in list(self._graph.successors_iter(res)):
self._graph.delete_edge(res, succ)
self._graph.add_edge(after, succ)
self._graph.delete_node(res)
return before, after
def _receive_by_ref(self, name, ref):
if name in self.__received_refs:
raise RuntimeError(name, ref)
ref = self._add_node(ref)
self.__received_refs[name] = ref
return ref
def _pass_by_ref(self, subgraph, name, ref):
# The origin/value distinction is important
# for aliased arguments (two refs, same val).
ref = self._intern(ref)
if not isinstance(ref, ResourceRef):
raise TypeError(ref, ResourceRef)
subgraph._receive_by_ref(name, ref)
def expand_resource(self, res):
"""
Replace res by a small resource graph.
The resource_graph is inserted in the main graph
between the sentinels that represent the resource.
"""
res = self._intern(res)
# We're processing from the outside in.
if res in self.__processed:
raise RuntimeError
resource_graph = ResourceGraph(self.__top)
if isinstance(res, EResource):
for (name, ref) in res.iter_passed_by_ref():
# ref will be present in both graphs.
self._pass_by_ref(resource_graph, name, ref)
elif isinstance(res, Aggregate):
pass
else:
raise TypeError(res)
res.expand_into(resource_graph)
# We expand from the outside in
if bool(resource_graph.__processed):
raise RuntimeError
# Do not skip sentinels.
for n in resource_graph._graph.nodes_iter():
self._add_node(n)
for (n0, n1) in resource_graph._graph.edges_iter():
self._add_node_dep(n0, n1)
for (id1, res1) in resource_graph.__expandables.iteritems():
# We expand from the outside in.
assert res1 not in self.__processed
if id1 in self.__expandables:
# Pass by reference if you must use the same resource
# in different contexts.
raise RuntimeError('ResourceBase collision.', res, res1)
else:
self.__expandables[id1] = res1
before, after = self._split_node(res)
self.__processed.add(res)
self._move_edges(resource_graph._first, before)
self._move_edges(resource_graph._last, after)
# What may break the invariant:
# Passing a ref to res, and making res depend on ref.
# ref ends up on both sides of ref.before.
self.require_acyclic()
class Realizer(object):
"""
A graph of realizables linked by dependencies.
"""
def __init__(self, expandable):
self.__resources = ResourceGraph()
self.__expandable = expandable
self.__state = 'init'
def require_state(self, state):
"""
Raise an exception if we are not in the required state.
"""
if self.__state != state:
raise RuntimeError(u'Realizer state should be «%s»' % state)
def ensure_frozen(self):
"""
Build the finished dependency graph.
Merge identical realizables, collect what can be.
"""
if self.__state == 'frozen':
return
# Order is important
self.require_state('init')
self.__expandable.expand_into(self.__resources)
#self.__resources.draw('/tmp/freezing')
self._expand()
#self.__resources.draw('/tmp/pre-collect')
self._collect()
self._expand_aggregates()
assert not bool(list(self.__resources.iter_unprocessed()))
self.__state = 'frozen'
#self.__resources.draw('/tmp/frozen')
def _collect(self):
# Collects compatible nodes into merged nodes.
def can_merge(part0, part1):
for n0 in part0:
for n1 in part1:
if self.__resources.resources_connected(n0, n1):
return False
return True
def possibly_merge(partition):
# Merge once if possible. Return true if did merge.
e = dict(enumerate(partition))
n = len(partition)
# Loop over the triangle of unordered pairs
for i in xrange(n):
for j in xrange(i + 1, n):
part0, part1 = e[i], e[j]
if can_merge(part0, part1):
partition.add(part0.union(part1))
partition.remove(part0)
partition.remove(part1)
return True
return False
reg = get_registry()
for collector in reg.collectors:
# Pre-partition is made of parts acceptable for the collector.
pre_partition = collector.partition(
[r for r in self.__resources.iter_uncollected_resources()
if collector.filter(r)])
for part in pre_partition:
# Collector parts are split again, the sub-parts are merged
# when dependencies allow.
# Not a particularly efficient algorithm, just simple.
# Gives one solution among many possibilities.
      partition = set(frozenset((r, ))
                      for r in part)
while possibly_merge(partition):
pass
# Let the collector handle the rest
for part in partition:
if not bool(part):
# Test for emptiness.
# Aggregate even singletons.
continue
merged = collector.collect(part)
self.__resources.collect_resources(part, merged)
assert not bool(list(self.__resources.iter_uncollected_resources()))
def _expand(self):
# Poor man's recursion
while True:
fresh = set(r
for r in self.__resources.iter_unexpanded_resources())
if bool(fresh) == False: # Test for emptiness
break
for r in fresh:
self.__resources.expand_resource(r)
assert not bool(list(self.__resources.iter_unexpanded_resources()))
def _expand_aggregates(self):
for a in list(self.__resources.iter_unexpanded_aggregates()):
self.__resources.expand_resource(a)
assert not bool(list(self.__resources.iter_unexpanded_aggregates()))
# Enforce the rule that aggregates can only expand into transitions.
if self.__resources.has_unprocessed():
raise RuntimeError(list(self.__resources.iter_unprocessed()))
def realize(self):
"""
Realize all realizables and transitions in dependency order.
"""
self.ensure_frozen()
for t in self.__resources.sorted_transitions():
t.realize()
self.__state = 'realized'
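# Sketch of intended use (top_level_resource is a hypothetical expandable):
#
#   realizer = Realizer(top_level_resource)
#   realizer.ensure_frozen()  # expand resources, collect, expand aggregates
#   realizer.realize()        # run all transitions in dependency order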
|
gpl-2.0
| -7,044,774,306,404,940,000 | 29.783877 | 78 | 0.645902 | false |
paour/weblate
|
weblate/trans/models/__init__.py
|
1
|
1926
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import shutil
from django.db.models.signals import post_delete
from django.dispatch import receiver
from weblate.trans.models.project import Project
from weblate.trans.models.subproject import SubProject
from weblate.trans.models.translation import Translation
from weblate.trans.models.unit import Unit
from weblate.trans.models.unitdata import (
Check, Suggestion, Comment, Vote
)
from weblate.trans.models.search import IndexUpdate
from weblate.trans.models.changes import Change
from weblate.trans.models.dictionary import Dictionary
from weblate.trans.models.source import Source
from weblate.trans.models.advertisement import Advertisement
@receiver(post_delete, sender=Project)
@receiver(post_delete, sender=SubProject)
def delete_object_dir(sender, instance, **kwargs):
'''
Handler to delete (sub)project directory on project deletion.
'''
# Do not delete linked subprojects
if hasattr(instance, 'is_repo_link') and instance.is_repo_link:
return
project_path = instance.get_path()
# Remove path if it exists
if os.path.exists(project_path):
shutil.rmtree(project_path)
|
gpl-3.0
| -3,836,975,499,630,569,000 | 33.963636 | 71 | 0.76131 | false |
ErwinRieger/ddprint
|
host/intmath.py
|
1
|
5070
|
# -*- coding: utf-8 -*-
#
#/*
# This file is part of ddprint - a 3D printer firmware.
#
# Copyright 2021 erwin.rieger@ibrieger.de
#
# ddprint is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ddprint is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ddprint. If not, see <http://www.gnu.org/licenses/>.
#*/
import math
import packedvalue
from ddprintconstants import TempScale, PidPrecision, PidFrequency
####################################################################################################
# Convert temperature (int) to firmware temperature (fractions of °C)
def toFWTemp(t):
return t * TempScale
####################################################################################################
# Convert firmware temperature (fractions of °C) to temperature (float)
def fromFWTemp(t):
return float(t) / TempScale
####################################################################################################
# Compute scaling factor and shiftvalue for PID Kp
def pidScaleKp(kP):
#
# pTerm = Kp * e
#
kPMult = kP / TempScale
shiftBits = int( PidPrecision - math.floor(math.log(kPMult, 2)))
kPScale = pow(2, shiftBits)
kPScaled = int(kPMult * kPScale + 0.5)
print "kP: %.4f, kPMult: %f, shiftBits: %d, kPScaled: %d" % (kP, kPMult, shiftBits, kPScaled)
return packedvalue.scaledint_t(kPScaled, shiftBits)
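# Worked example of the fixed-point scaling above (assuming PidPrecision = 8
# and a hypothetical kPMult = kP / TempScale = 0.75):
#   shiftBits = 8 - floor(log2(0.75)) = 8 - (-1) = 9
#   kPScaled  = int(0.75 * 2**9 + 0.5) = 384
# so the firmware can evaluate Kp*e as roughly (384 * e) >> 9.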
####################################################################################################
# Compute scaling factor and shiftvalue for PID Ki
def pidScaleKi(kI):
kiPrecision = 8
#
# iTerm = Ki * pid_dt * eSum
#
kIMult = kI / (TempScale * PidFrequency)
maxEsum16 = int(255 / kIMult) # Limit of eSum
maxScale = (pow(2, 31)-1) / (kIMult * maxEsum16) # Max scaling factor to avoid overflow
maxBits = math.log(maxScale, 2)
shiftBits = int( kiPrecision - math.floor(math.log(kIMult, 2)))
kIScale = pow(2, shiftBits)
kIScaled = int(kIMult * kIScale + 0.5)
print "kI: %.4f, kIMult: %f, shiftBits: %d(of %d), kIScaled: %d, maxEsum16: %d" % (kI, kIMult, shiftBits, maxBits, kIScaled, maxEsum16)
assert(shiftBits <= maxBits)
return (packedvalue.scaledint_t(kIScaled, shiftBits), maxEsum16)
####################################################################################################
# Compute scaling factor and shiftvalue for PID Kd
def pidScaleKd(kD):
#
# dTerm = Kd * (e - eAlt) / pid_dt;
#
kDMult = (kD * PidFrequency) / TempScale
maxDeltaE = 500.0 * TempScale # Switch from 0 to 500°C
maxScale = (pow(2, 31)-1) / (kDMult * maxDeltaE) # Max scaling factor to avoid overflow
maxBits = math.log(maxScale, 2)
shiftBits = int( PidPrecision - math.floor(math.log(kDMult, 2)))
assert(shiftBits <= maxBits)
kDScale = pow(2, shiftBits)
kDScaled = int(kDMult * kDScale + 0.5)
print "kD: %.4f, kDMult: %f, shiftBits: %d(of %d), kDScaled: %d" % (kD, kDMult, shiftBits, maxBits, kDScaled)
return packedvalue.scaledint_t(kDScaled, shiftBits)
####################################################################################################
def pidSwitch(kiOld, kiNew):
swMult = kiOld / kiNew
shiftBits = int(PidPrecision - math.floor(math.log(swMult, 2)))
swScale = pow(2, shiftBits)
swScaled = int(swMult * swScale + 0.5)
print "swMult: %.4f, shiftBits: %d, swScaled: %d" % (swMult, shiftBits, swScaled)
return packedvalue.scaledint_t(swScaled, shiftBits)
####################################################################################################
def fsCalibration(fsc):
fsScale = 32 # Hardcoded in firmware
FSPrecision = 8
fsc32 = fsc * fsScale
shiftBits = int(FSPrecision - math.floor(math.log(fsc32, 2)))
Scale = pow(2, shiftBits)
Scaled = int(fsc32 * Scale + 0.5)
print "fsc32: %.4f, shiftBits: %d, Scaled: %d" % (fsc32, shiftBits, Scaled)
return packedvalue.scaledint16_t(Scaled, shiftBits)
####################################################################################################
def eTimer(eTimer):
fsScale = 1024.0 # Hardcoded in firmware
FSPrecision = 10
et = fsScale / eTimer
shiftBits = int(FSPrecision - math.floor(math.log(et, 2)))
Scale = pow(2, shiftBits)
Scaled = int(et * Scale + 0.5)
# print "eTimer: %d, fscaled: %f, shiftBits: %d, Scaled: %d" % (eTimer, et, shiftBits, Scaled)
return packedvalue.scaledint_t(Scaled, shiftBits)
####################################################################################################
|
gpl-2.0
| -6,284,402,136,257,748,000 | 27.627119 | 139 | 0.546675 | false |
grezesf/Research
|
Reservoirs/Task0_Replication/code/preprocessing/feat_extract.py
|
1
|
2574
|
#!/usr/bin/python
import os
import sys
import datetime
# README
# reads the TIMIT dataset, copies the directory structure
# performs an MFCC extraction on the audio files
# input:
# TIMITPath: path to the base of the dataset directory
# SMILExtractPath: path to SMILExtract executable
# ConfPath: path to the SMILExtract configuration file
#   targetDir: path to the directory in which the MFCCs will be saved
# no output
def main():
# path to corpus
TIMITPath = os.path.abspath(os.path.normpath(sys.argv[1]))
print TIMITPath
if "TIMIT" not in TIMITPath:
print "TIMIT not in path, exiting."
return None
# path to necessary files
SMILExtractPath = os.path.abspath(os.path.normpath(sys.argv[2]))
print SMILExtractPath
    # conf file path
ConfPath = os.path.abspath(os.path.normpath(sys.argv[3]))
# path to save target
targetDir = os.path.abspath(os.path.normpath(sys.argv[4]))
print targetDir
# create it if it doesnt exist
if not os.path.exists(targetDir):
print "Creating target directory"
os.makedirs(targetDir)
# create dataset description
if not os.path.exists(targetDir + "/" + "description.txt"):
f = open(targetDir + "/" + "description.txt", 'w')
f.write("This directory contains the mffc extractions from the audio files of the TIMIT dataset.\n")
f.write("This directory was created by /code/preprocessing/feat_extract.py on " + str(datetime.date.today()) + "\n")
f.close()
# walk the directories
for (path, dirs, files) in os.walk(TIMITPath):
for file in files:
if ".wav" in file:
print "working on: " + file
print "from path : " + path
# create copy of TIMIT directory structure
ind = path.split("/").index("TIMIT")
newDir = "/".join(path.split('/')[ind+1:])
# print targetDir + "/" + newDir
if not os.path.exists(targetDir + "/" + newDir):
print "creating sub-directory"
os.makedirs(targetDir + "/" + newDir )
# perform MFCC extraction on current wav file
# 25ms samples every 10ms (Hamming window)
                # 12 mfcc feats and 1 energy, plus deltas and accel
base = file[:-4]
command = SMILExtractPath + " -C " + ConfPath + " -I " + path + "/" + file + " -O " + targetDir + "/" + newDir + "/" + base + ".mfcc.csv"
# print command
os.system(command)
# test command
# file = "/home/felix/reservoirs/datasets/TIMIT/test/dr1/faks0/sa1.wav"
# command = SMILExtractPath + " -C " + ConfPath + " -I " + file + " -O " + targetDir + "sa1" + ".mfcc.csv"
# os.system(command)
# don't return anything
return None
# Call to main
if __name__=='__main__':
main()
|
mit
| 3,095,255,279,587,556,400 | 31.582278 | 141 | 0.674825 | false |
aitjcize/PyTox
|
setup.py
|
1
|
1268
|
from distutils.core import setup, Extension
from subprocess import Popen, PIPE
def supports_av():
h = Popen("ld $LDFLAGS -ltoxav", shell=True, stderr=PIPE)
out, err = h.communicate()
return 'toxav' not in str(err)
sources = ["pytox/pytox.c", "pytox/core.c", "pytox/util.c"]
libraries = [
"opus",
"sodium",
"toxcore",
# "toxcrypto",
# "toxdht",
"toxdns",
"toxencryptsave",
# "toxfriends",
# "toxgroup",
# "toxmessenger",
# "toxnetcrypto",
# "toxnetwork",
"vpx",
]
cflags = [
"-Wall",
# "-Werror",
"-Wextra",
"-Wno-declaration-after-statement",
"-Wno-missing-field-initializers",
"-Wno-unused-parameter",
"-fno-strict-aliasing",
]
if supports_av():
libraries.append("toxav")
sources.append("pytox/av.c")
cflags.append("-DENABLE_AV")
else:
print("Warning: AV support not found, disabled.")
setup(
name="PyTox",
version="0.0.23",
description='Python binding for Tox the skype replacement',
author='Wei-Ning Huang (AZ)',
author_email='aitjcize@gmail.com',
url='http://github.com/aitjcize/PyTox',
license='GPL',
ext_modules=[
Extension(
"pytox",
sources,
extra_compile_args=cflags,
libraries=libraries
)
]
)
|
gpl-3.0
| -2,171,528,531,317,965,000 | 20.491525 | 63 | 0.608044 | false |
WaveBlocks/WaveBlocks
|
src/WaveBlocks/TimeManager.py
|
1
|
7218
|
"""The WaveBlocks Project
Provides several computation routines for
handling time and timesteps.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
from scipy import floor
class TimeManager:
r"""
This class performs several computation with time, timesteps and so for.
The important quantities here are:
T : the fixed simulation end time
dt : the size of the timestep
N : the overall number of timesteps.
t : an unspecified time in the interval [0, T]
n : an unspecified timestep in the interval [0, N]
    The important relations that hold are:
T = N * dt and in analogy t = n * dt
There are also conversion routines for t and n.
Additionally the class contains some routines for determining
if and when to save data. But we do not touch any data in here.
"""
def __init__(self, parameters):
if parameters is None:
parameters = {}
if parameters.has_key("T") and parameters.has_key("dt"):
self.set_T(parameters["T"])
self.set_dt(parameters["dt"])
else:
raise KeyError("Parameters provide to little data to construct a 'TimeManager'.")
if parameters.has_key("nsteps"):
self.set_nsteps(parameters["nsteps"])
else:
self.set_nsteps(None)
#: Interval for saving
if parameters.has_key("write_nth"):
self.set_interval(parameters["write_nth"])
else:
self.set_interval(1)
#: List of timesteps when we have to save
if parameters.has_key("save_at"):
self.add_to_savelist(parameters["save_at"])
else:
self.savetimes = []
def __str__(self):
s = "TimeManager configured with:\n"
s += " Final time T: " +str(self.T) +"\n"
s += " Timestep size dt: " +str(self.dt) +"\n"
s += " Interval : " +str(self.interval) +"\n"
s += " List : " +str(self.savetimes) +"\n"
return s
def set_T(self, T):
r"""
Set the simulation endtime T.
:param T: The simulation end time.
"""
self.T = T
def set_dt(self, dt):
r"""
Set the simulation timestep size dt.
:param dt: The simulation timestep size.
"""
self.dt = dt
def set_nsteps(self, nsteps):
r"""
Set the number of timesteps the simulation runs.
:param nsteps: The number timesteps we do.
"""
self.nsteps = nsteps
def set_interval(self, interval):
r"""
        Set the interval for saving results.
:param interval: The interval at which we save simulation results.
.. note:: A value of 0 means we never save data at any regular interval.
"""
self.interval = interval
def get_nsteps(self):
if self.nsteps is None:
            self.nsteps = self.compute_number_timesteps()
return self.nsteps
def compute_number_timesteps(self):
r"""
Computes the number of time steps we will perform.
"""
# This is independent from if, when and what data we save
if self.nsteps is not None:
return self.nsteps
else:
return int( floor(self.T / self.dt) )
def compute_timestep(self, t):
r"""
Compute the timestep n from a time t such that t = n * dt holds.
:param t: The time t of which we want to find the timestep number.
.. note:: The user has to ensure that time is an integral multiple of dt.
"""
stepo = t / self.dt
step = round(stepo)
if abs(stepo - step) > 10**-10:
print("Warning: questionable rounding for timestep computation!")
return int(step)
def compute_time(self, n):
r"""
Compute the time t from a timestep n such that t = n * dt holds.
:param n: The timestep n of which we want to find the corresponding time.
"""
return 1.0 * n * self.dt
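    # Example of the t = n * dt relations (hypothetical configuration):
    #   tm = TimeManager({"T": 1.0, "dt": 0.01})
    #   tm.compute_number_timesteps()  # -> 100
    #   tm.compute_timestep(0.25)      # -> 25
    #   tm.compute_time(25)            # -> 0.25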
def add_to_savelist(self, alist):
r"""
Add a list of times and/or timesteps to the list of times which determine when to save data.
:param alist: A list with integers (interpreted as timesteps) and/or floats (interpreted as times)
        .. note:: The times and timesteps can be mixed and needn't be given in monotone order.
"""
timesteps = []
# If the list is empty (global default), shortcut
if len(alist) == 0:
return
# Integers are interpreted as timesteps, floats are interpreted as times (and converted to timesteps)
for item in alist:
if type(item) == int:
timesteps.append(item)
elif type(item) == float:
timesteps.append( self.compute_timestep(item) )
# Validate timesteps and check if n in [0,...,N]
tmp = len(timesteps)
nsteps = self.compute_number_timesteps()
timesteps = [ i for i in timesteps if i > 0 and i <= nsteps ]
if tmp != len(timesteps):
print("Warning: Dropped some save timesteps due to invalidity!")
# Assure unique elements, just silently remove duplicates
oldlist = set(self.savetimes)
newlist = set(timesteps)
times = list(oldlist.union(newlist))
# Sort in ascending order
times.sort()
# Write back
self.savetimes = times
def compute_number_saves(self):
r"""
Compute the number of saves we will perform during the simulation. This
can be used to determine how much space to allocate in the output files.
"""
# We do not save at regular intervals
if self.interval == 0:
# Determine the number of saves resulting from saving at a regular interval is zero.
n_si = 0
# Determine the number of saves resulting from the savelist
n_sl = len(self.savetimes)
# We do save at regular intervals
else:
# Determine the number of saves resulting from saving at a regular interval
n_ts = self.compute_number_timesteps()
n_si = n_ts // self.interval
# Determine the number of saves resulting from the savelist and
# exclude the timesteps which coincide with the regular intervals.
n_sl = len( [ i for i in self.savetimes if i % self.interval != 0 ] )
# Total number of saves we will perform is given by the sum plus the initial value
number_saves = 1 + n_si + n_sl
return number_saves
def must_save(self, n):
r"""
Determine if we have to save right now.
:param n: The current timestep in question.
"""
if self.interval == 1:
# Save every timestep
return True
elif self.interval != 0 and n % self.interval == 0:
            # Save every k-th timestep specified by the interval
return True
elif n in self.savetimes:
# Save if the n is in the list of timesteps
return True
return False
|
bsd-3-clause
| 1,664,457,358,564,621,800 | 30.246753 | 109 | 0.584927 | false |
simonemainardi/LSHash
|
tests/test_lsh.py
|
1
|
4542
|
import random
import string
from unittest import TestCase
from redis import StrictRedis
from pprint import pprint
import sys
import os
# add the LSHash package to the current python path
sys.path.insert(0, os.path.abspath('../'))
# now we can use our lshash package and not the standard one
from lshash import LSHash
class TestLSHash(TestCase):
num_elements = 100
def setUp(self):
self.els = []
self.el_names = []
for i in range(self.num_elements):
el = [random.randint(0, 100) for _ in range(8)]
elname = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
self.els.append(tuple(el))
self.el_names.append(elname)
def test_lshash(self):
lsh = LSHash(6, 8, 1)
for i in range(self.num_elements):
lsh.index(list(self.els[i]))
lsh.index(list(self.els[i])) # multiple insertions
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
assert itms.count(itm) == 1
for el in itm:
assert el in self.els
for el in self.els:
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
# res is a tuple containing the vector and the distance
el_v, el_dist = res
assert el_v in self.els
assert el_dist == 0
del lsh
def test_lshash_extra_val(self):
lsh = LSHash(6, 8, 1)
for i in range(self.num_elements):
lsh.index(list(self.els[i]), self.el_names[i])
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
for el in itm:
assert el[0] in self.els
assert el[1] in self.el_names
for el in self.els:
# res is a list, so we need to select the first entry only
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
            # vector and name are in the first element of the tuple res[0]
el_v, el_name = res[0]
# the distance is in the second element of the tuple
el_dist = res[1]
assert el_v in self.els
assert el_name in self.el_names
assert el_dist == 0
del lsh
def test_lshash_redis(self):
"""
Test external lshash module
"""
config = {"redis": {"host": 'localhost', "port": 6379, "db": 15}}
sr = StrictRedis(**config['redis'])
sr.flushdb()
lsh = LSHash(6, 8, 1, config)
for i in range(self.num_elements):
lsh.index(list(self.els[i]))
lsh.index(list(self.els[i])) # multiple insertions should be prevented by the library
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
for el in itm:
assert itms.count(itm) == 1 # have multiple insertions been prevented?
assert el in self.els
for el in self.els:
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
el_v, el_dist = res
assert el_v in self.els
assert el_dist == 0
del lsh
sr.flushdb()
def test_lshash_redis_extra_val(self):
"""
Test external lshash module
"""
config = {"redis": {"host": 'localhost', "port": 6379, "db": 15}}
sr = StrictRedis(**config['redis'])
sr.flushdb()
lsh = LSHash(6, 8, 1, config)
for i in range(self.num_elements):
lsh.index(list(self.els[i]), self.el_names[i])
lsh.index(list(self.els[i]), self.el_names[i]) # multiple insertions
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
assert itms.count(itm) == 1
for el in itm:
assert el[0] in self.els
assert el[1] in self.el_names
for el in self.els:
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
            # vector and name are in the first element of the tuple res[0]
el_v, el_name = res[0]
# the distance is in the second element of the tuple
el_dist = res[1]
assert el_v in self.els
assert el_name in self.el_names
assert el_dist == 0
del lsh
sr.flushdb()
|
mit
| 2,219,903,868,623,740,700 | 34.76378 | 102 | 0.547776 | false |
stevshil/PiMediaPlayer
|
PiGUI/example.py
|
1
|
1068
|
#move the GUI-window from its initial position to see how it stays there only with the new version
#note also the flickering happens only in the old version
# comment out and in as necessary
################################# easygui with callback
import easygui_callback
def controller(user_input):
print "controller:",user_input, type(user_input)
if user_input == "forward":
pass
elif user_input == "backward":
pass
elif user_input == "off":
return "terminate" #this terminates the callback loop
choices = ["on", "off", "forward", "backward", "right", "left"]
easygui_callback.buttonbox("robot cobtroller","repeatative input", choices, callback=controller)
################################# OLD easygui
# import easygui
# choices = ["on", "off", "forward", "backward", "right", "left"]
# input= ''
# while input != "None": #None is when user press ESC
# input = easygui.buttonbox("robot controller","repetitive input", choices)
# if input == "forward":
# pass
# elif input == "backward":
# pass
# elif input == "off":
# break
|
apache-2.0
| -4,630,286,491,995,790,000 | 27.864865 | 98 | 0.645131 | false |
JarbasAI/JarbasAI
|
mycroft/stt/__init__.py
|
1
|
4937
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod
from speech_recognition import Recognizer, UnknownValueError, RequestError
from mycroft.api import STTApi
from mycroft.configuration import ConfigurationManager
import re
from requests import post
from mycroft.util.log import LOG
__author__ = "jdorleans"
class STT(object):
__metaclass__ = ABCMeta
def __init__(self):
config_core = ConfigurationManager.get()
self.lang = str(self.init_language(config_core))
config_stt = config_core.get("stt", {})
self.config = config_stt.get(config_stt.get("module"), {})
self.credential = self.config.get("credential", {})
self.recognizer = Recognizer()
@staticmethod
def init_language(config_core):
lang = config_core.get("lang", "en-US")
langs = lang.split("-")
if len(langs) == 2:
return langs[0].lower() + "-" + langs[1].upper()
return lang
@abstractmethod
def execute(self, audio, language=None):
pass
class TokenSTT(STT):
__metaclass__ = ABCMeta
def __init__(self):
super(TokenSTT, self).__init__()
self.token = str(self.credential.get("token"))
class BasicSTT(STT):
__metaclass__ = ABCMeta
def __init__(self):
super(BasicSTT, self).__init__()
self.username = str(self.credential.get("username"))
self.password = str(self.credential.get("password"))
class GoogleSTT(TokenSTT):
def __init__(self):
super(GoogleSTT, self).__init__()
def execute(self, audio, language=None):
self.lang = language or self.lang
return self.recognizer.recognize_google(audio, self.token, self.lang)
class WITSTT(TokenSTT):
def __init__(self):
super(WITSTT, self).__init__()
def execute(self, audio, language=None):
LOG.warning(
"WITSTT language should be configured at wit.ai settings.")
return self.recognizer.recognize_wit(audio, self.token)
class IBMSTT(BasicSTT):
def __init__(self):
super(IBMSTT, self).__init__()
def execute(self, audio, language=None):
self.lang = language or self.lang
if "pt-" in self.lang:
self.lang = "pt-BR"
return self.recognizer.recognize_ibm(audio, self.username,
self.password, self.lang)
class MycroftSTT(STT):
def __init__(self):
super(MycroftSTT, self).__init__()
self.api = STTApi()
def execute(self, audio, language=None):
self.lang = language or self.lang
try:
return self.api.stt(audio.get_flac_data(convert_rate=16000),
self.lang, 1)[0]
except:
return self.api.stt(audio.get_flac_data(), self.lang, 1)[0]
class KaldiSTT(STT):
def __init__(self):
super(KaldiSTT, self).__init__()
def execute(self, audio, language=None):
language = language or self.lang
response = post(self.config.get("uri"), data=audio.get_wav_data())
return self.get_response(response)
def get_response(self, response):
try:
hypotheses = response.json()["hypotheses"]
return re.sub(r'\s*\[noise\]\s*', '', hypotheses[0]["utterance"])
except:
return None
class PocketSphinxSTT(STT):
def __init__(self):
super(PocketSphinxSTT, self).__init__()
def execute(self, audio, language=None):
text = None
try:
text = self.recognizer.recognize_sphinx(audio)
except UnknownValueError:
LOG.error("Sphinx could not understand audio")
except RequestError as e:
LOG.error("Sphinx error; {0}".format(e))
return text
class STTFactory(object):
CLASSES = {
"mycroft": MycroftSTT,
"google": GoogleSTT,
"wit": WITSTT,
"ibm": IBMSTT,
"kaldi": KaldiSTT,
"pocketsphinx": PocketSphinxSTT
}
@staticmethod
def create():
config = ConfigurationManager.get().get("stt", {})
module = config.get("module", "mycroft")
LOG.info("STT engine: " + module)
clazz = STTFactory.CLASSES.get(module)
return clazz()
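# Typical use (which engine is chosen depends on the "stt" config section):
#
#   stt_engine = STTFactory.create()
#   text = stt_engine.execute(audio)  # audio: a speech_recognition.AudioData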
|
gpl-3.0
| 4,356,992,615,965,597,000 | 28.740964 | 77 | 0.616974 | false |
gkc1000/pyscf
|
pyscf/fci/direct_uhf.py
|
1
|
14649
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Different FCI solvers are implemented to support different types of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
import sys
import ctypes
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
libfci = lib.load_library('libfci')
# When the spin-orbitals do not have degeneracy in the spatial part,
# there is only one version of FCI, which is close to the _spin1 solver.
# The inputs: h1e has two parts (h1e_a, h1e_b),
# h2e has three parts (h2e_aa, h2e_ab, h2e_bb)
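# For reference, with norb orbitals the expected argument layout is roughly:
#     h1e = (h1e_a, h1e_b)              # each an (norb, norb) array
#     eri = (g2e_aa, g2e_ab, g2e_bb)    # each restorable via ao2mo.restore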
def contract_1e(f1e, fcivec, norb, nelec, link_index=None):
fcivec = numpy.asarray(fcivec, order='C')
link_indexa, link_indexb = direct_spin1._unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
assert(fcivec.size == na*nb)
ci1 = numpy.zeros_like(fcivec)
f1e_tril = lib.pack_tril(f1e[0])
libfci.FCIcontract_a_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
link_indexa.ctypes.data_as(ctypes.c_void_p),
link_indexb.ctypes.data_as(ctypes.c_void_p))
f1e_tril = lib.pack_tril(f1e[1])
libfci.FCIcontract_b_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
link_indexa.ctypes.data_as(ctypes.c_void_p),
link_indexb.ctypes.data_as(ctypes.c_void_p))
return ci1
def contract_2e(eri, fcivec, norb, nelec, link_index=None):
fcivec = numpy.asarray(fcivec, order='C')
g2e_aa = ao2mo.restore(4, eri[0], norb)
g2e_ab = ao2mo.restore(4, eri[1], norb)
g2e_bb = ao2mo.restore(4, eri[2], norb)
link_indexa, link_indexb = direct_spin1._unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
assert(fcivec.size == na*nb)
ci1 = numpy.empty_like(fcivec)
libfci.FCIcontract_uhf2e(g2e_aa.ctypes.data_as(ctypes.c_void_p),
g2e_ab.ctypes.data_as(ctypes.c_void_p),
g2e_bb.ctypes.data_as(ctypes.c_void_p),
fcivec.ctypes.data_as(ctypes.c_void_p),
ci1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
link_indexa.ctypes.data_as(ctypes.c_void_p),
link_indexb.ctypes.data_as(ctypes.c_void_p))
return ci1
def contract_2e_hubbard(u, fcivec, norb, nelec, opt=None):
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
u_aa, u_ab, u_bb = u
strsa = numpy.asarray(cistring.gen_strings4orblist(range(norb), neleca))
strsb = numpy.asarray(cistring.gen_strings4orblist(range(norb), nelecb))
na = cistring.num_strings(norb, neleca)
nb = cistring.num_strings(norb, nelecb)
fcivec = fcivec.reshape(na,nb)
fcinew = numpy.zeros_like(fcivec)
if u_aa != 0: # u * n_alpha^+ n_alpha
for i in range(norb):
maska = (strsa & (1<<i)) > 0
fcinew[maska] += u_aa * fcivec[maska]
if u_ab != 0: # u * (n_alpha^+ n_beta + n_beta^+ n_alpha)
for i in range(norb):
maska = (strsa & (1<<i)) > 0
maskb = (strsb & (1<<i)) > 0
fcinew[maska[:,None]&maskb] += 2*u_ab * fcivec[maska[:,None]&maskb]
if u_bb != 0: # u * n_beta^+ n_beta
for i in range(norb):
maskb = (strsb & (1<<i)) > 0
fcinew[:,maskb] += u_bb * fcivec[:,maskb]
return fcinew
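# Illustrative call: for a 4-site Hubbard model with U = 4 acting between
# opposite spins on the same site (the usual convention), and (2, 2) electrons:
#     hc = contract_2e_hubbard((0., 4.0, 0.), fcivec, 4, (2, 2))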
def make_hdiag(h1e, eri, norb, nelec):
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
h1e_a = numpy.ascontiguousarray(h1e[0])
h1e_b = numpy.ascontiguousarray(h1e[1])
g2e_aa = ao2mo.restore(1, eri[0], norb)
g2e_ab = ao2mo.restore(1, eri[1], norb)
g2e_bb = ao2mo.restore(1, eri[2], norb)
occslsta = occslstb = cistring._gen_occslst(range(norb), neleca)
if neleca != nelecb:
occslstb = cistring._gen_occslst(range(norb), nelecb)
na = len(occslsta)
nb = len(occslstb)
hdiag = numpy.empty(na*nb)
jdiag_aa = numpy.asarray(numpy.einsum('iijj->ij',g2e_aa), order='C')
jdiag_ab = numpy.asarray(numpy.einsum('iijj->ij',g2e_ab), order='C')
jdiag_bb = numpy.asarray(numpy.einsum('iijj->ij',g2e_bb), order='C')
kdiag_aa = numpy.asarray(numpy.einsum('ijji->ij',g2e_aa), order='C')
kdiag_bb = numpy.asarray(numpy.einsum('ijji->ij',g2e_bb), order='C')
libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
h1e_a.ctypes.data_as(ctypes.c_void_p),
h1e_b.ctypes.data_as(ctypes.c_void_p),
jdiag_aa.ctypes.data_as(ctypes.c_void_p),
jdiag_ab.ctypes.data_as(ctypes.c_void_p),
jdiag_bb.ctypes.data_as(ctypes.c_void_p),
kdiag_aa.ctypes.data_as(ctypes.c_void_p),
kdiag_bb.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb),
ctypes.c_int(na), ctypes.c_int(nb),
ctypes.c_int(neleca), ctypes.c_int(nelecb),
occslsta.ctypes.data_as(ctypes.c_void_p),
occslstb.ctypes.data_as(ctypes.c_void_p))
return numpy.asarray(hdiag)
def absorb_h1e(h1e, eri, norb, nelec, fac=1):
if not isinstance(nelec, (int, numpy.number)):
nelec = sum(nelec)
h1e_a, h1e_b = h1e
h2e_aa = ao2mo.restore(1, eri[0], norb).copy()
h2e_ab = ao2mo.restore(1, eri[1], norb).copy()
h2e_bb = ao2mo.restore(1, eri[2], norb).copy()
f1e_a = h1e_a - numpy.einsum('jiik->jk', h2e_aa) * .5
f1e_b = h1e_b - numpy.einsum('jiik->jk', h2e_bb) * .5
f1e_a *= 1./(nelec+1e-100)
f1e_b *= 1./(nelec+1e-100)
for k in range(norb):
h2e_aa[:,:,k,k] += f1e_a
h2e_aa[k,k,:,:] += f1e_a
h2e_ab[:,:,k,k] += f1e_a
h2e_ab[k,k,:,:] += f1e_b
h2e_bb[:,:,k,k] += f1e_b
h2e_bb[k,k,:,:] += f1e_b
return (ao2mo.restore(4, h2e_aa, norb) * fac,
ao2mo.restore(4, h2e_ab, norb) * fac,
ao2mo.restore(4, h2e_bb, norb) * fac)
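# Note: energy() below calls absorb_h1e(h1e, eri, norb, nelec, .5) so that a
# single contract_2e with the dressed integrals applies the full Hamiltonian.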
def pspace(h1e, eri, norb, nelec, hdiag=None, np=400):
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
h1e_a = numpy.ascontiguousarray(h1e[0])
h1e_b = numpy.ascontiguousarray(h1e[1])
g2e_aa = ao2mo.restore(1, eri[0], norb)
g2e_ab = ao2mo.restore(1, eri[1], norb)
g2e_bb = ao2mo.restore(1, eri[2], norb)
link_indexa = cistring.gen_linkstr_index_trilidx(range(norb), neleca)
link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)
nb = link_indexb.shape[0]
if hdiag is None:
hdiag = make_hdiag(h1e, eri, norb, nelec)
if hdiag.size < np:
addr = numpy.arange(hdiag.size)
else:
try:
addr = numpy.argpartition(hdiag, np-1)[:np]
except AttributeError:
addr = numpy.argsort(hdiag)[:np]
addra = addr // nb
addrb = addr % nb
stra = cistring.addrs2str(norb, neleca, addra)
strb = cistring.addrs2str(norb, nelecb, addrb)
np = len(addr)
h0 = numpy.zeros((np,np))
libfci.FCIpspace_h0tril_uhf(h0.ctypes.data_as(ctypes.c_void_p),
h1e_a.ctypes.data_as(ctypes.c_void_p),
h1e_b.ctypes.data_as(ctypes.c_void_p),
g2e_aa.ctypes.data_as(ctypes.c_void_p),
g2e_ab.ctypes.data_as(ctypes.c_void_p),
g2e_bb.ctypes.data_as(ctypes.c_void_p),
stra.ctypes.data_as(ctypes.c_void_p),
strb.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(norb), ctypes.c_int(np))
for i in range(np):
h0[i,i] = hdiag[addr[i]]
h0 = lib.hermi_triu(h0)
return addr, h0
# Be careful with a single-determinant initial guess: it may lead to the
# eigenvalue of the first Davidson iteration being equal to hdiag
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
ecore=0, **kwargs):
return direct_spin1._kfactory(FCISolver, h1e, eri, norb, nelec, ci0, level_shift,
tol, lindep, max_cycle, max_space, nroots,
davidson_only, pspace_size, ecore=ecore, **kwargs)
def energy(h1e, eri, fcivec, norb, nelec, link_index=None):
h2e = absorb_h1e(h1e, eri, norb, nelec, .5)
ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index)
return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))
# dm_pq = <|p^+ q|>
def make_rdm1s(fcivec, norb, nelec, link_index=None):
return direct_spin1.make_rdm1s(fcivec, norb, nelec, link_index)
# spatial part of DM, dm_pq = <|p^+ q|>
def make_rdm1(fcivec, norb, nelec, link_index=None):
    raise ValueError('Spin-traced density matrices are not defined for UHF-FCI; use make_rdm1s instead.')
def make_rdm12s(fcivec, norb, nelec, link_index=None, reorder=True):
return direct_spin1.make_rdm12s(fcivec, norb, nelec, link_index, reorder)
def trans_rdm1s(cibra, ciket, norb, nelec, link_index=None):
return direct_spin1.trans_rdm1s(cibra, ciket, norb, nelec, link_index)
# spatial part of DM
def trans_rdm1(cibra, ciket, norb, nelec, link_index=None):
    raise ValueError('Spin-traced density matrices are not defined for UHF-FCI; use trans_rdm1s instead.')
def trans_rdm12s(cibra, ciket, norb, nelec, link_index=None, reorder=True):
return direct_spin1.trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)
###############################################################
# uhf-integral direct-CI driver
###############################################################
class FCISolver(direct_spin1.FCISolver):
def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
return absorb_h1e(h1e, eri, norb, nelec, fac)
def make_hdiag(self, h1e, eri, norb, nelec):
return make_hdiag(h1e, eri, norb, nelec)
def pspace(self, h1e, eri, norb, nelec, hdiag, np=400):
return pspace(h1e, eri, norb, nelec, hdiag, np)
def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)
def contract_2e(self, eri, fcivec, norb, nelec, link_index=None, **kwargs):
return contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)
def spin_square(self, fcivec, norb, nelec):
from pyscf.fci import spin_op
return spin_op.spin_square(fcivec, norb, nelec)
def make_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
return trans_rdm1(cibra, ciket, norb, nelec, link_index)
def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
return trans_rdm1(cibra, ciket, norb, nelec, link_index)
FCI = FCISolver
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
mol = gto.Mole()
mol.verbose = 0
    mol.output = None  # "out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
#['H', ( 0.,-0.5 ,-1. )],
#['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g'}
mol.charge = 1
mol.spin = 1
mol.build()
m = scf.UHF(mol)
ehf = m.scf()
cis = FCISolver(mol)
norb = m.mo_energy[0].size
nea = (mol.nelectron+1) // 2
neb = (mol.nelectron-1) // 2
nelec = (nea, neb)
mo_a = m.mo_coeff[0]
mo_b = m.mo_coeff[1]
h1e_a = reduce(numpy.dot, (mo_a.T, m.get_hcore(), mo_a))
h1e_b = reduce(numpy.dot, (mo_b.T, m.get_hcore(), mo_b))
g2e_aa = ao2mo.incore.general(m._eri, (mo_a,)*4, compact=False)
g2e_aa = g2e_aa.reshape(norb,norb,norb,norb)
g2e_ab = ao2mo.incore.general(m._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)
g2e_ab = g2e_ab.reshape(norb,norb,norb,norb)
g2e_bb = ao2mo.incore.general(m._eri, (mo_b,)*4, compact=False)
g2e_bb = g2e_bb.reshape(norb,norb,norb,norb)
h1e = (h1e_a, h1e_b)
eri = (g2e_aa, g2e_ab, g2e_bb)
na = cistring.num_strings(norb, nea)
nb = cistring.num_strings(norb, neb)
numpy.random.seed(15)
fcivec = numpy.random.random((na,nb))
e = kernel(h1e, eri, norb, nelec)[0]
print(e, e - -8.65159903476)
|
apache-2.0
| 5,878,243,789,563,975,000 | 41.46087 | 89 | 0.564271 | false |
shaz13/oppia
|
core/controllers/learner_playlist.py
|
1
|
2096
|
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the learner playlist."""
from constants import constants
from core.controllers import base
from core.domain import acl_decorators
from core.domain import learner_progress_services
from core.domain import learner_playlist_services
class LearnerPlaylistHandler(base.BaseHandler):
"""Handles operations related to the learner playlist."""
@acl_decorators.can_access_learner_dashboard
def post(self, activity_type, activity_id):
position_to_be_inserted_in = self.payload.get('index')
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
learner_progress_services.add_exp_to_learner_playlist(
self.user_id, activity_id, position_to_be_inserted_in)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
learner_progress_services.add_collection_to_learner_playlist(
self.user_id, activity_id, position_to_be_inserted_in)
self.render_json(self.values)
@acl_decorators.can_access_learner_dashboard
def delete(self, activity_type, activity_id):
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
learner_playlist_services.remove_exploration_from_learner_playlist(
self.user_id, activity_id)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
learner_playlist_services.remove_collection_from_learner_playlist(
self.user_id, activity_id)
self.render_json(self.values)
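# Illustrative client requests (the URL prefix is hypothetical; actual routing
# is defined elsewhere in Oppia):
#     POST   /learnerplaylistactivityhandler/exploration/<exploration_id>
#            with payload {'index': 0}
#     DELETE /learnerplaylistactivityhandler/collection/<collection_id>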
|
apache-2.0
| 7,436,396,985,364,026,000 | 42.666667 | 79 | 0.722328 | false |
canast02/csci544_fall2016_project
|
yelp-sentiment/experiments/sentiment_stochasticGradientDescent.py
|
1
|
2641
|
import numpy as np
from nltk import TweetTokenizer, accuracy
from nltk.stem.snowball import EnglishStemmer
from sklearn import svm, linear_model
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from sentiment_util import load_datasets
def main():
# x, y = load_dataset("datasets/sentiment_uci/yelp_labelled.txt")
x, y = load_datasets(["../datasets/sentiment_uci/yelp_labelled.txt"])
stopwords = set()
with open('../stopwords.txt', 'r') as f:
for w in f:
            stopwords.add(w.strip())
tok = TweetTokenizer()
stemmer = EnglishStemmer()
vectorizer = TfidfVectorizer(sublinear_tf=True, use_idf=True, binary=True, preprocessor=stemmer.stem,
tokenizer=tok.tokenize, ngram_range=(1, 2))
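    # Note: sublinear_tf=True replaces raw term frequency tf with 1 + log(tf),
    # and ngram_range=(1, 2) extracts stemmed unigrams and bigrams.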
accu_p = np.zeros(shape=(2,))
accu_r = np.zeros(shape=(2,))
accu_f = np.zeros(shape=(2,))
accu_a = 0.0
folds = 10
for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True):
train_x, train_y = x[train_idx], y[train_idx]
test_x, test_y = x[test_idx], y[test_idx]
cls = linear_model.SGDClassifier(loss='hinge', penalty='l2', n_iter=100)
# train
train_x = vectorizer.fit_transform(train_x).toarray()
cls.fit(train_x, train_y)
# test
test_x = vectorizer.transform(test_x).toarray()
pred_y = cls.predict(test_x)
# evaluate
p, r, f, _ = precision_recall_fscore_support(test_y, pred_y)
a = accuracy_score(test_y, pred_y)
accu_p += p
accu_r += r
accu_f += f
accu_a += a
print("Evaluating classifier:")
print("\tAccuracy: {}".format(a))
print("\tPrecision[0]: {}".format(p[0]))
print("\tPrecision[1]: {}".format(p[1]))
print("\tRecall[0]: {}".format(r[0]))
print("\tRecall[1]: {}".format(r[1]))
print("\tF1-score[0]: {}".format(f[0]))
print("\tF1-score[1]: {}".format(f[1]))
print("Average evaluation")
print("\tAccuracy: {}".format(accu_a / folds))
print("\tPrecision[0]: {}".format(accu_p[0] / folds))
print("\tPrecision[1]: {}".format(accu_p[1] / folds))
print("\tRecall[0]: {}".format(accu_r[0] / folds))
print("\tRecall[1]: {}".format(accu_r[1] / folds))
print("\tF1-score[0]: {}".format(accu_f[0] / folds))
print("\tF1-score[1]: {}".format(accu_f[1] / folds))
if __name__ == '__main__':
main()
|
gpl-3.0
| -4,050,931,623,567,482,000 | 33.75 | 105 | 0.603559 | false |
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/stopVirtualMachine.py
|
1
|
24289
|
"""Stops a virtual machine."""
from baseCmd import *
from baseResponse import *
class stopVirtualMachineCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""The ID of the virtual machine"""
"""Required"""
self.id = None
self.typeInfo['id'] = 'uuid'
"""Force stop the VM (vm is marked as Stopped even when command fails to be send to the backend). The caller knows the VM is stopped."""
self.forced = None
self.typeInfo['forced'] = 'boolean'
self.required = ["id", ]
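# Minimal usage sketch (assumes a Marvin apiclient from a test case; vm_id is
# a placeholder):
#     cmd = stopVirtualMachineCmd()
#     cmd.id = vm_id
#     cmd.forced = True
#     response = apiclient.stopVirtualMachine(cmd)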
class stopVirtualMachineResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the ID of the virtual machine"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account associated with the virtual machine"""
self.account = None
self.typeInfo['account'] = 'string'
"""the number of cpu this virtual machine is running with"""
self.cpunumber = None
self.typeInfo['cpunumber'] = 'integer'
"""the speed of each cpu"""
self.cpuspeed = None
self.typeInfo['cpuspeed'] = 'integer'
"""the amount of the vm's CPU currently used"""
self.cpuused = None
self.typeInfo['cpuused'] = 'string'
"""the date when this virtual machine was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""Vm details in key/value pairs."""
self.details = None
self.typeInfo['details'] = 'map'
"""the read (io) of disk on the vm"""
self.diskioread = None
self.typeInfo['diskioread'] = 'long'
"""the write (io) of disk on the vm"""
self.diskiowrite = None
self.typeInfo['diskiowrite'] = 'long'
"""the read (bytes) of disk on the vm"""
self.diskkbsread = None
self.typeInfo['diskkbsread'] = 'long'
"""the write (bytes) of disk on the vm"""
self.diskkbswrite = None
self.typeInfo['diskkbswrite'] = 'long'
"""the ID of the disk offering of the virtual machine"""
self.diskofferingid = None
self.typeInfo['diskofferingid'] = 'string'
"""the name of the disk offering of the virtual machine"""
self.diskofferingname = None
self.typeInfo['diskofferingname'] = 'string'
"""user generated name. The name of the virtual machine is returned if no displayname exists."""
self.displayname = None
self.typeInfo['displayname'] = 'string'
"""an optional field whether to the display the vm to the end user or not."""
self.displayvm = None
self.typeInfo['displayvm'] = 'boolean'
"""the name of the domain in which the virtual machine exists"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain in which the virtual machine exists"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the virtual network for the service offering"""
self.forvirtualnetwork = None
self.typeInfo['forvirtualnetwork'] = 'boolean'
"""the group name of the virtual machine"""
self.group = None
self.typeInfo['group'] = 'string'
"""the group ID of the virtual machine"""
self.groupid = None
self.typeInfo['groupid'] = 'string'
"""Os type ID of the virtual machine"""
self.guestosid = None
self.typeInfo['guestosid'] = 'string'
"""true if high-availability is enabled, false otherwise"""
self.haenable = None
self.typeInfo['haenable'] = 'boolean'
"""the ID of the host for the virtual machine"""
self.hostid = None
self.typeInfo['hostid'] = 'string'
"""the name of the host for the virtual machine"""
self.hostname = None
self.typeInfo['hostname'] = 'string'
"""the hypervisor on which the template runs"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""instance name of the user vm; this parameter is returned to the ROOT admin only"""
self.instancename = None
self.typeInfo['instancename'] = 'string'
"""true if vm contains XS tools inorder to support dynamic scaling of VM cpu/memory."""
self.isdynamicallyscalable = None
self.typeInfo['isdynamicallyscalable'] = 'boolean'
"""an alternate display text of the ISO attached to the virtual machine"""
self.isodisplaytext = None
self.typeInfo['isodisplaytext'] = 'string'
"""the ID of the ISO attached to the virtual machine"""
self.isoid = None
self.typeInfo['isoid'] = 'string'
"""the name of the ISO attached to the virtual machine"""
self.isoname = None
self.typeInfo['isoname'] = 'string'
"""ssh key-pair"""
self.keypair = None
self.typeInfo['keypair'] = 'string'
"""the memory allocated for the virtual machine"""
self.memory = None
self.typeInfo['memory'] = 'integer'
"""the name of the virtual machine"""
self.name = None
self.typeInfo['name'] = 'string'
"""the incoming network traffic on the vm"""
self.networkkbsread = None
self.typeInfo['networkkbsread'] = 'long'
"""the outgoing network traffic on the host"""
self.networkkbswrite = None
self.typeInfo['networkkbswrite'] = 'long'
"""OS type id of the vm"""
self.ostypeid = None
self.typeInfo['ostypeid'] = 'long'
"""the password (if exists) of the virtual machine"""
self.password = None
self.typeInfo['password'] = 'string'
"""true if the password rest feature is enabled, false otherwise"""
self.passwordenabled = None
self.typeInfo['passwordenabled'] = 'boolean'
"""the project name of the vm"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the vm"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicip = None
self.typeInfo['publicip'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicipid = None
self.typeInfo['publicipid'] = 'string'
"""device ID of the root volume"""
self.rootdeviceid = None
self.typeInfo['rootdeviceid'] = 'long'
"""device type of the root volume"""
self.rootdevicetype = None
self.typeInfo['rootdevicetype'] = 'string'
"""the ID of the service offering of the virtual machine"""
self.serviceofferingid = None
self.typeInfo['serviceofferingid'] = 'string'
"""the name of the service offering of the virtual machine"""
self.serviceofferingname = None
self.typeInfo['serviceofferingname'] = 'string'
"""State of the Service from LB rule"""
self.servicestate = None
self.typeInfo['servicestate'] = 'string'
"""the state of the virtual machine"""
self.state = None
self.typeInfo['state'] = 'string'
"""an alternate display text of the template for the virtual machine"""
self.templatedisplaytext = None
self.typeInfo['templatedisplaytext'] = 'string'
"""the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file."""
self.templateid = None
self.typeInfo['templateid'] = 'string'
"""the name of the template for the virtual machine"""
self.templatename = None
self.typeInfo['templatename'] = 'string'
"""the user's ID who deployed the virtual machine"""
self.userid = None
self.typeInfo['userid'] = 'string'
"""the user's name who deployed the virtual machine"""
self.username = None
self.typeInfo['username'] = 'string'
"""the vgpu type used by the virtual machine"""
self.vgpu = None
self.typeInfo['vgpu'] = 'string'
"""the ID of the availablility zone for the virtual machine"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the availability zone for the virtual machine"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""list of affinity groups associated with the virtual machine"""
self.affinitygroup = []
"""the list of nics associated with vm"""
self.nic = []
"""list of security groups associated with the virtual machine"""
self.securitygroup = []
"""the list of resource tags associated with vm"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class affinitygroup:
def __init__(self):
""""the ID of the affinity group"""
self.id = None
""""the account owning the affinity group"""
self.account = None
""""the description of the affinity group"""
self.description = None
""""the domain name of the affinity group"""
self.domain = None
""""the domain ID of the affinity group"""
self.domainid = None
""""the name of the affinity group"""
self.name = None
""""the project name of the affinity group"""
self.project = None
""""the project ID of the affinity group"""
self.projectid = None
""""the type of the affinity group"""
self.type = None
""""virtual machine IDs associated with this affinity group"""
self.virtualmachineIds = None
class nic:
def __init__(self):
""""the ID of the nic"""
self.id = None
""""the broadcast uri of the nic"""
self.broadcasturi = None
""""device id for the network when plugged into the virtual machine"""
self.deviceid = None
""""the gateway of the nic"""
self.gateway = None
""""the IPv6 address of network"""
self.ip6address = None
""""the cidr of IPv6 network"""
self.ip6cidr = None
""""the gateway of IPv6 network"""
self.ip6gateway = None
""""the ip address of the nic"""
self.ipaddress = None
""""true if nic is default, false otherwise"""
self.isdefault = None
""""the isolation uri of the nic"""
self.isolationuri = None
""""true if nic is default, false otherwise"""
self.macaddress = None
""""the netmask of the nic"""
self.netmask = None
""""the ID of the corresponding network"""
self.networkid = None
""""the name of the corresponding network"""
self.networkname = None
""""the Secondary ipv4 addr of nic"""
self.secondaryip = None
""""the traffic type of the nic"""
self.traffictype = None
""""the type of the nic"""
self.type = None
""""Id of the vm to which the nic belongs"""
self.virtualmachineid = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class egressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class ingressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class securitygroup:
def __init__(self):
""""the ID of the security group"""
self.id = None
""""the account owning the security group"""
self.account = None
""""the description of the security group"""
self.description = None
""""the domain name of the security group"""
self.domain = None
""""the domain ID of the security group"""
self.domainid = None
""""the name of the security group"""
self.name = None
""""the project name of the group"""
self.project = None
""""the project id of the group"""
self.projectid = None
""""the number of virtualmachines associated with this securitygroup"""
self.virtualmachinecount = None
""""the list of virtualmachine ids associated with this securitygroup"""
self.virtualmachineids = None
""""the list of egress rules associated with the security group"""
self.egressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of ingress rules associated with the security group"""
self.ingressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the ID of the latest async job acting on this object"""
self.jobid = None
""""the current status of the latest async job acting on this object"""
self.jobstatus = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
|
apache-2.0
| 5,430,298,847,573,778,000 | 37.800319 | 145 | 0.572276 | false |
Mutiny-Games/unicorn-hat-hd
|
examples/snake.py
|
1
|
5313
|
#!/usr/bin/env python
import curses
import os
import random
import time
import unicornhathd
CONTROLS = {
ord('w'): 'up',
ord('s'): 'down',
ord('a'): 'left',
ord('d'): 'right',
curses.KEY_UP: 'up',
curses.KEY_DOWN: 'down',
curses.KEY_LEFT: 'left',
curses.KEY_RIGHT: 'right'
}
class Snake:
def __init__(self, canvas, x=5, y=5):
self.position = (x, y)
self.velocity = (1, 0)
self.length = 1
self.score = 0
self.tail = []
self.colour_head = (128,0,255)
self.colour_tail = (32,0,64)
self.canvas = canvas
self.eaten = []
self.grow_speed = 1
def poo(self):
self.eaten = []
self.tail = []
self.length = 1
self.grow_speed += 1
def shrink(self):
if self.length > 1:
self.length -= 1
self.tail = self.tail[-self.length:]
if len(self.eaten) > 0:
self.eaten.pop(0)
def get_colour(self, x, y):
if (x, y) == self.position:
return self.colour_head
elif (x, y) in self.tail:
return self.colour_tail
def draw(self):
for position in [self.position] + self.tail:
x, y = position
r, g, b = self.get_colour(x, y)
self.canvas.set_pixel(x, y, r, g, b)
for idx, colour in enumerate(self.eaten):
r, g, b = colour
self.canvas.set_pixel(idx, 14, r >> 1, g >> 1, b >> 1)
def num_eaten(self):
return len(self.eaten)
def update(self, apples, direction=''):
x, y = self.position
if direction == 'left' and self.velocity != (1, 0):
self.velocity = (-1, 0)
if direction == 'right' and self.velocity != (-1, 0):
self.velocity = (1, 0)
if direction == 'up' and self.velocity != (0, -1):
self.velocity = (0, 1)
if direction == 'down' and self.velocity != (0, 1):
self.velocity = (0, -1)
v_x, v_y = self.velocity
x += v_x
y += v_y
c_x, c_y = self.canvas.get_shape()
c_y -= 3 # 3 pixels along the top for score
x %= c_x
y %= c_y
if (x, y) in self.tail:
return False
self.tail.append(self.position)
self.tail = self.tail[-self.length:]
self.position = (x, y)
for apple in apples:
if apple.position == self.position:
score = apple.eat()
if score > 0:
self.score += score
self.length += self.grow_speed
self.eaten.append(apple.get_colour())
return True
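# Note: Snake.update wraps positions around a 16x13 playfield via the modulo
# on c_x/c_y; the top three rows of the 16x16 panel are reserved for the
# score strip drawn in main().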
class Apple:
colours = [(128,0,0), (0,128,0), (96,96,0)]
def __init__(self, canvas):
self.canvas = canvas
self.reset()
def get_colour(self):
return self.colours[self.score]
def reset(self):
c_x, c_y = self.canvas.get_shape()
c_y -= 3
self.position = (random.randint(0, c_x-1), random.randint(0, c_y-1))
self.score = random.randint(0, len(self.colours)-1)
self.eaten = False
def eat(self):
if self.eaten:
return 0
self.eaten = True
return self.score + 1
def update(self):
pass # What's an apple 'gon do?
def draw(self):
if self.eaten:
return
x, y = self.position
r, g, b = self.get_colour()
self.canvas.set_pixel(x, y, r, g, b)
def main(stdscr):
stdscr.nodelay(1)
stdscr.addstr(2, 5, "Unicorn HAT HD: Snake")
stdscr.addstr(4, 5, "w = UP, s = DOWN, a = LEFT, d = RIGHT")
stdscr.addstr(6, 5, "Press Ctrl+C to exit!")
width, height = unicornhathd.get_shape()
step = 0
running = True
num_apples = 16
snake = Snake(unicornhathd, 13, 5)
apples = [Apple(unicornhathd) for x in range(num_apples)]
t_start = time.time()
hit = False
try:
while running:
unicornhathd.clear()
for x in range(16):
for y in range(3):
unicornhathd.set_pixel(x, 15-y, 10, 10, 10)
if hit:
if snake.length == 1:
hit = False
for apple in apples:
apple.reset()
continue
snake.shrink()
snake.draw()
else:
for apple in apples:
apple.update()
apple.draw()
                direction = ''
                key = 0
                while key != -1:
                    key = stdscr.getch()
                    if key in CONTROLS:
                        direction = CONTROLS[key]
                hit = not snake.update(apples, direction)
if snake.num_eaten() == num_apples:
snake.poo()
for apple in apples:
apple.reset()
apple.draw()
snake.draw()
unicornhathd.show()
step += 1
time.sleep(0.1)
t_time = time.time() - t_start
print("You scored: {}".format(snake.score))
except KeyboardInterrupt:
pass
if __name__ == "__main__":
curses.wrapper(main)
|
mit
| 2,677,595,456,128,741,000 | 22.50885 | 76 | 0.472238 | false |
GreatEmerald/geoscripting
|
Assignment1/Complete.py
|
1
|
3661
|
import os
os.chdir('/home/tim/geoscripting/lesson11')
print os.getcwd()
os.chdir('data')
## Loading osgeo
try:
from osgeo import ogr, osr
print 'Import of ogr and osr from osgeo worked. Hurray!\n'
except ImportError:
print 'Import of ogr and osr from osgeo failed\n\n'
## Is the ESRI Shapefile driver available?
driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName( driverName )
if drv is None:
print "%s driver not available.\n" % driverName
else:
print "%s driver IS available.\n" % driverName
## choose your own name
## make sure this layer does not exist in your 'data' folder
fn = "testing.shp"
layername = "anewlayer"
## Create shape file
ds = drv.CreateDataSource(fn)
print ds.GetRefCount()
# Set spatial reference
spatialReference = osr.SpatialReference()
spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# you can also do the following
# spatialReference.ImportFromEPSG(4326)
## Create Layer
layer=ds.CreateLayer(layername, spatialReference, ogr.wkbPoint)
## Now check your data folder and you will see that the file has been created!
## From now on it is no longer possible to CreateDataSource with the same name
## in your work directory until you remove the name.shp, name.shx and name.dbf files.
print(layer.GetExtent())
## What is the geometry type???
## What does wkb mean??
## OK, let's leave the pyramid top and start building the bottom,
## let's do points
## Create a point
point1 = ogr.Geometry(ogr.wkbPoint)
point2 = ogr.Geometry(ogr.wkbPoint)
## SetPoint(self, int point, double x, double y, double z = 0)
point1.SetPoint(0,1.0,1.0)
point2.SetPoint(0,2.0,2.0)
## Actually we can do lots of things with points:
## Export to other formats/representations:
print "KML file export"
print point2.ExportToKML()
## Buffering
buffer = point2.Buffer(4,4)
print buffer.Intersects(point1)
## More exports:
buffer.ExportToGML()
## Back to the pyramid, we still have no Feature
## A Feature is defined from the properties of the layer, e.g.:
layerDefinition = layer.GetLayerDefn()
feature1 = ogr.Feature(layerDefinition)
feature2 = ogr.Feature(layerDefinition)
## Lets add the points to the feature
feature1.SetGeometry(point1)
feature2.SetGeometry(point2)
## Lets store the feature in a layer
layer.CreateFeature(feature1)
layer.CreateFeature(feature2)
print "The new extent"
print layer.GetExtent()
## So what is missing ????
## Saving the file, but OGR doesn't have a Save() option
## The shapefile is updated with the full object structure
## when the script finishes or when the data source is destroyed;
# if necessary, SyncToDisk() may be used
ds.Destroy()
## below the output is shown of the above Python script that is run in the terminal
# /home/tim/geoscripting/lesson11 --- yesyesremove.sh
os.system('../yesyesremove.sh')
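## Note: the lines below assume they are run inside the QGIS Python console,
## where the qgis.utils module (and its iface object) is available.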
qgis.utils.iface.addVectorLayer(fn, layername, "ogr")
aLayer = qgis.utils.iface.activeLayer()
print aLayer.name()
driverName = "ESRI Shapefile"
driver = ogr.GetDriverByName("ESRI Shapefile")
ds = driver.Open("testing.shp", 1)
## check layers and get the first layer
layernr = ds.GetLayerCount()
print layernr
layer = ds.GetLayerByIndex(0)
print layer
## get number of features in shapefile layer
features_number = layer.GetFeatureCount()
print "number of features for this layer:", features_number
## get the feature definition:
featureDefn = layer.GetLayerDefn()
## create a point
point = ogr.Geometry(ogr.wkbPoint)
point.SetPoint(0,2.0,1.0)
print point
## similarly
## point.AddPoint(2,1)
## create a new feature
feature = ogr.Feature(featureDefn)
feature.SetGeometry(point)
# Lets store the feature in file
layer.CreateFeature(feature)
layer.GetExtent()
ds.Destroy()
|
apache-2.0
| -55,867,917,027,162,376 | 26.533835 | 84 | 0.749795 | false |
jarrodmcc/OpenFermion
|
src/openfermion/tests/_lih_integration_test.py
|
1
|
6229
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests many modules to compute energy of LiH."""
from __future__ import absolute_import
import os
import numpy
import scipy.sparse
import unittest
from openfermion.config import *
from openfermion.hamiltonians import *
from openfermion.ops import *
from openfermion.transforms import *
from openfermion.utils import *
class LiHIntegrationTest(unittest.TestCase):
def setUp(self):
# Set up molecule.
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]
basis = 'sto-3g'
multiplicity = 1
filename = os.path.join(THIS_DIRECTORY, 'data',
'H1-Li1_sto-3g_singlet_1.45')
self.molecule = MolecularData(
geometry, basis, multiplicity, filename=filename)
self.molecule.load()
# Get molecular Hamiltonian.
self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
self.molecular_hamiltonian_no_core = (
self.molecule.
get_molecular_hamiltonian(occupied_indices=[0],
active_indices=range(1,
self.molecule.
n_orbitals)))
# Get FCI RDM.
self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get fermion Hamiltonian.
self.fermion_hamiltonian = normal_ordered(get_fermion_operator(
self.molecular_hamiltonian))
# Get qubit Hamiltonian.
self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get matrix form.
self.hamiltonian_matrix = get_sparse_operator(
self.molecular_hamiltonian)
self.hamiltonian_matrix_no_core = get_sparse_operator(
self.molecular_hamiltonian_no_core)
def test_all(self):
# Test reverse Jordan-Wigner.
fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test mapping to interaction operator.
fermion_hamiltonian = get_fermion_operator(self.molecular_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test RDM energy.
fci_rdm_energy = self.nuclear_repulsion
fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor *
self.one_body)
fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor *
self.two_body)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Confirm expectation on qubit Hamiltonian using reverse JW matches.
qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
qubit_energy = 0.0
for term, coefficient in qubit_rdm.terms.items():
qubit_energy += coefficient * self.qubit_hamiltonian.terms[term]
self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)
# Confirm fermionic RDMs can be built from measured qubit RDMs.
new_fermi_rdm = get_interaction_rdm(qubit_rdm)
fermi_rdm_energy = new_fermi_rdm.expectation(
self.molecular_hamiltonian)
        self.assertAlmostEqual(fermi_rdm_energy, self.molecule.fci_energy)
# Test sparse matrices.
energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
self.assertAlmostEqual(energy, self.molecule.fci_energy)
expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
self.assertAlmostEqual(expected_energy, energy)
# Make sure you can reproduce Hartree-Fock energy.
hf_state = jw_hartree_fock_state(
self.molecule.n_electrons, count_qubits(self.qubit_hamiltonian))
hf_density = get_density_matrix([hf_state], [1.])
expected_hf_density_energy = expectation(self.hamiltonian_matrix,
hf_density)
expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
self.assertAlmostEqual(expected_hf_density_energy,
self.molecule.hf_energy)
# Check that frozen core result matches frozen core FCI from psi4.
        # Record the frozen core result from an external calculation.
self.frozen_core_fci_energy = -7.8807607374168
no_core_fci_energy = scipy.linalg.eigh(
self.hamiltonian_matrix_no_core.todense())[0][0]
self.assertAlmostEqual(no_core_fci_energy,
self.frozen_core_fci_energy)
        # Check that the freeze_orbitals function has the same effect as
        # the occupied_indices option of get_molecular_hamiltonian.
frozen_hamiltonian = freeze_orbitals(
get_fermion_operator(self.molecular_hamiltonian), [0, 1])
self.assertTrue(frozen_hamiltonian ==
get_fermion_operator(self.molecular_hamiltonian_no_core))
|
apache-2.0
| -6,725,934,730,498,603,000 | 43.81295 | 79 | 0.647455 | false |
pli3/e2-openwbif
|
plugin/controllers/views/web/parentcontrollist.py
|
1
|
5525
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.26005
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/parentcontrollist.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class parentcontrollist(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(parentcontrollist, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_83772566 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2servicelist>
''')
for service in VFFSL(SL,"services",True): # generated from line 4, col 2
write(u'''\t<e2service>
\t\t<e2servicereference>''')
_v = VFFSL(SL,"service.servicereference",True) # u'$service.servicereference' on line 6, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicereference')) # from line 6, col 23.
write(u'''</e2servicereference>
\t\t<e2servicename>''')
_v = VFFSL(SL,"service.servicename",True) # u'$service.servicename' on line 7, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicename')) # from line 7, col 18.
write(u'''</e2servicename>
\t</e2service>
''')
write(u'''</e2servicelist>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_83772566
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_parentcontrollist= 'respond'
## END CLASS DEFINITION
if not hasattr(parentcontrollist, '_initCheetahAttributes'):
templateAPIClass = getattr(parentcontrollist, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(parentcontrollist)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=parentcontrollist()).run()
|
gpl-2.0
| 7,711,901,379,807,445,000 | 35.111111 | 199 | 0.635113 | false |
andymitchhank/bessie
|
bessie/base.py
|
1
|
2160
|
from functools import partial
import requests
try:
import urlparse as parse
except ImportError:
from urllib import parse
class InvalidEndpointException(Exception):
pass
class BaseClient(object):
endpoints = []
separator = '/'
base_url = ''
def __init__(self, path='', path_params=None, **kwargs):
self.path = path
self.path_params = path_params
if not self.path_params:
self.path_params = {}
self.kwargs = kwargs
self.__define_convenience_methods()
self._create_request()
def __call__(self, value):
path_param_key = self.path.split(self.separator)[-1]
self.path_params[path_param_key] = str(value)
return self
def __getattr__(self, name):
new_path = self.separator.join((self.path, name)) if self.path else name
return self.__class__(new_path, self.path_params, **self.kwargs)
def _find_endpoint(self, method):
endpoint = None
to_match = '{} {}'.format(method, self.path)
for e in self.endpoints:
if e.match(to_match, self.separator):
endpoint = e
if not endpoint:
raise InvalidEndpointException('{} is not a valid endpoint.'.format(to_match))
return endpoint
def _create_request(self):
self.request = requests.Request()
def _validate_endpoint(self, endpoint, params):
endpoint.validate(params, self.path_params)
def _build_url(self, path):
url = parse.urljoin(self.base_url, path)
for param, value in self.path_params.items():
url = url.replace('<{}>'.format(param), value)
return url
def _finalize_request(self, method, payload):
endpoint = self._find_endpoint(method)
self._validate_endpoint(endpoint, payload)
self.request.url = self._build_url(endpoint.path)
if method == 'GET':
self.request.params = payload
else:
self.request.data = payload
self.request.method = method
def _send_request(self, method, **kwargs):
self._finalize_request(method, kwargs)
prepped = self.request.prepare()
return requests.session().send(prepped)
def __define_convenience_methods(self):
actions = ['POST', 'GET', 'PUT', 'PATCH', 'DELETE', 'OPTIONS', 'HEAD']
for action in actions:
setattr(self, action.lower(), partial(self._send_request, action))
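# Minimal usage sketch (hypothetical subclass; the Endpoint helper referenced
# by `endpoints` lives elsewhere in the package and its constructor here is
# illustrative):
#     class ExampleClient(BaseClient):
#         base_url = 'https://api.example.com/'
#         endpoints = [Endpoint('GET', 'users/<username>')]
#     response = ExampleClient().users('octocat').get()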
|
mit
| 4,279,400,171,414,058,000 | 23.545455 | 81 | 0.688426 | false |
x10an14/Programming_Practice
|
project_euler/9/python/solution.py
|
1
|
1427
|
#!/usr/bin/env python3.7
from math import floor
from typing import Tuple
from functools import reduce
from itertools import combinations_with_replacement
def factors(
n: int # Number to factorize
) -> Tuple[int]:
return tuple(
reduce(
list.__add__,
(
[i, floor(n / i)]
for i in range(1, int(n ** 0.5) + 1)
if n % i == 0
)
)
)
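# e.g. factors(18) -> (1, 18, 2, 9, 3, 6); note the result can contain a
# duplicate entry when n is a perfect square.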
def all_pythagorean_triplets_of_n_dickson(
n: int
) -> Tuple[Tuple[int]]:
"""
https://en.wikipedia.org/wiki/Formulas_for_generating_Pythagorean_triples#Dickson's_method
"""
n_2 = int(n ** 2 / 2)
interesting_factors = factors(n_2)
interesting_factors = tuple(
combo
for combo in combinations_with_replacement(interesting_factors, 2)
if (combo[0] * combo[1]) == n_2
)
for s, t in interesting_factors:
x, y, z = n + s, n + t, n + s + t
if x + y + z == 1000:
return x * y * z
return 0
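# Worked example of Dickson's method for n = 6: n**2 / 2 = 18, whose factor
# pairs (s, t) with s*t = 18 are (1, 18), (2, 9) and (3, 6), yielding the
# triples (7, 24, 25), (8, 15, 17) and (9, 12, 15).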
def find_pythagorean_triplet_whose_sum_match_n(
n: int,
) -> int:
    max_value = int(n ** (1 / 3)) + 1  # cube root of n, plus 1
for x in range(6, max_value, 2):
result = all_pythagorean_triplets_of_n_dickson(
n=x
)
        if result != 0:
return result
if __name__ == '__main__':
print(
find_pythagorean_triplet_whose_sum_match_n(1000)
)
|
mit
| -4,583,035,532,445,794,000 | 23.186441 | 94 | 0.529783 | false |
cranes-bill/cedit
|
tools/generate-plugin.py
|
1
|
5946
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generate-plugin.py - cedit plugin skeleton generator
# This file is part of cedit
#
# Copyright (C) 2006 - Steve Frécinaux
#
# cedit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# cedit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cedit; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
import re
import os
import sys
import getopt
from datetime import date
import preprocessor
# Default values of command line options
options = {
'language' : 'c',
'description' : 'Type here a short description of your plugin',
    'author' : os.getenv('USERNAME') or 'unknown',
    'email' : (os.getenv('LOGNAME') or 'user') + '@email.com',
'standalone' : False,
'with-side-pane' : False,
'with-bottom-pane' : False,
'with-menu' : False,
'with-config-dlg' : False
}
USAGE = """Usage:
%s [OPTIONS...] pluginname
""" % os.path.basename(sys.argv[0])
HELP = USAGE + """
generate skeleton source tree for a new cedit plugin.
Options:
--author Set the author name
--email Set the author email
--description Set the description you want for your new plugin
--standalone Is this plugin intended to be distributed as a
standalone package ? (N/A)
--language / -l Set the language (C) [default: %(language)s]
--with-$feature Enable $feature
--without-$feature Disable $feature
--help / -h Show this message and exits
Features:
config-dlg Plugin configuration dialog
menu Plugin menu entries
side-pane Side pane item (N/A)
bottom-pane Bottom pane item (N/A)
""" % options
TEMPLATE_DIR = os.path.join(os.path.dirname(sys.argv[0]), "plugin_template")
# Parsing command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
'l:h',
['language=',
'description=',
'author=',
'email=',
'standalone',
'with-menu' , 'without-menu',
'with-side-pane' , 'without-side-pane',
'with-bottom-pane' , 'without-bottom-pane',
'with-config-dlg' , 'without-config-dlg',
'help'])
except getopt.error, exc:
print >>sys.stderr, '%s: %s' % (sys.argv[0], str(exc))
print >>sys.stderr, USAGE
sys.exit(1)
for opt, arg in opts:
if opt in ('-h', '--help'):
print >>sys.stderr, HELP
sys.exit(0)
elif opt in ('--description', '--author', '--email'):
options[opt[2:]] = arg
elif opt in ('-l', '--language'):
options['language'] = arg.lower()
elif opt == '--standalone':
options['standalone'] = True
elif opt[0:7] == '--with-':
options['with-' + opt[7:]] = True
elif opt[0:10] == '--without-':
options['with-' + opt[10:]] = False
# What's the new plugin name ?
if len(args) < 1:
print >>sys.stderr, USAGE
sys.exit(1)
plugin_name = args[0]
plugin_id = re.sub('[^a-z0-9_]', '', plugin_name.lower().replace(' ', '_'))
plugin_module = plugin_id.replace('_', '-')
directives = {
'PLUGIN_NAME' : plugin_name,
'PLUGIN_MODULE' : plugin_module,
'PLUGIN_ID' : plugin_id,
'AUTHOR_FULLNAME' : options['author'],
'AUTHOR_EMAIL' : options['email'],
'DATE_YEAR' : date.today().year,
'DESCRIPTION' : options['description'],
}
# Files to be generated by the preprocessor, in the form "template : outfile"
output_files = {
'Makefile.am': '%s/Makefile.am' % plugin_module,
'cedit-plugin.desktop.in': '%s/%s.cedit-plugin.desktop.in' % (plugin_module, plugin_module)
}
if options['language'] == 'c':
output_files['cedit-plugin.c'] = '%s/%s-plugin.c' % (plugin_module, plugin_module)
output_files['cedit-plugin.h'] = '%s/%s-plugin.h' % (plugin_module, plugin_module)
else:
print >>sys.stderr, 'Value of --language should be C'
print >>sys.stderr, USAGE
sys.exit(1)
if options['standalone']:
output_files['configure.ac'] = 'configure.ac'
if options['with-side-pane']:
directives['WITH_SIDE_PANE'] = True
if options['with-bottom-pane']:
directives['WITH_BOTTOM_PANE'] = True
if options['with-menu']:
directives['WITH_MENU'] = True
if options['with-config-dlg']:
directives['WITH_CONFIGURE_DIALOG'] = True
# Generate the plugin base
for infile, outfile in output_files.iteritems():
print 'Processing %s\n' \
' into %s...' % (infile, outfile)
infile = os.path.join(TEMPLATE_DIR, infile)
outfile = os.path.join(os.getcwd(), outfile)
if not os.path.isfile(infile):
print >>sys.stderr, 'Input file does not exist : %s.' % os.path.basename(infile)
continue
# Make sure the destination directory exists
if not os.path.isdir(os.path.split(outfile)[0]):
os.makedirs(os.path.split(outfile)[0])
# Variables relative to the generated file
directives['DIRNAME'], directives['FILENAME'] = os.path.split(outfile)
# Generate the file
preprocessor.process(infile, outfile, directives.copy())
print 'Done.'
# ex:ts=4:et:
|
gpl-2.0
| -9,085,646,997,685,193,000 | 31.664835 | 95 | 0.584861 | false |
jdepoix/goto_cloud
|
goto_cloud/migration_commander/device_identification.py
|
1
|
4673
|
from command.public import SourceCommand
from .mountpoint_mapping import MountpointMapper
class DeviceIdentificationCommand(SourceCommand):
"""
    takes care of identifying which of the target's devices should replicate which device of the source system
"""
class NoMatchingDevicesException(SourceCommand.CommandExecutionException):
"""
raised if no matching devices were found
"""
COMMAND_DOES = 'match the target and source devices'
ERROR_REPORT_EXCEPTION_CLASS = NoMatchingDevicesException
DEVICE_TYPES_TO_IDENTIFY = ('disk', 'part',)
def _execute(self):
self._target.device_mapping = self._map_unallocated_devices_onto_source_devices(
self._get_unallocated_target_devices()
)
self._target.save()
def _get_unallocated_target_devices(self):
"""
gets the devices from target, which have not been allocated
:return: unallocated target devices
:rtype: dict
"""
return {
target_device_id: target_device
for target_device_id, target_device in self._target.remote_host.system_info['block_devices'].items()
if not target_device['fs'] and not target_device['children']
}
def _map_unallocated_devices_onto_source_devices(self, unallocated_devices):
"""
        maps the unallocated target devices onto the source devices which they will replicate during the migration
:param unallocated_devices: the unallocated devices
:type unallocated_devices: dict
:return: the mapped devices
:rtype: dict
"""
device_map = {}
for source_device_id, source_device in (
(device_id, device)
for device_id, device in self._source.remote_host.system_info['block_devices'].items()
if device['type'] in self.DEVICE_TYPES_TO_IDENTIFY
):
if unallocated_devices:
matching_device_id = next(
(
target_device_id
for target_device_id, target_device in unallocated_devices.items()
if round(target_device['size'] / 1024 ** 3) == round(source_device['size'] / 1024 ** 3)
),
None
)
if matching_device_id:
device_map[source_device_id] = {
'id': matching_device_id,
'mountpoint': self._map_mountpoint(source_device['mountpoint']),
'children': self._map_children(source_device_id, matching_device_id)
}
unallocated_devices.pop(matching_device_id)
else:
self._add_error(
'no device of the target system matches the size of {source_device_id} on the source system'
.format(
source_device_id=source_device_id
)
)
else:
self._add_error(
                    'there are not enough devices on the target instance to be able to replicate the source'
)
return device_map
def _map_children(self, source_device_id, target_device_id):
"""
maps the children of the source device to children in the target device
:param source_device_id: the id of the source device
:type source_device_id: str
:param target_device_id: the id of the target device
:type target_device_id: str
:return: the children of the target device
:rtype: dict
"""
children = {}
source_device = self._source.remote_host.system_info['block_devices'][source_device_id]
for partition_device_id, partition_device in source_device['children'].items():
children[partition_device_id] = {
'id': target_device_id + partition_device_id[-1],
'mountpoint': self._map_mountpoint(partition_device['mountpoint'])
}
return children
@staticmethod
def _map_mountpoint(mountpoint):
"""
map the mountpoint of a source device to a hashed mountpoint, which can be used on a target device
:param mountpoint: mountpoint of the source device
:type mountpoint: str
:return: the mountpoint containing the hashed path for the target device
:rtype: str
"""
return MountpointMapper.map_mountpoint('/mnt', mountpoint) if mountpoint and mountpoint != '[SWAP]' else ''
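# Illustrative shape of the resulting ``target.device_mapping`` (device ids
# and the hashed mountpoint below are assumptions, not output of a real run):
#
#     {'vda': {'id': 'vdb',
#              'mountpoint': '',
#              'children': {'vda1': {'id': 'vdb1',
#                                    'mountpoint': '/mnt/ab12cd34'}}}}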
|
mit
| -253,590,842,994,977,500 | 38.601695 | 116 | 0.577573 | false |
rocktavious/pyul
|
pyul/xmlUtils.py
|
1
|
7272
|
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from xml.dom.minidom import Node, parseString
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from collections import OrderedDict
except ImportError :
OrderedDict = dict
try:
_basestring = basestring
except NameError:
_basestring = str
try:
_unicode = unicode
except NameError:
_unicode = str
__all__ = ['parseToDict','unparseFromDict',
'removeWhitespaceNodes']
class ParsingInterrupted(Exception): pass
class _DictSAXHandler(object):
"""Written by: Martin Blech - Integrated by: Kyle Rockman"""
def __init__(self,
item_depth=0,
item_callback=(lambda *args: True),
xml_attribs=True,
attr_prefix='@',
cdata_key='#text',
force_cdata=True,
cdata_separator='',
postprocessor=None,
dict_constructor=OrderedDict,
strip_whitespace=True):
self.path = []
self.stack = []
self.data = None
self.item = None
self.item_depth = item_depth
self.xml_attribs = xml_attribs
self.item_callback = item_callback
self.attr_prefix = attr_prefix
self.cdata_key = cdata_key
self.force_cdata = force_cdata
self.cdata_separator = cdata_separator
self.postprocessor = postprocessor
self.dict_constructor = dict_constructor
self.strip_whitespace = strip_whitespace
def startElement(self, name, attrs):
attrs = self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
self.path.append((name, attrs or None))
if len(self.path) > self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
attrs = self.dict_constructor(
(self.attr_prefix+key, value)
for (key, value) in attrs.items())
else:
attrs = None
self.item = attrs or None
self.data = None
def endElement(self, name):
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = self.data
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted()
if len(self.stack):
item, data = self.item, self.data
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data is not None:
data = data.strip() or None
if data and self.force_cdata and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = self.data = None
self.path.pop()
def characters(self, data):
if not self.data:
self.data = data
else:
self.data += self.cdata_separator + data
def push_data(self, item, key, data):
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
item[key] = data
return item
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='#text',
root=True,
preprocessor=None):
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
if not isinstance(value, (list, tuple)):
value = [value]
if root and len(value) > 1:
raise ValueError('document with multiple roots')
for v in value:
if v is None:
v = OrderedDict()
elif not isinstance(v, dict):
v = _unicode(v)
if isinstance(v, _basestring):
v = OrderedDict(((cdata_key, v),))
cdata = None
attrs = OrderedDict()
children = []
for ik, iv in v.items():
if ik == cdata_key:
cdata = iv
continue
if ik.startswith(attr_prefix):
attrs[ik[len(attr_prefix):]] = iv
continue
children.append((ik, iv))
content_handler.startElement(key, AttributesImpl(attrs))
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, False, preprocessor)
if cdata is not None:
content_handler.characters(cdata)
content_handler.endElement(key)
def parseToDict(data):
handler = _DictSAXHandler()
parser = expat.ParserCreate()
parser.ordered_attributes = True
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
try:
parser.ParseFile(data)
except (TypeError, AttributeError):
if isinstance(data, _unicode):
data = data.encode('utf-8')
parser.Parse(data, True)
return handler.item
def unparseFromDict(data):
((key, value),) = data.items()
output = StringIO()
content_handler = XMLGenerator(output, 'utf-8')
content_handler.startDocument()
_emit(key, value, content_handler)
content_handler.endDocument()
value = output.getvalue()
try:
value = value.decode('utf-8')
except AttributeError:
pass
return parseString(value).toprettyxml(indent="\t")
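# Round-trip sketch (hedged; the exact dict shape depends on the handler
# defaults above):
#
#     d = parseToDict('<root a="1">hi</root>')
#     # -> OrderedDict([('root', OrderedDict([('@a', '1'), ('#text', 'hi')]))])
#     xml = unparseFromDict(d)  # pretty-printed XML string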
def removeWhitespaceNodes(node, unlink=False):
"""Removes all of the whitespace-only text decendants of a DOM node.
When creating a DOM from an XML source, XML parsers are required to
consider several conditions when deciding whether to include
whitespace-only text nodes. This function ignores all of those
conditions and removes all whitespace-only text decendants of the
specified node. If the unlink flag is specified, the removed text
nodes are unlinked so that their storage can be reclaimed. If the
specified node is a whitespace-only text node then it is left
unmodified."""
remove_list = []
for child in node.childNodes:
if child.nodeType == Node.TEXT_NODE and \
not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
            removeWhitespaceNodes(child, unlink)
for node in remove_list:
node.parentNode.removeChild(node)
if unlink:
node.unlink()
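# Example (hedged): dropping indentation-only text nodes from a parsed tree.
#
#     doc = parseString('<a>\n  <b/>\n</a>')
#     removeWhitespaceNodes(doc.documentElement, unlink=True)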
|
mit
| 3,095,048,240,525,451,000 | 32.205479 | 72 | 0.582508 | false |
benosteen/mypaint
|
lib/brush.py
|
1
|
14425
|
# This file is part of MyPaint.
# Copyright (C) 2007 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import mypaintlib
from brushlib import brushsettings
import helpers
import urllib, copy, math
string_value_settings = set(("parent_brush_name", "group"))
current_brushfile_version = 2
brush_settings = set([s.cname for s in brushsettings.settings])
all_settings = brush_settings.union(string_value_settings)
def brushinfo_quote(string):
"""Quote a string for serialisation of brushes.
>>> brushinfo_quote(u'foo')
'foo'
>>> brushinfo_quote(u'foo/bar blah')
'foo%2Fbar%20blah'
>>> brushinfo_quote(u'Have a nice day \u263A')
'Have%20a%20nice%20day%20%E2%98%BA'
"""
string = unicode(string)
u8bytes = string.encode("utf-8")
return str(urllib.quote(u8bytes, safe=''))
def brushinfo_unquote(quoted):
"""Unquote a serialised string value from a brush field.
>>> brushinfo_unquote("foo")
u'foo'
>>> brushinfo_unquote("foo%2fbar%20blah")
u'foo/bar blah'
>>> expected = u'Have a nice day \u263A'
>>> brushinfo_unquote('Have%20a%20nice%20day%20%E2%98%BA') == expected
True
"""
quoted = str(quoted)
u8bytes = urllib.unquote(quoted)
return unicode(u8bytes.decode("utf-8"))
class BrushInfo:
"""Fully parsed description of a brush.
Just the strings, numbers and inputs/points hashes in a dict-based wrapper
without any special interpretation other than a free upgrade to the newest
brush format."""
def __init__(self, string=None):
"""Construct a BrushInfo object, optionally parsing it."""
self.settings = None
self.cache_str = None
self.observers = [self.settings_changed_cb]
self.observers_hidden = []
self.pending_updates = set()
if string:
self.load_from_string(string)
def settings_changed_cb(self, settings):
self.cache_str = None
def clone(self):
"""Returns a deep-copied duplicate."""
res = BrushInfo()
res.load_from_brushinfo(self)
return res
def load_from_brushinfo(self, other):
"""Updates the brush's Settings from (a clone of) ``brushinfo``."""
self.settings = copy.deepcopy(other.settings)
for f in self.observers:
f(all_settings)
self.cache_str = other.cache_str
def load_defaults(self):
"""Load default brush settings, dropping all current settings."""
self.begin_atomic()
self.settings = {}
for s in brushsettings.settings:
self.reset_setting(s.cname)
self.end_atomic()
def reset_setting(self, cname):
basevalue = brushsettings.settings_dict[cname].default
if cname == 'opaque_multiply':
# make opaque depend on pressure by default
input_points = {'pressure': [(0.0, 0.0), (1.0, 1.0)]}
else:
input_points = {}
self.settings[cname] = [basevalue, input_points]
for f in self.observers:
f(set([cname]))
class ParseError(Exception):
pass
class Obsolete(ParseError):
pass
def load_from_string(self, settings_str):
"""Load a setting string, overwriting all current settings."""
def parse_value(rawvalue, cname, version):
"""Parses a setting value, for a given setting name and brushfile version."""
if cname in string_value_settings:
string = brushinfo_unquote(rawvalue)
return [(cname, string)]
elif version <= 1 and cname == 'color':
rgb = [int(c)/255.0 for c in rawvalue.split(" ")]
h, s, v = helpers.rgb_to_hsv(*rgb)
return [('color_h', [h, {}]), ('color_s', [s, {}]), ('color_v', [v, {}])]
elif version <= 1 and cname == 'change_radius':
if rawvalue == '0.0':
return []
raise Obsolete, 'change_radius is not supported any more'
elif version <= 2 and cname == 'adapt_color_from_image':
if rawvalue == '0.0':
return []
raise Obsolete, 'adapt_color_from_image is obsolete, ignored;' + \
' use smudge and smudge_length instead'
elif version <= 1 and cname == 'painting_time':
return []
if version <= 1 and cname == 'speed':
cname = 'speed1'
parts = rawvalue.split('|')
basevalue = float(parts[0])
input_points = {}
for part in parts[1:]:
inputname, rawpoints = part.strip().split(' ', 1)
if version <= 1:
points = parse_points_v1(rawpoints)
else:
points = parse_points_v2(rawpoints)
assert len(points) >= 2
input_points[inputname] = points
return [(cname, [float(basevalue), input_points])]
def parse_points_v1(rawpoints):
"""Parses the points list format from versions prior to version 2."""
points_seq = [float(f) for f in rawpoints.split()]
points = [(0, 0)]
while points_seq:
x = points_seq.pop(0)
y = points_seq.pop(0)
if x == 0: break
assert x > points[-1][0]
points.append((x, y))
return points
def parse_points_v2(rawpoints):
"""Parses the newer points list format of v2 and beyond."""
points = []
for s in rawpoints.split(', '):
s = s.strip()
if not (s.startswith('(') and s.endswith(')') and ' ' in s):
return '(x y) expected, got "%s"' % s
s = s[1:-1]
try:
x, y = [float(ss) for ss in s.split(' ')]
except:
print s
raise
points.append((x, y))
return points
def transform_y(valuepair, func):
"""Used during migration from earlier versions."""
basevalue, input_points = valuepair
basevalue = func(basevalue)
input_points_new = {}
for inputname, points in input_points.iteritems():
points_new = [(x, func(y)) for x, y in points]
input_points_new[inputname] = points_new
return [basevalue, input_points_new]
# Split out the raw settings and grab the version we're dealing with
rawsettings = []
errors = []
version = 1 # for files without a 'version' field
for line in settings_str.split('\n'):
try:
line = line.strip()
if not line or line.startswith('#'):
continue
cname, rawvalue = line.split(' ', 1)
if cname == 'version':
version = int(rawvalue)
if version > current_brushfile_version:
raise BrushInfo.ParseError, 'this brush was saved with a more recent version of mypaint'
else:
rawsettings.append((cname, rawvalue))
except Exception, e:
errors.append((line, str(e)))
# Parse each pair
self.load_defaults()
num_parsed = 0
for rawcname, rawvalue in rawsettings:
try:
cnamevaluepairs = parse_value(rawvalue, rawcname, version)
num_parsed += 1
for cname, value in cnamevaluepairs:
if cname in brushsettings.settings_migrate:
cname, func = brushsettings.settings_migrate[cname]
if func:
value = transform_y(value, func)
self.settings[cname] = value
except Exception, e:
line = "%s %s" % (rawcname, rawvalue)
errors.append((line, str(e)))
if num_parsed == 0:
errors.append(('', 'there was only garbage in this file, using defaults'))
if errors:
for error in errors:
print error
for f in self.observers:
f(all_settings)
self.cache_str = settings_str # Maybe. It could still be old format...
def save_to_string(self):
"""Serialise brush information to a string. Result is cached."""
if self.cache_str:
return self.cache_str
res = '# mypaint brush file\n'
res += '# you can edit this file and then select the brush in mypaint (again) to reload\n'
res += 'version %d\n' % current_brushfile_version
for cname, data in self.settings.iteritems():
if cname in string_value_settings:
if data is not None:
res += cname + " " + brushinfo_quote(data)
else:
res += cname + " "
basevalue, input_points = data
res += str(basevalue)
if input_points:
for inputname, points in input_points.iteritems():
res += " | " + inputname + ' '
res += ', '.join(['(%f %f)' % xy for xy in points])
res += "\n"
self.cache_str = res
return res
def get_base_value(self, cname):
return self.settings[cname][0]
def get_points(self, cname, input):
return copy.deepcopy(self.settings[cname][1].get(input, ()))
def set_base_value(self, cname, value):
assert cname in brush_settings
self.settings[cname][0] = value
for f in self.observers:
f(set([cname]))
def set_points(self, cname, input, points):
assert cname in brush_settings
points = tuple(points)
d = self.settings[cname][1]
if points:
d[input] = copy.deepcopy(points)
elif input in d:
d.pop(input)
for f in self.observers:
f(set([cname]))
def set_setting(self, cname, value):
self.settings[cname] = copy.deepcopy(value)
for f in self.observers:
f(set([cname]))
def get_setting(self, cname):
return copy.deepcopy(self.settings[cname])
def get_string_property(self, name):
        return self.settings.get(name, None)
def set_string_property(self, name, value):
assert name in string_value_settings
if value is None:
self.settings.pop(name, None)
else:
assert isinstance(value, str) or isinstance(value, unicode)
self.settings[name] = value
for f in self.observers:
f(set([name]))
def has_only_base_value(self, cname):
"""Return whether a setting is constant for this brush."""
for i in brushsettings.inputs:
if self.has_input(cname, i.name):
return False
return True
def has_input(self, cname, input):
"""Return whether a given input is used by some setting."""
return self.get_points(cname, input)
def begin_atomic(self):
self.observers_hidden.append(self.observers[:])
del self.observers[:]
self.observers.append(self.add_pending_update)
def add_pending_update(self, settings):
self.pending_updates.update(settings)
def end_atomic(self):
self.observers[:] = self.observers_hidden.pop()
pending = self.pending_updates.copy()
if pending:
self.pending_updates.clear()
for f in self.observers:
f(pending)
def get_color_hsv(self):
h = self.get_base_value('color_h')
s = self.get_base_value('color_s')
v = self.get_base_value('color_v')
return (h, s, v)
def set_color_hsv(self, hsv):
self.begin_atomic()
try:
h, s, v = hsv
self.set_base_value('color_h', h)
self.set_base_value('color_s', s)
self.set_base_value('color_v', v)
finally:
self.end_atomic()
def set_color_rgb(self, rgb):
self.set_color_hsv(helpers.rgb_to_hsv(*rgb))
def get_color_rgb(self):
hsv = self.get_color_hsv()
return helpers.hsv_to_rgb(*hsv)
def is_eraser(self):
return self.get_base_value('eraser') > 0.9
def get_effective_radius(self):
"""Return brush radius in pixels for cursor shape."""
base_radius = math.exp(self.get_base_value('radius_logarithmic'))
r = base_radius
r += 2*base_radius*self.get_base_value('offset_by_random')
return r
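    # Hedged arithmetic illustration: radius_logarithmic = 2.0 and
    # offset_by_random = 0.5 give exp(2.0) * (1 + 2 * 0.5) ~= 14.8 px.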
class Brush(mypaintlib.Brush):
"""
Low-level extension of the C brush class, propagating all changes of
a brushinfo instance down into the C code.
"""
def __init__(self, brushinfo):
mypaintlib.Brush.__init__(self)
self.brushinfo = brushinfo
brushinfo.observers.append(self.update_brushinfo)
self.update_brushinfo(all_settings)
def update_brushinfo(self, settings):
"""Mirror changed settings into the BrushInfo tracking this Brush."""
for cname in settings:
setting = brushsettings.settings_dict.get(cname)
if not setting:
continue
base = self.brushinfo.get_base_value(cname)
self.set_base_value(setting.index, base)
for input in brushsettings.inputs:
points = self.brushinfo.get_points(cname, input.name)
assert len(points) != 1
#if len(points) > 2:
# print 'set_points[%s](%s, %s)' % (cname, input.name, points)
self.set_mapping_n(setting.index, input.index, len(points))
for i, (x, y) in enumerate(points):
self.set_mapping_point(setting.index, input.index, i, x, y)
def get_stroke_bbox(self):
bbox = self.stroke_bbox
return bbox.x, bbox.y, bbox.w, bbox.h
if __name__ == "__main__":
import doctest
doctest.testmod()
|
gpl-2.0
| 3,792,774,296,780,017,700 | 34.972569 | 112 | 0.552236 | false |
Musaka96/FB-Chat-Toolkit
|
chat.py
|
1
|
2494
|
import fbchat
def login():
username = input("Enter your id --> ")
password = input("Enter your password --> ")
try:
client = fbchat.Client(username, password)
except:
print("id or password is wrong")
        return login()
menu(client)
def menu(client):
print("1.Messenger")
print("2.Spammer")
print("3.Log out")
choice = input("> ")
if choice == "1":
messenger(client)
if choice == "2":
        spammer(client)
if choice == "3":
login()
def messenger(client):
who_we_need = input ("Who do we want to chat? --> ")
friends = client.getUsers(who_we_need) #gets us a list of names
counter = 0
for friend in friends:
friend = str(friend)
friend = friend[6:]
string = ''
        for i in friend: # all of this just prints the user names nicely
if i != "(":
string += i
else:
break
print(counter, ".",string)
counter += 1
who_s_it_gonna_be = input("So, who's it gonna be? > ")
friend = friends[int(who_s_it_gonna_be)]
try:
while True:
last_messages = client.getThreadInfo(friend.uid,0)
last_messages.reverse() # messages come in reversed order
for message in last_messages:
print(message.body)
new_mssg = input("Poruka --> ")
sent = client.send(friend.uid, new_mssg)
if sent:
print("Sent!")
except KeyboardInterrupt:
        menu(client)
def spammer(client):
who_we_need = input ("Who do we want to spamm? --> ")
friends = client.getUsers(who_we_need) #gets us a list of names
counter = 0
for friend in friends:
friend = str(friend)
friend = friend[6:]
string = ''
        for i in friend: # all of this just prints the user names nicely
if i != "(":
string += i
else:
break
print(counter, ".",string)
counter += 1
who_s_it_gonna_be = input("So, who's it gonna be? > ")
friend = friends[int(who_s_it_gonna_be)]
pain = input("How many times do we want to spamm him (enter a number, or 0 for infinity)")
if pain == "0":
message = input("Spamm message? > ")
counter = 0
try:
while True:
poruka = input("Poruka --> ")
sent = client.send(friend.uid, poruka)
if sent:
counter += 1
print("Sent " + counter + " times")
except KeyboardInterrupt:
menu()
else:
message = input("Spamm message? > ")
counter = 0
try:
for i in range(int(pain)):
poruka = input("Poruka --> ")
sent = client.send(friend.uid, poruka)
if sent:
counter += 1
print("Sent " + counter + " times")
print("Done ^^")
except KeyboardInterrupt:
menu()
login()
|
gpl-3.0
| 1,108,275,265,305,331,300 | 16.206897 | 91 | 0.607458 | false |
simontakite/sysadmin
|
pythonscripts/learningPython/argtest.py
|
1
|
5114
|
"""
File argtest.py: (3.X + 2.X) function decorator that performs
arbitrary passed-in validations for arguments passed to any
function method. Range and type tests are two example uses;
valuetest handles more arbitrary tests on a argument's value.
Arguments are specified by keyword to the decorator. In the actual
call, arguments may be passed by position or keyword, and defaults
may be omitted. See self-test code below for example use cases.
Caveats: doesn't fully support nesting because call proxy args
differ; doesn't validate extra args passed to a decoratee's *args;
and may be no easier than an assert except for canned use cases.
"""
trace = False
def rangetest(**argchecks):
return argtest(argchecks, lambda arg, vals: arg < vals[0] or arg > vals[1])
def typetest(**argchecks):
return argtest(argchecks, lambda arg, type: not isinstance(arg, type))
def valuetest(**argchecks):
return argtest(argchecks, lambda arg, tester: not tester(arg))
def argtest(argchecks, failif): # Validate args per failif + criteria
def onDecorator(func): # onCall retains func, argchecks, failif
if not __debug__: # No-op if "python -O main.py args..."
return func
else:
code = func.__code__
expected = list(code.co_varnames[:code.co_argcount])
def onError(argname, criteria):
errfmt = '%s argument "%s" not %s'
raise TypeError(errfmt % (func.__name__, argname, criteria))
def onCall(*pargs, **kargs):
positionals = expected[:len(pargs)]
for (argname, criteria) in argchecks.items(): # For all to test
if argname in kargs: # Passed by name
if failif(kargs[argname], criteria):
onError(argname, criteria)
elif argname in positionals: # Passed by posit
position = positionals.index(argname)
if failif(pargs[position], criteria):
onError(argname, criteria)
else: # Not passed-dflt
if trace:
print('Argument "%s" defaulted' % argname)
return func(*pargs, **kargs) # OK: run original call
return onCall
return onDecorator
if __name__ == '__main__':
import sys
def fails(test):
try: result = test()
except: print('[%s]' % sys.exc_info()[1])
else: print('?%s?' % result)
print('--------------------------------------------------------------------')
# Canned use cases: ranges, types
@rangetest(m=(1, 12), d=(1, 31), y=(1900, 2013))
def date(m, d, y):
print('date = %s/%s/%s' % (m, d, y))
date(1, 2, 1960)
fails(lambda: date(1, 2, 3))
@typetest(a=int, c=float)
def sum(a, b, c, d):
print(a + b + c + d)
sum(1, 2, 3.0, 4)
sum(1, d=4, b=2, c=3.0)
fails(lambda: sum('spam', 2, 99, 4))
fails(lambda: sum(1, d=4, b=2, c=99))
print('--------------------------------------------------------------------')
# Arbitrary/mixed tests
@valuetest(word1=str.islower, word2=(lambda x: x[0].isupper()))
def msg(word1='mighty', word2='Larch', label='The'):
print('%s %s %s' % (label, word1, word2))
msg() # word1 and word2 defaulted
msg('majestic', 'Moose')
fails(lambda: msg('Giant', 'Redwood'))
fails(lambda: msg('great', word2='elm'))
print('--------------------------------------------------------------------')
# Manual type and range tests
@valuetest(A=lambda x: isinstance(x, int), B=lambda x: x > 0 and x < 10)
def manual(A, B):
print(A + B)
manual(100, 2)
fails(lambda: manual(1.99, 2))
fails(lambda: manual(100, 20))
print('--------------------------------------------------------------------')
# Nesting: runs both, by nesting proxies on original.
# Open issue: outer levels do not validate positionals due
# to call proxy function's differing argument signature;
# when trace=True, in all but the last of these "X" is
# classified as defaulted due to the proxy's signature.
@rangetest(X=(1, 10))
@typetest(Z=str) # Only innermost validates positional args
def nester(X, Y, Z):
return('%s-%s-%s' % (X, Y, Z))
print(nester(1, 2, 'spam')) # Original function runs properly
fails(lambda: nester(1, 2, 3)) # Nested typetest is run: positional
fails(lambda: nester(1, 2, Z=3)) # Nested typetest is run: keyword
fails(lambda: nester(0, 2, 'spam')) # <==Outer rangetest not run: posit.
fails(lambda: nester(X=0, Y=2, Z='spam')) # Outer rangetest is run: keyword
|
gpl-2.0
| -1,233,746,748,900,023,800 | 39.577236 | 84 | 0.517208 | false |
cpdean/sqlalchemy-bigquery
|
sqlalchemy_bigquery/base.py
|
1
|
2701
|
"""
Support for Google BigQuery.
Does not support actually connecting to BQ.
Directly derived from the mssql dialect with minor modifications
"""
import sqlalchemy.dialects.mssql.base as mssql_base
from sqlalchemy.dialects.mssql.base import MSDialect
from sqlalchemy.sql import compiler
from sqlalchemy.sql import sqltypes
class BQString(sqltypes.String):
def __init__(
self,
length=None,
collation=None,
convert_unicode=False,
unicode_error=None,
_warn_on_bytestring=False
):
return super(BQString, self).__init__(
length=length,
collation=collation,
convert_unicode=convert_unicode,
unicode_error=unicode_error,
_warn_on_bytestring=_warn_on_bytestring
)
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "\\'")
return "'%s'" % value
return process
class BQSQLCompiler(mssql_base.MSSQLCompiler):
    def get_select_precolumns(self, select, **kw):
"""BQ uses TOP differently from MS-SQL"""
s = ""
if select._distinct:
s += "DISTINCT "
if s:
return s
else:
return compiler.SQLCompiler.get_select_precolumns(
self, select, **kw)
def limit_clause(self, select, **kw):
"""Only supports simple (integer) LIMIT clause"""
s = ""
if select._simple_int_limit and not select._offset:
s += "\nLIMIT %d " % select._limit
return s
def visit_column(self, column, add_to_result_map=None, **kwargs):
# TODO: figure out how to do this immutably
# force column rendering to not use quotes by declaring every col literal
column.is_literal = True
return super(BQSQLCompiler, self).visit_column(
column,
add_to_result_map=add_to_result_map,
**kwargs
)
def visit_match_op_binary(self, binary, operator, **kw):
return "%s CONTAINS %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
class BQIdentifierPreparer(compiler.IdentifierPreparer):
def __init__(self, dialect):
super(BQIdentifierPreparer, self).__init__(
dialect,
initial_quote='[',
final_quote=']',
)
def format_label(self, label, name=None):
""" bq can't handle quoting labels """
return name or label.name
class BQDialect(MSDialect):
statement_compiler = BQSQLCompiler
preparer = BQIdentifierPreparer
colspecs = {
sqltypes.String: BQString
}
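# Registration sketch (hedged; the entry-point name below is an assumption,
# not defined by this module):
#
#     from sqlalchemy.dialects import registry
#     registry.register("bigquery", "sqlalchemy_bigquery.base", "BQDialect")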
|
mit
| -109,448,852,808,508,160 | 27.135417 | 81 | 0.593484 | false |
nickmeharry/django-mysql
|
django_mysql/models/fields/lists.py
|
1
|
7795
|
# -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from django.core import checks
from django.db.models import CharField, IntegerField, Lookup, TextField
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django_mysql.forms import SimpleListField
from django_mysql.models.lookups import SetContains, SetIContains
from django_mysql.models.transforms import SetLength
from django_mysql.validators import ListMaxLengthValidator
class ListFieldMixin(object):
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
super(ListFieldMixin, self).__init__(**kwargs)
if self.size:
self.validators.append(ListMaxLengthValidator(int(self.size)))
def get_default(self):
default = super(ListFieldMixin, self).get_default()
if default == '':
return []
else:
return default
def check(self, **kwargs):
errors = super(ListFieldMixin, self).check(**kwargs)
if not isinstance(self.base_field, (CharField, IntegerField)):
errors.append(
checks.Error(
'Base field for list must be a CharField or IntegerField.',
hint=None,
obj=self,
id='django_mysql.E005'
)
)
return errors
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join(
'%s (%s)' % (error.msg, error.id)
for error in base_errors
)
errors.append(
checks.Error(
'Base field for list has errors:\n %s' % messages,
hint=None,
obj=self,
id='django_mysql.E004'
)
)
return errors
@property
def description(self):
return _('List of %(base_description)s') % {
'base_description': self.base_field.description
}
def set_attributes_from_name(self, name):
super(ListFieldMixin, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def deconstruct(self):
name, path, args, kwargs = super(ListFieldMixin, self).deconstruct()
bad_paths = (
'django_mysql.models.fields.lists.' + self.__class__.__name__,
'django_mysql.models.fields.' + self.__class__.__name__
)
if path in bad_paths:
path = 'django_mysql.models.' + self.__class__.__name__
args.insert(0, self.base_field)
kwargs['size'] = self.size
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
if not len(value):
value = []
else:
value = [self.base_field.to_python(v) for
v in value.split(',')]
return value
def from_db_value(self, value, expression, connection, context):
# Similar to to_python, for Django 1.8+
if isinstance(value, six.string_types):
if not len(value):
value = []
else:
value = [self.base_field.to_python(v) for
v in value.split(',')]
return value
def get_prep_value(self, value):
if isinstance(value, list):
value = [
six.text_type(self.base_field.get_prep_value(v))
for v in value
]
for v in value:
if ',' in v:
raise ValueError(
"List members in {klass} {name} cannot contain commas"
.format(klass=self.__class__.__name__,
name=self.name)
)
elif not len(v):
raise ValueError(
"The empty string cannot be stored in {klass} {name}"
.format(klass=self.__class__.__name__,
name=self.name)
)
return ','.join(value)
return value
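    # Serialisation sketch (hedged): a value such as ['red', 'green'] is
    # stored as the comma-joined string 'red,green', which is the layout
    # MySQL's FIND_IN_SET expects.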
def get_lookup(self, lookup_name):
lookup = super(ListFieldMixin, self).get_lookup(lookup_name)
if lookup:
return lookup
try:
index = int(lookup_name)
except ValueError:
pass
else:
index += 1 # MySQL uses 1-indexing
return IndexLookupFactory(index)
return lookup
def value_to_string(self, obj):
vals = self._get_val_from_obj(obj)
return self.get_prep_value(vals)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleListField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ListFieldMixin, self).formfield(**defaults)
def contribute_to_class(self, cls, name, **kwargs):
super(ListFieldMixin, self).contribute_to_class(cls, name, **kwargs)
self.base_field.model = cls
class ListCharField(ListFieldMixin, CharField):
"""
A subclass of CharField for using MySQL's handy FIND_IN_SET function with.
"""
def check(self, **kwargs):
errors = super(ListCharField, self).check(**kwargs)
# Unfortunately this check can't really be done for IntegerFields since
# they have boundless length
has_base_error = any(e.id == 'django_mysql.E004' for e in errors)
if (
not has_base_error and
isinstance(self.base_field, CharField) and
self.size
):
max_size = (
# The chars used
(self.size * (self.base_field.max_length)) +
# The commas
self.size - 1
)
if max_size > self.max_length:
errors.append(
checks.Error(
'Field can overrun - set contains CharFields of max '
'length %s, leading to a comma-combined max length of '
'%s, which is greater than the space reserved for the '
'set - %s' %
(self.base_field.max_length, max_size,
self.max_length),
hint=None,
obj=self,
id='django_mysql.E006'
)
)
return errors
class ListTextField(ListFieldMixin, TextField):
pass
ListCharField.register_lookup(SetContains)
ListTextField.register_lookup(SetContains)
ListCharField.register_lookup(SetIContains)
ListTextField.register_lookup(SetIContains)
ListCharField.register_lookup(SetLength)
ListTextField.register_lookup(SetLength)
class IndexLookup(Lookup):
def __init__(self, index, *args, **kwargs):
super(IndexLookup, self).__init__(*args, **kwargs)
self.index = index
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
# Put rhs on the left since that's the order FIND_IN_SET uses
return '(FIND_IN_SET(%s, %s) = %s)' % (rhs, lhs, self.index), params
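# Illustration (hedged): for a field ``mylist``, the lookup ``mylist__0='red'``
# renders roughly as ``(FIND_IN_SET('red', mylist) = 1)`` once parameters are
# bound (index 0 maps to FIND_IN_SET position 1).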
class IndexLookupFactory(object):
def __init__(self, index):
self.index = index
def __call__(self, *args, **kwargs):
return IndexLookup(self.index, *args, **kwargs)
|
bsd-3-clause
| -5,785,116,181,025,149,000 | 32.170213 | 79 | 0.540475 | false |
c4sc/arividam
|
arividam/djangocms_news/migrations/0003_auto_20170218_2159.py
|
1
|
1393
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-02-18 16:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import filer.fields.image
class Migration(migrations.Migration):
dependencies = [
('filer', '0006_auto_20160623_1627'),
('sites', '0004_auto_20160810_2005'),
('djangocms_news', '0002_auto_20170119_2322'),
]
operations = [
migrations.AddField(
model_name='promotednews',
name='extract',
field=djangocms_text_ckeditor.fields.HTMLField(blank=True, default='', help_text='A brief description of the article used in the featured lists', verbose_name='extract'),
),
migrations.AddField(
model_name='promotednews',
name='site',
field=models.ForeignKey(default=1, help_text='The site the article is accessible from', on_delete=django.db.models.deletion.CASCADE, related_name='djangocms_articles', to='sites.Site', verbose_name='site'),
preserve_default=False,
),
migrations.AddField(
model_name='promotednews',
name='thumbnail',
field=filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='filer.Image'),
),
]
|
mit
| 5,029,275,819,570,105,000 | 37.694444 | 218 | 0.643934 | false |
guyemerson/pyphon
|
src/pyphon.py
|
1
|
3033
|
#!/usr/bin/env python2
import wx, os, sqlite3
import wxGUI
wx.USE_UNICODE = 1
SRC_DIR = os.getcwd()
DATA_DIR = os.path.join(os.path.split(SRC_DIR)[0], 'data')
def filepath(text):
""" If text contains no slashes, add the default data directory """
directory, filename = os.path.split(text)
if directory == "":
return os.path.join(DATA_DIR, filename)
else:
return text
if __name__ == "__main__":
# Read settings from file, or else create settings file
settingsFile = os.path.join(DATA_DIR, '.pyphonsettings')
settings = dict()
if os.path.exists(settingsFile):
with open(settingsFile, 'r') as fin:
for line in fin:
key, value = line.strip().split('\t')
if value == "TRUE":
value = True
elif value == "FALSE":
value = False
settings[key] = value
else:
settings = {'album':'default_album.db', 'user':'default_user.db', 'copy':True}
with open(settingsFile, 'w') as fout:
for key, value in settings.items():
if value == True:
value = "TRUE"
elif value == False:
value = "FALSE"
fout.write("{}\t{}\n".format(key, value))
albumFile = filepath(settings['album'])
userFile = filepath(settings['user'])
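    # Example .pyphonsettings contents (tab-separated, values illustrative):
    #
    #     album   default_album.db
    #     user    default_user.db
    #     copy    TRUE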
# Open database files, if they exist, or else create empty databases
# Currently, userFile is not implemented
if os.path.exists(albumFile):
with sqlite3.connect(albumFile) as data:
cursor = data.cursor()
cursor.execute("PRAGMA foreign_keys = ON")
else:
with sqlite3.connect(albumFile) as data:
cursor = data.cursor()
cursor.execute("PRAGMA foreign_keys = ON")
cursor.execute('''CREATE TABLE language_set
(language TEXT PRIMARY KEY)''')
cursor.execute('''CREATE TABLE contrast_set
(language TEXT,
contrast TEXT,
FOREIGN KEY (language) REFERENCES language_set(language)
ON DELETE CASCADE ON UPDATE CASCADE,
PRIMARY KEY (language, contrast))''')
cursor.execute('''CREATE TABLE speaker_set
(language TEXT,
speaker TEXT,
FOREIGN KEY (language) REFERENCES language_set(language)
ON DELETE CASCADE ON UPDATE CASCADE,
PRIMARY KEY (language, speaker))''')
cursor.execute('''CREATE TABLE recordings
(file TEXT PRIMARY KEY,
speaker TEXT,
language TEXT,
answer TEXT NOT NULL,
FOREIGN KEY (language, speaker) REFERENCES speaker_set(language, speaker)
ON DELETE CASCADE ON UPDATE CASCADE)''')
cursor.execute('''CREATE TABLE minimal_pairs
(language TEXT,
contrast TEXT,
item_1 TEXT,
item_2 TEXT,
FOREIGN KEY (language, contrast) REFERENCES contrast_set(language, contrast)
ON DELETE CASCADE ON UPDATE CASCADE,
PRIMARY KEY (language, contrast, item_1, item_2))''')
# Open the main window
app = wx.App(False)
frame = wxGUI.MainWindow(None, title="High Variability Phonetic Training software", cursor=cursor)
frame.Show()
app.MainLoop()
# Save database changes after exiting
data.commit()
|
gpl-3.0
| 8,069,458,571,730,618,000 | 28.656566 | 99 | 0.648863 | false |
umass-forensics/Yapr-forensics
|
yapr/yapr_scan.py
|
1
|
6163
|
"""
This script attempts to determine important characteristics of a Yaffs phone image.
Ideally, this functionality will be incorporated into a new version of the Yaffs parser.
"""
__author__ = 'wallsr'
import argparse
import os
from . import utilities
from .YaffsClasses.YaffsChunk import YaffsHeader
from .YaffsClasses.YaffsOobTag import YaffsOobTag
_description = "attempts to determine important characteristics of the phone image"
def scan_file(image, anchor, chunk_sizes, oob_sizes):
max_count = 0
best_csize = None
best_osize = None
best_headers = None
for csize in chunk_sizes:
for osize in oob_sizes:
size = os.path.getsize(image)
#Check if image size is a multiple of the chunk plus oob size.
if size % (csize + osize) != 0:
continue
headers = get_anchor_headers(image, csize, osize, anchor)
if len(headers) == 0:
continue
print ">", csize, osize
print "Found %d %s headers" % (len(headers), anchor)
constant_count = count_constant_oobs(image, headers, osize)
count = 2 * len(headers) - constant_count
print "Found %d potentially good oobs for the headers." \
% (len(headers) - constant_count)
if count >= max_count:
max_count = count
best_csize = csize
best_osize = osize
best_headers = headers
if best_headers is None or len(best_headers) == 0:
print "Unable to determine sizes."
return None
print "Most likely chunk and oob sizes: %d, %d" % (best_csize, best_osize)
headers = get_anchor_headers(image, best_csize, best_osize, anchor)
    joined_names = '.'.join([h.name for h in headers])
    if '\x00' in joined_names:
print "Filenames appear to be in unicode"
best_oob = guess_oob_offset(image, best_headers, best_osize)
if best_oob is not None:
guess_block_size(image, best_csize, best_osize, best_oob)
return best_osize, best_csize
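# Example invocation (hedged; 2048/64 is typical for 2KiB-page NAND with a
# 64-byte spare area):
#
#     scan_file('userdata.dd', 'contacts2.db', [2048, 4096], [64, 128])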
def count_constant_oobs(image, chunks, oobsize):
oobs = utilities.get_oob_bytes(image, chunks, oobsize)
constants_count = 0
constant = '\xff' * oobsize
for oob in oobs:
if oob == constant:
constants_count += 1
return constants_count
def guess_oob_offset(image, headers, oob_size):
oobs_bytes = utilities.get_oob_bytes(image, headers, oob_size)
best_parsed = []
# We use -16 because we are looking for 16 bytes in the tag
# for parsing
for offset in xrange(0, oob_size-16):
parsed = []
for bytes in oobs_bytes:
parsed_oob = YaffsOobTag(bytes, offset)
if not parsed_oob.isHeaderTag:
continue
else:
parsed.append(parsed_oob)
if len(parsed) > len(best_parsed):
best_offset = offset
best_parsed = parsed
object_ids = set([o.object_id for o in best_parsed])
if len(object_ids) > 0:
print "OOB tag offset is %d" % best_offset
print "with %d valid header tags" % len(best_parsed)
print "Object id: %s" % str(object_ids)
return best_offset
print "Unable to determine OOB tag offset."
return None
def guess_block_size(image, chunk_size, oob_size, oob_offset):
chunk_pairs = utilities.extract_chunks(image, chunk_size, oob_size)
chunks = [c for c, o in chunk_pairs[:1024]]
oobs_bytes = utilities.get_oob_bytes(image, chunks, oob_size)
oobs = [YaffsOobTag(b, oob_offset) for b in oobs_bytes]
prev = -1
counts = []
count = 0
for oob in oobs:
if oob.block_seq != prev:
if count > 0:
counts.append(count)
count = 1
prev = oob.block_seq
else:
count += 1
import collections
size, freq = collections.Counter(counts).most_common(1)[0]
if freq == 1:
print "Unable to determine block size."
return None
print "Most likely block size: %d" % size
return size
def get_headers(image, chunk_size, oob_size):
chunk_pairs = utilities.extract_chunks(image, chunk_size, oob_size)
#First filter, The first byte should be 0x01
#Litte endian
header_chunks = [YaffsHeader(c) for c, obb in chunk_pairs
if c.get_bytes(4) == '\x01\00\00\00']
#Now use the second, slower filter.
header_chunks = [c for c in header_chunks
if YaffsHeader(c).is_valid_file()]
return header_chunks
def get_anchor_headers(image, chunk_size, oob_size, anchor):
header_chunks = get_headers(image, chunk_size, oob_size)
anchor_headers = [h for h in header_chunks
if h.name.replace('\x00', '') == anchor]
return anchor_headers
def main():
"""
Assume we pass this script the image file path as an argument on the
command line.
"""
DEFAULT_ANCHORS = ['contacts2.db']
DEFAULT_CHUNK_SIZES = [2048]
DEFAULT_OOB_SIZES = [64]
parser = argparse.ArgumentParser(description=_description)
parser.add_argument("imagefile", help="The path to the YAFFS2 image.", type=str)
parser.add_argument("--anchors",
help="The filenames to use for anchoring the search. Default: %s" % DEFAULT_ANCHORS,
nargs='*', default=DEFAULT_ANCHORS, dest="anchors")
parser.add_argument("--chunksizes",
help="The chunk sizes to test for. Default: %s" % DEFAULT_CHUNK_SIZES,
nargs='*', default=DEFAULT_CHUNK_SIZES, dest="chunk_sizes", type=int)
parser.add_argument("--oobsizes",
help="The oob sizes to test for. Default: %s" % DEFAULT_OOB_SIZES,
nargs='*', default=DEFAULT_OOB_SIZES, dest="oob_sizes", type=int)
args = parser.parse_args()
print args.imagefile
for anchor in args.anchors:
print 'Scanning for %s' % anchor
scan_file(args.imagefile, anchor, args.chunk_sizes, args.oob_sizes)
pass
if __name__ == '__main__':
main()
|
gpl-2.0
| -7,580,820,317,265,886,000 | 28.917476 | 108 | 0.602304 | false |
nthall/pip
|
pip/index.py
|
1
|
37111
|
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS,
)
from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
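    # Illustrative outcomes: an "https" origin matches the hardcoded secure
    # origins regardless of host, so it is accepted; a plain "http" origin
    # is rejected unless its host was whitelisted via --trusted-host (which
    # adds it to self.secure_origins).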
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
        Checks the url_name on the main (first in the list) index and
        uses this url_name to produce all locations.
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, urllib_parse.quote(project_name.lower()))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
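    # Illustrative: with index_urls = ["https://pypi.python.org/simple"] and
    # project_name "Foo.Bar", this yields
    # ["https://pypi.python.org/simple/foo.bar/"] (lower-cased, URL-quoted,
    # with the trailing slash guaranteed).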
def find_all_candidates(self, project_name):
"""Find all available InstallationCandidate for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self.find_all_candidates(req.name)
# Filter out anything which doesn't match our specifier
compatible_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in compatible_versions
]
if applicable_candidates:
best_candidate = max(applicable_candidates,
key=self._candidate_sort_key)
else:
best_candidate = None
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
            # We have an existing version, and it's the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(sorted(compatible_versions, key=parse_version)) or
"none",
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
', '.join(sorted(compatible_versions, key=parse_version))
)
return best_candidate.location
def _get_pages(self, locations, project_name):
"""
        Yields HTMLPage objects from the given locations, skipping
        locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
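# Illustrative behaviour (doctest-style; `link` is only used for logging):
#   egg_info_matches('pip-8.1.2', 'pip', link)             -> '8.1.2'
#   egg_info_matches('pip_vendor-1.0', 'pip-vendor', link) -> '1.0'
#   egg_info_matches('nonsense', 'pip', link)              -> None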
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
            # requirement of a url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
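    # Illustrative: clean_link('http://host/a b') -> 'http://host/a%20b';
    # characters already allowed by _clean_re (including '%') pass through
    # unchanged, so pre-quoted links are not double-quoted.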
class Link(object):
def __init__(self, url, comes_from=None):
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
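# Illustrative results (hypothetical FormatControl values; fields are
# positional: no_binary, only_binary):
#   fmt_ctl_formats(FormatControl({':all:'}, set()), 'simplejson')
#       -> frozenset({'source'})    # binaries globally disabled
#   fmt_ctl_formats(FormatControl(set(), {'simplejson'}), 'simplejson')
#       -> frozenset({'binary'})    # this package is wheel-only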
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', RemovedInPip10Warning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
|
mit
| -1,062,880,060,406,941,300 | 34.546935 | 79 | 0.554391 | false |
axltxl/kaoru
|
kaoru/commands/hello.py
|
1
|
1196
|
# -*- coding: utf-8 -*-
"""
kaoru.commands.hello
~~~~~~~~~~~~~~~~~~~~
/hello command implementation
:copyright: (c) 2015 by Alejandro Ricoveri
:license: MIT, see LICENSE for more details.
"""
from .. import utils
from . import bot_command
_greetings = [
# English
'Affirmative Dave, I read you',
'Hello world!',
# Spanish
'Hola',
# Arabic
'أهلاً و سهلاً',
# Mandarin
'你好',
# Corsican
'Salute',
# French
'Salut', 'Bonjour!, est-ce que vous allez bien?',
# Danish
'Hej',
# German
'Hallo',
'Guten tag!',
# Italian
'Ciao',
# Japanese
'今日は',
# Klingon
'nuqneH',
# Farsi
'سلام',
# Turkish
'Merhaba',
]
# /hello command
@bot_command
def _cmd_handler(bot, update):
"""a rather simple ping command"""
if isinstance(update, Update):
utils.echo_msg( bot, update, utils.select_rand_str(_greetings))
else:
utils.echo_msg(bot, update, utils.select_rand_str(_greetings))
desc = 'See if I "live"' # This command's description
cmd_handler = _cmd_handler # command handler
cmd_str = 'hello' # command /string
|
mit
| -4,500,527,741,360,126,500 | 15.041096 | 71 | 0.590094 | false |
kentaro0919/billing
|
app/entries/forms.py
|
1
|
1990
|
"""doc."""
import wtforms
from wtforms.validators import DataRequired
from models import Entry, Tag
class ImageForm(wtforms.Form):
file = wtforms.FileField('Image file')
class TagField(wtforms.StringField):
"""doc."""
def _value(self):
"""doc."""
if self.data:
# Display tags as a comma-separated list.
return ', '.join([tag.name for tag in self.data])
return ''
def get_tags_from_string(self, tag_string):
"""doc."""
raw_tags = tag_string.split(',')
# Filter out any empty tag names.
tag_names = [name.strip() for name in raw_tags if name.strip()]
# Query the database and retrieve any tags we have already saved.
existing_tags = Tag.query.filter(Tag.name.in_(tag_names))
# Determine which tag names are new.
new_names = set(tag_names) - set([tag.name for tag in existing_tags])
# Create a list of unsaved Tag instances for the new tags.
new_tags = [Tag(name=name) for name in new_names]
# Return all the existing tags + all the new, unsaved tags.
return list(existing_tags) + new_tags
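        # Example (illustrative): "python, flask, " yields Tag objects for
        # "python" and "flask"; existing rows are reused and unsaved Tag
        # instances are created only for names not yet in the database.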
def process_formdata(self, valuelist):
"""doc."""
if valuelist:
self.data = self.get_tags_from_string(valuelist[0])
else:
self.data = []
class EntryForm(wtforms.Form):
"""doc."""
title = wtforms.StringField(
'Title',
validators=[DataRequired()])
body = wtforms.TextAreaField(
'Body',
validators=[DataRequired()])
status = wtforms.SelectField(
'Entry status',
choices=(
(Entry.STATUS_PUBLIC, 'Public'),
(Entry.STATUS_DRAFT, 'Draft')),
coerce=int)
tags = TagField(
'Tags',
description='Separate multiple tags with commas.')
def save_entry(self, entry):
"""doc."""
self.populate_obj(entry)
entry.generate_slug()
return entry
|
mit
| 1,626,705,362,889,400,300 | 26.638889 | 77 | 0.58392 | false |
chemelnucfin/tensorflow
|
tensorflow/python/keras/regularizers_test.py
|
1
|
7803
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(keras_parameterized.TestCase,
parameterized.TestCase):
def create_model(self, kernel_regularizer=None, activity_regularizer=None):
model = keras.models.Sequential()
model.add(keras.layers.Dense(NUM_CLASSES,
kernel_regularizer=kernel_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(DATA_DIM,)))
return model
def get_data(self):
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(DATA_DIM,),
num_classes=NUM_CLASSES)
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
return (x_train, y_train), (x_test, y_test)
def create_multi_input_model_from(self, layer1, layer2):
input_1 = keras.layers.Input(shape=(DATA_DIM,))
input_2 = keras.layers.Input(shape=(DATA_DIM,))
out1 = layer1(input_1)
out2 = layer2(input_2)
out = keras.layers.Average()([out1, out2])
model = keras.models.Model([input_1, input_2], out)
model.add_loss(keras.backend.mean(out2))
model.add_loss(math_ops.reduce_sum(input_1))
return model
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_kernel_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(kernel_regularizer=regularizer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
('l2_zero', keras.regularizers.l2(0.)),
])
def test_activity_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(activity_regularizer=regularizer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
    self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_zero_regularization(self):
# Verifies that training with zero regularization works.
x, y = np.ones((10, 10)), np.ones((10, 3))
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
input_shape=(10,))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=5, epochs=1)
def test_custom_regularizer_saving(self):
def my_regularizer(weights):
return math_ops.reduce_sum(math_ops.abs(weights))
inputs = keras.Input((10,))
outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs)
model = keras.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(), custom_objects={'my_regularizer': my_regularizer})
self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_layer(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
model = self.create_multi_input_model_from(dense_layer, dense_layer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 5)
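    # 5 losses (illustrative breakdown): 1 kernel regularizer on the shared
    # layer, 1 activity-regularization loss per call (2 calls), plus the 2
    # explicit add_loss() terms in create_multi_input_model_from.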
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_model(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor))
model = self.create_multi_input_model_from(dummy_model, dummy_model)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 6)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_layer_in_different_models(self, regularizer):
shared_dense = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
models = []
for _ in range(2):
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
unshared_dense = keras.layers.Dense(
NUM_CLASSES, kernel_regularizer=regularizer)
out = unshared_dense(shared_dense(input_tensor))
models.append(keras.models.Model(input_tensor, out))
model = self.create_multi_input_model_from(
layer1=models[0], layer2=models[1])
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 14)
if __name__ == '__main__':
test.main()
|
apache-2.0
| -3,927,156,256,061,779,000 | 37.438424 | 80 | 0.670768 | false |
JiscPER/magnificent-octopus
|
octopus/modules/es/initialise.py
|
1
|
4362
|
import esprit
from octopus.lib import plugin
from octopus.core import app
def _default_mapping():
default_mapping = app.config.get("ELASTIC_SEARCH_DEFAULT_MAPPING")
if default_mapping is None:
return None
if "mappings" in default_mapping:
return default_mapping
if "_default_" in default_mapping:
return {"mappings" : default_mapping}
return {"mappings" : {"_default_" : default_mapping}}
def put_mappings(mappings):
# make a connection to the index
conn = esprit.raw.Connection(app.config['ELASTIC_SEARCH_HOST'], app.config['ELASTIC_SEARCH_INDEX'])
# get the ES version that we're working with
es_version = app.config.get("ELASTIC_SEARCH_VERSION", "0.90.13")
# for each mapping (a class may supply multiple), create them in the index
for key, mapping in mappings.iteritems():
if not esprit.raw.type_exists(conn, key, es_version=es_version):
r = esprit.raw.put_mapping(conn, key, mapping, es_version=es_version)
print "Creating ES Type+Mapping for", key, "; status:", r.status_code
else:
print "ES Type+Mapping already exists for", key
def put_example(type, example):
# make a connection to the index
conn = esprit.raw.Connection(app.config['ELASTIC_SEARCH_HOST'], app.config['ELASTIC_SEARCH_INDEX'])
# get the ES version that we're working with
es_version = app.config.get("ELASTIC_SEARCH_VERSION", "0.90.13")
if not esprit.raw.type_exists(conn, type, es_version=es_version):
example.save()
example.delete()
print "Initialising ES Type+Mapping from document for", type
else:
print "Not Initialising from document - ES Type+Mapping already exists for", type
def initialise():
# if we are not to initialise the index, stop here
if not app.config.get("INITIALISE_INDEX", False):
return
# create the index itself if it needs creating
conn = esprit.raw.Connection(app.config['ELASTIC_SEARCH_HOST'], app.config['ELASTIC_SEARCH_INDEX'])
if not esprit.raw.index_exists(conn):
print "Creating ES Index; host:" + str(conn.host) + " port:" + str(conn.port) + " db:" + str(conn.index)
default_mapping = _default_mapping()
if default_mapping is not None:
print "Applying default mapping to index"
esprit.raw.create_index(conn, mapping=default_mapping)
else:
print "ES Index Already Exists; host:" + str(conn.host) + " port:" + str(conn.port) + " db:" + str(conn.index)
# get the list of classes which carry the type-specific mappings to be loaded
mapping_daos = app.config.get("ELASTIC_SEARCH_MAPPINGS", [])
# load each class and execute the "mappings" function to get the mappings
# that need to be imported
for cname in mapping_daos:
klazz = plugin.load_class_raw(cname)
mappings = klazz.mappings()
put_mappings(mappings)
"""
# for each mapping (a class may supply multiple), create them in the index
for key, mapping in mappings.iteritems():
if not esprit.raw.type_exists(conn, key, es_version=es_version):
r = esprit.raw.put_mapping(conn, key, mapping, es_version=es_version)
print "Creating ES Type+Mapping for", key, "; status:", r.status_code
else:
print "ES Type+Mapping already exists for", key
"""
# get the list of classes which will give us example docs to load
example_daos = app.config.get("ELASTIC_SEARCH_EXAMPLE_DOCS", [])
for cname in example_daos:
klazz = plugin.load_class_raw(cname)
example = klazz.example()
type = klazz.get_write_type()
put_example(type, example)
"""
if not esprit.raw.type_exists(conn, type, es_version=es_version):
example.save()
example.delete()
print "Initialising ES Type+Mapping from document for", type
else:
print "Not Initialising from document - ES Type+Mapping already exists for", type
"""
self_inits = app.config.get("ELASTIC_SEARCH_SELF_INIT", [])
for cname in self_inits:
klazz = plugin.load_class_raw(cname)
klazz.self_init()
|
apache-2.0
| -1,562,341,599,889,666,300 | 39.775701 | 118 | 0.646034 | false |
Ictp/indico
|
indico/MaKaC/common/log.py
|
1
|
8114
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from persistent import Persistent
from indico.util.contextManager import ContextManager
from indico.util.struct import iterators
from MaKaC.common.timezoneUtils import nowutc
class ModuleNames:
MATERIAL = "Material"
PAPER_REVIEWING = "Paper Reviewing"
PARTICIPANTS = "Participants"
REGISTRATION = "Registration"
TIMETABLE = "Timetable"
def __init__(self):
pass
class LogItem(Persistent) :
def __init__(self, user, logInfo, module):
self._logId = None
self._logDate = nowutc()
self._logType = "generalLog"
# User who has performed / authorised the logged action
self._responsibleUser = user if user else ContextManager.get("currentUser")
# Indico module, the logged action comes from
self._module = module
# DICTIONARY containing infos that have to be logged
# MUST CONTAIN entry with key : "subject"
# keys as well as values should be meaningful
self._logInfo = logInfo
if self._logInfo.get("subject", None) is None :
self._logInfo["subject"] = "%s : %s : %s" % (self._logDate,
self._module,
self._logType)
def getLogId(self):
return self._logId
def setLogId(self, log_id):
if self._logId is not None :
return False
self._logId = log_id
return True
def getLogDate(self):
return self._logDate
def getLogType(self):
return self._logType
def getResponsible(self):
return self._responsibleUser
def getResponsibleName(self):
if self._responsibleUser is None :
return "System"
else :
return self._responsibleUser.getStraightAbrName()
def getModule(self):
return self._module
def getLogInfo(self):
return self._logInfo
def getLogSubject(self):
return self._logInfo["subject"]
def getLogInfoList(self):
"""
Return a list of pairs with the caption and the pre-processed
information to be shown.
"""
info_list = []
for entry in iterators.SortedDictIterator(self._logInfo):
if (entry[0] != "subject"):
caption = entry[0]
value = entry[1]
info_list.append((caption, value))
return info_list
class ActionLogItem(LogItem):
def __init__(self, user, logInfo, module):
LogItem.__init__(self, user, logInfo, module)
self._logType = "actionLog"
class EmailLogItem(LogItem):
"""
self._logInfo expected keys:
- body
- ccList
- toList
"""
def __init__(self, user, logInfo, module):
LogItem.__init__(self, user, logInfo, module)
self._logType = "emailLog"
def getLogBody(self):
return self._logInfo.get("body", "No message")
def getLogCCList(self):
return self._logInfo.get("ccList", "No CC receptors")
def getLogToList(self):
return self._logInfo.get("toList", "No receptors")
def getLogInfoList(self):
"""
Return a list of pairs with the caption and the pre-processed
information to be shown.
"""
info_list = []
info_list.append(("To", ",".join(self.getLogToList())))
info_list.append(("CC", ",".join(self.getLogCCList())))
info_list.append(("Body", self.getLogBody()))
return info_list
class LogHandler(Persistent):
def __init__(self):
self._logLists = {}
self._logLists["generalLog"] = {}
self._logLists["emailLog"] = []
self._logLists["actionLog"] = []
self._logIdGenerator = 0
def _newLogId(self):
self._logIdGenerator += 1
return self._logIdGenerator
def _lastLogId(self):
return self._logIdGenerator
@staticmethod
def _cmpLogDate(logItem1, logItem2):
return cmp(logItem2.getLogDate(), logItem1.getLogDate())
@staticmethod
def _cmpLogModule(logItem1, logItem2):
return cmp(logItem1.getModule(), logItem2.getModule())
@staticmethod
def _cmpLogSubject(logItem1, logItem2):
return cmp(logItem1.getLogSubject(), logItem2.getLogSubject())
@staticmethod
def _cmpLogRecipients(logItem1, logItem2):
return cmp(logItem1.getLogRecipients(), logItem2.getLogRecipients())
@staticmethod
def _cmpLogResponsibleName(logItem1, logItem2):
return cmp(logItem1.getResponsibleName(), logItem2.getResponsibleName())
@staticmethod
def _cmpLogType(logItem1, logItem2):
return cmp(logItem1.getLogType(), logItem2.getLogType())
@staticmethod
def _sortLogList(log_list, order="date"):
if order == "date" :
log_list.sort(LogHandler._cmpLogDate)
elif order == "subject" :
log_list.sort(LogHandler._cmpLogSubject)
elif order == "recipients" :
log_list.sort(LogHandler._cmpLogRecipients)
elif order == "responsible" :
log_list.sort(LogHandler._cmpLogResponsibleName)
elif order == "module" :
log_list.sort(LogHandler._cmpLogModule)
elif order == "type" :
log_list.sort(LogHandler._cmpLogType)
return log_list
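        # Illustrative: order may be "date" (newest first), "subject",
        # "recipients", "responsible", "module" or "type"; any other value
        # returns the list in its original order.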
def getLogList(self, log_type="general", key="", order="date"):
"""
log_type can be 'email', 'action', 'general' or 'custom'
"""
if log_type == "email" :
log_list = self._logLists["emailLog"]
elif log_type == "action" :
log_list = self._logLists["actionLog"]
elif log_type == "custom" :
log_list = self._getCustomLogList(key)
else:
log_list = self._logLists["generalLog"].values()
return LogHandler._sortLogList(log_list, order)
def _getCustomLogList(self, key):
log_list = []
for li in self._logLists["generalLog"].values() :
if li.getResponsibleName().lower().find(key.lower()) >= 0 :
log_list.append(li)
else :
for v in li.getLogInfo().values() :
value = "%s" % v
if value.lower().find(key.lower()) >= 0 :
log_list.append(li)
break
return log_list
def getLogItemById(self, logId):
if logId is None :
return None
return self._logLists["generalLog"].get(logId, None)
def _addLogItem(self, logItem):
if logItem is None :
return False
logItem.setLogId(self._newLogId())
self._logLists[logItem.getLogType()].append(logItem)
self._logLists["generalLog"]["%s" % self._lastLogId()] = logItem
self.notifyModification()
return True
def logEmail(self, logInfo, module, user=None):
if logInfo is None :
return False
logItem = EmailLogItem(user, logInfo, module)
self._addLogItem(logItem)
return True
def logAction(self, logInfo, module, user=None):
if logInfo is None :
return False
logItem = ActionLogItem(user, logInfo, module)
self._addLogItem(logItem)
return True
def notifyModification(self):
self._p_changed = 1
|
gpl-3.0
| -8,128,992,374,017,895,000 | 30.449612 | 83 | 0.599704 | false |
palankai/baluster
|
tests/test_nested.py
|
1
|
1212
|
from baluster import Baluster, placeholders
class CompositeRoot(Baluster):
@placeholders.factory
def value(self, root):
return 2
class subns(Baluster):
_closed = False
@placeholders.factory
def value(self, root):
return 1
@value.close
def close_value(self, root, resource):
self._closed = True
class subns_2(Baluster):
@placeholders.factory
def value(self, root):
return root.subns.value + 3
class TestNested:
def test_sanity(self):
obj = CompositeRoot()
assert obj.value == 2
assert obj.subns.value == 1
def test_cross_access(self):
obj = CompositeRoot()
obj.subns.value = 3
assert obj.subns_2.value == 6
def test_nested_close(self):
obj = CompositeRoot()
obj.subns.value
obj.close()
assert obj.subns._closed is True
def test_nested_copy(self):
obj = CompositeRoot()
copyA = obj.partial_copy('subns.value')
obj.subns.value = 3
copyB = obj.partial_copy('subns.value')
assert copyA.subns_2.value == 4
assert copyB.subns_2.value == 6
|
mit
| 7,758,543,138,161,990,000 | 19.896552 | 47 | 0.578383 | false |
crossroadchurch/paul
|
openlp/core/ui/settingsdialog.py
|
1
|
3292
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The UI widgets of the settings dialog.
"""
from PyQt4 import QtCore, QtGui
from openlp.core.common import translate
from openlp.core.lib import build_icon
from openlp.core.lib.ui import create_button_box
class Ui_SettingsDialog(object):
"""
The UI widgets of the settings dialog.
"""
def setupUi(self, settings_dialog):
"""
Set up the UI
"""
settings_dialog.setObjectName('settings_dialog')
settings_dialog.setWindowIcon(build_icon(u':/icon/openlp-logo.svg'))
settings_dialog.resize(800, 700)
self.dialog_layout = QtGui.QGridLayout(settings_dialog)
self.dialog_layout.setObjectName('dialog_layout')
self.dialog_layout.setMargin(8)
self.setting_list_widget = QtGui.QListWidget(settings_dialog)
self.setting_list_widget.setUniformItemSizes(True)
self.setting_list_widget.setMinimumSize(QtCore.QSize(150, 0))
self.setting_list_widget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setting_list_widget.setObjectName('setting_list_widget')
self.dialog_layout.addWidget(self.setting_list_widget, 0, 0, 1, 1)
self.stacked_layout = QtGui.QStackedLayout()
self.stacked_layout.setObjectName('stacked_layout')
self.dialog_layout.addLayout(self.stacked_layout, 0, 1, 1, 1)
self.button_box = create_button_box(settings_dialog, 'button_box', ['cancel', 'ok'])
self.dialog_layout.addWidget(self.button_box, 1, 1, 1, 1)
self.retranslateUi(settings_dialog)
def retranslateUi(self, settings_dialog):
"""
Translate the UI on the fly
"""
settings_dialog.setWindowTitle(translate('OpenLP.SettingsForm', 'Configure OpenLP'))
|
gpl-2.0
| -6,551,662,760,534,454,000 | 51.253968 | 92 | 0.559538 | false |
icereval/scrapi
|
scrapi/base/__init__.py
|
1
|
6929
|
# Classes for scrAPI Harvesters
from __future__ import unicode_literals
import abc
import json
import logging
from datetime import timedelta, date
from lxml import etree
from scrapi import util
from scrapi import requests
from scrapi import registry
from scrapi import settings
from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import updated_schema, build_properties
from scrapi.linter.document import RawDocument, NormalizedDocument
from scrapi.base.transformer import XMLTransformer, JSONTransformer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
etree.set_default_parser(etree.XMLParser(recover=True))
class HarvesterMeta(abc.ABCMeta):
def __init__(cls, name, bases, dct):
super(HarvesterMeta, cls).__init__(name, bases, dct)
if len(cls.__abstractmethods__) == 0 and cls.short_name not in settings.disabled:
registry[cls.short_name] = cls()
else:
logger.info('Class {} not added to registry'.format(cls.__name__))
class BaseHarvester(object):
""" This is a base class that all harvesters should inheret from
Defines the copy to unicode method, which is useful for getting standard
unicode out of xml results.
"""
__metaclass__ = HarvesterMeta
@abc.abstractproperty
def short_name(self):
raise NotImplementedError
@abc.abstractproperty
def long_name(self):
raise NotImplementedError
@abc.abstractproperty
def url(self):
raise NotImplementedError
@abc.abstractproperty
def file_format(self):
raise NotImplementedError
@abc.abstractmethod
def harvest(self, start_date=None, end_date=None):
raise NotImplementedError
@abc.abstractmethod
def normalize(self, raw_doc):
raise NotImplementedError
@property
def run_at(self):
return {
'hour': 22,
'minute': 59,
'day_of_week': 'mon-fri',
}
class JSONHarvester(BaseHarvester, JSONTransformer):
file_format = 'json'
def normalize(self, raw_doc):
transformed = self.transform(json.loads(raw_doc['doc']), fail=settings.RAISE_IN_TRANSFORMER)
transformed['shareProperties'] = {
'source': self.short_name
}
return NormalizedDocument(transformed)
class XMLHarvester(BaseHarvester, XMLTransformer):
file_format = 'xml'
def normalize(self, raw_doc):
transformed = self.transform(etree.XML(raw_doc['doc']), fail=settings.RAISE_IN_TRANSFORMER)
transformed['shareProperties'] = {
'source': self.short_name
}
return NormalizedDocument(transformed)
class OAIHarvester(XMLHarvester):
""" Create a harvester with a oai_dc namespace, that will harvest
documents within a certain date range
Contains functions for harvesting from an OAI provider, normalizing,
and outputting in a way that scrapi can understand, in the most
generic terms possible.
For more information, see the OAI PMH specification:
http://www.openarchives.org/OAI/openarchivesprotocol.html
"""
record_encoding = None
DEFAULT_ENCODING = 'UTF-8'
RESUMPTION = '&resumptionToken='
RECORDS_URL = '?verb=ListRecords'
META_PREFIX_DATE = '&metadataPrefix=oai_dc&from={}&until={}'
    # Override these variables as required
namespaces = {
'dc': 'http://purl.org/dc/elements/1.1/',
'ns0': 'http://www.openarchives.org/OAI/2.0/',
'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
}
timeout = 0.5
approved_sets = None
timezone_granularity = False
property_list = ['date', 'type']
@property
def schema(self):
properties = {
'otherProperties': build_properties(*[(item, (
'//dc:{}/node()'.format(item),
'//ns0:{}/node()'.format(item),
self.resolve_property)
) for item in self.property_list])
}
return updated_schema(OAISCHEMA, properties)
def resolve_property(self, dc, ns0):
ret = dc + ns0
return ret[0] if len(ret) == 1 else ret
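        # Illustrative: resolve_property(['2015'], []) -> '2015', while
        # resolve_property(['a'], ['b']) -> ['a', 'b'].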
def harvest(self, start_date=None, end_date=None):
start_date = (start_date or date.today() - timedelta(settings.DAYS_BACK)).isoformat()
end_date = (end_date or date.today()).isoformat()
if self.timezone_granularity:
start_date += 'T00:00:00Z'
end_date += 'T00:00:00Z'
records_url = self.base_url + self.RECORDS_URL
request_url = records_url + self.META_PREFIX_DATE.format(start_date, end_date)
records = self.get_records(request_url, start_date, end_date)
rawdoc_list = []
for record in records:
doc_id = record.xpath(
'ns0:header/ns0:identifier', namespaces=self.namespaces)[0].text
record = etree.tostring(record, encoding=self.record_encoding)
rawdoc_list.append(RawDocument({
'doc': record,
'source': util.copy_to_unicode(self.short_name),
'docID': util.copy_to_unicode(doc_id),
'filetype': 'xml'
}))
return rawdoc_list
def get_records(self, url, start_date, end_date, resump_token=''):
data = requests.get(url, throttle=self.timeout)
doc = etree.XML(data.content)
records = doc.xpath(
'//ns0:record',
namespaces=self.namespaces
)
token = doc.xpath(
'//ns0:resumptionToken/node()',
namespaces=self.namespaces
)
if len(token) == 1:
base_url = url.replace(self.META_PREFIX_DATE.format(start_date, end_date), '')
base_url = base_url.replace(self.RESUMPTION + resump_token, '')
url = base_url + self.RESUMPTION + token[0]
records += self.get_records(url, start_date, end_date, resump_token=token[0])
return records
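        # Illustrative request flow (hypothetical provider): the first call
        # fetches
        #   <base>?verb=ListRecords&metadataPrefix=oai_dc&from=...&until=...
        # and every non-empty <resumptionToken> in the response triggers a
        # follow-up of the form
        #   <base>?verb=ListRecords&resumptionToken=<token>
        # until the provider stops returning a token.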
def normalize(self, raw_doc):
str_result = raw_doc.get('doc')
result = etree.XML(str_result)
if self.approved_sets:
set_spec = result.xpath(
'ns0:header/ns0:setSpec/node()',
namespaces=self.namespaces
)
# check if there's an intersection between the approved sets and the
# setSpec list provided in the record. If there isn't, don't normalize.
if not {x.replace('publication:', '') for x in set_spec}.intersection(self.approved_sets):
logger.info('Series {} not in approved list'.format(set_spec))
return None
status = result.xpath('ns0:header/@status', namespaces=self.namespaces)
if status and status[0] == 'deleted':
logger.info('Deleted record, not normalizing {}'.format(raw_doc['docID']))
return None
return super(OAIHarvester, self).normalize(raw_doc)
|
apache-2.0
| -8,320,974,376,876,299,000 | 31.530516 | 102 | 0.625487 | false |
tchellomello/home-assistant
|
homeassistant/components/template/trigger.py
|
1
|
3430
|
"""Offer template automation rules."""
import logging
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import CONF_FOR, CONF_PLATFORM, CONF_VALUE_TEMPLATE
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.event import (
TrackTemplate,
async_call_later,
async_track_template_result,
)
from homeassistant.helpers.template import result_as_boolean
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = IF_ACTION_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "template",
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_FOR): cv.positive_time_period_template,
}
)
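# A minimal trigger configuration accepted by this schema (entity and
# threshold are illustrative):
#
#   trigger:
#     platform: template
#     value_template: "{{ states('sensor.temperature') | float > 25 }}"
#     for: "00:02:00"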
async def async_attach_trigger(
hass, config, action, automation_info, *, platform_type="template"
):
"""Listen for state changes based on configuration."""
value_template = config.get(CONF_VALUE_TEMPLATE)
value_template.hass = hass
time_delta = config.get(CONF_FOR)
template.attach(hass, time_delta)
delay_cancel = None
@callback
def template_listener(event, updates):
"""Listen for state changes and calls action."""
nonlocal delay_cancel
result = updates.pop().result
if delay_cancel:
# pylint: disable=not-callable
delay_cancel()
delay_cancel = None
if not result_as_boolean(result):
return
entity_id = event.data.get("entity_id")
from_s = event.data.get("old_state")
to_s = event.data.get("new_state")
@callback
def call_action(*_):
"""Call action with right context."""
hass.async_run_job(
action,
{
"trigger": {
"platform": "template",
"entity_id": entity_id,
"from_state": from_s,
"to_state": to_s,
"for": time_delta if not time_delta else period,
"description": f"{entity_id} via template",
}
},
(to_s.context if to_s else None),
)
if not time_delta:
call_action()
return
variables = {
"trigger": {
"platform": platform_type,
"entity_id": entity_id,
"from_state": from_s,
"to_state": to_s,
}
}
try:
period = cv.positive_time_period(
template.render_complex(time_delta, variables)
)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error(
"Error rendering '%s' for template: %s", automation_info["name"], ex
)
return
delay_cancel = async_call_later(hass, period.seconds, call_action)
info = async_track_template_result(
hass,
[TrackTemplate(value_template, automation_info["variables"])],
template_listener,
)
unsub = info.async_remove
@callback
def async_remove():
"""Remove state listeners async."""
unsub()
if delay_cancel:
# pylint: disable=not-callable
delay_cancel()
return async_remove
|
apache-2.0
| 9,030,021,066,005,904,000 | 28.568966 | 84 | 0.560933 | false |
daniel-j/lutris
|
lutris/runners/__init__.py
|
1
|
2119
|
"""Generic runner functions."""
# from lutris.util.log import logger
__all__ = (
# Native
"linux",
"steam",
"browser",
"web",
# Microsoft based
"wine",
"winesteam",
"dosbox",
# Multi-system
"mame",
"mess",
"mednafen",
"scummvm",
"residualvm",
"libretro",
"ags",
"higan",
    # Commodore
"fsuae",
"vice",
# Atari
"stella",
"atari800",
"hatari",
"virtualjaguar",
# Nintendo
"snes9x",
"mupen64plus",
"dolphin",
"desmume",
"citra",
"melonds",
# Sony
"ppsspp",
"pcsx2",
"rpcs3",
# Sega
"osmose",
"dgen",
"reicast",
"redream",
# Fantasy consoles
"pico8",
# Misc legacy systems
"frotz",
"jzintv",
"o2em",
"zdoom",
"tic80",
)
class InvalidRunner(Exception):
def __init__(self, message):
self.message = message
class RunnerInstallationError(Exception):
def __init__(self, message):
self.message = message
class NonInstallableRunnerError(Exception):
def __init__(self, message):
self.message = message
def get_runner_module(runner_name):
if runner_name not in __all__:
raise InvalidRunner("Invalid runner name '%s'" % runner_name)
return __import__(
"lutris.runners.%s" % runner_name, globals(), locals(), [runner_name], 0
)
def import_runner(runner_name):
"""Dynamically import a runner class."""
runner_module = get_runner_module(runner_name)
if not runner_module:
return None
return getattr(runner_module, runner_name)
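# Illustrative usage (any name from __all__ behaves the same way):
#   import_runner("wine")          -> the runner class (not an instance)
#   import_runner("not_a_runner")  -> raises InvalidRunner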
def import_task(runner, task):
"""Return a runner task."""
runner_module = get_runner_module(runner)
if not runner_module:
return None
return getattr(runner_module, task)
def get_installed(sort=True):
"""Return a list of installed runners (class instances)."""
installed = []
for runner_name in __all__:
runner = import_runner(runner_name)()
if runner.is_installed():
installed.append(runner)
return sorted(installed) if sort else installed
|
gpl-3.0
| 6,627,909,325,748,285,000 | 19.375 | 80 | 0.583766 | false |
Diti24/python-ivi
|
ivi/agilent/agilent3000A.py
|
1
|
6792
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent2000A import *
import numpy as np
import struct
from .. import ivi
from .. import fgen
OutputMode = set(['function', 'arbitrary'])
StandardWaveformMapping = {
'sine': 'sin',
'square': 'squ',
#'triangle': 'tri',
'ramp_up': 'ramp',
#'ramp_down',
#'dc'
'pulse': 'puls',
'noise': 'nois',
'dc': 'dc',
'sinc': 'sinc',
'exprise': 'expr',
'expfall': 'expf',
'cardiac': 'card',
'gaussian': 'gaus'
}
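# Illustrative: this table maps IVI standard waveform names to the short
# mnemonics the driver sends to the instrument (e.g. 'sine' -> 'sin',
# 'exprise' -> 'expr'); commented-out entries are presumably shapes this
# model does not expose.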
class agilent3000A(agilent2000A, fgen.ArbWfm, fgen.ArbFrequency,
fgen.ArbChannelWfm):
"Agilent InfiniiVision 3000A series IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilent3000A, self).__init__(*args, **kwargs)
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._horizontal_divisions = 10
self._vertical_divisions = 8
# wavegen option
self._output_count = 1
self._output_standard_waveform_mapping = StandardWaveformMapping
self._output_mode_list = OutputMode
self._arbitrary_sample_rate = 0
self._arbitrary_waveform_number_waveforms_max = 0
self._arbitrary_waveform_size_max = 8192
self._arbitrary_waveform_size_min = 2
self._arbitrary_waveform_quantum = 1
self._identity_description = "Agilent InfiniiVision 3000A X-series IVI oscilloscope driver"
self._identity_supported_instrument_models = ['DSOX3012A','DSOX3014A','DSOX3024A',
'DSOX3032A','DSOX3034A','DSOX3052A','DSOX3054A','DSOX3104A','MSOX3012A','MSOX3014A',
'MSOX3024A','MSOX3032A','MSOX3034A','MSOX3052A','MSOX3054A','MSOX3104A']
self._init_outputs()
self._init_channels()
def _get_output_arbitrary_gain(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_gain[index]
def _set_output_arbitrary_gain(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_arbitrary_gain[index] = value
def _get_output_arbitrary_offset(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_offset[index]
def _set_output_arbitrary_offset(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_arbitrary_offset[index] = value
def _get_output_arbitrary_waveform(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_waveform[index]
def _set_output_arbitrary_waveform(self, index, value):
index = ivi.get_index(self._output_name, index)
value = str(value)
self._output_arbitrary_waveform[index] = value
def _get_arbitrary_sample_rate(self):
return self._arbitrary_sample_rate
def _set_arbitrary_sample_rate(self, value):
value = float(value)
self._arbitrary_sample_rate = value
def _get_arbitrary_waveform_number_waveforms_max(self):
return self._arbitrary_waveform_number_waveforms_max
def _get_arbitrary_waveform_size_max(self):
return self._arbitrary_waveform_size_max
def _get_arbitrary_waveform_size_min(self):
return self._arbitrary_waveform_size_min
def _get_arbitrary_waveform_quantum(self):
return self._arbitrary_waveform_quantum
def _arbitrary_waveform_clear(self, handle):
pass
def _arbitrary_waveform_configure(self, index, handle, gain, offset):
self._set_output_arbitrary_waveform(index, handle)
self._set_output_arbitrary_gain(index, gain)
self._set_output_arbitrary_offset(index, offset)
def _arbitrary_waveform_create(self, data):
return "handle"
def _get_output_arbitrary_frequency(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_arbitrary_frequency[index]
def _set_output_arbitrary_frequency(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_arbitrary_frequency[index] = value
def _arbitrary_waveform_create_channel_waveform(self, index, data):
y = None
x = None
if type(data) == list and type(data[0]) == float:
# list
            y = np.array(data)
elif type(data) == np.ndarray and len(data.shape) == 1:
# 1D array
y = data
elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[0] == 1:
            # 2D array, height 1
y = data[0]
elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[1] == 1:
# 2D array, width 1
y = data[:,0]
else:
x, y = ivi.get_sig(data)
if len(y) % self._arbitrary_waveform_quantum != 0:
raise ivi.ValueNotSupportedException()
raw_data = b''
for f in y:
# clip at -1 and 1
if f > 1.0: f = 1.0
if f < -1.0: f = -1.0
raw_data = raw_data + struct.pack('<f', f)
self._write_ieee_block(raw_data, ':%s:arbitrary:data ' % self._output_name[index])
return self._output_name[index]
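# Standalone sketch (not part of the original driver): mirrors the clipping
# and little-endian float packing performed at the end of
# _arbitrary_waveform_create_channel_waveform above, using the struct module
# already imported at the top of this file.
def _pack_waveform_sketch(samples):
    """Clip samples to [-1, 1] and pack them as little-endian 32-bit floats."""
    raw_data = b''
    for f in samples:
        f = max(-1.0, min(1.0, f))  # clip at -1 and 1
        raw_data += struct.pack('<f', f)
    return raw_data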
|
mit
| 3,845,266,033,228,307,500 | 34.375 | 100 | 0.633687 | false |
HazyResearch/metal
|
metal/contrib/info_extraction/mentions.py
|
1
|
6222
|
import numpy as np
class EntityMention(object):
"""A mention of an entity (span of text) in a document
Args:
doc_id: a unique identifier for the document
text: a single string of text corresponding to the document
char_start: the integer offset of the first character in the entity
mention
char_end: the integer offset of the last character in the entity
mention, plus one (so that text[char_start:char_end] returns the
full entity).
tokens: (optional) a list of tokens corresponding to the text.
If None, tokenization on whitespace is the default.
char_offsets: (optional) a list of character offsets corresponding
to the tokens in tokens.
If None, we assume all tokens are separated by a single space.
attributes: (optional) additional lists of attributes corresponding to
the provided tokens (e.g., pos tags, ner tags, types, etc.)
"""
def __init__(
self,
doc_id,
text,
char_start,
char_end,
tokens=None,
char_offsets=None,
mention_id=None,
**attributes,
):
self.doc_id = doc_id
self.text = text
self.char_start = int(char_start)
self.char_end = int(char_end)
self.mention_id = mention_id if mention_id else hash(self)
self.entity = text[self.char_start : self.char_end]
self.tokens = tokens if tokens is not None else text.split()
self.char_offsets = self._get_char_offsets(char_offsets)
# Convert exclusive character offsets to inclusive token indices
self.word_start = self.char_to_word_idx(self.char_start)
self.word_end = self.char_to_word_idx(self.char_end - 1)
# Store extra attributes
for attr, values in attributes.items():
setattr(self, attr, values)
def _get_char_offsets(self, char_offsets):
"""Store or calculate char_offsets, adding the offset of the doc end"""
        if char_offsets:
            char_offsets.append(len(self.text))
else:
char_offsets = np.zeros(len(self.tokens) + 1)
for i, tok in enumerate(self.tokens):
# Add 1 to account for the spaces between tokens
char_offsets[i + 1] = char_offsets[i] + len(tok) + 1
char_offsets[-1] = len(self.text)
return np.array(char_offsets)
def word_to_char_idx(self, word_idx):
"""Converts a word index to a character offset
Returns the offset of the first character of the token with the given
index.
"""
return self.char_offsets[word_idx]
def char_to_word_idx(self, char_offset):
"""Converts a character offset to a token index
        Finds the index of the first True (i.e., the first token whose offset
        is past the desired character offset) and subtracts one.
"""
return np.argmax(self.char_offsets > char_offset) - 1
def get_entity_attrib(self, attrib):
        attrib_tokens = getattr(self, attrib, None)
return attrib_tokens[self.word_start : self.word_end + 1]
@property
def words(self):
return self.tokens
def __repr__(self):
return (
f"EntityMention(doc_id={self.doc_id}: '{self.entity}'"
f"({self.char_start}:{self.char_end})"
)
def __hash__(self):
return hash((self.doc_id, self.char_start, self.char_end))
class RelationMention(object):
"""A mention of a relation between two spans of text (entities) in a doc
Args:
doc_id: a unique identifier for the document
text: a single string of text corresponding to the document
entity_positions: a list with two elements, each a tuple of the integer
offsets (in characters) of the corresponding entity in the text so
that text[char_start:char_end] returns the full entity
tokens: (optional) a list of tokens corresponding to the text.
If None, tokenization on whitespace is the default.
char_offsets: (optional) a list of character offsets corresponding
to the tokens in tokens.
If None, we assume all tokens are separated by a single space.
attributes: (optional) additional lists of attributes corresponding to
the provided tokens (e.g., pos tags, ner tags, types, etc.)
TODO: There is currently inefficiency in the way each EntityMention in a
RelationMention stores all properties of a sentence. Instead, create a
Sentence object that each EntityMention points to and store the properties
with the sentence.
"""
def __init__(
self,
doc_id,
text,
entity_positions,
tokens=None,
char_offsets=None,
mention_id=None,
**attributes,
):
self.doc_id = doc_id
self.entity_positions = entity_positions
self.entities = [
EntityMention(doc_id, text, *cp, tokens, char_offsets, **attributes)
for cp in entity_positions
]
self.mention_id = mention_id if mention_id else hash(self)
@property
def text(self):
return self.entities[0].text
@property
def tokens(self):
return self.entities[0].tokens
@property
def words(self):
return self.entities[0].tokens
@property
def word_starts(self):
return [e.word_start for e in self.entities]
@property
def word_ends(self):
return [e.word_end for e in self.entities]
@property
def word_positions(self):
return [(e.word_start, e.word_end) for e in self.entities]
def get_attr(self, attr):
        return getattr(self.entities[0], attr, None)
def __getitem__(self, key):
return self.entities[key]
def __repr__(self):
entities = ", ".join(
[f'"{e.entity}"({e.char_start}:{e.char_end})' for e in self.entities]
)
return f"""RelationMention(doc_id={self.doc_id}: entities=({entities})"""
def __hash__(self):
return hash((self.doc_id, tuple(self.entity_positions)))
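if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): build an
    # EntityMention over a toy sentence and inspect the recovered span and
    # token indices. The document text below is hypothetical.
    doc_text = "Barack Obama was born in Hawaii"
    mention = EntityMention("doc0", doc_text, char_start=0, char_end=12)
    print(mention.entity)  # Barack Obama
    print(mention.word_start, mention.word_end)  # 0 1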
|
apache-2.0
| 566,391,379,007,993,200 | 33.759777 | 81 | 0.613468 | false |
SnakeHunt2012/word2vec
|
word-word/word-word.py
|
1
|
3317
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import stdout
from codecs import open
from json import loads
from numpy import zeros, dot, sqrt
from argparse import ArgumentParser
from heapq import nlargest
import cudamat as cm
def debug(debug_str):
stdout.write(debug_str)
stdout.flush()
def load_vector_dict(tsv_file):
word_dict = {}
with open(tsv_file, "r", encoding = "utf-8") as fd:
for line in fd:
splited_line = line.strip().split("\t")
if len(splited_line) != 2:
continue
word, vector = splited_line
if word not in word_dict:
word_dict[word] = [float(value) for value in vector.split()]
assert len(word_dict[word]) == 200
for word in word_dict:
norm = sqrt(dot(word_dict[word], word_dict[word]))
if norm > 0.0:
word_dict[word] = [val / norm for val in word_dict[word]]
return word_dict
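# Illustrative note (not part of the original script): because every vector
# returned above is normalised to unit length, the dot product of two entries
# is directly their cosine similarity. The path and words below are
# hypothetical.
#
#     word_dict = load_vector_dict("vectors.tsv")
#     cosine = dot(word_dict[u"king"], word_dict[u"queen"])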
def load_suffix_set(tsv_file):
suffix_set = set([])
with open(tsv_file, "r") as fd:
for line in fd:
if len(line.strip().split()) != 1:
continue
suffix_set.add(line.strip())
return suffix_set
def main():
parser = ArgumentParser()
parser.add_argument("dict_file", help = "word2vec file in tsv format")
parser.add_argument("suffix_file", help = "suffix word list")
args = parser.parse_args()
batch_size = 1000
dict_file = args.dict_file
suffix_file = args.suffix_file
word_dict = load_vector_dict(dict_file)
word_list = list(word_dict)
suffix_set = load_suffix_set(suffix_file)
suffix_list = list(suffix_set)
suffix_matrix = zeros((len(suffix_list), 200), dtype = "float32")
for index in xrange(len(suffix_list)):
if suffix_list[index].decode("utf-8") in word_dict:
suffix_matrix[index, :] += word_dict[suffix_list[index].decode("utf-8")]
cm.cublas_init(1000000)
partition_begin = 0
partition_end = 0
cuda_target_matrix = cm.CUDAMatrix(suffix_matrix.transpose())
while partition_begin < len(suffix_list):
if (partition_begin + batch_size) > len(suffix_list):
partition_end = len(suffix_list)
else:
partition_end = partition_begin + batch_size
cuda_source_matrix = cm.CUDAMatrix(suffix_matrix[partition_begin:partition_end, :])
cuda_result_matrix = cm.dot(cuda_source_matrix, cuda_target_matrix)
result_matrix = cuda_result_matrix.asarray()
for index in xrange(partition_end - partition_begin):
source_suffix = suffix_list[index + partition_begin]
            sorted_list = nlargest(30, zip(result_matrix[index, :], range(len(suffix_list))))
#print "%s\t" % source_suffix.encode("utf-8"),
print "%s\t" % source_suffix,
for sorted_item in sorted_list:
if (sorted_item[0] < 0.4):
break
#print "%s" % suffix_list[sorted_item[1]].encode("utf-8"),
print "%s/%f" % (suffix_list[sorted_item[1]], sorted_item[0]),
print
partition_begin = partition_end
if __name__ == "__main__":
main()
|
apache-2.0
| -9,172,534,614,930,218,000 | 32.505051 | 93 | 0.586675 | false |
Telestream/telestream-cloud-python-sdk
|
telestream_cloud_qc_sdk/test/test_drop_frame_type.py
|
1
|
1328
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.drop_frame_type import DropFrameType # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestDropFrameType(unittest.TestCase):
"""DropFrameType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test DropFrameType
            include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.drop_frame_type.DropFrameType() # noqa: E501
if include_optional :
return DropFrameType(
)
else :
return DropFrameType(
)
def testDropFrameType(self):
"""Test DropFrameType"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
mit
| -4,047,097,072,042,222,600 | 24.538462 | 90 | 0.649849 | false |
bearstech/modoboa
|
modoboa/lib/form_utils.py
|
1
|
12669
|
# coding: utf-8
"""Form management utilities."""
from __future__ import unicode_literals
import abc
from collections import OrderedDict
import re
from django.forms import TypedChoiceField
from django.forms.fields import Field
from django.forms.widgets import RadioSelect
from django.forms.widgets import RadioChoiceInput
from django.shortcuts import render
from django.utils.encoding import force_text, force_str
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.lib.exceptions import BadRequest
from modoboa.lib.web_utils import render_to_json_response
ABC = abc.ABCMeta(force_str('ABC'), (object,), {})
class WizardStep(object):
"""A wizard step."""
def __init__(self, uid, formclass, title, formtpl=None, new_args=None):
"""Constructor."""
self.uid = uid
self._cls = formclass
self.title = title
self.formtpl = formtpl
self._new_args = new_args
self._prev = None
self._next = None
self.form = None
@property
def prev(self):
return self._prev
@prev.setter
def prev(self, step):
self._prev = step
@property
def next(self):
return self._next
@next.setter
def next(self, step):
self._next = step
def check_access(self, wizard):
"""Check if this step should be displayed or not."""
return True
def create_form(self, data=None):
"""Instantiate a new form."""
args = []
if self._new_args is not None:
args += self._new_args
if data:
args.append(data)
self.form = self._cls(*args)
class WizardForm(ABC):
"""Custom wizard."""
template_name = "common/wizard_forms.html"
def __init__(self, request, submit_button_label=None):
self.request = request
self.steps = []
self._submit_button_label = submit_button_label
@property
def submit_button_label(self):
if self._submit_button_label is None:
self._submit_button_label = _("Submit")
return self._submit_button_label
@property
def errors(self):
result = {}
for step in self.steps:
for name, value in list(step.form.errors.items()):
if name == '__all__':
continue
result[name] = value
return result
@property
def first_step(self):
"""Return the first step."""
return self.steps[0] if self.steps else None
def add_step(self, step):
"""Add a new step to the wizard."""
if self.steps:
step.prev = self.steps[-1]
self.steps[-1].next = step
self.steps += [step]
def create_forms(self, data=None):
for step in self.steps:
step.create_form(data)
def _get_step_id(self):
"""Retrieve the step identifier from the request."""
stepid = self.request.POST.get("stepid", None)
if stepid is None:
raise BadRequest(_("Invalid request"))
stepid = int(stepid.replace("step", ""))
if stepid < 0:
raise BadRequest(_("Invalid request"))
return min(stepid, len(self.steps))
def previous_step(self):
"""Go back to the previous step."""
stepid = self._get_step_id()
stepid -= 2
self.create_forms(self.request.POST)
for step in self.steps:
step.form.is_valid()
while stepid >= 0:
if self.steps[stepid].check_access(self):
break
stepid -= 1
return render_to_json_response({
"title": self.steps[stepid].title, "id": self.steps[stepid].uid,
"stepid": stepid
})
def next_step(self):
"""Go to the next step if previous forms are valid."""
stepid = self._get_step_id()
self.create_forms(self.request.POST)
statuses = []
for cpt in range(0, stepid):
if self.steps[cpt].check_access(self):
statuses.append(self.steps[cpt].form.is_valid())
if False in statuses:
return render_to_json_response({
"stepid": stepid, "id": self.steps[stepid - 1].uid,
"form_errors": self.errors
}, status=400)
while stepid < len(self.steps):
if self.steps[stepid].check_access(self):
break
stepid += 1
if stepid == len(self.steps):
return self.done()
return render_to_json_response({
"title": self.steps[stepid].title, "id": self.steps[stepid].uid,
"stepid": stepid
})
def extra_context(self, context):
"""Provide additional information to template's context.
"""
pass
def process(self):
"""Process the request."""
if self.request.method == "POST":
if self.request.POST.get("target", "next") == "next":
return self.next_step()
return self.previous_step()
self.create_forms()
context = {"wizard": self}
self.extra_context(context)
return render(self.request, self.template_name, context)
@abc.abstractmethod
def done(self):
"""Method to exexute when all steps are validated.
Must be implemented by all sub classes.
:rtype: HttpResponse
"""
class DynamicForm(object):
"""
A form which accepts dynamic fields.
We consider a field to be dynamic when it can appear multiple
times within the same request.
"""
def _create_field(self, typ, name, value=None, pos=None):
"""Create a new form field.
"""
self.fields[name] = typ(label="", required=False)
if value is not None:
self.fields[name].initial = value
if pos:
order = list(self.fields.keys())
order.remove(name)
order.insert(pos, name)
self.fields = OrderedDict((key, self.fields[key]) for key in order)
def _load_from_qdict(self, qdict, pattern, typ):
"""Load all instances of a field from a ``QueryDict`` object.
:param ``QueryDict`` qdict: a QueryDict object
:param string pattern: pattern used to find field instances
:param typ: a form field class
"""
expr = re.compile(r'%s_\d+' % pattern)
values = []
for k, v in list(qdict.items()):
if k == pattern or expr.match(k):
values.append((k, v))
ndata = self.data.copy()
values.reverse()
for v in values:
if v[0] in self.fields:
continue
self._create_field(typ, v[0])
ndata[v[0]] = v[1]
self.data = ndata
class TabForms(object):
"""
Simple forms container.
This class tries to encapsulate multiple forms that will be
displayed using tabs. It is different from a classical formset
because it can contain different forms.
"""
template_name = "common/tabforms.html"
def __init__(self, request, instances=None, classes=None):
self.request = request
self.instances = {}
to_remove = []
for fd in self.forms:
args = []
kwargs = {}
if "new_args" in fd:
args += fd["new_args"]
if request.method == "POST":
args.append(request.POST)
if instances is not None:
self.instances = instances
mname = "check_%s" % fd["id"]
if hasattr(self, mname):
if not getattr(self, mname)(instances[fd["id"]]):
to_remove += [fd]
continue
kwargs["instance"] = instances[fd["id"]]
if classes is not None and fd["id"] in classes:
fd["instance"] = classes[fd["id"]](*args, **kwargs)
else:
fd["instance"] = fd["cls"](*args, **kwargs)
self.forms = [form for form in self.forms if form not in to_remove]
if self.forms:
self.active_id = self.forms[0]["id"]
def _before_is_valid(self, form):
return True
@property
def errors(self):
"""Return validation errors.
We aggregate all form errors into one dictionary.
:rtype: dict
"""
result = {}
for f in self.forms:
for name, value in list(f['instance'].errors.items()):
if name == '__all__':
continue
result[name] = value
return result
def is_valid(self, mandatory_only=False, optional_only=False):
"""Check if the form is valid.
:param boolean mandatory_only:
:param boolean optional_only:
"""
to_remove = []
for f in self.forms:
if mandatory_only and \
('mandatory' not in f or not f["mandatory"]):
continue
elif optional_only and ('mandatory' in f and f["mandatory"]):
continue
if not self._before_is_valid(f):
to_remove.append(f)
continue
if not f["instance"].is_valid():
self.active_id = f["id"]
return False
self.forms = [f for f in self.forms if f not in to_remove]
return True
@abc.abstractmethod
def save(self):
"""Save objects here.
"""
def remove_tab(self, tabid):
for f in self.forms:
if f["id"] == tabid:
self.forms.remove(f)
break
def __iter__(self):
return self.forward()
def forward(self):
for form in self.forms:
yield form
def extra_context(self, context):
""""Provide additional information to template's context.
"""
pass
@abc.abstractmethod
def done(self):
"""Actions to execute after the form has been validated and saved.
:rtype: HttpResponse instance
"""
def process(self):
"""Process the received request.
"""
if self.request.method == "POST":
if self.is_valid():
self.save()
return self.done()
return render_to_json_response(
{'form_errors': self.errors}, status=400
)
context = {
"tabs": self,
}
if self.forms:
context.update({
"action_label": _("Update"),
"action_classes": "submit",
})
self.extra_context(context)
active_tab_id = self.request.GET.get("active_tab", "default")
if active_tab_id != "default":
context["tabs"].active_id = active_tab_id
return render(self.request, self.template_name, context)
#
# Custom fields from here
#
class CustomRadioInput(RadioChoiceInput):
"""Custom radio input."""
def __unicode__(self):
if "id" in self.attrs:
label_for = ' for="%s"' % self.attrs["id"]
else:
label_for = ""
choice_label = conditional_escape(force_text(self.choice_label))
return mark_safe(
u"<label class='radio-inline' %s>%s %s</label>"
% (label_for, self.tag(), choice_label)
)
class InlineRadioRenderer(RadioSelect.renderer):
"""Custom inline radio renderer."""
def __iter__(self):
for i, choice in enumerate(self.choices):
yield CustomRadioInput(
self.name, self.value, self.attrs.copy(), choice, i
)
def render(self):
return mark_safe(
u"\n".join([u"%s\n" % force_text(w) for w in self])
)
class InlineRadioSelect(RadioSelect):
"""Custom inline radio widget."""
renderer = InlineRadioRenderer
class SeparatorField(Field):
"""Custom field to represent a separator."""
def __init__(self, *args, **kwargs):
kwargs["required"] = False
super(SeparatorField, self).__init__(*args, **kwargs)
class YesNoField(TypedChoiceField):
"""A yes/no form field."""
def __init__(self, *args, **kwargs):
"""Constructor."""
kwargs.update({
"choices": (
(True, ugettext_lazy("Yes")),
(False, ugettext_lazy("No"))
),
"widget": InlineRadioSelect,
"coerce": lambda x: x == "True"
})
super(YesNoField, self).__init__(*args, **kwargs)
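# Usage sketch (illustrative, not part of the module): the custom fields
# above drop into an ordinary Django form. The form and field names below
# are hypothetical.
#
#     from django import forms
#
#     class ExampleSettingsForm(forms.Form):
#         options = SeparatorField(label="Options")
#         enabled = YesNoField(label="Enable feature?")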
|
isc
| 2,693,508,765,597,406,000 | 28.462791 | 79 | 0.546926 | false |
dnbaker/emp
|
python/hll_sketch.py
|
1
|
1366
|
import argparse
import os
import shlex
import subprocess
import multiprocessing
import sys
def sketch_call(tup):
ss, ks, paths = tup
cstr = "flashdans sketch -p1 -k%i -S%i %s" % (ss, ks, " ".join(paths))
subprocess.check_call(shlex.split(cstr))
if __name__ == "__main__":
sketch_range = range(10, 24, 1)
p = argparse.ArgumentParser(
description="This calculates all pairwise distances between "
"genomes for all combinations of parameters."
"This does take a while.")
p.add_argument("--threads", "-p",
default=multiprocessing.cpu_count(), type=int)
p.add_argument('genomes', metavar='paths', type=str, nargs='+',
help=('paths to genomes or a path to a file'
' with one genome per line.'))
p.add_argument("--range-start", default=24, type=int)
p.add_argument("--range-end", default=32, type=int)
args = p.parse_args()
kmer_range = range(args.range_start, args.range_end + 1)
threads = args.threads
paths = args.genomes
path = paths[0]
if os.path.isfile(path) and os.path.isfile(next(open(path)).strip()):
        paths = [line.strip() for line in open(path)]
Spooool = multiprocessing.Pool(threads)
Spooool.map(sketch_call,
((ss, ks, paths) for ss in sketch_range for ks in kmer_range))
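# Example invocation (illustrative): sketch every genome listed one per line
# in a manifest file, using 16 worker processes. The file name below is
# hypothetical.
#
#     python hll_sketch.py -p 16 genome_paths.txt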
|
gpl-3.0
| 9,033,839,705,742,966,000 | 34.947368 | 78 | 0.606149 | false |
tanghaibao/goatools
|
goatools/cli/docopt_parse.py
|
1
|
3852
|
"""Run docopt in GOATOOLS."""
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-present, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
import sys
import re
from docopt import docopt
from goatools.gosubdag.utils import get_kwargs
#pylint: disable=too-few-public-methods
class DocOptParse(object):
"""Run docopt in GOATOOLS."""
def __init__(self, doc, exp_keys, exp_elems):
self.doc = doc # doc string
self.exp_keys = exp_keys if exp_keys else set() # Expected dictionary keys
self.exp_elems = exp_elems if exp_elems else set() # Expected set elements (True/False)
def get_docargs(self, args=None, prt=None, **kws):
"""Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
arg_kws = self._get_docargs(args, prt)
if 'intvals' in kws:
self._set_intvals(arg_kws, kws['intvals'])
return arg_kws
def _get_docargs(self, args_user, prt):
"""Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
if prt is not None:
print("DocOptParse BEFORE docopt: {}".format(args_user))
docargs = docopt(self.doc, args_user)
if prt is not None:
print("DocOptParse AFTER docopt: {}".format(docargs))
kwargs_doc = {re.sub(r'^-{1,2}', '', k):v for k, v in docargs.items()}
self._chk_docopt_kws(kwargs_doc, args_user)
kwargs_usr = get_kwargs(kwargs_doc, self.exp_keys, self.exp_elems)
if prt is not None:
print("DocOptParse AFTER pared: {}".format(kwargs_usr))
for key in ['taxid']:
if key in kwargs_usr:
kwargs_usr[key] = int(kwargs_usr[key])
if prt is not None:
print("DocOptParse AFTER edited/checked: {}".format(kwargs_usr))
return kwargs_usr
@staticmethod
def _set_intvals(kws, keys):
"""Convert keyword values to int."""
for key in keys:
if key in kws:
kws[key] = int(kws[key])
def _chk_docopt_exit(self, args, exp_letters):
"""Check if docopt exit was for an unknown argument."""
if args is None:
args = sys.argv[1:]
keys_all = self.exp_keys.union(self.exp_elems)
if exp_letters:
keys_all |= exp_letters
unknown_args = self._chk_docunknown(args, keys_all)
if unknown_args:
raise RuntimeError("{USAGE}\n **FATAL: UNKNOWN ARGS: {UNK}".format(
USAGE=self.doc, UNK=" ".join(unknown_args)))
def _chk_docopt_kws(self, docdict, exp):
"""Check for common user errors when running from the command-line."""
for key, val in docdict.items():
if isinstance(val, str):
assert '=' not in val, self._err("'=' FOUND IN VALUE", key, val, exp)
elif key != 'help' and key not in self.exp_keys and key not in self.exp_elems:
raise RuntimeError(self._err("UNKNOWN KEY", key, val, exp))
def _err(self, msg, key, val, exp):
return "{DOC}\n{MSG}: KEY({K}) VAL({V}): {EXP}".format(
DOC=self.doc, MSG=msg, K=key, V=val, EXP=" ".join(exp) if exp else '')
@staticmethod
def _chk_docunknown(args, exp):
"""Return any unknown args."""
unknown = []
for arg in args:
if arg[:2] == '--':
val = arg[2:]
if val not in exp:
unknown.append(arg)
elif arg[:1] == '-':
val = arg[1:]
if val not in exp:
unknown.append(arg)
if '-h' in unknown or '--help' in unknown:
return []
return unknown
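# Usage sketch (illustrative, not part of the module): wiring DocOptParse to
# a docopt-style usage string. The doc text, keys, and arguments below are
# hypothetical, and the exact contents of the returned dict depend on
# goatools.gosubdag.utils.get_kwargs.
#
#     _DOC = "Usage: tool [--taxid=TAXID] [--verbose]"
#     objparse = DocOptParse(_DOC, exp_keys={'taxid'}, exp_elems={'verbose'})
#     kws = objparse.get_docargs(['--taxid=9606'], intvals=['taxid'])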
# Copyright (C) 2016-present, DV Klopfenstein, H Tang, All rights reserved.
|
bsd-2-clause
| 713,140,892,205,950,200 | 38.306122 | 100 | 0.570872 | false |
Asana/boto
|
tests/unit/test_connection.py
|
1
|
23327
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import socket
from tests.compat import mock, unittest
from httpretty import HTTPretty
from boto import UserAgent
from boto.compat import json, parse_qs
from boto.connection import AWSQueryConnection, AWSAuthConnection, HTTPRequest
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
class TestListParamsSerialization(unittest.TestCase):
maxDiff = None
def setUp(self):
self.connection = AWSQueryConnection('access_key', 'secret_key')
def test_complex_list_serialization(self):
# This example is taken from the doc string of
# build_complex_list_params.
params = {}
self.connection.build_complex_list_params(
params, [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')],
'ParamName.member', ('One', 'Two', 'Three'))
self.assertDictEqual({
'ParamName.member.1.One': 'foo',
'ParamName.member.1.Two': 'bar',
'ParamName.member.1.Three': 'baz',
'ParamName.member.2.One': 'foo2',
'ParamName.member.2.Two': 'bar2',
'ParamName.member.2.Three': 'baz2',
}, params)
def test_simple_list_serialization(self):
params = {}
self.connection.build_list_params(
params, ['foo', 'bar', 'baz'], 'ParamName.member')
self.assertDictEqual({
'ParamName.member.1': 'foo',
'ParamName.member.2': 'bar',
'ParamName.member.3': 'baz',
}, params)
class MockAWSService(AWSQueryConnection):
"""
Fake AWS Service
This is used to test the AWSQueryConnection object is behaving properly.
"""
APIVersion = '2012-01-01'
def _required_auth_capability(self):
return ['sign-v2']
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, host=None, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None,
validate_certs=True, profile_name=None):
self.region = region
if host is None:
host = self.region.endpoint
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
host, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
class TestAWSAuthConnection(unittest.TestCase):
def test_get_path(self):
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False
)
# Test some sample paths for mangling.
self.assertEqual(conn.get_path('/'), '/')
self.assertEqual(conn.get_path('image.jpg'), '/image.jpg')
self.assertEqual(conn.get_path('folder/image.jpg'), '/folder/image.jpg')
self.assertEqual(conn.get_path('folder//image.jpg'), '/folder//image.jpg')
# Ensure leading slashes aren't removed.
# See https://github.com/boto/boto/issues/1387
self.assertEqual(conn.get_path('/folder//image.jpg'), '/folder//image.jpg')
self.assertEqual(conn.get_path('/folder////image.jpg'), '/folder////image.jpg')
self.assertEqual(conn.get_path('///folder////image.jpg'), '///folder////image.jpg')
def test_connection_behind_proxy(self):
os.environ['http_proxy'] = "http://john.doe:p4ssw0rd@127.0.0.1:8180"
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False
)
self.assertEqual(conn.proxy, '127.0.0.1')
self.assertEqual(conn.proxy_user, 'john.doe')
self.assertEqual(conn.proxy_pass, 'p4ssw0rd')
self.assertEqual(conn.proxy_port, '8180')
del os.environ['http_proxy']
def test_get_proxy_url_with_auth(self):
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False,
proxy="127.0.0.1",
proxy_user="john.doe",
proxy_pass="p4ssw0rd",
proxy_port="8180"
)
self.assertEqual(conn.get_proxy_url_with_auth(), 'http://john.doe:p4ssw0rd@127.0.0.1:8180')
def test_build_base_http_request_noproxy(self):
os.environ['no_proxy'] = 'mockservice.cc-zone-1.amazonaws.com'
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False,
proxy="127.0.0.1",
proxy_user="john.doe",
proxy_pass="p4ssw0rd",
proxy_port="8180"
)
request = conn.build_base_http_request('GET', '/', None)
del os.environ['no_proxy']
self.assertEqual(request.path, '/')
def test_connection_behind_proxy_without_explicit_port(self):
os.environ['http_proxy'] = "http://127.0.0.1"
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False,
port=8180
)
self.assertEqual(conn.proxy, '127.0.0.1')
self.assertEqual(conn.proxy_port, 8180)
del os.environ['http_proxy']
@mock.patch.object(socket, 'create_connection')
@mock.patch('boto.compat.http_client.HTTPResponse')
@mock.patch('boto.compat.http_client.ssl')
def test_proxy_ssl(self, ssl_mock, http_response_mock,
create_connection_mock):
type(http_response_mock.return_value).status = mock.PropertyMock(
return_value=200)
conn = AWSAuthConnection(
'mockservice.cc-zone-1.amazonaws.com',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
suppress_consec_slashes=False,
proxy_port=80
)
conn.https_validate_certificates = False
# Attempt to call proxy_ssl and make sure it works
conn.proxy_ssl('mockservice.cc-zone-1.amazonaws.com', 80)
# this tests the proper setting of the host_header in v4 signing
def test_host_header_with_nonstandard_port(self):
# test standard port first
conn = V4AuthConnection(
'testhost',
aws_access_key_id='access_key',
aws_secret_access_key='secret')
request = conn.build_base_http_request(
method='POST', path='/', auth_path=None, params=None, headers=None,
data='', host=None)
conn.set_host_header(request)
self.assertEqual(request.headers['Host'], 'testhost')
# next, test non-standard port
conn = V4AuthConnection(
'testhost',
aws_access_key_id='access_key',
aws_secret_access_key='secret',
port=8773)
request = conn.build_base_http_request(
method='POST', path='/', auth_path=None, params=None, headers=None,
data='', host=None)
conn.set_host_header(request)
self.assertEqual(request.headers['Host'], 'testhost:8773')
class V4AuthConnection(AWSAuthConnection):
def __init__(self, host, aws_access_key_id, aws_secret_access_key, port=443):
AWSAuthConnection.__init__(
self, host, aws_access_key_id, aws_secret_access_key, port=port)
def _required_auth_capability(self):
return ['hmac-v4']
class TestAWSQueryConnection(unittest.TestCase):
def setUp(self):
self.region = RegionInfo(
name='cc-zone-1',
endpoint='mockservice.cc-zone-1.amazonaws.com',
connection_cls=MockAWSService)
HTTPretty.enable()
def tearDown(self):
HTTPretty.disable()
class TestAWSQueryConnectionSimple(TestAWSQueryConnection):
def test_query_connection_basis(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
self.assertEqual(conn.host, 'mockservice.cc-zone-1.amazonaws.com')
def test_query_connection_noproxy(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
os.environ['no_proxy'] = self.region.endpoint
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
proxy="NON_EXISTENT_HOSTNAME",
proxy_port="3128")
resp = conn.make_request('myCmd',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
del os.environ['no_proxy']
args = parse_qs(HTTPretty.last_request.body)
self.assertEqual(args[b'AWSAccessKeyId'], [b'access_key'])
def test_query_connection_noproxy_nosecure(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'insecure'}),
content_type='application/json')
os.environ['no_proxy'] = self.region.endpoint
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
proxy="NON_EXISTENT_HOSTNAME",
proxy_port="3128",
is_secure=False)
resp = conn.make_request('myCmd',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
del os.environ['no_proxy']
args = parse_qs(HTTPretty.last_request.body)
self.assertEqual(args[b'AWSAccessKeyId'], [b'access_key'])
def test_single_command(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp = conn.make_request('myCmd',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
args = parse_qs(HTTPretty.last_request.body)
self.assertEqual(args[b'AWSAccessKeyId'], [b'access_key'])
self.assertEqual(args[b'SignatureMethod'], [b'HmacSHA256'])
self.assertEqual(args[b'Version'], [conn.APIVersion.encode('utf-8')])
self.assertEqual(args[b'par1'], [b'foo'])
self.assertEqual(args[b'par2'], [b'baz'])
self.assertEqual(resp.read(), b'{"test": "secure"}')
def test_multi_commands(self):
"""Check connection re-use"""
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp1 = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
body1 = parse_qs(HTTPretty.last_request.body)
resp2 = conn.make_request('myCmd2',
{'par3': 'bar', 'par4': 'narf'},
"/",
"POST")
body2 = parse_qs(HTTPretty.last_request.body)
self.assertEqual(body1[b'par1'], [b'foo'])
self.assertEqual(body1[b'par2'], [b'baz'])
with self.assertRaises(KeyError):
body1[b'par3']
self.assertEqual(body2[b'par3'], [b'bar'])
self.assertEqual(body2[b'par4'], [b'narf'])
with self.assertRaises(KeyError):
            body2[b'par1']
self.assertEqual(resp1.read(), b'{"test": "secure"}')
self.assertEqual(resp2.read(), b'{"test": "secure"}')
def test_non_secure(self):
HTTPretty.register_uri(HTTPretty.POST,
'http://%s/' % self.region.endpoint,
json.dumps({'test': 'normal'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
is_secure=False)
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
self.assertEqual(resp.read(), b'{"test": "normal"}')
def test_alternate_port(self):
HTTPretty.register_uri(HTTPretty.POST,
'http://%s:8080/' % self.region.endpoint,
json.dumps({'test': 'alternate'}),
content_type='application/json')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
port=8080,
is_secure=False)
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
self.assertEqual(resp.read(), b'{"test": "alternate"}')
def test_temp_failure(self):
responses = [HTTPretty.Response(body="{'test': 'fail'}", status=500),
HTTPretty.Response(body="{'test': 'success'}", status=200)]
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/temp_fail/' % self.region.endpoint,
responses=responses)
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
'/temp_fail/',
'POST')
self.assertEqual(resp.read(), b"{'test': 'success'}")
def test_unhandled_exception(self):
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/temp_exception/' % self.region.endpoint,
responses=[])
def fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
raise socket.timeout('fake error')
socket.create_connection = fake_connection
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
conn.num_retries = 0
with self.assertRaises(socket.error):
resp = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
'/temp_exception/',
'POST')
def test_connection_close(self):
"""Check connection re-use after close header is received"""
HTTPretty.register_uri(HTTPretty.POST,
'https://%s/' % self.region.endpoint,
json.dumps({'test': 'secure'}),
content_type='application/json',
connection='close')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
def mock_put_conn(*args, **kwargs):
raise Exception('put_http_connection should not be called!')
conn.put_http_connection = mock_put_conn
resp1 = conn.make_request('myCmd1',
{'par1': 'foo', 'par2': 'baz'},
"/",
"POST")
# If we've gotten this far then no exception was raised
# by attempting to put the connection back into the pool
# Now let's just confirm the close header was actually
# set or we have another problem.
self.assertEqual(resp1.getheader('connection'), 'close')
def test_port_pooling(self):
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret',
port=8080)
# Pick a connection, then put it back
con1 = conn.get_http_connection(conn.host, conn.port, conn.is_secure)
conn.put_http_connection(conn.host, conn.port, conn.is_secure, con1)
# Pick another connection, which hopefully is the same yet again
con2 = conn.get_http_connection(conn.host, conn.port, conn.is_secure)
conn.put_http_connection(conn.host, conn.port, conn.is_secure, con2)
self.assertEqual(con1, con2)
# Change the port and make sure a new connection is made
conn.port = 8081
con3 = conn.get_http_connection(conn.host, conn.port, conn.is_secure)
conn.put_http_connection(conn.host, conn.port, conn.is_secure, con3)
self.assertNotEqual(con1, con3)
class TestAWSQueryStatus(TestAWSQueryConnection):
def test_get_status(self):
HTTPretty.register_uri(HTTPretty.GET,
'https://%s/status' % self.region.endpoint,
'<status>ok</status>',
content_type='text/xml')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
resp = conn.get_status('getStatus',
{'par1': 'foo', 'par2': 'baz'},
'status')
self.assertEqual(resp, "ok")
def test_get_status_blank_error(self):
HTTPretty.register_uri(HTTPretty.GET,
'https://%s/status' % self.region.endpoint,
'',
content_type='text/xml')
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
with self.assertRaises(BotoServerError):
resp = conn.get_status('getStatus',
{'par1': 'foo', 'par2': 'baz'},
'status')
def test_get_status_error(self):
HTTPretty.register_uri(HTTPretty.GET,
'https://%s/status' % self.region.endpoint,
'<status>error</status>',
content_type='text/xml',
status=400)
conn = self.region.connect(aws_access_key_id='access_key',
aws_secret_access_key='secret')
with self.assertRaises(BotoServerError):
resp = conn.get_status('getStatus',
{'par1': 'foo', 'par2': 'baz'},
'status')
class TestHTTPRequest(unittest.TestCase):
def test_user_agent_not_url_encoded(self):
headers = {'Some-Header': u'should be url encoded',
'User-Agent': UserAgent}
request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None,
None, {}, headers, 'Body')
mock_connection = mock.Mock()
# Create a method that preserves the headers at the time of
# authorization.
def mock_add_auth(req, **kwargs):
mock_connection.headers_at_auth = req.headers.copy()
mock_connection._auth_handler.add_auth = mock_add_auth
request.authorize(mock_connection)
# Ensure the headers at authorization are as expected i.e.
# the user agent header was not url encoded but the other header was.
self.assertEqual(mock_connection.headers_at_auth,
{'Some-Header': 'should%20be%20url%20encoded',
'User-Agent': UserAgent})
def test_content_length_str(self):
request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None,
None, {}, {}, 'Body')
mock_connection = mock.Mock()
request.authorize(mock_connection)
# Ensure Content-Length header is a str. This is more explicit than
# relying on other code cast the value later. (Python 2.7.0, for
# example, assumes headers are of type str.)
self.assertIsInstance(request.headers['Content-Length'], str)
if __name__ == '__main__':
unittest.main()
|
mit
| -7,608,701,348,387,864,000 | 40.879713 | 99 | 0.533759 | false |
louisrli/grabrc-client
|
tests/testbase.py
|
1
|
1922
|
import unittest
import os
import shutil
import subprocess
from client import const
class BaseIntegrationTest(unittest.TestCase):
def setUp(self):
"""
Clears out old temporary test directories, then
creates a new one.
"""
self.TMPDIR = "tmp-grabrc-test"
self.BACKUP_SUFFIX = const.Const.BACKUP_SUFFIX
self.TEST_USER = "louisrli"
self.script_dir = os.path.dirname(__file__)
self.client = self.script_dir + "/../client/client.py"
self.TEST_DIR = self.script_dir + "/" + self.TMPDIR
if os.path.exists(self.TEST_DIR):
shutil.rmtree(self.TEST_DIR)
os.mkdir(self.TEST_DIR)
os.chdir(self.TEST_DIR)
self.__setup_config()
def doCleanups(self):
"""
Delete the temporary test directory.
"""
shutil.rmtree(self.TEST_DIR)
def __setup_config(self):
"""
Overwrites the current configuration file with the test user
"""
config = open(os.path.expanduser("~/.grabrc"), "w+")
config.write(self.TEST_USER)
# Helper functions, usable by subclasses
def _path_in_tmpdir(self, filename):
"""Returns absolute path of a filename in the tmpdir"""
return self.TEST_DIR + "/" + filename
def _read_output(self, filename):
"""
Returns the contents of a local file as a string.
Strips whitespace.
"""
f = open(self._path_in_tmpdir(filename))
contents = f.read()
f.close()
return contents.strip()
def _execute_client(self, *args):
""" Some tests will expect failure here """
return subprocess.call([self.client] + list(args))
def _execute_client_output(self, *args):
# Command must have exit code of 0
return subprocess.check_output([self.client] + list(args))
if __name__ == '__main__':
unittest.main()
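# Illustrative subclass (not part of the original suite): shows how a
# concrete integration test can build on BaseIntegrationTest. The client
# flag used here is hypothetical.
#
#     class ExampleClientTest(BaseIntegrationTest):
#         def test_client_help_exits_cleanly(self):
#             self.assertEqual(self._execute_client("--help"), 0)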
|
gpl-3.0
| -6,224,113,858,382,469,000 | 28.121212 | 68 | 0.594693 | false |
uclapi/uclapi
|
backend/uclapi/roombookings/models.py
|
1
|
8730
|
from __future__ import unicode_literals
from django.db import models
models.options.DEFAULT_NAMES += ('_DATABASE',)
class Booking(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
roomid = models.CharField(max_length=160, blank=True, null=True)
sitename = models.CharField(max_length=320, blank=True, null=True)
roomname = models.CharField(max_length=320, blank=True, null=True)
bookabletype = models.CharField(max_length=40, blank=True, null=True)
slotid = models.BigIntegerField(primary_key=True)
bookingid = models.CharField(max_length=80, blank=True, null=True)
starttime = models.CharField(max_length=80, blank=True, null=True)
finishtime = models.CharField(max_length=20, blank=True, null=True)
startdatetime = models.DateTimeField(blank=True, null=True)
finishdatetime = models.DateTimeField(blank=True, null=True)
weeknumber = models.FloatField(blank=True, null=True)
condisplayname = models.CharField(max_length=4000, blank=True, null=True)
phone = models.CharField(max_length=160, blank=True, null=True)
descrip = models.CharField(max_length=400, blank=True, null=True)
title = models.CharField(max_length=523, blank=True, null=True)
class Meta:
managed = False
db_table = 'CMIS_UCLAPI_V_BOOKINGS'
_DATABASE = 'roombookings'
class BookingA(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
roomid = models.CharField(max_length=160, blank=True, null=True)
sitename = models.CharField(max_length=320, blank=True, null=True)
roomname = models.CharField(max_length=320, blank=True, null=True)
bookabletype = models.CharField(max_length=40, blank=True, null=True)
slotid = models.BigIntegerField(null=True)
bookingid = models.CharField(max_length=80, blank=True, null=True)
starttime = models.CharField(max_length=80, blank=True, null=True)
finishtime = models.CharField(max_length=20, blank=True, null=True)
startdatetime = models.DateTimeField(blank=True, null=True)
finishdatetime = models.DateTimeField(blank=True, null=True)
weeknumber = models.FloatField(blank=True, null=True)
condisplayname = models.CharField(max_length=4000, blank=True, null=True)
phone = models.CharField(max_length=160, blank=True, null=True)
descrip = models.CharField(max_length=400, blank=True, null=True)
title = models.CharField(max_length=523, blank=True, null=True)
class Meta:
_DATABASE = 'gencache'
class BookingB(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
roomid = models.CharField(max_length=160, blank=True, null=True)
sitename = models.CharField(max_length=320, blank=True, null=True)
roomname = models.CharField(max_length=320, blank=True, null=True)
bookabletype = models.CharField(max_length=40, blank=True, null=True)
slotid = models.BigIntegerField(null=True)
bookingid = models.CharField(max_length=80, blank=True, null=True)
starttime = models.CharField(max_length=80, blank=True, null=True)
finishtime = models.CharField(max_length=20, blank=True, null=True)
startdatetime = models.DateTimeField(blank=True, null=True)
finishdatetime = models.DateTimeField(blank=True, null=True)
weeknumber = models.FloatField(blank=True, null=True)
condisplayname = models.CharField(max_length=4000, blank=True, null=True)
phone = models.CharField(max_length=160, blank=True, null=True)
descrip = models.CharField(max_length=400, blank=True, null=True)
title = models.CharField(max_length=523, blank=True, null=True)
class Meta:
_DATABASE = 'gencache'
class Room(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
sitename = models.CharField(max_length=320, blank=True, null=True)
address1 = models.CharField(max_length=320, blank=True, null=True)
address2 = models.CharField(max_length=320, blank=True, null=True)
address3 = models.CharField(max_length=320, blank=True, null=True)
address4 = models.CharField(max_length=320, blank=True, null=True)
roomid = models.CharField(max_length=40, primary_key=True)
roomname = models.CharField(max_length=320, blank=True, null=True)
roomdeptid = models.CharField(max_length=40, blank=True, null=True)
bookabletype = models.CharField(max_length=40, blank=True, null=True)
roomclass = models.CharField(max_length=40, blank=True, null=True)
zone = models.CharField(max_length=40, blank=True, null=True)
webview = models.CharField(max_length=4, blank=True, null=True)
automated = models.CharField(max_length=4, blank=True, null=True)
capacity = models.FloatField(blank=True, null=True)
category = models.CharField(max_length=40, blank=True, null=True)
class Meta:
managed = False
db_table = 'CMIS_UCLAPI_V_ROOMS'
_DATABASE = 'roombookings'
class RoomA(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
sitename = models.CharField(max_length=320, blank=True, null=True)
address1 = models.CharField(max_length=320, blank=True, null=True)
address2 = models.CharField(max_length=320, blank=True, null=True)
address3 = models.CharField(max_length=320, blank=True, null=True)
address4 = models.CharField(max_length=320, blank=True, null=True)
roomid = models.CharField(max_length=40)
roomname = models.CharField(max_length=320, blank=True, null=True)
roomdeptid = models.CharField(max_length=40, blank=True, null=True)
bookabletype = models.CharField(max_length=40, blank=True, null=True)
roomclass = models.CharField(max_length=40, blank=True, null=True)
zone = models.CharField(max_length=40, blank=True, null=True)
webview = models.CharField(max_length=4, blank=True, null=True)
automated = models.CharField(max_length=4, blank=True, null=True)
capacity = models.FloatField(blank=True, null=True)
category = models.CharField(max_length=40, blank=True, null=True)
class Meta:
_DATABASE = 'gencache'
class RoomB(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
sitename = models.CharField(max_length=320, blank=True, null=True)
address1 = models.CharField(max_length=320, blank=True, null=True)
address2 = models.CharField(max_length=320, blank=True, null=True)
address3 = models.CharField(max_length=320, blank=True, null=True)
address4 = models.CharField(max_length=320, blank=True, null=True)
roomid = models.CharField(max_length=40)
roomname = models.CharField(max_length=320, blank=True, null=True)
roomdeptid = models.CharField(max_length=40, blank=True, null=True)
bookabletype = models.CharField(max_length=40, blank=True, null=True)
roomclass = models.CharField(max_length=40, blank=True, null=True)
zone = models.CharField(max_length=40, blank=True, null=True)
webview = models.CharField(max_length=4, blank=True, null=True)
automated = models.CharField(max_length=4, blank=True, null=True)
capacity = models.FloatField(blank=True, null=True)
category = models.CharField(max_length=40, blank=True, null=True)
class Meta:
_DATABASE = 'gencache'
class Equipment(models.Model):
setid = models.CharField(max_length=40, blank=True, null=True)
roomid = models.CharField(max_length=40, primary_key=True)
units = models.FloatField(blank=True, null=True)
description = models.CharField(max_length=320, blank=True, null=True)
siteid = models.CharField(max_length=40, blank=True, null=True)
type = models.CharField(max_length=8, blank=True, null=True)
class Meta:
managed = False
db_table = 'CMIS_UCLAPI_V_EQUIP_FEATURES'
_DATABASE = 'roombookings'
class Location(models.Model):
siteid = models.CharField(max_length=40)
roomid = models.CharField(max_length=40)
lat = models.CharField(max_length=30)
lng = models.CharField(max_length=30)
class Meta:
_DATABASE = 'default'
unique_together = ('siteid', 'roomid')
class SiteLocation(models.Model):
siteid = models.CharField(max_length=40)
lat = models.CharField(max_length=30)
lng = models.CharField(max_length=30)
class Meta:
_DATABASE = 'default'
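# Query sketch (illustrative, not part of the module): each model pins its
# own database through the custom _DATABASE meta option registered above, so
# ordinary ORM calls are routed without explicit .using() calls. The filter
# value below is hypothetical.
#
#     rooms = Room.objects.filter(sitename__icontains="Main Building")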
|
mit
| -4,850,903,556,800,862,000 | 46.967033 | 77 | 0.716266 | false |
rogeriofalcone/treeio
|
services/forms.py
|
1
|
24922
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Services module forms
"""
from django import forms
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils.html import strip_tags
from treeio.core.conf import settings
from treeio.identities.models import Contact
from treeio.core.decorators import preprocess_form
from treeio.core.mail import SystemEmail
from treeio.core.models import Object, ModuleSetting
from treeio.core.rendering import get_template_source
from treeio.messaging.models import Message
from treeio.messaging.emails import EmailMessage
from treeio.services.models import Ticket, TicketRecord, ServiceAgent, TicketStatus, Service
from treeio.services.models import ServiceLevelAgreement, TicketQueue
preprocess_form()
class SettingsForm(forms.Form):
""" Administration settings form """
default_ticket_status = forms.ModelChoiceField(label='Default Ticket Status', queryset=[])
default_ticket_queue = forms.ModelChoiceField(label='Default Queue', queryset=[])
send_email_to_caller = forms.ChoiceField(label="Notify Caller By E-mail", choices=((True, _('Yes')),
(False, _('No'))), required=False)
send_email_template = forms.CharField(label="E-mail Template", widget=forms.Textarea, required=False)
def __init__(self, user, *args, **kwargs):
"Sets choices and initial value"
super(SettingsForm, self).__init__(*args, **kwargs)
# Translate
self.fields['default_ticket_status'].label = _('Default Ticket Status')
self.fields['default_ticket_queue'].label = _('Default Queue')
self.fields['send_email_to_caller'].label = _("Notify Caller By E-mail")
self.fields['send_email_template'].label = _("E-mail Template")
self.fields['default_ticket_status'].queryset = Object.filter_permitted(user, TicketStatus.objects, mode='x')
self.fields['default_ticket_queue'].queryset = Object.filter_permitted(user, TicketQueue.objects, mode='x')
try:
conf = ModuleSetting.get_for_module('treeio.services', 'default_ticket_status')[0]
default_ticket_status = TicketStatus.objects.get(pk=long(conf.value))
self.fields['default_ticket_status'].initial = default_ticket_status.id
except Exception:
pass
try:
conf = ModuleSetting.get_for_module('treeio.services', 'default_ticket_queue')[0]
default_ticket_queue = TicketQueue.objects.get(pk=long(conf.value))
self.fields['default_ticket_queue'].initial = default_ticket_queue.id
except Exception:
pass
try:
conf = ModuleSetting.get_for_module('treeio.services', 'send_email_to_caller')[0]
self.fields['send_email_to_caller'].initial = conf.value
except:
self.fields['send_email_to_caller'].initial = settings.HARDTREE_SEND_EMAIL_TO_CALLER
# notification template
try:
conf = ModuleSetting.get_for_module('treeio.services', 'send_email_template')[0]
self.fields['send_email_template'].initial = conf.value
except Exception:
self.fields['send_email_template'].initial = get_template_source('services/emails/notify_caller.html')
def save(self):
"Form processor"
try:
ModuleSetting.set_for_module('default_ticket_status',
self.cleaned_data['default_ticket_status'].id,
'treeio.services')
ModuleSetting.set_for_module('default_ticket_queue',
self.cleaned_data['default_ticket_queue'].id,
'treeio.services')
ModuleSetting.set_for_module('send_email_to_caller',
self.cleaned_data['send_email_to_caller'],
'treeio.services')
ModuleSetting.set_for_module('send_email_template',
self.cleaned_data['send_email_template'],
'treeio.services')
return True
except Exception:
return False
class MassActionForm(forms.Form):
""" Mass action form for Tickets """
status = forms.ModelChoiceField(queryset=[], required=False)
service = forms.ModelChoiceField(queryset=[], required=False)
queue = forms.ModelChoiceField(queryset=[], required=False)
delete = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
instance = None
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(MassActionForm, self).__init__(*args, **kwargs)
self.fields['status'].queryset = Object.filter_permitted(user, TicketStatus.objects, mode='x')
self.fields['status'].label = _("Status")
self.fields['service'].queryset = Object.filter_permitted(user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['queue'].queryset = Object.filter_permitted(user, TicketQueue.objects, mode='x')
self.fields['queue'].label = _("Queue")
self.fields['delete'] = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'),
('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
def save(self, *args, **kwargs):
"Process form"
if self.instance:
if self.is_valid():
if self.cleaned_data['service']:
self.instance.service = self.cleaned_data['service']
if self.cleaned_data['status']:
self.instance.status = self.cleaned_data['status']
if self.cleaned_data['queue']:
self.instance.queue = self.cleaned_data['queue']
self.instance.save()
if self.cleaned_data['delete']:
if self.cleaned_data['delete'] == 'delete':
self.instance.delete()
if self.cleaned_data['delete'] == 'trash':
self.instance.trash = True
self.instance.save()
class TicketForm(forms.ModelForm):
""" Ticket form """
name = forms.CharField(label='Title', widget=forms.TextInput(attrs={'size':'50'}))
def __init__(self, user, queue, agent, *args, **kwargs):
"Sets allowed values"
super(TicketForm, self).__init__(*args, **kwargs)
# Filter allowed selections for TicketForm
self.fields['reference'].required = False
self.fields['reference'].label = _("Reference")
self.fields['caller'].queryset = Object.filter_permitted(user, Contact.objects)
self.fields['caller'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['caller'].widget.attrs.update({'popuplink': reverse('identities_contact_add')})
self.fields['caller'].label = _("Caller")
self.fields['assigned'].queryset = Object.filter_permitted(user, ServiceAgent.objects, mode='x')
self.fields['assigned'].label = _("Assigned to")
self.fields['assigned'].help_text = ""
self.fields['assigned'].widget.attrs.update({'class': 'multicomplete',
'callback': reverse('services_ajax_agent_lookup')})
self.fields['assigned'].widget.attrs.update({'popuplink': reverse('services_agent_add')})
self.fields['status'].queryset = Object.filter_permitted(user, TicketStatus.objects, mode='x')
self.fields['status'].label = _("Status")
self.fields['service'].queryset = Object.filter_permitted(user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['queue'].queryset = Object.filter_permitted(user, TicketQueue.objects, mode='x')
self.fields['queue'].label = _("Queue")
self.fields['sla'].queryset = Object.filter_permitted(user, ServiceLevelAgreement.objects, mode='x')
self.fields['sla'].label = _("Service Level Agreement")
self.fields['resolution'].label = _("Resolution")
# Set default values if not editing
if not 'instance' in kwargs:
try:
self.fields['caller'].initial = user.get_contact().id
except Exception:
pass
if queue:
self.fields['queue'].initial = queue.id
if queue.default_ticket_status and queue.default_ticket_status in self.fields['status'].queryset:
self.fields['status'].initial = queue.default_ticket_status_id
else:
try:
conf = ModuleSetting.get_for_module('treeio.services', 'default_ticket_status')[0]
self.fields['status'].initial = long(conf.value)
except:
pass
if queue.default_ticket_priority:
self.fields['priority'].initial = queue.default_ticket_priority
if queue.default_service:
self.fields['service'].initial = queue.default_service_id
try:
default_sla = ServiceLevelAgreement.objects.get(service=queue.default_service, default=True)
if default_sla:
self.fields['sla'].initial = default_sla.id
except:
pass
else:
try:
conf = ModuleSetting.get_for_module('treeio.services', 'default_ticket_status')[0]
self.fields['status'].initial = long(conf.value)
except:
pass
try:
conf = ModuleSetting.get_for_module('treeio.services', 'default_ticket_queue')[0]
self.fields['queue'].initial = long(conf.value)
except:
pass
self.fields['name'].label = _("Name")
self.fields['name'].widget.attrs.update({'class': 'duplicates',
'callback': reverse('services_ajax_ticket_lookup')})
self.fields['priority'].label = _("Priority")
self.fields['priority'].choices = ((5, _('Highest')), (4, _('High')), (3, _('Normal')), (2, _('Low')), (1, _('Lowest')))
self.fields['urgency'].label = _("Urgency")
self.fields['urgency'].choices = ((5, _('Highest')), (4, _('High')), (3, _('Normal')), (2, _('Low')), (1, _('Lowest')))
self.fields['details'].label = _("Details")
if not agent:
del self.fields['caller']
del self.fields['reference']
del self.fields['priority']
del self.fields['status']
del self.fields['queue']
del self.fields['sla']
del self.fields['assigned']
del self.fields['resolution']
class Meta:
"Ticket specified as model"
model = Ticket
fields = ('name', 'reference', 'caller', 'assigned', 'urgency', 'priority',
'status', 'service', 'sla', 'queue', 'details', 'resolution')
class TicketStatusForm(forms.ModelForm):
""" TicketStatus form """
name = forms.CharField(widget=forms.TextInput(attrs={'size':'30'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(TicketStatusForm, self).__init__(*args, **kwargs)
class Meta:
"TicketStatus specified as model"
model = TicketStatus
fields = ('name', 'active', 'hidden', 'details')
class TicketRecordForm(forms.ModelForm):
""" TicketRecord form """
def __init__(self, agent, ticket, *args, **kwargs):
super(TicketRecordForm, self).__init__(*args, **kwargs)
self.ticket = ticket
self.fields['body'].label = _("body")
self.fields['body'].required = True
self.fields['notify'].label = _("Notify caller")
self.fields['resolution'] = forms.BooleanField(label=_("Set as Resolution"), required=False)
if not agent:
del self.fields['notify']
del self.fields['resolution']
def save(self, *args, **kwargs):
"Set Resolution if selected"
instance = super(TicketRecordForm, self).save(*args, **kwargs)
ticket = self.ticket
if 'resolution' in self.cleaned_data and self.cleaned_data['resolution']:
ticket.resolution = self.cleaned_data['body']
ticket.save()
# Send update if notify clicked
if 'notify' in self.cleaned_data and self.cleaned_data['notify'] and ticket.caller:
toaddr = ticket.caller.get_email()
if ticket.message or toaddr:
reply = Message()
reply.author = instance.sender
reply.body = instance.body
reply.auto_notify = False
if ticket.message:
reply.stream = ticket.message.stream
reply.reply_to = ticket.message
else:
reply.stream = ticket.queue.message_stream if ticket.queue else None
reply.title = "[#%s] %s" % (ticket.reference, ticket.name)
reply.save()
if not ticket.message:
ticket.message = reply
reply.recipients.add(ticket.caller)
email = EmailMessage(reply)
email.send_email()
return instance
class Meta:
"TicketRecord specified as model"
model = TicketRecord
fields = ['body', 'notify']
class QueueForm(forms.ModelForm):
""" Queue form """
name = forms.CharField(widget=forms.TextInput(attrs={'size':'50'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(QueueForm, self).__init__(*args, **kwargs)
manager = TicketQueue.objects
if 'instance' in kwargs:
instance = kwargs['instance']
manager = manager.exclude(Q(parent=instance) & Q(pk=instance.id))
self.fields['parent'].queryset = Object.filter_permitted(user, manager, mode='x')
self.fields['default_service'].queryset = Object.filter_permitted(user, Service.objects, mode='x')
self.fields['waiting_time'].help_text = "seconds"
self.fields['name'].label = _("Name")
self.fields['active'].label = _("Active")
self.fields['parent'].label = _("Parent")
self.fields['default_ticket_status'].label = _("Default ticket status")
self.fields['default_ticket_priority'].label = _("Default ticket priority")
self.fields['default_service'].label = _("Default service")
self.fields['waiting_time'].label = _("Waiting time")
self.fields['next_queue'].queryset = Object.filter_permitted(user, TicketQueue.objects, mode='x')
self.fields['next_queue'].label = _("Next queue")
self.fields['ticket_code'].label = _("Ticket code")
self.fields['message_stream'].label = _("Message stream")
self.fields['message_stream'].widget.attrs.update({'popuplink': reverse('messaging_stream_add')})
self.fields['details'].label = _("Details")
class Meta:
"TicketQueue specified as model"
model = TicketQueue
fields = ('name', 'active', 'parent', 'default_ticket_status',
'default_ticket_priority', 'default_service', 'waiting_time',
'next_queue', 'ticket_code', 'message_stream', 'details')
class ServiceForm(forms.ModelForm):
""" Service form """
name = forms.CharField(widget=forms.TextInput(attrs={'size':'50'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(ServiceForm, self).__init__(*args, **kwargs)
manager = Service.objects
if 'instance' in kwargs:
instance = kwargs['instance']
manager = manager.exclude(Q(parent=instance) & Q(pk=instance.id))
self.fields['parent'].queryset = Object.filter_permitted(user, manager, mode='x')
self.fields['name'].label = _("Name")
self.fields['parent'].label = _("Parent")
self.fields['details'].label = _("Details")
class Meta:
"Service specified as model"
model = Service
fields = ('name', 'parent', 'details')
class ServiceLevelAgreementForm(forms.ModelForm):
""" ServiceLevelAgreement form """
name = forms.CharField(widget=forms.TextInput(attrs={'size':'50'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(ServiceLevelAgreementForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['response_time'].help_text = 'minutes'
self.fields['response_time'].widget.attrs.update({'size': 10})
self.fields['response_time'].label = _("Response time")
self.fields['uptime_rate'].help_text = 'percent'
self.fields['uptime_rate'].widget.attrs.update({'size': 5})
self.fields['uptime_rate'].label = _("Uptime rate")
self.fields['service'].queryset = Object.filter_permitted(user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['client'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
self.fields['client'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['client'].widget.attrs.update({'popuplink': reverse('identities_contact_add')})
self.fields['client'].label = _("Client")
self.fields['provider'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
self.fields['provider'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['provider'].widget.attrs.update({'popuplink': reverse('identities_contact_add')})
self.fields['provider'].label = _("Provider")
self.fields['available_from'].initial = "09:00"
self.fields['available_from'].widget.attrs.update({'size': 10})
self.fields['available_from'].label = _("Available from")
self.fields['available_to'].initial = "18:00"
self.fields['available_to'].widget.attrs.update({'size': 10})
self.fields['available_to'].label = _("Available to")
contact = user.default_group.get_contact()
if contact:
self.fields['provider'].initial = contact.id
class Meta:
"ServiceLevelAgreement specified as model"
model = ServiceLevelAgreement
fields = ('name', 'service', 'client', 'provider', 'response_time', 'uptime_rate', 'available_from',
'available_to')
class AgentForm(forms.ModelForm):
""" Agent form """
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(AgentForm, self).__init__(*args, **kwargs)
self.fields['related_user'].label = _("Related user")
self.fields['related_user'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_user_lookup')})
self.fields['active'].label = _("Active")
self.fields['occupied'].label = _("Occupied")
self.fields['available_from'].label = _("Available from")
self.fields['available_to'].label = _("Available to")
class Meta:
"Agent specified as model"
model = ServiceAgent
fields = ('related_user', 'active', 'occupied', 'available_from', 'available_to')
class FilterForm(forms.ModelForm):
""" Ticket Filters definition """
def __init__(self, user, skip=[], *args, **kwargs):
"Sets allowed values"
super(FilterForm, self).__init__(*args, **kwargs)
if 'caller' in skip:
del self.fields['caller']
else:
self.fields['caller'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
self.fields['caller'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['caller'].label = _("Caller")
if 'status' in skip:
del self.fields['status']
else:
self.fields['status'].queryset = Object.filter_permitted(user, TicketStatus.objects, mode='x')
self.fields['status'].label = _("Status")
self.fields['service'].queryset = Object.filter_permitted(user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['sla'].queryset = Object.filter_permitted(user, ServiceLevelAgreement.objects, mode='x')
self.fields['sla'].label = _("SLA")
if 'queue' in skip:
del self.fields['queue']
else:
self.fields['queue'].queryset = Object.filter_permitted(user, TicketQueue.objects, mode='x')
self.fields['queue'].label = _("Queue")
if 'assigned' in skip:
del self.fields['assigned']
else:
self.fields['assigned'].queryset = Object.filter_permitted(user, ServiceAgent.objects, mode='x')
self.fields['assigned'].widget.attrs.update({'class': 'multicomplete',
'callback': reverse('services_ajax_agent_lookup')})
self.fields['assigned'].label = _("Assigned to")
self.fields['assigned'].help_text = ""
class Meta:
"Ticket specified as model"
model = Ticket
fields = ('caller', 'status', 'service', 'sla', 'queue', 'assigned')
class SLAFilterForm(forms.ModelForm):
""" SLA Filters definition """
def __init__(self, user, skip=[], *args, **kwargs):
"Sets allowed values"
super(SLAFilterForm, self).__init__(*args, **kwargs)
self.fields['client'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
self.fields['client'].required = False
self.fields['client'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['client'].label = _("Client")
self.fields['provider'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
self.fields['provider'].required = False
self.fields['provider'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_contact_lookup')})
self.fields['provider'].label = _("Provider")
self.fields['service'].queryset = Object.filter_permitted(user, Service.objects, mode='x')
self.fields['service'].required = False
self.fields['service'].label = _("Service")
class Meta:
"ServiceLevelAgreement specified as model"
model = ServiceLevelAgreement
fields = ('service', 'client', 'provider')
class AgentFilterForm(forms.ModelForm):
""" Agent Filters definition """
def __init__(self, user, skip=[], *args, **kwargs):
"Sets allowed values"
super(AgentFilterForm, self).__init__(*args, **kwargs)
self.fields['related_user'].required = True
self.fields['related_user'].label = _("Related user")
class Meta:
"ServiceAgent specified as model"
model = ServiceAgent
fields = ['related_user']
|
mit
| -6,035,473,457,789,518,000 | 45.066543 | 128 | 0.566367 | false |
quarkslab/arybo
|
tests/arybo/llvm.py
|
1
|
3837
|
import unittest
import operator
import six
import random
from six.moves import range,reduce
import ctypes
from ctypes import CFUNCTYPE
from arybo.lib.exprs_asm import llvmlite_available
if llvmlite_available:
import llvmlite.binding as llvm
import arybo.lib.mba_exprs as EX
from arybo.lib import MBA
from arybo.lib.exprs_asm import to_llvm_function, llvm_get_target
def int_size_to_type(s):
d = {
8: ctypes.c_ubyte,
16: ctypes.c_ushort,
32: ctypes.c_uint,
64: ctypes.c_ulonglong}
return d[s]
@unittest.skipIf(llvmlite_available == False, "skipping LLVM-related tests as llvmlite is not available")
class LLVMTest(unittest.TestCase):
def setUp(self):
self.mba = MBA(8)
self.x = self.mba.var('x')
self.y = self.mba.var('y')
self.ex = EX.ExprBV(self.x)
self.ey = EX.ExprBV(self.y)
self.args = [self.x,self.y]
self.eargs = [EX.ExprBV(self.x),EX.ExprBV(self.y)]
self.func_name = "__arybo"
self.llvm_target = llvm_get_target()
self.machine = self.llvm_target.create_target_machine()
self.engine = llvm.create_mcjit_compiler(llvm.parse_assembly(""), self.machine)
def get_c_func(self, e, args):
# Get the llvm function
M = to_llvm_function(e,self.args,self.func_name)
# JIT the function, and compare random values
M = llvm.parse_assembly(str(M))
M.verify()
self.engine.add_module(M)
self.engine.finalize_object()
func_ptr = self.engine.get_function_address(self.func_name)
cfunc_type = (int_size_to_type(e.nbits),) + tuple(int_size_to_type(a.nbits) for a in args)
cfunc = CFUNCTYPE(*cfunc_type)(func_ptr)
return M,cfunc
def check_expr(self, e, args):
M,cfunc = self.get_c_func(e, args)
# Eval 'e'
evale = EX.eval_expr(e)
for n in range(100):
args_v = [random.getrandbits(a.nbits) for a in args]
self.assertEqual(cfunc(*args_v), evale.eval({a: args_v[i] for i,a in enumerate(args)}))
self.engine.remove_module(M)
def test_tree(self):
e0 = EX.ExprXor(self.ex, self.ey)
e = EX.ExprAdd(e0,e0)
self.check_expr(e, self.args)
def test_binops(self):
for op in (EX.ExprAdd,EX.ExprSub,EX.ExprMul,EX.ExprOr,EX.ExprXor,EX.ExprAnd):
e = op(*self.eargs)
self.check_expr(e, self.args)
def test_shifts(self):
for op in (EX.ExprShl,EX.ExprLShr,EX.ExprRor,EX.ExprRol):
for n in range(8):
e = op(self.ex, EX.ExprCst(n, 8))
self.check_expr(e, [self.x])
def test_concat_slice(self):
e = EX.ExprConcat(self.ex[:4], self.ey[:4])
self.check_expr(e, self.args)
e = EX.ExprConcat(self.ex[:2], self.ey[2:8])
self.check_expr(e, self.args)
def test_broadcast(self):
for nbits in (8,16):
for i in range(8):
e = EX.ExprBroadcast(self.ex, i, nbits)
self.check_expr(e, [self.x])
def test_not(self):
e = EX.ExprNot(self.ex)
self.check_expr(e, [self.x])
def test_cond(self):
e = EX.ExprCond(EX.ExprCmpEq(self.ex, EX.ExprCst(10, 8)), self.ex, self.ey)
M,cfunc = self.get_c_func(e, self.args)
for i in range(256):
vref = 0xff if i != 10 else 10
self.assertEqual(cfunc(i, 0xff), vref)
self.engine.remove_module(M)
e = EX.ExprCond(EX.ExprCmpGte(self.ex, EX.ExprCst(10, 8), is_signed=True), self.ex, self.ey)
M,cfunc = self.get_c_func(e, self.args)
for i in range(-128,128):
vref = i if i >= 10 else 0xff
self.assertEqual(cfunc(i, 0xff), vref)
self.engine.remove_module(M)
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| 6,109,709,905,965,048,000 | 32.365217 | 105 | 0.591869 | false |
mvo5/snapcraft
|
snapcraft/plugins/kbuild.py
|
1
|
9392
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The kbuild plugin is used for building kbuild based projects as snapcraft
parts.
This plugin is based on the snapcraft.BasePlugin and supports the properties
provided by that plus the following kbuild specific options with semantics as
explained above:
- kdefconfig:
(list of kdefconfigs)
defconfig target to use as the base configuration. default: "defconfig"
- kconfigfile:
(filepath)
path to file to use as base configuration. If provided this option wins
over everything else. default: None
- kconfigflavour
(string)
Ubuntu config flavour to use as base configuration. If provided this
option wins over kdefconfig. default: None
- kconfigs
(list of strings)
explicit list of configs to force; this will override the configs that
were set as base through kdefconfig and kconfigfile and dependent configs
will be fixed using the defaults encoded in the kbuild config
definitions. If you don't want the defaults for one or more implicit configs
coming out of these, just add them to this list as well.
The plugin applies your selected defconfig first by running
make defconfig
and then uses the kconfigs flag to augment the resulting config by prepending
the configured kconfigs values to the .config and running
"yes" "" | make oldconfig
to create an updated .config file.
If kconfigfile is provided this plugin will use the provided config file
wholesale as the starting point instead of make $kdefconfig. In case user
configures both a kdefconfig as well as kconfigfile, kconfigfile approach will
be used.
"""
import logging
import os
import subprocess
import re
import snapcraft
from snapcraft.internal import errors
logger = logging.getLogger(__name__)
class KBuildPlugin(snapcraft.BasePlugin):
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["kdefconfig"] = {"type": "array", "default": ["defconfig"]}
schema["properties"]["kconfigfile"] = {"type": "string", "default": None}
schema["properties"]["kconfigs"] = {
"type": "array",
"minitems": 1,
"uniqueItems": True,
"items": {"type": "string"},
"default": [],
}
schema["properties"]["kconfigflavour"] = {"type": "string", "default": None}
schema["required"] = ["source"]
return schema
@classmethod
def get_build_properties(cls):
# Inform Snapcraft of the properties associated with building. If these
# change in the YAML Snapcraft will consider the build step dirty.
return ["kdefconfig", "kconfigfile", "kconfigs", "kconfigflavour"]
def __init__(self, name, options, project):
super().__init__(name, options, project)
if project.info.base not in ("core", "core16", "core18"):
raise errors.PluginBaseError(part_name=self.name, base=project.info.base)
self.build_packages.extend(["bc", "gcc", "make"])
self.make_targets = []
self.make_install_targets = ["install"]
self.make_cmd = ["make", "-j{}".format(self.parallel_build_count)]
if logger.isEnabledFor(logging.DEBUG):
self.make_cmd.append("V=1")
def enable_cross_compilation(self):
self.make_cmd.append("ARCH={}".format(self.project.kernel_arch))
if os.environ.get("CROSS_COMPILE"):
toolchain = os.environ["CROSS_COMPILE"]
else:
toolchain = self.project.cross_compiler_prefix
self.make_cmd.append("CROSS_COMPILE={}".format(toolchain))
env = os.environ.copy()
self.make_cmd.append(
"PATH={}:/usr/{}/bin".format(env.get("PATH", ""), self.project.arch_triplet)
)
def assemble_ubuntu_config(self, config_path):
try:
with open(os.path.join(self.sourcedir, "debian", "debian.env"), "r") as f:
env = f.read()
except OSError as e:
raise RuntimeError("Unable to access {}: {}".format(e.filename, e.strerror))
arch = self.project.deb_arch
try:
branch = env.split(".")[1].strip()
except IndexError:
raise RuntimeError("Malformed debian.env, cannot extract branch name")
flavour = self.options.kconfigflavour
configfiles = []
baseconfigdir = os.path.join(
self.sourcedir, "debian.{}".format(branch), "config"
)
archconfigdir = os.path.join(
self.sourcedir, "debian.{}".format(branch), "config", arch
)
commonconfig = os.path.join(baseconfigdir, "config.common.ports")
ubuntuconfig = os.path.join(baseconfigdir, "config.common.ubuntu")
archconfig = os.path.join(archconfigdir, "config.common.{}".format(arch))
flavourconfig = os.path.join(archconfigdir, "config.flavour.{}".format(flavour))
configfiles.append(commonconfig)
configfiles.append(ubuntuconfig)
configfiles.append(archconfig)
configfiles.append(flavourconfig)
# assemble .config
try:
with open(config_path, "w") as config_file:
for config_part_path in (
commonconfig,
ubuntuconfig,
archconfig,
flavourconfig,
):
with open(config_part_path) as config_part:
config_file.write(config_part.read())
except OSError as e:
raise RuntimeError(
"Unable to access {!r}: {}".format(e.filename, e.strerror)
)
def get_config_path(self):
return os.path.join(self.builddir, ".config")
def do_base_config(self, config_path):
# if the parts build dir already contains a .config file,
# use it
if os.path.isfile(config_path):
return
# if kconfigfile is provided use that
# elif kconfigflavour is provided, assemble the ubuntu.flavour config
# otherwise use defconfig to seed the base config
if self.options.kconfigfile:
# This file gets modified, no hard links here
snapcraft.file_utils.copy(self.options.kconfigfile, config_path)
elif self.options.kconfigflavour:
self.assemble_ubuntu_config(config_path)
else:
# we need to run this with -j1, unit tests are a good defense here.
make_cmd = self.make_cmd.copy()
make_cmd[1] = "-j1"
self.run(make_cmd + self.options.kdefconfig)
def do_patch_config(self, config_path):
# prepend the generated file with provided kconfigs
# - concat kconfigs to buffer
# - read current .config and append
# - write out to disk
if not self.options.kconfigs:
return
config = "\n".join(self.options.kconfigs)
# note that prepending and appending the overrides seems
# only way to convince all kbuild versions to pick up the
# configs during oldconfig in .config
with open(config_path, "r") as f:
config = "{config_override}\n\n{config}\n{config_override}\n".format(
config_override=config, config=f.read()
)
with open(config_path, "w") as f:
f.write(config)
def do_remake_config(self):
# update config to include kconfig amendments using oldconfig
cmd = 'yes "" | {} oldconfig'.format(" ".join(self.make_cmd))
subprocess.check_call(cmd, shell=True, cwd=self.builddir)
def do_configure(self):
config_path = self.get_config_path()
self.do_base_config(config_path)
self.do_patch_config(config_path)
self.do_remake_config()
def do_build(self):
# Linux's kernel Makefile gets confused if it is invoked with the
# environment setup by another Linux's Makefile:
# linux/package/Makefile -> snapcraft -> linux/Makefile
# fix the problem removing the offending make option (-I...)
if "MAKEFLAGS" in os.environ:
makeflags = re.sub(r"-I[\S]*", "", os.environ["MAKEFLAGS"])
os.environ["MAKEFLAGS"] = makeflags
# build the software
self.run(self.make_cmd + self.make_targets)
def do_install(self):
# install to installdir
self.run(
self.make_cmd
+ ["CONFIG_PREFIX={}".format(self.installdir)]
+ self.make_install_targets
)
def build(self):
super().build()
self.do_configure()
self.do_build()
if "no-install" not in self.options.build_attributes:
self.do_install()
|
gpl-3.0
| 2,984,809,691,852,066,300 | 36.12253 | 88 | 0.627449 | false |
sliwhu/UWHousingTeam
|
model/house_price_model.py
|
1
|
6622
|
"""
Contains the house price model.
DON'T USE THIS MODEL! Use the HousePriceModel in house_price_model_2.py.
"""
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import RidgeCV
# Constants
BASE_DATE = pd.to_datetime('20140101', format='%Y%m%d', errors='ignore')
TO_TYPE = 'category'
# Note: It is expected that the following environment variables will be set so
# that the house price model will be able to locate its training data:
#
# SALES_DATA_PATH: The path of the sales data training file, e.g.: "~/directory"
# SALES_DATA_FILE: The name of the sales data training file, e.g.: "File.csv"
#
# os.environ["SALES_DATA_PATH"] = '~/UW Data Science/DATA 515A/Project'
# os.environ["SALES_DATA_FILE"] = 'Merged_Data_excel.csv' # 'KingCountyHomeSalesData.csv'
# Construct the sales data path, and read the sales data.
SALES_DATA_PATH = os.path.join(os.environ['SALES_DATA_PATH'], os.environ['SALES_DATA_FILE'])
SALES_DATA = pd.read_csv(SALES_DATA_PATH, parse_dates=['date'])
# Data cleansing plan:
#
# id: Discard
# date: Convert to integer; make categorical
# price: No conversion
# bedrooms: No conversion
# bathrooms: No conversion
# sqft_living: No conversion
# sqft_lot: No conversion
# floors: Make categorical
# waterfront: Make categorical
# view: Make categorical
# condition: Make categorical
# grade: Make categorical
# sqft_above: No conversion
# sqft_basement: No conversion
# yr_built: Make categorical
# yr_renovated: Copy over yr_built if missing; make categorical
# zipcode: Make categorical
# lat: No conversion
# long: No conversion
# sqft_living15 No conversion
# sqft_lot15 No conversion
# list_price No conversion
def construct_models():
"""
Constructs a ridge regression model, and a random forest model for housing
price data.
:return: A ridge regression model, and a random forest model for housing
price data
"""
return train_models(create_model_data_frame(SALES_DATA))
def create_model_data_frame(source):
"""
Creates a data frame suitable for constructing a model.
:param source: The source data frame
:return: A data frame suitable for constructing a model
"""
# Create an empty data frame. Get the date series from the source.
my_model_data = pd.DataFrame()
sales_date = source['date']
# Extract the sales date as an integer.
my_model_data['sale_day'] =\
(sales_date - get_base_date()).astype('timedelta64[D]').astype(int) + 1
# Extract the sale day-of-week as an integer, and the sale day in month.
my_model_data['sale_day_of_week'] = sales_date.dt.dayofweek.astype(TO_TYPE)
my_model_data['sale_day_in_month'] = sales_date.dt.day.astype(TO_TYPE)
# Extract common features as numeric, or categorical values.
# create_model_feature(my_model_data, source, 'price', False)
create_model_feature(my_model_data, source, 'price', False)
create_model_feature(my_model_data, source, 'bedrooms', False)
create_model_feature(my_model_data, source, 'bathrooms', False)
create_model_feature(my_model_data, source, 'sqft_living', False)
create_model_feature(my_model_data, source, 'sqft_lot', False)
create_model_feature(my_model_data, source, 'floors', True)
create_model_feature(my_model_data, source, 'waterfront', True)
create_model_feature(my_model_data, source, 'view', True)
create_model_feature(my_model_data, source, 'condition', True)
create_model_feature(my_model_data, source, 'grade', True)
create_model_feature(my_model_data, source, 'sqft_above', False)
create_model_feature(my_model_data, source, 'sqft_basement', False)
create_model_feature(my_model_data, source, 'yr_built', True)
# Use 'year built' in place of 'year renovated' if year renovated is zero
# in the source.
field_name = 'yr_renovated'
my_model_data[field_name] = pd.Categorical(np.where(
source[field_name] == 0,
source['yr_built'].astype(TO_TYPE),
source[field_name].astype(TO_TYPE)))
# Extract more common features as numeric, or categorical values.
create_model_feature(my_model_data, source, 'zipcode', True)
create_model_feature(my_model_data, source, 'lat', False)
create_model_feature(my_model_data, source, 'long', False)
create_model_feature(my_model_data, source, 'sqft_living15', False)
create_model_feature(my_model_data, source, 'sqft_lot15', False)
my_model_data['list_price'] = source['List price']
# Return the completed model data frame.
return my_model_data
def create_model_feature(destination, source, name, to_categorical=False):
"""
Creates a feature in a destination data frame.
:param destination: The destination data frame
:param source: The source data frame
:param name: The name of the feature to copy
:param to_categorical: True if the feature should be converted to
categorical, false otherwise
:return: None
"""
if to_categorical:
destination[name] = source[name].astype(TO_TYPE)
else:
destination[name] = source[name]
return None
def get_base_date():
"""
Gets the base date as a reference for day of sale.
:return: The base date as a reference for day of sale
"""
return BASE_DATE
def train_models(my_model_data):
"""
Trains a ridge regression model, and a random forest model, and returns
them.
:param my_model_data: The model data on which to train
:return: A ridge regression model, and a random forest model
"""
# Construct the ridge regression model.
my_ridge_model = RidgeCV(alphas=(0.1, 1.0, 10.0),
fit_intercept=True,
normalize=True,
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=True)
# Construct the random forest model.
my_forest_model = RandomForestRegressor()
# Divide the model data into predictor and response.
response_field = 'price'
predictors = my_model_data.ix[:, response_field != my_model_data.columns]
response = my_model_data[response_field]
# Fit the models, and return them.
my_ridge_model.fit(X=predictors, y=response)
my_forest_model.fit(X=predictors, y=response)
return my_ridge_model, my_forest_model
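# Usage sketch: both models are trained on SALES_DATA, so the SALES_DATA_PATH
# and SALES_DATA_FILE environment variables must point at a real training file
# before this module is imported.
#
#   ridge_model, forest_model = construct_models()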
|
mit
| 3,879,649,261,701,865,000 | 35.788889 | 92 | 0.663546 | false |
elastic-event-components/e2c
|
source/python/e2c.examples/web_mud/contracts/token.py
|
1
|
1166
|
#
# Copyright 2017 The E2C Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class FToken(object):
intro = 'int'
broadcast = 'brc'
text = 'txt'
enum = 'enum'
error = 'err'
room_name = 'rn'
room_object = 'ro'
room_desc = 'rd'
exit_title = 'et'
exit_object = 'eo'
item_title = 'it'
item_object = 'io'
inv_title = 'ivt'
inv_object = 'ivo'
occ_title = 'ot'
occ_object = 'oo'
line = '<hr>'
@staticmethod
def wrap(text, token):
return "<%s>%s</%s>" % (token, text, token)
|
apache-2.0
| -590,398,240,472,339,600 | 24.911111 | 80 | 0.598628 | false |
ThomasZh/legend-league-portal
|
foo/auth/auth_newsup.py
|
1
|
3243
|
#!/usr/bin/env python
# _*_ coding: utf-8_*_
#
# Copyright 2016 7x24hs.com
# thomas@7x24hs.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.web
import logging
import time
import sys
import os
import uuid
import smtplib
import hashlib
import json as JSON # use an alias so it is not confused with local variables inside methods
from bson import json_util
import requests
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../dao"))
from tornado.escape import json_encode, json_decode
from tornado.httpclient import *
from tornado.httputil import url_concat
from bson import json_util
from comm import *
from global_const import *
class AuthRegisterHandler(BaseHandler):
def get(self):
logging.info(self.request)
is_login = False
access_token = self.get_secure_cookie("access_token")
if access_token:
is_login = True
# league(联盟信息)
league_info = self.get_league_info()
self.render('newsup/register.html',
is_login=is_login,
league_info=league_info,
api_domain=API_DOMAIN)
class AuthLogoutHandler(AuthorizationHandler):
@tornado.web.authenticated # if no session, redirect to login page
def get(self):
access_token = self.get_secure_cookie("access_token")
# logout
url = API_DOMAIN+"/api/auth/tokens"
http_client = HTTPClient()
response = http_client.fetch(url, method="DELETE", headers={"Authorization":"Bearer "+access_token})
logging.info("got response %r", response.body)
self.clear_cookie("access_token")
self.clear_cookie("expires_at")
self.clear_cookie("login_next")
self.clear_cookie("refresh_token")
self.redirect("/");
class AuthLeagueSignupXHR(BaseHandler):
def post(self):
logging.info(self.request)
logging.info(self.request.body)
session_ticket = json_decode(self.request.body)
self.set_secure_cookie("access_token", session_ticket['access_token'])
self.set_secure_cookie("expires_at", str(session_ticket['expires_at']))
# signup into league
url = API_DOMAIN+"/api/leagues/"+LEAGUE_ID+"/signup"
http_client = HTTPClient()
headers={"Authorization":"Bearer "+session_ticket['access_token']}
body = {"role":"user"}
_json = json_encode(body)
response = http_client.fetch(url, method="POST", headers=headers, body=_json)
logging.info("got response %r", response.body)
self.set_status(200) # OK
self.write(JSON.dumps({"err_code":200, "err_msg":"success"}))
self.finish()
return
|
apache-2.0
| 3,766,114,325,257,546,000 | 30.673267 | 108 | 0.66427 | false |
renatopp/liac
|
liac/dataset/__init__.py
|
1
|
3050
|
# =============================================================================
# Federal University of Rio Grande do Sul (UFRGS)
# Connectionist Artificial Intelligence Laboratory (LIAC)
# Renato de Pontes Pereira - rppereira@inf.ufrgs.br
# =============================================================================
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
'''
This module is an interface to pandas and provides some utility functions for
handling datasets.
'''
import os
import pandas as pd
from . import arff
__all__ = ['load', 'read_csv', 'read_clipboard', 'read_arff']
read_csv = pd.read_csv
read_clipboard = pd.read_clipboard
def read_arff(set_name):
'''
Read ARFF file into pandas DataFrame.
:param set_name: the dataset path.
'''
f = open(set_name)
info = arff.load(f)
f.close()
attributes = [a[0] for a in info['attributes']]
data = info['data']
return pd.DataFrame(data, columns=attributes)
def load(set_name, *args, **kwargs):
'''
    This function automatically loads a dataset by its file extension: ARFF
    files via `read_arff` and CSV/TXT files via `read_csv`. For any other
    `set_name` it falls back to the bundled default datasets, such as "iris".
:param set_name: the dataset path or the default dataset name.
:returns: a `pd.DataFrame` object.
'''
_, ext = os.path.splitext(set_name)
if ext == '.arff':
loader = read_arff
elif ext in ['.csv', '.txt']:
loader = read_csv
else:
loader = __load_default_set
dataset = loader(set_name, *args, **kwargs)
return dataset
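# Dispatch sketch (the file names are illustrative):
#   load('data.arff')  # -> read_arff
#   load('data.csv')   # -> read_csv
#   load('linaker')    # -> bundled default set (alias of 'linaker1v')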
def __load_default_set(set_name):
ALIASES = {'linaker':'linaker1v'}
name = ''.join([ALIASES.get(set_name, set_name), '.arff'])
file_name = os.path.join(os.path.dirname(__file__), 'sets', name)
return read_arff(file_name)
|
mit
| -752,506,588,079,396,900 | 36.654321 | 79 | 0.644918 | false |
thanatoskira/AndroGuard
|
elsim/elsim/elsim_db.py
|
1
|
7471
|
#!/usr/bin/env python
# This file is part of Elsim.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging, re
from similarity.similarity import DBFormat, simhash
from androguard.core.analysis import analysis
DEFAULT_SIGNATURE = analysis.SIGNATURE_SEQUENCE_BB
def eval_res_per_class(ret) :
z = {}
for i in ret :
for j in ret[i] :
for k in ret[i][j] :
val = ret[i][j][k]
# print val, k
if len(val[0]) == 1 and val[1] > 1 :
continue
if len(val[0]) == 0:
continue
if j not in z :
z[j] = {}
val_percentage = (len(val[0]) / float(val[1]) ) * 100
if (val_percentage != 0) :
z[j][k] = val_percentage
return z
############################################################
class ElsimDB :
def __init__(self, database_path) :
self.db = DBFormat( database_path )
def eval_res(self, ret, info, threshold=10.0) :
sorted_elems = {}
for i in ret :
sorted_elems[i] = []
for j in ret[i] :
t_size = 0
elems = set()
for k in ret[i][j] :
val = ret[i][j][k]
if len(val[0]) == 1 and val[1] > 1:
continue
t_size += val[-1]
elems.add( k )
percentage_size = (t_size / float(info[i][j]["SIZE"])) * 100
if percentage_size > threshold :
sorted_elems[i].append( (j, percentage_size, elems) )
if len(sorted_elems[i]) == 0 :
del sorted_elems[i]
return sorted_elems
def percentages(self, vm, vmx, threshold=10) :
elems_hash = set()
for _class in vm.get_classes() :
for method in _class.get_methods() :
code = method.get_code()
if code == None :
continue
buff_list = vmx.get_method_signature( method, predef_sign = DEFAULT_SIGNATURE ).get_list()
for i in buff_list :
elem_hash = long(simhash( i ))
elems_hash.add( elem_hash )
ret, info = self.db.elems_are_presents( elems_hash )
sorted_ret = self.eval_res(ret, info, threshold)
info = {}
for i in sorted_ret :
v = sorted(sorted_ret[i], key=lambda x: x[1])
v.reverse()
info[i] = []
for j in v :
info[i].append( [j[0], j[1]] )
info_name = self.db.classes_are_presents( vm.get_classes_names() )
for i in info_name :
if i not in info :
info[i] = None
return info
def percentages_code(self, exclude_list) :
libs = re.compile('|'.join( "(" + i + ")" for i in exclude_list))
classes_size = 0
classes_db_size = 0
classes_edb_size = 0
classes_udb_size = 0
for _class in self.vm.get_classes() :
class_size = 0
elems_hash = set()
for method in _class.get_methods() :
code = method.get_code()
if code == None :
continue
buff_list = self.vmx.get_method_signature( method, predef_sign = DEFAULT_SIGNATURE ).get_list()
for i in buff_list :
elem_hash = long(simhash( i ))
elems_hash.add( elem_hash )
class_size += method.get_length()
classes_size += class_size
if class_size == 0 :
continue
ret = self.db.elems_are_presents( elems_hash )
sort_ret = eval_res_per_class( ret )
if sort_ret == {} :
if libs.search(_class.get_name()) != None :
classes_edb_size += class_size
else :
classes_udb_size += class_size
else :
classes_db_size += class_size
return (classes_db_size/float(classes_size)) * 100, (classes_edb_size/float(classes_size)) * 100, (classes_udb_size/float(classes_size)) * 100
def percentages_to_graph(self) :
info = { "info" : [], "nodes" : [], "links" : []}
N = {}
L = {}
for _class in self.vm.get_classes() :
elems_hash = set()
# print _class.get_name()
for method in _class.get_methods() :
code = method.get_code()
if code == None :
continue
buff_list = self.vmx.get_method_signature( method, predef_sign = DEFAULT_SIGNATURE ).get_list()
for i in buff_list :
elem_hash = long(simhash( i ))
elems_hash.add( elem_hash )
ret = self.db.elems_are_presents( elems_hash )
sort_ret = eval_res_per_class( ret )
if sort_ret != {} :
if _class.get_name() not in N :
info["nodes"].append( { "name" : _class.get_name().split("/")[-1], "group" : 0 } )
N[_class.get_name()] = len(N)
for j in sort_ret :
if j not in N :
N[j] = len(N)
info["nodes"].append( { "name" : j, "group" : 1 } )
key = _class.get_name() + j
if key not in L :
L[ key ] = { "source" : N[_class.get_name()], "target" : N[j], "value" : 0 }
info["links"].append( L[ key ] )
for k in sort_ret[j] :
if sort_ret[j][k] > L[ key ]["value"] :
L[ key ]["value"] = sort_ret[j][k]
return info
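# Usage sketch; the androguard objects vm/vmx are supplied by the caller and
# are assumptions of this example. Note that percentages_code() and
# percentages_to_graph() read self.vm and self.vmx, which are never assigned
# in __init__ and must therefore be set on the instance beforehand.
#   edb = ElsimDB('/path/to/signatures.db')
#   info = edb.percentages(vm, vmx, threshold=10)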
class ElsimDBIn :
def __init__(self, output) :
self.db = DBFormat( output )
def add_name(self, name, value) :
self.db.add_name( name, value )
def add(self, d, dx, name, sname, regexp_pattern, regexp_exclude_pattern) :
for _class in d.get_classes() :
if regexp_pattern != None :
if re.match(regexp_pattern, _class.get_name()) == None :
continue
if regexp_exclude_pattern != None :
if re.match(regexp_exclude_pattern, _class.get_name()) != None :
continue
print "\t", _class.get_name()
for method in _class.get_methods() :
code = method.get_code()
if code == None :
continue
if method.get_length() < 50 or method.get_name() == "<clinit>" or method.get_name() == "<init>" :
continue
buff_list = dx.get_method_signature( method, predef_sign = DEFAULT_SIGNATURE ).get_list()
if len(set(buff_list)) == 1 :
continue
for e in buff_list :
self.db.add_element( name, sname, _class.get_name(), method.get_length(), long(simhash(e)) )
def save(self) :
self.db.save()
|
lgpl-3.0
| -4,021,871,903,078,052,400 | 29.373984 | 148 | 0.504752 | false |
Maaack/Silent-Night-API
|
game_api/migrations/0002_auto_20160207_1603.py
|
1
|
1928
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-08 00:03
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('game_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, null=True)),
('updated', models.DateTimeField(editable=False, null=True)),
('code', models.CharField(max_length=8, verbose_name='Code')),
('name', models.CharField(max_length=50, verbose_name='Name')),
],
options={
'verbose_name_plural': 'Games',
'ordering': ['-created'],
'verbose_name': 'Game',
},
),
migrations.CreateModel(
name='GameSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, null=True)),
('updated', models.DateTimeField(editable=False, null=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
options={
'verbose_name_plural': 'Game Settings',
'ordering': ['-created'],
'verbose_name': 'Game Settings',
},
),
migrations.AddField(
model_name='game',
name='settings',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game_api.GameSettings'),
),
]
|
gpl-3.0
| -5,922,265,363,484,232,000 | 36.803922 | 114 | 0.547718 | false |
DBWangGroupUNSW/revs
|
revs/entity.py
|
1
|
1871
|
from elasticsearch.helpers import bulk
class EntityDoc:
def __init__(self, name, uri, num_occ):
self.name = name
self.uri = uri
self.num_occ = num_occ
class EntityIndexer:
def __init__(self, config, elastic_search):
self._config = config
self._elastic_search = elastic_search
self._search_limit = self._config['entity']['search_limit']
self._index_id = self._config['entity']['index_id']
def search(self, key):
"""
:param key: search key
"""
res = self._elastic_search.search(index=self._index_id, doc_type="entity", body={
"size": self._search_limit,
"query": {
"function_score": {
"query": {
"match": {
"name": {
"query": key,
"fuzziness": "AUTO",
"operator": "and"
}
}
},
"field_value_factor": {
"field": "num_occ"
}
}
}
})
results = []
for hit in res['hits']['hits']:
doc = hit['_source']
results.append({
'name': doc['name'],
'url': doc['uri'],
'score': hit['_score']
})
return results
def index(self, entity_docs):
bulk(self._elastic_search, [
{
'_index': self._index_id,
'_type': "entity",
'_source': {
'name': doc.name,
'uri': doc.uri,
'num_occ': doc.num_occ
}
}
for doc in entity_docs
])
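# Usage sketch; the config keys mirror what __init__ reads, and the client
# construction below is an assumption of this example:
#   import elasticsearch
#   es = elasticsearch.Elasticsearch()
#   indexer = EntityIndexer({'entity': {'search_limit': 10, 'index_id': 'entities'}}, es)
#   indexer.index([EntityDoc('Alan Turing', 'http://dbpedia.org/resource/Alan_Turing', 42)])
#   results = indexer.search('turing')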
|
mit
| 6,968,993,648,632,724,000 | 28.698413 | 89 | 0.377873 | false |
racetted/maestro-utils
|
bin/task_setup.py
|
1
|
50921
|
#!/usr/bin/env python
#/* Part of the Maestro sequencer software package.
# * Copyright (C) 2011-2015 Canadian Meteorological Centre
# * Environment Canada
# *
# * Maestro is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Lesser General Public
# * License as published by the Free Software Foundation,
# * version 2.1 of the License.
# *
# * Maestro is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# * Lesser General Public License for more details.
# *
# * You should have received a copy of the GNU Lesser General Public
# * License along with this library; if not, write to the
# * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# * Boston, MA 02111-1307, USA.
# */
#-------------------------------------------------------------------
# task_setup.py
#
# Module / executable to perform task setup operations
#-------------------------------------------------------------------
"""Create and fill a task runtime directory
INTRODUCTION
This task setup utility makes use of a pair of internal classes
('Section' and 'Config') to convert information contained in a
configuration file into the complete layout of a task directory.
The primary user access to this utility is through the 'Config'
class.
CONFIG CLASS
METHODS
cfg = Config('/path/to/config/file.cfg') - Class constructor. This
method reads and caches the contents of the named input
            configuration file. The output from this method is an instance
of the Config class.
cfg.getSections() - Parse the sections of the configuration file
to generate a list of 'links', 'targets' and 'options'. These
values are stored internally by the instance.
cfg.link() - Generate the subdirectories and links to the files
identified in the config file.
cfg.setOption(option,value) - Set the named option ('delimiter_exec',
'verbosity','cleanup','force') to the value specified in the argument.
This method should be called before the 'getSections' method
to ensure that keywords are properly resolved.
CLASS VARIABLES
configData - Cached copy of the data read from the configuration file.
configFile - Name of the configuration file.
taskdir - User-specified (not true-path'd) path to task directory.
basepath - True path to working directory below task level.
taskname - Task name.
subdir_sectionMap - Name mapping from configuration file sections to
task subdirectories.
verbosity - Integer to control verbosity level.
cleanup - Boolean to clean task directory before setup.
force - Boolean to force actions despite warnings.
error - Error code for return.
ok - Successful completion code for return.
SECTION CLASS
METHODS
s = Section(section) - Class constructor. This method returns an
instance of the 'Section' class for the particular section
identified in the argument list.
s.add(line) - Add the contents of a configuration file line to
            this instance of the Section class. This information
will be appended to any existing additions made to the instance.
CLASS VARIABLES
delimiter_exec - Delimiter for embedded commands (default '`')
delimiter_target - Delimiter for multiple targets on the RHS (default \s or \n)
verbosity - Integer to control verbosity level.
cleanup - Boolean to clean task directory before setup.
force - Force action despite warnings.
"""
__version__ = "0.16.0"
__author__ = "Ron McTaggart-Cowan (ron.mctaggart-cowan@ec.gc.ca)"
#---------
# Imports
#---------
import os
import sys
import shutil
import re
import optparse
import tempfile
import types
import shlex
import copy
from time import time
class Store(object):
"""Space for saving values using a callable object"""
def __init__(self):
"""Class constructor"""
self.saved = []
def __call__(self,value):
"""Add an entry to the saved space"""
self.saved.append(value)
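# Example: s = Store(); s('a'); s('b') leaves s.saved == ['a', 'b'].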
def mkdir_p(path):
import os,sys,errno
try:
os.makedirs(path)
except OSError:
value = sys.exc_info()[1][0]
if value == errno.EEXIST:
pass
else:
sys.stderr.write('task_setup.py::os.makedirs() returned the following error information on an attempt to create ' \
+path+': '+str(sys.exc_info())+"\n")
raise
def which(name,path=None,verbose=True):
"""Duplicates the functionality of UNIX 'which' command"""
if re.search('/',name):
return(name)
bin_path = path and path or os.environ['PATH']
for dir in re.split(':',bin_path):
fullname=os.path.join(dir,name)
try:
if os.path.isfile(fullname):
if os.access(fullname,os.X_OK): return(fullname)
except:
continue
if (verbose): print "Warning: unable to find "+name+" in path:\n"+bin_path
return('')
def path2host(machine,path):
"""Convert a machine/abspath pair to the heirarchical part of a URI"""
if machine:
return(machine+':'+path)
else:
return(path)
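# e.g. path2host('host1', '/data/file') returns 'host1:/data/file'; a falsy
# machine value returns the path unchanged.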
def resolveKeywords(entry,delim_exec='',set=None,verbose=False,internals={}):
"""Resolve special keywords in the entry (no procesing for keywords in embedded commands)"""
delim_start='\$\{'
delim_end='}'
delim = re.compile(delim_start+'(.*?)'+delim_end)
dollar = re.compile('\$')
elements = delim_exec and re.split(delim_exec+'(.*?)'+delim_exec,entry) or [entry]
found_internal = False
for i in range(0,len(elements)):
element_orig=elements[i]
if i%2:
# This is an embedded command. Add delimiters and do not substitute keywords.
elements[i] = delim_exec+elements[i]+delim_exec
else:
# This is a standard string. Attempt to replace all keywords.
for keyword in delim.findall(elements[i]):
vartype = 'unknown'
if not keyword: continue
if internals.has_key(keyword):
this_keyword = internals[keyword]
found_internal = True
vartype = 'internal'
                if vartype != 'internal':
try:
this_keyword = os.environ[keyword]
vartype = 'found'
except KeyError:
vartype = 'environment'
if set:
try:
this_keyword = set[keyword]
vartype='found'
except KeyError:
vartype = vartype+'/set'
                if vartype != 'found':
                    if keyword not in undef_list.saved:
warnline = "Warning: "+vartype+" variable "+keyword+" undefined ... empty substitution performed"
sys.stderr.write(warnline+'\n')
if (verbose): print warnline
undef_list(keyword)
this_keyword = ''
elements[i] = re.sub(delim_start+keyword+delim_end,this_keyword,elements[i])
# Final substitution attempt to support deep internal indexing
for keyword in delim.findall(elements[i]):
this_keyword = ''
if not keyword: continue
if internals.has_key(keyword):
this_keyword = internals[keyword]
found_internal = True
elements[i] = re.sub(delim_start+keyword+delim_end,this_keyword,elements[i])
# Check for leftover $ symbols and generate error message
if dollar.search(elements[i]):
warnline="Error: found a $ character after resolution of "+element_orig+" to "+elements[i]+ \
"\n The result of external keyword resolution cannot contain un-expanded shell variables. Evaluate the\n"+\
" string or remove extra quoting / escape characters before the task_setup call to avoid this problem. "
sys.stderr.write(warnline+'\n')
if (verbose): print warnline
updated = ''.join(elements)
return({'string':updated,'contains_internal':found_internal})
def getTruePath(node,verbosity):
"""Get the true path of a file/directory"""
if node == "": return ""
have_subprocess=True
if (int(verbosity) >= 2): startTime=time()
try:
import subprocess
except ImportError:
have_subprocess=False
try:
get_true_path = "true_path "+node
if have_subprocess:
p = subprocess.Popen(get_true_path,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
true_src = p.stdout.read()
else:
(stdin,stdout_stderr) = os.popen4(get_true_path,'r')
true_src = stdout_stderr.read()
stdin.close()
stdout_stderr.close()
if true_src == '(null)' or not true_src or re.search('No such file or directory$',true_src,re.M) or \
re.search('Probleme avec le path',true_src):
true_src = node
except OSError:
if (os.path.exists(node)):
print "Warning: true_path does not exist or returned an error for "+src_file
true_src = node
if (int(verbosity) >= 2): print("Info 2: getTruePath exec time: " + str( time() - startTime))
return(true_src)
class LinkFile():
"""Structure for link file target information"""
def __init__(self,link,target_host,target,link_only,verbosity=False):
"""Class constructor"""
self.have_subprocess=True
try:
import subprocess
except ImportError:
self.have_subprocess=False
self.link = link
self.target_host = target_host
self.target = target
self.link_only = link_only
self.verbosity = verbosity
self.src = []
self.host = []
self._expandTarget()
self._trueSources()
self._setPrefixes()
self._sourceTypes()
def _expandTarget(self):
"""Complete target information through local or remote wildcard expansion"""
import glob
for i in range(0,len(self.target)):
src_expanded = []
try:
hostname = self.target_host[i]
except TypeError:
hostname = None
src_expanded = glob.glob(self.target[i])
if len(src_expanded) < 1 and hostname:
file_sep = '?'
file_list = "ssh "+hostname+" \"python -c 'import glob; f=glob.glob(\\\""+self.target[i]+"\\\"); print(\\\""+file_sep+"\\\".join(f))'\""
if self.have_subprocess:
import subprocess
p = subprocess.Popen(file_list,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = p.stdout.read()
error = p.stderr.read()
else:
(stdin,stdout,stderr) = os.popen3(file_list,'r')
output = stdout.read()
error = stderr.read()
src_expanded = [fname.rstrip('\n') for fname in re.split('\\'+file_sep,output.rstrip('\n')) if fname]
if len(src_expanded) < 1:
src_expanded = [self.target[i]]
self.src.extend(src_expanded)
self.host.extend([hostname for item in src_expanded])
def _trueSources(self):
"""Get true source paths for entries"""
self.true_src_file = [getTruePath(src_file,self.verbosity) for src_file in self.src]
def _setPrefixes(self):
"""Set hosts and prefixes for entries"""
self.src_file_prefix = [target_host and target_host+':' or '' for target_host in self.host]
def _sourceTypes(self):
"""Determine the type of source"""
self.remote_file_type = ['' for src in self.true_src_file]
for host in set(self.host):
if not host: continue
idx = []
for i in range(0,len(self.true_src_file)):
if self.host[i] == host:
idx.append(i)
check_file = "ssh "+host+" '"
for i in idx:
check_file += ' if [[ -d "'+self.true_src_file[i]+ \
'" ]] ; then echo 2 ; elif [[ -f "'+self.true_src_file[i]+ \
'" ]] ; then echo 1 ; else echo 0 ; fi;'
            check_file = check_file.rstrip(';')
check_file += "'"
if self.have_subprocess:
import subprocess
p = subprocess.Popen(check_file,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = p.stdout.read().rstrip('\n')
error = p.stderr.read()
else:
(stdin,stdout,stderr) = os.popen3(check_file,'r')
output = stdout.read().rstrip('\n')
error = stderr.read()
if len(error) > 0:
warnline = "Warning: STDERR returned from "+self.host[i]+" is "+error
sys.stderr.write(warnline+'\n')
if (self.verbosity): print warnline
if len(output) > 0:
output_list = output.split('\n')
for i in range(0,len(idx)):
                    try:
                        ftype = int(output_list[i])
                    except (ValueError, IndexError):
                        if not self.link_only:
                            print "Warning: required file "+self.true_src_file[idx[i]]+" does not exist on host "+host
                        continue
if ftype == 1:
self.remote_file_type[idx[i]] = 'file'
elif ftype == 2:
self.remote_file_type[idx[i]] = 'directory'
else:
print "Warning: unable to login to target host "+self.host[i]+". See previous error statement for STDERR details."
def rephost(self):
"""Repeat host entry for all targets"""
self.src.extend(self.target)
self.host.extend([self.target_host[0] for item in self.target])
class Section(list):
"""Data and functions applicable to individual configuration sections"""
# Class variables
delimiter_exec = '`'
delimiter_target = '(?<!<no)\n|\s+(?!value>)'
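    # The lookbehind/lookahead in delimiter_target keep the literal token
    # '<no value>' from being split at its internal whitespace.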
verbosity = 0
cleanup = False
force = False
def __init__(self,section,set=None,cfg=None,attrib={},varcache=None):
"""Class constructor"""
self.section = section
self.set = set
self.cfg = cfg
self.attrib = attrib
self.varcacheFile = varcache
no_loop = {'var':None,'steps':list('0')}
self.loop = copy.copy(no_loop)
if self._isType('loop'):
try:
self.loop['var'] = self.attrib['var']
step = self.attrib.has_key('step') and int(self.attrib['step']) or 1
format_string = "%0"+str(len(self.attrib['start']))+"d"
self.loop['steps'] = [format_string % (i) for i in range(int(self.attrib['start']),int(self.attrib['end'])+1,step)]
except KeyError:
self.loop = no_loop
warnline = "Warning: incomplete loop specification for <"+section+"> - no looping for this section"
sys.stderr.write(warnline+'\n')
if (self.verbosity): print warnline
except ValueError:
self.loop = no_loop
warnline = "Warning: invalid loop specification for <"+section+"> - no looping for this section"
sys.stderr.write(warnline+'\n')
if (self.verbosity): print warnline
    def _isType(self,check_type):
        """Determine whether this section is of a specific type"""
        return(self.attrib.get('type') == check_type)
def _splitHost(self,entry):
"""Split a set of strings into host:path form"""
hostpath = {'host':[],'path':[]}
for item in entry:
try:
(host,path) = re.split(':',item)
host_noquote = re.sub('[\'\"]','',host)
except ValueError:
path = item
host_noquote = None
hostpath['host'].append(host_noquote)
hostpath['path'].append(path)
return(hostpath)
def _sectionResolveKeywords(self,entry,internals=None):
"""Resolve special keywords in the entry"""
return resolveKeywords(entry,delim_exec=self.delimiter_exec,set=self.set,verbose=self.verbosity,internals=internals)
def _executeEmbedded(self,entry,internals={}):
"""Execute backtic embedded commands and substitute result"""
have_subprocess=True
try:
import subprocess
except ImportError:
have_subprocess=False
updated = [entry]
delim = re.compile(self.delimiter_exec+'(.*?)'+self.delimiter_exec)
shell_dot_config = (self.cfg) and '. '+self.cfg+' >/dev/null 2>&1; ' or 'true; '
if (self.varcacheFile):
shell_gen_cachefile = 'task_setup_cachegen '+self.cfg+' '+self.varcacheFile+' ; . '+self.varcacheFile+' ; '
command_prefix = 'if [[ -s '+self.varcacheFile+' ]] ; then . '+self.varcacheFile+' >/dev/null 2>&1 ; else '+shell_gen_cachefile+'fi ; '
else:
command_prefix = shell_dot_config
for command in delim.finditer(entry):
for var in internals.keys():
command_prefix = command_prefix+str(var)+'='+str(internals[var])+'; '
if have_subprocess:
p = subprocess.Popen(command_prefix+command.group(1),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
error_message = p.stderr.read().rstrip('\n')
outbuf = p.stdout.read().rstrip('\n ')
else:
(stdin,stdout,stderr) = os.popen3(command_prefix+command.group(1),'r')
error_message = stderr.read().rstrip('\n')
outbuf = stdout.read().rstrip('\n ')
stdin.close()
stdout.close()
stderr.close()
elements = re.split(self.delimiter_target,outbuf)
target_list = []
for j in range(0,len(updated)):
for i in range(0,len(elements)):
target_list.append(command.re.sub(elements[i],updated[j],count=1))
updated = target_list
if error_message:
print "Warning: the embedded command "+self.delimiter_exec+command.string+self.delimiter_exec+ \
" in the configuration file returned an error: "+error_message
return(updated)
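    # Illustrative: with the default backtick delimiter, a target such as
    # "data_`date +%Y`" runs 'date +%Y' in the shell (after sourcing the
    # cached configuration) and substitutes its output, e.g. ["data_2013"].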
def add(self,line,search_path):
"""Add data to the section"""
data = re.split('\s+',re.sub('^(#)+',' ',line))
entry = {}
try:
rawLink = data[1]
rawTarget = ' '.join(data[2:]).rstrip()
except IndexError:
warnline = "Warning: ignoring malformed configuration line: "+line
sys.stderr.write(warnline)
if (self.verbosity): print(warnline)
return(False)
bin_path = None
if search_path:
try:
bin_path = self.set['PATH']
except:
pass
lastSlash = re.compile('/$',re.M)
noval = re.compile('^\s*[\'\"]*<no\svalue>',re.M)
comment = re.compile('^#',re.M)
for step in self.loop['steps']:
loopInternals={self.loop['var']:step}
link = self._sectionResolveKeywords(lastSlash.sub('',rawLink),internals=loopInternals)
link_split = self._splitHost([link['string']])
entry["link_host"] = link_split["host"][0]
entry["link"] = link_split["path"][0]
target = self._sectionResolveKeywords(rawTarget,internals=loopInternals)
target_executed = [str(item).replace("'","").rstrip() for item in self._executeEmbedded(target['string'],internals=loopInternals)]
target_list = re.split(self.delimiter_target,' '.join(target_executed))
target_split = self._splitHost(target_list)
if ([True for target_string in target_split["path"] if noval.match(target_string)]):
if any([True for target_string in target_split["path"] if not noval.match(target_string)]):
print "Info 1: some entries for "+link['string']+" will not be added because of special target value '<no value>'"
else:
print "Info 1: will not create link for "+link['string']+" because of special target value '<no value>'"
continue
if step != self.loop['steps'][0] and not (link['contains_internal'] or target['contains_internal']):
continue
entry["target"] = []
entry["target_host"] = []
for i in range(0,len(target_split["path"])):
if comment.match(target_split["path"][i]):
break
if not noval.match(target_split["path"][i]):
entry["target_host"].append(target_split["host"][i])
entry["target"].append(target_split["path"][i])
entry["target_type"] = (lastSlash.search(rawLink) or len(entry["target"]) > 1) and 'directory' or 'file'
if search_path:
entry["target"] = [which(target,path=bin_path) for target in entry["target"]]
entry["copy"] = False
entry["cleanup"] = False
entry["create_target"] = False
entry["link_only"] = False
if self.section == 'output':
entry["create_target"] = True
entry["link_only"] = True
self.append(copy.deepcopy(entry))
class Config(dict):
"""Data and functions applicable to the task setup"""
# Class variables
configData = {}
configFile = None
taskdir = None
basepath = None
taskname = None
verbosity = 0
cleanup = False
force = False
error = 0
ok = 1
subdir_sectionMap = {'input': 'input',
'executables': 'bin',
'work': 'work',
'output': 'output',
'setup': '.setup'} #Tags in config files (keys) are mapped to these subdir names (values)
force_sections = ['work','setup'] #Force the creation of these sections regardless of config file contents
search_path_sections = ['executables','setup'] #These sections will search the PATH for non-fully-qualified targets
ignore_sections = ['seq_scheduler'] #Ignore these sections in the configuration file
varcache_name = 'task_setup_varcache.txt' #Name for environment caching in embedded commands
def __init__(self,file=None,taskdir=None,set=None,varcache=None):
"""Class constructor"""
self.configFile = file
self.taskdir = taskdir
self.setFile = set
self.set = None
self.sectionList = []
self.callFile = self._createTmpFile(sys.argv)
self.envFile = self._createTmpFile(os.environ)
self.varcacheFile = (varcache) and varcache or self._createTmpFile(None)
self["file"] = file
if set:
self._readSetFile(set)
if not self.configData:
self._readConfigFile(self["file"])
def __del__(self):
"""Class destructor"""
os.unlink(self.callFile)
os.unlink(self.envFile)
def _createTmpFile(self,contents):
"""Create and fill a temporary file, returning the file name"""
try:
(fdunit,filename) = tempfile.mkstemp()
if not contents: return(filename)
fd = os.fdopen(fdunit,"w+b")
except OSError:
print "Warning: Unable to create temporary file for call statement"
return(None)
if type(contents) == types.InstanceType:
keys = contents.keys()
keys.sort()
for key in keys:
fd.write(str(key)+'='+str(contents[key])+'\n')
elif type(contents) == types.ListType:
fd.write(' '.join(contents)+'\n')
else:
fd.write(str(contents)+'\n')
fd.close()
return(filename)
def _readSetFile(self,file):
"""Read set file"""
try:
fd = open(file,"rb")
setData = fd.readlines()
except IOError:
print "Warning: unable to read set from "+file
self.set = None
return()
fd.close()
sep = re.compile(r"(?<!\\)'")
equal = re.compile("=")
quote_count = 0
self.set = {}; concat_line = ''
for line in setData:
quote_count += len(re.findall(sep,line))
if quote_count%2 == 0:
concat_line += line
try:
(key,value) = equal.split(concat_line,maxsplit=1)
except ValueError:
if quote_count == 0:
concat_line = ''
continue
self.set[key] = value.rstrip('\n')
concat_line = ''
else:
concat_line += line
def _readConfigFile(self,file):
"""Read configuration file"""
if not file:
self.configData = ''
return
try:
fd = open(file,"rb")
try:
self.configData = fd.readlines()
finally:
fd.close()
except IOError:
warnline = "Warning: unable to read from configuration file "+file
sys.stderr.write(warnline+'\n')
if (self.verbosity): print warnline
self.configData = []
self.configFile = '/dev/null'
def _map(self,section):
"""Map a section name to a task subdirectory name"""
try:
subdir = self.subdir_sectionMap[section]
except KeyError:
print "Warning: unknown section "+section+" encountered ... no mapping done"
return(section)
return(subdir)
def _subdir_setup(self,subdir):
"""Set up the requested subdirectory for the task"""
status = self.ok
if not os.path.isdir(subdir):
if (self.verbosity): print "Info 1: creating subdirectory "+subdir
try:
mkdir_p(subdir)
except OSError:
print "Error: could not create "+subdir
status = self.error
return(status)
def _get_subdirs(self,dir,absolute=True):
"""Return a list of relative or absolute expected subdirectories"""
subdirs = [self._map(section) for section in self["sections"].keys()]
subdirs.sort()
if absolute:
return([os.path.join(dir,subdir) for subdir in subdirs])
else:
return(subdirs)
def _taskdir_setup(self):
"""Set up task base directory"""
status = self.ok
if self.cleanup:
if os.path.isdir(self.taskdir):
contents = [entry for entry in os.listdir(self.taskdir) if os.path.isdir(os.path.join(self.taskdir,entry))]
if len(contents) > 0:
if self.force:
for sub in contents:
try:
shutil.rmtree(os.path.join(self.taskdir,sub))
except:
print "Error: unable to force clean workspace subdirectory "+sub
return(self.error)
else:
contents.sort()
if contents == self._get_subdirs(self.taskdir,absolute=False):
for sub in self._get_subdirs(self.taskdir,absolute=True):
try:
shutil.rmtree(sub)
except:
print "Error: unable to remove task subdirectory "+sub
return(self.error)
else:
print "Error: Invalid and/or changed subdirectory <-> section mapping in task_setup.py."
print " The requested task base directory "+self.taskdir+" contains a subdirectory that"
print " is not recognized based on the configuration file "+self["file"]+" If"
print " this is a valid task base directory, please remove it manually and relaunch."
print " Task subdirectories: "+str(contents)
print " Mapped config sections: "+str(self._get_subdirs(self.taskdir,absolute=False))
return(self.error)
if not os.path.isdir(self.taskdir):
try:
mkdir_p(self.taskdir)
except OSError:
print "Error: could not create task base directory "+self.taskdir
return(self.error)
elif not os.access(self.taskdir,os.W_OK):
print "Error: task directory "+self.taskdir+" is not writeable ... exiting"
return(self.error)
# Set task name and working path (needs to exist for `true_path` so it can't be done during construction)
basedir = getTruePath(self.taskdir,self.verbosity)
self.basepath = os.path.dirname(basedir)
self.taskname = os.path.basename(basedir)
return(status)
def _append_meta(self,section,meta):
"""Append metadata to the specified section"""
status = self.ok
try:
self["sections"][section].append(meta)
except KeyError:
status = self.error
return(status)
def _special_appends(self):
"""Add special values to sections"""
self._append_meta("setup",{"link":"task_setup",
"target":[sys.argv[0]],
"target_type":'file',
"target_host":[None],
"copy":False,
"cleanup":False,
"create_target":False,
"link_host":None,
"link_only":False})
if self["file"]:
self._append_meta("setup",{"link":"task_setup.cfg",
"target":[self.configFile],
"target_type":'file',
"target_host":[None],
"copy":True,
"cleanup":False,
"create_target":False,
"link_host":None,
"link_only":False})
if self.varcacheFile:
self._append_meta("setup",{"link":"task_setup_varcache.txt",
"target":[self.varcacheFile],
"target_type":'file',
"target_host":[None],
"copy":True,
"cleanup":True,
"create_target":False,
"link_host":None,
"link_only":False})
self._append_meta("setup",{"link":"task_setup_call.txt",
"target":[self.callFile],
"target_type":'file',
"target_host":[None],
"copy":True,
"cleanup":False,
"create_target":False,
"link_host":None,
"link_only":False})
self._append_meta("setup",{"link":"task_setup_env.txt",
"target":[self.envFile],
"target_host":[None],
"target_type":'file',
"copy":True,
"cleanup":False,
"create_target":False,
"link_host":None,
"link_only":False})
if self.setFile:
self._append_meta("setup",{"link":"task_setup_set.txt",
"target":[self.setFile],
"target_type":'file',
"target_host":[None],
"copy":True,
"cleanup":True,
"create_target":False,
"link_host":None,
"link_only":False})
cachegen = which('task_setup_cachegen',verbose=self.verbosity)
if cachegen:
self._append_meta("setup",{"link":"task_setup_cachegen",
"target":[cachegen],
"target_type":'file',
"target_host":[None],
"copy":False,
"cleanup":False,
"create_target":False,
"link_host":None,
"link_only":False})
true_path=which('true_path',verbose=self.verbosity)
if true_path:
self._append_meta("setup",{"link":"task_setup_truepath",
"target":[true_path],
"target_type":'file',
"target_host":[None],
"copy":False,
"cleanup":False,
"create_target":False,
"link_host":None,
"link_only":False})
return(self.ok)
def _createTarget(self,entry,host,path):
"""Create target directory"""
have_subprocess=True
try:
import subprocess
except ImportError:
have_subprocess=False
status = self.ok
if not entry["create_target"]: return(status)
directory = (entry["target_type"] == 'directory') and path or os.path.split(path)[0]
if not directory:
print "Error: no directory specified target in request for "+entry["link"]
status = self.error
return(status)
if host:
make_dir = "echo \"s.mkdir_onebyone "+directory+"; if [[ -d "+directory+ \
" ]] ; then echo TASK_SETUP_SUCCESS ; else echo TASK_SETUP_FAILURE ; fi\" | ssh "+ \
host+" bash --login"
if have_subprocess:
p = subprocess.Popen(make_dir,shell=True,stderr=subprocess.PIPE,stdout=subprocess.PIPE)
error = p.stderr.read()
output = p.stdout.read()
else:
(stdin,stdout,stderr) = os.popen3(make_dir,'r')
error = stderr.read()
output = stdout.read()
if not re.search("TASK_SETUP_SUCCESS",output):
status = self.error
if re.search("TASK_SETUP_FAILURE",output):
print "Error: login to "+host+" successful but "+directory+" not created"
else:
print "Error: unable to obtain directory status on "+host
if len(error) > 0:
sys.stderr.write("task_setup.py::_createTarget() attempt to connect to "+host+" returned STDERR "+error+"\n")
else:
if not os.path.isdir(directory):
try:
mkdir_p(directory)
if (self.verbosity): print "Info 1: created directory "+directory+" to complete target request"
except:
print "Error: unable to create "+directory+" to complete target request"
status = self.error
return(status)
def _parseSectionHead(self,head):
"""Parse section header into individual attributes"""
head = resolveKeywords(head,set=self.set,verbose=self.verbosity)
try:
att_string = re.split('\s+',head['string'],maxsplit=1)[1]
except IndexError:
return({})
return(dict(token.split('=') for token in shlex.split(att_string)))
def setOption(self,option,value):
"""Option handling dispatcher"""
try:
getattr(Section,option)
except AttributeError:
print "Error: attempt to change invalid setting "+option
return (self.error)
setattr(Section,option,value)
setattr(self,option,value)
return(self.ok)
def getSections(self):
"""Break input data into individual sections"""
currentSection = None
prefix='^\s*#\s*'
validLine = re.compile(prefix+'[^#](.+)',re.M)
sectionHead = re.compile(prefix+'<([^/]\S+)(.*)>',re.M)
sectionFoot = re.compile(prefix+'</(.*)>',re.M)
self["sections"] = {}
for raw_line in self.configData:
        line = re.sub('^\s+','',raw_line)
head = False
valid = validLine.search(line)
if (valid):
foot = sectionFoot.search(line)
if foot and currentSection:
if foot.group(1) != currentSection:
print "Warning: section head <"+currentSection+"> does not match the section foot </"+foot.group(1)+"> in "+self["file"]
currentSection = None
else:
head = sectionHead.search(line)
if head:
if currentSection:
print "Error: found header for "+head.group(1)+" while still in open section for "+currentSection
print " Perhaps the configuration file "+self["file"]+" is missing an </"+currentSection+"> end section tag?"
sys.stderr.write("task_setup.py::getSections() failed parsing "+self["file"]+" at <"+head.group(1)+">\n")
self["sections"] = {}
return(self.error)
currentSection = head.group(1)
if currentSection in self.ignore_sections:
currentSection = None
else:
headAttrib = self._parseSectionHead(head.group(2))
self["sections"][currentSection] = Section(currentSection,set=self.set,cfg=self["file"],attrib=headAttrib,varcache=self.varcacheFile)
self.sectionList.append(currentSection)
if (currentSection and not head):
self["sections"][currentSection].add(line,currentSection in self.search_path_sections)
for force in self.force_sections:
self["sections"][force] = Section(force)
self._special_appends()
return(self.ok)
def write(self,fd):
"""Write the config file sections"""
for section in self.sectionList:
fd.write('#<'+section+'>\n')
for entry in self["sections"][section]:
append = (entry["target_type"] == 'directory') and '/' or ''
target = ''
for i in range(0,len(entry["target"])):
host = (entry["target_host"][i]) and entry["target_host"][i]+':' or ''
target += ' '+host+entry["target"][i]
fd.write('# '+entry["link"]+append+' '+target+'\n')
fd.write('#</'+section+'>\n')
def link(self):
"""Perform subdirectory creation and linking operations"""
have_subprocess=True
try:
import subprocess
except ImportError:
have_subprocess=False
status = self.ok
sub_status = self._taskdir_setup()
if sub_status != self.ok: return(sub_status)
for section in self["sections"].keys():
if (self.verbosity): print " <"+section+">"
abs_subdir = os.path.join(self.taskdir,self._map(section))
sub_status = self._subdir_setup(abs_subdir)
if sub_status != self.ok: return(sub_status)
for entry in self["sections"][section]:
if (int(self.verbosity) >= 2): startTime=time()
line = LinkFile(entry["link"],entry["target_host"],entry["target"],entry["link_only"],verbosity=self.verbosity)
if len(line.target) == 0:
print "Error: empty target for "+line.link+" ... skipping"
status = self.error
continue
link_only = entry["link_only"]
dest = os.path.join(abs_subdir,entry["link"])
if not os.path.isdir(os.path.dirname(dest)):
mkdir_p(os.path.dirname(dest))
if os.path.islink(dest): os.remove(dest)
dest_is_dir = False
if len(line.src) == 0:
line.rephost()
elif entry["target_type"] == 'directory' and not link_only or len(line.src) > 1:
dest_is_dir = True
if not os.path.isdir(dest):
try:
mkdir_p(dest)
except OSError:
print "Error: could not create "+section+" subdirectory "+dest
dest_is_dir = False
status = self.error
# Process each file on the line separately
for i in range(len(line.src)-1,-1,-1):
# Retrieve information about the source file
true_src_file = line.true_src_file[i]
src_file_prefix = line.src_file_prefix[i]
# Retrieve information about the destination
if dest_is_dir and not link_only:
dest_file = os.path.join(dest,os.path.basename(line.src[i]))
else:
dest_file = dest
dest_path_short = dest_file.replace(self.taskdir,'')
# Check that the source file information is valid
if not true_src_file:
print "Error: skipping entry because no source file given for "+dest_path_short
status = self.error
continue
# Take care of creating directory links
                    if os.path.isdir(true_src_file) or line.remote_file_type[i] == 'directory':
if entry["target_type"] != 'directory':
if (self.verbosity): print "Warning: "+entry["target_type"]+" link "+entry["link"]+ \
" refers to a directory target "+str(entry["target"])
if os.path.islink(dest_file):
print "Warning: updating directory link to "+dest_path_short+" => "+src_file_prefix+true_src_file+" (previous target was "+os.readlink(dest_file)+")"
os.remove(dest_file)
try:
os.symlink(path2host(line.host[i],true_src_file),dest_file)
if (self.verbosity): print "Info 1: linked directory "+dest_path_short+" => "+src_file_prefix+true_src_file
except IOError:
print "Error: error creating symlink for directory "+dest_path_short+" => "+src_file_prefix+true_src_file
status = self.error
except OSError:
status = self.error
if os.path.isdir(dest_file) and link_only:
print "Error: multiple entries for "+dest_path_short+" in the "+section+" section are not supported"
else:
raise
# Take care of creating file links or copies
else:
isfile = True
                        if line.remote_file_type[i] != 'file':
try:
fd = open(true_src_file,'r')
except IOError:
isfile = False
if isfile and entry["target_type"] != 'file' and len(line.src) == 1:
if (self.verbosity): print "Warning: "+entry["target_type"]+" link "+entry["link"]+ \
"/ refers to a file target "+str(entry["target"])
if isfile or link_only:
try:
if entry["copy"] and not link_only:
if entry["cleanup"]:
shutil.move(true_src_file,dest_file)
link_type = "moved"
else:
shutil.copyfile(true_src_file,dest_file)
link_type = "copied"
else:
if entry["create_target"]:
status_create = self._createTarget(entry,line.host[i],true_src_file)
if status == self.ok: status = status_create
true_src_file = getTruePath(true_src_file,self.verbosity)
if true_src_file == "":
print "Error: attempting to create link to empty target string."
status = self.error
if os.path.islink(dest_file):
print "Warning: updating file link to "+dest_path_short+" => "+src_file_prefix+true_src_file+" (previous target was "+os.readlink(dest_file)+")"
os.remove(dest_file)
os.symlink(path2host(line.host[i],true_src_file),dest_file)
link_type = "linked"
if (self.verbosity): print "Info 1: "+link_type+" file "+dest_path_short+" => "+src_file_prefix+true_src_file
                            except OSError:
                                print "Error: error creating symlink for file "+dest_path_short+" => "+src_file_prefix+true_src_file
                                status = self.error
                                raise
else:
print "Error: unable to link "+dest_path_short+" => "+src_file_prefix+true_src_file+" ... source file is unavailable"
status = self.error
if (int(self.verbosity) >= 2): print("Info 2: Link creation time: " + str( time() - startTime))
if (self.verbosity): print " </"+section+">"
return(status)
# Executable segment
if __name__ == "__main__":
# Command line argument parsing
usage = "%prog [options] CONFIG_FILE"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-b","--base",dest="basedir",default='.',
help="task base DIRECTORY",metavar="DIRECTORY")
parser.add_option("-v","--verbose",dest="verbose",action="count",
help="verbose runtime output",default=0)
parser.add_option("-c","--clean",dest="clean",action="store_true",
help="clean task directory before setup",default=False)
parser.add_option("-r","--force",dest="force",action="store_true",
help="force action (ignore warnings)",default=False)
parser.add_option("-e","--environment",dest="environment",default=None,
help="text FILE containing the set namespace in which to run",metavar="FILE")
parser.add_option("","--varcache",dest="varcache",default=None,
help="text FILE containing a 'sourceable' version of the set namespace",metavar="FILE")
parser.add_option("-d","--dry-run",dest="dryrun",action="store_true",
help="handle configuration file without acting on it",default=False)
(options,args) = parser.parse_args()
# Ensure that the user has provided a configuration file
try:
cfgFile = args[0]
except IndexError:
cfgFile = None
# Read, parse and act on configuration file for task setup
undef_list = Store()
cfg = Config(file=cfgFile,taskdir=options.basedir,set=options.environment,varcache=options.varcache)
cfg.setOption('cleanup',options.clean)
cfg.setOption('force',options.force)
cfg.setOption('verbosity',options.verbose)
if cfg.getSections():
pass
else:
if cfg.verbosity: print " *** Error: task_setup.py unable to continue *** "
sys.exit(1)
if options.dryrun:
cfg.write(sys.stdout)
del cfg
sys.exit(0)
if cfg.link():
del cfg
sys.exit(0)
else:
if cfg.verbosity: print " *** Error: problematic completion from task_setup.py *** "
del cfg
sys.exit(1)
|
lgpl-2.1
| -177,184,573,625,039,000 | 46.368372 | 184 | 0.50814 | false |
jbmouret/limbo
|
src/tutorials/ros.py
|
2
|
1325
|
#! /usr/bin/env python
# encoding: utf-8
# to be put in your experiment's directory
import os
from waflib.Configure import conf
def options(opt):
opt.add_option('--ros', type='string', help='path to ros', dest='ros')
@conf
def check_ros(conf):
if conf.options.ros:
includes_check = [conf.options.ros + '/include']
libs_check = [conf.options.ros + '/lib']
else:
if 'ROS_DISTRO' not in os.environ:
conf.start_msg('Checking for ROS')
            conf.end_msg('ROS_DISTRO not in environment variables', 'RED')
return 1
includes_check = ['/opt/ros/' + os.environ['ROS_DISTRO'] + '/include']
libs_check = ['/opt/ros/' + os.environ['ROS_DISTRO'] + '/lib/']
try:
conf.start_msg('Checking for ROS includes')
res = conf.find_file('ros/ros.h', includes_check)
conf.end_msg('ok')
libs = ['roscpp','rosconsole','roscpp_serialization','rostime', 'xmlrpcpp','rosconsole_log4cxx', 'rosconsole_backend_interface']
conf.start_msg('Checking for ROS libs')
for lib in libs:
res = res and conf.find_file('lib'+lib+'.so', libs_check)
conf.end_msg('ok')
conf.env.INCLUDES_ROS = includes_check
conf.env.LIBPATH_ROS = libs_check
conf.env.LIB_ROS = libs
conf.env.DEFINES_ROS = ['USE_ROS']
except:
conf.end_msg('Not found', 'RED')
return 1
return 1
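# Illustrative (hypothetical) use from an experiment's wscript, assuming this
# file is loaded as a waf tool named 'ros':
#   def configure(conf):
#       conf.load('ros')
#       conf.check_ros()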
|
gpl-3.0
| 5,223,864,863,413,622,000 | 31.317073 | 132 | 0.643019 | false |
pdehaye/theming-edx-platform
|
cms/djangoapps/contentstore/tests/test_contentstore.py
|
1
|
78438
|
#pylint: disable=E1101
import json
import shutil
import mock
from textwrap import dedent
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse
from path import path
from tempdir import mkdtemp_clean
from fs.osfs import OSFS
import copy
from json import loads
from datetime import timedelta
from django.contrib.auth.models import User
from django.dispatch import Signal
from contentstore.utils import get_modulestore
from contentstore.tests.utils import parse_json
from auth.authz import add_user_to_creator_group
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from contentstore.tests.modulestore_config import TEST_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore import Location, mongo
from xmodule.modulestore.store_utilities import clone_course
from xmodule.modulestore.store_utilities import delete_course
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore, _CONTENTSTORE
from xmodule.modulestore.xml_exporter import export_to_xml
from xmodule.modulestore.xml_importer import import_from_xml, perform_xlint
from xmodule.modulestore.inheritance import own_metadata
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.utils import restore_asset_from_trashcan, empty_asset_trashcan
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor
from xmodule.seq_module import SequenceDescriptor
from xmodule.modulestore.exceptions import ItemNotFoundError
from contentstore.views.component import ADVANCED_COMPONENT_TYPES
from xmodule.exceptions import NotFoundError
from django_comment_common.utils import are_permissions_roles_seeded
from xmodule.exceptions import InvalidVersionError
import datetime
from pytz import UTC
from uuid import uuid4
from pymongo import MongoClient
from student.models import CourseEnrollment
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['OPTIONS']['db'] = 'test_xcontent_%s' % uuid4().hex
class MongoCollectionFindWrapper(object):
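    """Wrap a pymongo collection's find() so tests can count issued queries."""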
def __init__(self, original):
self.original = original
self.counter = 0
def find(self, query, *args, **kwargs):
self.counter = self.counter + 1
return self.original(query, *args, **kwargs)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, MODULESTORE=TEST_MODULESTORE)
class ContentStoreToyCourseTest(ModuleStoreTestCase):
"""
Tests that rely on the toy courses.
TODO: refactor using CourseFactory so they do not.
"""
def setUp(self):
settings.MODULESTORE['default']['OPTIONS']['fs_root'] = path('common/test/data')
settings.MODULESTORE['direct']['OPTIONS']['fs_root'] = path('common/test/data')
uname = 'testuser'
email = 'test+courses@edx.org'
password = 'foo'
        # Create the user so we can log them in.
self.user = User.objects.create_user(uname, email, password)
# Note that we do not actually need to do anything
# for registration if we directly mark them active.
self.user.is_active = True
# Staff has access to view all courses
self.user.is_staff = True
# Save the data that we've just changed to the db.
self.user.save()
self.client = Client()
self.client.login(username=uname, password=password)
def tearDown(self):
MongoClient().drop_database(TEST_DATA_CONTENTSTORE['OPTIONS']['db'])
_CONTENTSTORE.clear()
def check_components_on_page(self, component_types, expected_types):
"""
Ensure that the right types end up on the page.
component_types is the list of advanced components.
expected_types is the list of elements that should appear on the page.
expected_types and component_types should be similar, but not
exactly the same -- for example, 'video' in
component_types should cause 'Video' to be present.
"""
store = modulestore('direct')
import_from_xml(store, 'common/test/data/', ['simple'])
course = store.get_item(Location(['i4x', 'edX', 'simple',
'course', '2012_Fall', None]), depth=None)
course.advanced_modules = component_types
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course.save()
store.update_metadata(course.location, own_metadata(course))
# just pick one vertical
descriptor = store.get_items(Location('i4x', 'edX', 'simple', 'vertical', None, None))[0]
resp = self.client.get(reverse('edit_unit', kwargs={'location': descriptor.location.url()}))
self.assertEqual(resp.status_code, 200)
for expected in expected_types:
self.assertIn(expected, resp.content)
def test_advanced_components_in_edit_unit(self):
# This could be made better, but for now let's just assert that we see the advanced modules mentioned in the page
# response HTML
self.check_components_on_page(ADVANCED_COMPONENT_TYPES, ['Word cloud',
'Annotation',
'Open Response Assessment',
'Peer Grading Interface'])
def test_advanced_components_require_two_clicks(self):
self.check_components_on_page(['word_cloud'], ['Word cloud'])
def test_malformed_edit_unit_request(self):
store = modulestore('direct')
import_from_xml(store, 'common/test/data/', ['simple'])
# just pick one vertical
descriptor = store.get_items(Location('i4x', 'edX', 'simple', 'vertical', None, None))[0]
location = descriptor.location.replace(name='.' + descriptor.location.name)
resp = self.client.get(reverse('edit_unit', kwargs={'location': location.url()}))
self.assertEqual(resp.status_code, 400)
def check_edit_unit(self, test_course_name):
import_from_xml(modulestore('direct'), 'common/test/data/', [test_course_name])
for descriptor in modulestore().get_items(Location(None, None, 'vertical', None, None)):
print "Checking ", descriptor.location.url()
print descriptor.__class__, descriptor.location
resp = self.client.get(reverse('edit_unit', kwargs={'location': descriptor.location.url()}))
self.assertEqual(resp.status_code, 200)
def test_edit_unit_toy(self):
self.check_edit_unit('toy')
def _get_draft_counts(self, item):
cnt = 1 if getattr(item, 'is_draft', False) else 0
for child in item.get_children():
cnt = cnt + self._get_draft_counts(child)
return cnt
def test_get_items(self):
'''
This verifies a bug we had where the None setting in get_items() meant 'wildcard'
Unfortunately, None = published for the revision field, so get_items() would return
both draft and non-draft copies.
'''
store = modulestore('direct')
draft_store = modulestore('draft')
import_from_xml(store, 'common/test/data/', ['simple'])
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
draft_store.convert_to_draft(html_module.location)
# now query get_items() to get this location with revision=None, this should just
# return back a single item (not 2)
items = store.get_items(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertEqual(len(items), 1)
self.assertFalse(getattr(items[0], 'is_draft', False))
# now refetch from the draft store. Note that even though we pass
# None in the revision field, the draft store will replace that with 'draft'
items = draft_store.get_items(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertEqual(len(items), 1)
self.assertTrue(getattr(items[0], 'is_draft', False))
def test_draft_metadata(self):
'''
This verifies a bug we had where inherited metadata was getting written to the
module as 'own-metadata' when publishing. Also verifies the metadata inheritance is
properly computed
'''
store = modulestore('direct')
draft_store = modulestore('draft')
import_from_xml(store, 'common/test/data/', ['simple'])
course = draft_store.get_item(Location(['i4x', 'edX', 'simple',
'course', '2012_Fall', None]), depth=None)
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertEqual(html_module.lms.graceperiod, course.lms.graceperiod)
self.assertNotIn('graceperiod', own_metadata(html_module))
draft_store.convert_to_draft(html_module.location)
# refetch to check metadata
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertEqual(html_module.lms.graceperiod, course.lms.graceperiod)
self.assertNotIn('graceperiod', own_metadata(html_module))
# publish module
draft_store.publish(html_module.location, 0)
# refetch to check metadata
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertEqual(html_module.lms.graceperiod, course.lms.graceperiod)
self.assertNotIn('graceperiod', own_metadata(html_module))
# put back in draft and change metadata and see if it's now marked as 'own_metadata'
draft_store.convert_to_draft(html_module.location)
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
new_graceperiod = timedelta(hours=1)
self.assertNotIn('graceperiod', own_metadata(html_module))
html_module.lms.graceperiod = new_graceperiod
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
html_module.save()
self.assertIn('graceperiod', own_metadata(html_module))
self.assertEqual(html_module.lms.graceperiod, new_graceperiod)
draft_store.update_metadata(html_module.location, own_metadata(html_module))
# read back to make sure it reads as 'own-metadata'
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertIn('graceperiod', own_metadata(html_module))
self.assertEqual(html_module.lms.graceperiod, new_graceperiod)
# republish
draft_store.publish(html_module.location, 0)
# and re-read and verify 'own-metadata'
draft_store.convert_to_draft(html_module.location)
html_module = draft_store.get_item(['i4x', 'edX', 'simple', 'html', 'test_html', None])
self.assertIn('graceperiod', own_metadata(html_module))
self.assertEqual(html_module.lms.graceperiod, new_graceperiod)
def test_get_depth_with_drafts(self):
import_from_xml(modulestore('direct'), 'common/test/data/', ['simple'])
course = modulestore('draft').get_item(
Location(['i4x', 'edX', 'simple', 'course', '2012_Fall', None]),
depth=None
)
# make sure no draft items have been returned
num_drafts = self._get_draft_counts(course)
self.assertEqual(num_drafts, 0)
problem = modulestore('draft').get_item(
Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None])
)
# put into draft
modulestore('draft').convert_to_draft(problem.location)
# make sure we can query that item and verify that it is a draft
draft_problem = modulestore('draft').get_item(
Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None])
)
self.assertTrue(getattr(draft_problem, 'is_draft', False))
# now requery with depth
course = modulestore('draft').get_item(
Location(['i4x', 'edX', 'simple', 'course', '2012_Fall', None]),
depth=None
)
# make sure just one draft item have been returned
num_drafts = self._get_draft_counts(course)
self.assertEqual(num_drafts, 1)
def test_no_static_link_rewrites_on_import(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'])
handouts = module_store.get_item(Location(['i4x', 'edX', 'toy', 'course_info', 'handouts', None]))
self.assertIn('/static/', handouts.data)
handouts = module_store.get_item(Location(['i4x', 'edX', 'toy', 'html', 'toyhtml', None]))
self.assertIn('/static/', handouts.data)
@mock.patch('xmodule.course_module.requests.get')
def test_import_textbook_as_content_element(self, mock_get):
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'])
course = module_store.get_item(Location(['i4x', 'edX', 'toy', 'course', '2012_Fall', None]))
self.assertGreater(len(course.textbooks), 0)
def test_default_tabs_on_create_course(self):
module_store = modulestore('direct')
CourseFactory.create(org='edX', course='999', display_name='Robot Super Course')
course_location = Location(['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None])
course = module_store.get_item(course_location)
expected_tabs = []
expected_tabs.append({u'type': u'courseware'})
expected_tabs.append({u'type': u'course_info', u'name': u'Course Info'})
expected_tabs.append({u'type': u'textbooks'})
expected_tabs.append({u'type': u'discussion', u'name': u'Discussion'})
expected_tabs.append({u'type': u'wiki', u'name': u'Wiki'})
expected_tabs.append({u'type': u'progress', u'name': u'Progress'})
self.assertEqual(course.tabs, expected_tabs)
def test_static_tab_reordering(self):
module_store = modulestore('direct')
CourseFactory.create(org='edX', course='999', display_name='Robot Super Course')
course_location = Location(['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None])
ItemFactory.create(
parent_location=course_location,
category="static_tab",
display_name="Static_1")
ItemFactory.create(
parent_location=course_location,
category="static_tab",
display_name="Static_2")
course = module_store.get_item(Location(['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None]))
# reverse the ordering
reverse_tabs = []
for tab in course.tabs:
if tab['type'] == 'static_tab':
reverse_tabs.insert(0, 'i4x://edX/999/static_tab/{0}'.format(tab['url_slug']))
self.client.post(reverse('reorder_static_tabs'), json.dumps({'tabs': reverse_tabs}), "application/json")
course = module_store.get_item(Location(['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None]))
# compare to make sure that the tabs information is in the expected order after the server call
course_tabs = []
for tab in course.tabs:
if tab['type'] == 'static_tab':
course_tabs.append('i4x://edX/999/static_tab/{0}'.format(tab['url_slug']))
self.assertEqual(reverse_tabs, course_tabs)
def test_import_polls(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'])
items = module_store.get_items(['i4x', 'edX', 'toy', 'poll_question', None, None])
found = len(items) > 0
self.assertTrue(found)
# check that there's actually content in the 'question' field
self.assertGreater(len(items[0].question), 0)
def test_xlint_fails(self):
err_cnt = perform_xlint('common/test/data', ['toy'])
self.assertGreater(err_cnt, 0)
@override_settings(COURSES_WITH_UNSAFE_CODE=['edX/toy/.*'])
def test_module_preview_in_whitelist(self):
'''
Tests the ajax callback to render an XModule
'''
direct_store = modulestore('direct')
import_from_xml(direct_store, 'common/test/data/', ['toy'])
# also try a custom response which will trigger the 'is this course in whitelist' logic
problem_module_location = Location(['i4x', 'edX', 'toy', 'vertical', 'vertical_test', None])
url = reverse('preview_component', kwargs={'location': problem_module_location.url()})
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_video_module_caption_asset_path(self):
'''
This verifies that a video caption url is as we expect it to be
'''
direct_store = modulestore('direct')
import_from_xml(direct_store, 'common/test/data/', ['toy'])
# also try a custom response which will trigger the 'is this course in whitelist' logic
video_module_location = Location(['i4x', 'edX', 'toy', 'video', 'sample_video', None])
url = reverse('preview_component', kwargs={'location': video_module_location.url()})
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, 'data-caption-asset-path="/c4x/edX/toy/asset/subs_"')
def test_delete(self):
direct_store = modulestore('direct')
CourseFactory.create(org='edX', course='999', display_name='Robot Super Course')
course_location = Location(['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None])
chapterloc = ItemFactory.create(parent_location=course_location, display_name="Chapter").location
ItemFactory.create(parent_location=chapterloc, category='sequential', display_name="Sequential")
sequential = direct_store.get_item(Location(['i4x', 'edX', '999', 'sequential', 'Sequential', None]))
chapter = direct_store.get_item(Location(['i4x', 'edX', '999', 'chapter', 'Chapter', None]))
# make sure the parent points to the child object which is to be deleted
self.assertTrue(sequential.location.url() in chapter.children)
self.client.post(
reverse('delete_item'),
json.dumps({'id': sequential.location.url(), 'delete_children': 'true', 'delete_all_versions': 'true'}),
"application/json"
)
found = False
try:
direct_store.get_item(Location(['i4x', 'edX', '999', 'sequential', 'Sequential', None]))
found = True
except ItemNotFoundError:
pass
self.assertFalse(found)
chapter = direct_store.get_item(Location(['i4x', 'edX', '999', 'chapter', 'Chapter', None]))
# make sure the parent no longer points to the child object which was deleted
self.assertFalse(sequential.location.url() in chapter.children)
def test_about_overrides(self):
'''
This test case verifies that a course can use specialized override for about data, e.g. /about/Fall_2012/effort.html
while there is a base definition in /about/effort.html
'''
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'])
effort = module_store.get_item(Location(['i4x', 'edX', 'toy', 'about', 'effort', None]))
self.assertEqual(effort.data, '6 hours')
# this one should be in a non-override folder
effort = module_store.get_item(Location(['i4x', 'edX', 'toy', 'about', 'end_date', None]))
self.assertEqual(effort.data, 'TBD')
def test_remove_hide_progress_tab(self):
module_store = modulestore('direct')
CourseFactory.create(org='edX', course='999', display_name='Robot Super Course')
course_location = Location(['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None])
course = module_store.get_item(course_location)
self.assertFalse(course.hide_progress_tab)
def test_asset_import(self):
'''
This test validates that an image asset is imported and a thumbnail was generated for a .gif
'''
content_store = contentstore()
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store, verbose=True)
course_location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
course = module_store.get_item(course_location)
self.assertIsNotNone(course)
# make sure we have some assets in our contentstore
all_assets = content_store.get_all_content_for_course(course_location)
self.assertGreater(len(all_assets), 0)
# make sure we have some thumbnails in our contentstore
content_store.get_all_content_thumbnails_for_course(course_location)
#
# cdodge: temporarily comment out assertion on thumbnails because many environments
# will not have the jpeg converter installed and this test will fail
#
#
# self.assertGreater(len(all_thumbnails), 0)
content = None
try:
location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')
content = content_store.find(location)
except NotFoundError:
pass
self.assertIsNotNone(content)
#
# cdodge: temporarily comment out assertion on thumbnails because many environments
# will not have the jpeg converter installed and this test will fail
#
# self.assertIsNotNone(content.thumbnail_location)
#
# thumbnail = None
# try:
# thumbnail = content_store.find(content.thumbnail_location)
# except:
# pass
#
# self.assertIsNotNone(thumbnail)
def test_asset_delete_and_restore(self):
'''
This test will exercise the soft delete/restore functionality of the assets
'''
content_store = contentstore()
trash_store = contentstore('trashcan')
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store)
# look up original (and thumbnail) in content store, should be there after import
location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')
content = content_store.find(location, throw_on_not_found=False)
thumbnail_location = content.thumbnail_location
self.assertIsNotNone(content)
#
# cdodge: temporarily comment out assertion on thumbnails because many environments
# will not have the jpeg converter installed and this test will fail
#
# self.assertIsNotNone(thumbnail_location)
# go through the website to do the delete, since the soft-delete logic is in the view
url = reverse('remove_asset', kwargs={'org': 'edX', 'course': 'toy', 'name': '2012_Fall'})
resp = self.client.post(url, {'location': '/c4x/edX/toy/asset/sample_static.txt'})
self.assertEqual(resp.status_code, 200)
asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')
# now try to find it in store, but they should not be there any longer
content = content_store.find(asset_location, throw_on_not_found=False)
self.assertIsNone(content)
if thumbnail_location:
thumbnail = content_store.find(thumbnail_location, throw_on_not_found=False)
self.assertIsNone(thumbnail)
# now try to find it and the thumbnail in trashcan - should be in there
content = trash_store.find(asset_location, throw_on_not_found=False)
self.assertIsNotNone(content)
if thumbnail_location:
thumbnail = trash_store.find(thumbnail_location, throw_on_not_found=False)
self.assertIsNotNone(thumbnail)
# let's restore the asset
restore_asset_from_trashcan('/c4x/edX/toy/asset/sample_static.txt')
# now try to find it in courseware store, and they should be back after restore
content = content_store.find(asset_location, throw_on_not_found=False)
self.assertIsNotNone(content)
if thumbnail_location:
thumbnail = content_store.find(thumbnail_location, throw_on_not_found=False)
self.assertIsNotNone(thumbnail)
def test_empty_trashcan(self):
'''
        This test will exercise the emptying of the asset trashcan
'''
content_store = contentstore()
trash_store = contentstore('trashcan')
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store)
course_location = CourseDescriptor.id_to_location('edX/toy/6.002_Spring_2012')
location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')
content = content_store.find(location, throw_on_not_found=False)
self.assertIsNotNone(content)
# go through the website to do the delete, since the soft-delete logic is in the view
url = reverse('remove_asset', kwargs={'org': 'edX', 'course': 'toy', 'name': '2012_Fall'})
resp = self.client.post(url, {'location': '/c4x/edX/toy/asset/sample_static.txt'})
self.assertEqual(resp.status_code, 200)
# make sure there's something in the trashcan
all_assets = trash_store.get_all_content_for_course(course_location)
self.assertGreater(len(all_assets), 0)
# make sure we have some thumbnails in our trashcan
_all_thumbnails = trash_store.get_all_content_thumbnails_for_course(course_location)
#
# cdodge: temporarily comment out assertion on thumbnails because many environments
# will not have the jpeg converter installed and this test will fail
#
# self.assertGreater(len(all_thumbnails), 0)
# empty the trashcan
empty_asset_trashcan([course_location])
# make sure trashcan is empty
all_assets = trash_store.get_all_content_for_course(course_location)
self.assertEqual(len(all_assets), 0)
all_thumbnails = trash_store.get_all_content_thumbnails_for_course(course_location)
self.assertEqual(len(all_thumbnails), 0)
def test_clone_course(self):
course_data = {
'org': 'MITx',
'number': '999',
'display_name': 'Robot Super Course',
'run': '2013_Spring'
}
module_store = modulestore('direct')
draft_store = modulestore('draft')
import_from_xml(module_store, 'common/test/data/', ['toy'])
source_course_id = 'edX/toy/2012_Fall'
dest_course_id = 'MITx/999/2013_Spring'
source_location = CourseDescriptor.id_to_location(source_course_id)
dest_location = CourseDescriptor.id_to_location(dest_course_id)
# get a vertical (and components in it) to put into 'draft'
# this is to assert that draft content is also cloned over
vertical = module_store.get_instance(source_course_id, Location([
source_location.tag, source_location.org, source_location.course, 'vertical', 'vertical_test', None]), depth=1)
draft_store.convert_to_draft(vertical.location)
for child in vertical.get_children():
draft_store.convert_to_draft(child.location)
items = module_store.get_items(Location([source_location.tag, source_location.org, source_location.course, None, None, 'draft']))
self.assertGreater(len(items), 0)
resp = self.client.post(reverse('create_new_course'), course_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertEqual(data['id'], 'i4x://MITx/999/course/2013_Spring')
content_store = contentstore()
# now do the actual cloning
clone_course(module_store, content_store, source_location, dest_location)
# first assert that all draft content got cloned as well
items = module_store.get_items(Location([source_location.tag, source_location.org, source_location.course, None, None, 'draft']))
self.assertGreater(len(items), 0)
clone_items = module_store.get_items(Location([dest_location.tag, dest_location.org, dest_location.course, None, None, 'draft']))
self.assertGreater(len(clone_items), 0)
self.assertEqual(len(items), len(clone_items))
# now loop through all the units in the course and verify that the clone can render them, which
# means the objects are at least present
items = module_store.get_items(Location([source_location.tag, source_location.org, source_location.course, None, None]))
self.assertGreater(len(items), 0)
clone_items = module_store.get_items(Location([dest_location.tag, dest_location.org, dest_location.course, None, None]))
self.assertGreater(len(clone_items), 0)
for descriptor in items:
source_item = module_store.get_instance(source_course_id, descriptor.location)
if descriptor.location.category == 'course':
new_loc = descriptor.location.replace(org=dest_location.org, course=dest_location.course, name='2013_Spring')
else:
new_loc = descriptor.location.replace(org=dest_location.org, course=dest_location.course)
print "Checking {0} should now also be at {1}".format(descriptor.location.url(), new_loc.url())
lookup_item = module_store.get_item(new_loc)
# we want to assert equality between the objects, but we know the locations
# differ, so just make them equal for testing purposes
source_item.location = new_loc
if hasattr(source_item, 'data') and hasattr(lookup_item, 'data'):
self.assertEqual(source_item.data, lookup_item.data)
# also make sure that metadata was cloned over and filtered with own_metadata, i.e. inherited
# values were not explicitly set
self.assertEqual(own_metadata(source_item), own_metadata(lookup_item))
# check that the children are as expected
self.assertEqual(source_item.has_children, lookup_item.has_children)
if source_item.has_children:
expected_children = []
for child_loc_url in source_item.children:
child_loc = Location(child_loc_url)
child_loc = child_loc._replace(
tag=dest_location.tag,
org=dest_location.org,
course=dest_location.course
)
expected_children.append(child_loc.url())
self.assertEqual(expected_children, lookup_item.children)
def test_portable_link_rewrites_during_clone_course(self):
course_data = {
'org': 'MITx',
'number': '999',
'display_name': 'Robot Super Course',
'run': '2013_Spring'
}
module_store = modulestore('direct')
draft_store = modulestore('draft')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'])
source_course_id = 'edX/toy/2012_Fall'
dest_course_id = 'MITx/999/2013_Spring'
source_location = CourseDescriptor.id_to_location(source_course_id)
dest_location = CourseDescriptor.id_to_location(dest_course_id)
# let's force a non-portable link in the clone source
# as a final check, make sure that any non-portable links are rewritten during cloning
html_module_location = Location([
source_location.tag, source_location.org, source_location.course, 'html', 'nonportable'])
html_module = module_store.get_instance(source_location.course_id, html_module_location)
self.assertTrue(isinstance(html_module.data, basestring))
new_data = html_module.data.replace('/static/', '/c4x/{0}/{1}/asset/'.format(
source_location.org, source_location.course))
module_store.update_item(html_module_location, new_data)
html_module = module_store.get_instance(source_location.course_id, html_module_location)
self.assertEqual(new_data, html_module.data)
# create the destination course
resp = self.client.post(reverse('create_new_course'), course_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertEqual(data['id'], 'i4x://MITx/999/course/2013_Spring')
# do the actual cloning
clone_course(module_store, content_store, source_location, dest_location)
# make sure that any non-portable links are rewritten during cloning
html_module_location = Location([
dest_location.tag, dest_location.org, dest_location.course, 'html', 'nonportable'])
html_module = module_store.get_instance(dest_location.course_id, html_module_location)
self.assertIn('/static/foo.jpg', html_module.data)
def test_illegal_draft_crud_ops(self):
draft_store = modulestore('draft')
direct_store = modulestore('direct')
CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
location = Location('i4x://MITx/999/chapter/neuvo')
# Ensure draft mongo store does not allow us to create chapters either directly or via convert to draft
self.assertRaises(InvalidVersionError, draft_store.create_and_save_xmodule, location)
direct_store.create_and_save_xmodule(location)
self.assertRaises(InvalidVersionError, draft_store.convert_to_draft, location)
self.assertRaises(InvalidVersionError, draft_store.update_item, location, 'chapter data')
# taking advantage of update_children and other functions never checking that the ids are valid
self.assertRaises(InvalidVersionError, draft_store.update_children, location,
['i4x://MITx/999/problem/doesntexist'])
self.assertRaises(InvalidVersionError, draft_store.update_metadata, location,
{'due': datetime.datetime.now(UTC)})
self.assertRaises(InvalidVersionError, draft_store.unpublish, location)
def test_bad_contentstore_request(self):
resp = self.client.get('http://localhost:8001/c4x/CDX/123123/asset/&images_circuits_Lab7Solution2.png')
self.assertEqual(resp.status_code, 400)
def test_rewrite_nonportable_links_on_import(self):
module_store = modulestore('direct')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store)
# first check a static asset link
html_module_location = Location(['i4x', 'edX', 'toy', 'html', 'nonportable'])
html_module = module_store.get_instance('edX/toy/2012_Fall', html_module_location)
self.assertIn('/static/foo.jpg', html_module.data)
        # then check an intra-courseware link
html_module_location = Location(['i4x', 'edX', 'toy', 'html', 'nonportable_link'])
html_module = module_store.get_instance('edX/toy/2012_Fall', html_module_location)
self.assertIn('/jump_to_id/nonportable_link', html_module.data)
def test_delete_course(self):
"""
        This test imports a course, makes a draft item, and then deletes the
        course, asserting that the draft content is deleted as well.
"""
module_store = modulestore('direct')
content_store = contentstore()
draft_store = modulestore('draft')
import_from_xml(module_store, 'common/test/data/', ['toy'], static_content_store=content_store)
location = CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course').location
# get a vertical (and components in it) to put into 'draft'
vertical = module_store.get_item(Location(['i4x', 'edX', 'toy',
'vertical', 'vertical_test', None]), depth=1)
draft_store.convert_to_draft(vertical.location)
for child in vertical.get_children():
draft_store.convert_to_draft(child.location)
# delete the course
delete_course(module_store, content_store, location, commit=True)
        # assert that there are absolutely no non-draft modules in the course
# this should also include all draft items
items = module_store.get_items(Location(['i4x', 'edX', '999', 'course', None]))
self.assertEqual(len(items), 0)
# assert that all content in the asset library is also deleted
assets = content_store.get_all_content_for_course(location)
self.assertEqual(len(assets), 0)
def verify_content_existence(self, store, root_dir, location, dirname, category_name, filename_suffix=''):
filesystem = OSFS(root_dir / 'test_export')
self.assertTrue(filesystem.exists(dirname))
query_loc = Location('i4x', location.org, location.course, category_name, None)
items = store.get_items(query_loc)
for item in items:
filesystem = OSFS(root_dir / ('test_export/' + dirname))
self.assertTrue(filesystem.exists(item.location.name + filename_suffix))
@mock.patch('xmodule.course_module.requests.get')
def test_export_course(self, mock_get):
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
module_store = modulestore('direct')
draft_store = modulestore('draft')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'])
location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
# get a vertical (and components in it) to copy into an orphan sub dag
vertical = module_store.get_item(
Location(['i4x', 'edX', 'toy', 'vertical', 'vertical_test', None]),
depth=1
)
# We had a bug where orphaned draft nodes caused export to fail. This is here to cover that case.
vertical.location = mongo.draft.as_draft(vertical.location.replace(name='no_references'))
draft_store.save_xmodule(vertical)
orphan_vertical = draft_store.get_item(vertical.location)
self.assertEqual(orphan_vertical.location.name, 'no_references')
# get the original vertical (and components in it) to put into 'draft'
vertical = module_store.get_item(
Location(['i4x', 'edX', 'toy', 'vertical', 'vertical_test', None]),
depth=1)
self.assertEqual(len(orphan_vertical.children), len(vertical.children))
draft_store.convert_to_draft(vertical.location)
for child in vertical.get_children():
draft_store.convert_to_draft(child.location)
root_dir = path(mkdtemp_clean())
# now create a new/different private (draft only) vertical
vertical.location = mongo.draft.as_draft(Location(['i4x', 'edX', 'toy', 'vertical', 'a_private_vertical', None]))
draft_store.save_xmodule(vertical)
private_vertical = draft_store.get_item(vertical.location)
        vertical = None  # blank out because we destructively manipulated its location two lines above
        # add the new private vertical to the list of children
sequential = module_store.get_item(Location(['i4x', 'edX', 'toy',
'sequential', 'vertical_sequential', None]))
private_location_no_draft = private_vertical.location.replace(revision=None)
module_store.update_children(sequential.location, sequential.children +
[private_location_no_draft.url()])
        # read back the sequential, to make sure we have a pointer to the new private vertical
sequential = module_store.get_item(Location(['i4x', 'edX', 'toy',
'sequential', 'vertical_sequential', None]))
self.assertIn(private_location_no_draft.url(), sequential.children)
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_to_xml(module_store, content_store, location, root_dir, 'test_export', draft_modulestore=draft_store)
# check for static tabs
self.verify_content_existence(module_store, root_dir, location, 'tabs', 'static_tab', '.html')
# check for about content
self.verify_content_existence(module_store, root_dir, location, 'about', 'about', '.html')
        # check for grading_policy.json
filesystem = OSFS(root_dir / 'test_export/policies/2012_Fall')
self.assertTrue(filesystem.exists('grading_policy.json'))
course = module_store.get_item(location)
# compare what's on disk compared to what we have in our course
with filesystem.open('grading_policy.json', 'r') as grading_policy:
on_disk = loads(grading_policy.read())
self.assertEqual(on_disk, course.grading_policy)
# check for policy.json
self.assertTrue(filesystem.exists('policy.json'))
# compare what's on disk to what we have in the course module
with filesystem.open('policy.json', 'r') as course_policy:
on_disk = loads(course_policy.read())
self.assertIn('course/2012_Fall', on_disk)
self.assertEqual(on_disk['course/2012_Fall'], own_metadata(course))
# remove old course
delete_course(module_store, content_store, location)
# reimport
import_from_xml(module_store, root_dir, ['test_export'], draft_store=draft_store)
items = module_store.get_items(Location(['i4x', 'edX', 'toy', 'vertical', None]))
self.assertGreater(len(items), 0)
for descriptor in items:
# don't try to look at private verticals. Right now we're running
            # the service in non-draft-aware mode
if getattr(descriptor, 'is_draft', False):
print "Checking {0}....".format(descriptor.location.url())
resp = self.client.get(reverse('edit_unit', kwargs={'location': descriptor.location.url()}))
self.assertEqual(resp.status_code, 200)
# verify that we have the content in the draft store as well
vertical = draft_store.get_item(Location(['i4x', 'edX', 'toy',
'vertical', 'vertical_test', None]), depth=1)
self.assertTrue(getattr(vertical, 'is_draft', False))
        self.assertNotIn('index_in_children_list', vertical.xml_attributes)
self.assertNotIn('parent_sequential_url', vertical.xml_attributes)
for child in vertical.get_children():
self.assertTrue(getattr(child, 'is_draft', False))
self.assertNotIn('index_in_children_list', child.xml_attributes)
if hasattr(child, 'data'):
self.assertNotIn('index_in_children_list', child.data)
self.assertNotIn('parent_sequential_url', child.xml_attributes)
if hasattr(child, 'data'):
self.assertNotIn('parent_sequential_url', child.data)
# make sure that we don't have a sequential that is in draft mode
sequential = draft_store.get_item(Location(['i4x', 'edX', 'toy',
'sequential', 'vertical_sequential', None]))
self.assertFalse(getattr(sequential, 'is_draft', False))
# verify that we have the private vertical
test_private_vertical = draft_store.get_item(Location(['i4x', 'edX', 'toy',
'vertical', 'a_private_vertical', None]))
self.assertTrue(getattr(test_private_vertical, 'is_draft', False))
# make sure the textbook survived the export/import
course = module_store.get_item(Location(['i4x', 'edX', 'toy', 'course', '2012_Fall', None]))
self.assertGreater(len(course.textbooks), 0)
shutil.rmtree(root_dir)
def test_export_course_with_metadata_only_video(self):
module_store = modulestore('direct')
draft_store = modulestore('draft')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'])
location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
# create a new video module and add it as a child to a vertical
        # this re-creates a bug whereby, since the video template doesn't have
        # anything in its 'data' field, the export was blowing up
verticals = module_store.get_items(['i4x', 'edX', 'toy', 'vertical', None, None])
self.assertGreater(len(verticals), 0)
parent = verticals[0]
ItemFactory.create(parent_location=parent.location, category="video", display_name="untitled")
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_to_xml(module_store, content_store, location, root_dir, 'test_export', draft_modulestore=draft_store)
shutil.rmtree(root_dir)
def test_export_course_with_metadata_only_word_cloud(self):
"""
Similar to `test_export_course_with_metadata_only_video`.
"""
module_store = modulestore('direct')
draft_store = modulestore('draft')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['word_cloud'])
location = CourseDescriptor.id_to_location('HarvardX/ER22x/2013_Spring')
verticals = module_store.get_items(['i4x', 'HarvardX', 'ER22x', 'vertical', None, None])
self.assertGreater(len(verticals), 0)
parent = verticals[0]
ItemFactory.create(parent_location=parent.location, category="word_cloud", display_name="untitled")
root_dir = path(mkdtemp_clean())
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_to_xml(module_store, content_store, location, root_dir, 'test_export', draft_modulestore=draft_store)
shutil.rmtree(root_dir)
def test_empty_data_roundtrip(self):
"""
Test that an empty `data` field is preserved through
export/import.
"""
module_store = modulestore('direct')
draft_store = modulestore('draft')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'])
location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
verticals = module_store.get_items(['i4x', 'edX', 'toy', 'vertical', None, None])
self.assertGreater(len(verticals), 0)
parent = verticals[0]
# Create a module, and ensure that its `data` field is empty
word_cloud = ItemFactory.create(parent_location=parent.location, category="word_cloud", display_name="untitled")
del word_cloud.data
self.assertEquals(word_cloud.data, '')
# Export the course
root_dir = path(mkdtemp_clean())
export_to_xml(module_store, content_store, location, root_dir, 'test_roundtrip', draft_modulestore=draft_store)
        # Reimport and get the word cloud back
import_from_xml(module_store, root_dir)
imported_word_cloud = module_store.get_item(Location(['i4x', 'edX', 'toy', 'word_cloud', 'untitled', None]))
# It should now contain empty data
self.assertEquals(imported_word_cloud.data, '')
def test_html_export_roundtrip(self):
"""
Test that a course which has HTML that has style formatting is preserved in export/import
"""
module_store = modulestore('direct')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'])
location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
# Export the course
root_dir = path(mkdtemp_clean())
export_to_xml(module_store, content_store, location, root_dir, 'test_roundtrip')
        # Reimport the course
import_from_xml(module_store, root_dir)
# get the sample HTML with styling information
html_module = module_store.get_instance(
'edX/toy/2012_Fall',
Location(['i4x', 'edX', 'toy', 'html', 'with_styling'])
)
self.assertIn('<p style="font:italic bold 72px/30px Georgia, serif; color: red; ">', html_module.data)
# get the sample HTML with just a simple <img> tag information
html_module = module_store.get_instance(
'edX/toy/2012_Fall',
Location(['i4x', 'edX', 'toy', 'html', 'just_img'])
)
self.assertIn('<img src="/static/foo_bar.jpg" />', html_module.data)
def test_course_handouts_rewrites(self):
module_store = modulestore('direct')
# import a test course
import_from_xml(module_store, 'common/test/data/', ['toy'])
handout_location = Location(['i4x', 'edX', 'toy', 'course_info', 'handouts'])
# get module info
resp = self.client.get(reverse('module_info', kwargs={'module_location': handout_location}))
# make sure we got a successful response
self.assertEqual(resp.status_code, 200)
# check that /static/ has been converted to the full path
        # note: we know what the link should be because that's what's in the 'toy' course in the test data
self.assertContains(resp, '/c4x/edX/toy/asset/handouts_sample_handout.txt')
def test_prefetch_children(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'])
location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
wrapper = MongoCollectionFindWrapper(module_store.collection.find)
module_store.collection.find = wrapper.find
print module_store.metadata_inheritance_cache_subsystem
print module_store.request_cache
course = module_store.get_item(location, depth=2)
# make sure we haven't done too many round trips to DB
# note we say 3 round trips here for 1) the course, and 2 & 3) for the chapters and sequentials
# Because we're querying from the top of the tree, we cache information needed for inheritance,
# so we don't need to make an extra query to compute it.
self.assertEqual(wrapper.counter, 3)
# make sure we pre-fetched a known sequential which should be at depth=2
self.assertTrue(Location(['i4x', 'edX', 'toy', 'sequential',
'vertical_sequential', None]) in course.system.module_data)
# make sure we don't have a specific vertical which should be at depth=3
self.assertFalse(Location(['i4x', 'edX', 'toy', 'vertical', 'vertical_test', None])
in course.system.module_data)
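
    # A sketch (assumption, not the original definition) of the find-counting
    # wrapper used above. The real MongoCollectionFindWrapper is defined
    # elsewhere in this module; something equivalent to it would be:
    #
    #   class MongoCollectionFindWrapper(object):
    #       def __init__(self, original_find):
    #           self.original_find = original_find
    #           self.counter = 0
    #       def find(self, query, *args, **kwargs):
    #           self.counter += 1
    #           return self.original_find(query, *args, **kwargs)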
def test_export_course_with_unknown_metadata(self):
module_store = modulestore('direct')
content_store = contentstore()
import_from_xml(module_store, 'common/test/data/', ['toy'])
location = CourseDescriptor.id_to_location('edX/toy/2012_Fall')
root_dir = path(mkdtemp_clean())
course = module_store.get_item(location)
metadata = own_metadata(course)
# add a bool piece of unknown metadata so we can verify we don't throw an exception
metadata['new_metadata'] = True
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
course.save()
module_store.update_metadata(location, metadata)
print 'Exporting to tempdir = {0}'.format(root_dir)
# export out to a tempdir
export_to_xml(module_store, content_store, location, root_dir, 'test_export')
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, MODULESTORE=TEST_MODULESTORE)
class ContentStoreTest(ModuleStoreTestCase):
"""
Tests for the CMS ContentStore application.
"""
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client
can log them in.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
uname = 'testuser'
email = 'test+courses@edx.org'
password = 'foo'
        # Create the user so we can log them in.
self.user = User.objects.create_user(uname, email, password)
# Note that we do not actually need to do anything
# for registration if we directly mark them active.
self.user.is_active = True
# Staff has access to view all courses
self.user.is_staff = True
self.user.save()
self.client = Client()
self.client.login(username=uname, password=password)
self.course_data = {
'org': 'MITx',
'number': '999',
'display_name': 'Robot Super Course',
'run': '2013_Spring'
}
def tearDown(self):
mongo = MongoClient()
mongo.drop_database(TEST_DATA_CONTENTSTORE['OPTIONS']['db'])
_CONTENTSTORE.clear()
def test_create_course(self):
"""Test new course creation - happy path"""
self.assert_created_course()
def assert_created_course(self, number_suffix=None):
"""
Checks that the course was created properly.
"""
test_course_data = {}
test_course_data.update(self.course_data)
if number_suffix:
test_course_data['number'] = '{0}_{1}'.format(test_course_data['number'], number_suffix)
resp = self.client.post(reverse('create_new_course'), test_course_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertNotIn('ErrMsg', data)
self.assertEqual(data['id'], 'i4x://MITx/{0}/course/2013_Spring'.format(test_course_data['number']))
# Verify that the creator is now registered in the course.
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self._get_course_id(test_course_data)))
return test_course_data
def test_create_course_check_forum_seeding(self):
"""Test new course creation and verify forum seeding """
test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
self.assertTrue(are_permissions_roles_seeded(self._get_course_id(test_course_data)))
def _get_course_id(self, test_course_data):
"""Returns the course ID (org/number/run)."""
return "{org}/{number}/{run}".format(**test_course_data)
def test_create_course_duplicate_course(self):
"""Test new course creation - error path"""
self.client.post(reverse('create_new_course'), self.course_data)
self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')
def assert_course_creation_failed(self, error_message):
"""
Checks that the course did not get created
"""
course_id = self._get_course_id(self.course_data)
initially_enrolled = CourseEnrollment.is_enrolled(self.user, course_id)
resp = self.client.post(reverse('create_new_course'), self.course_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertEqual(data['ErrMsg'], error_message)
# One test case involves trying to create the same course twice. Hence for that course,
# the user will be enrolled. In the other cases, initially_enrolled will be False.
self.assertEqual(initially_enrolled, CourseEnrollment.is_enrolled(self.user, course_id))
def test_create_course_duplicate_number(self):
"""Test new course creation - error path"""
self.client.post(reverse('create_new_course'), self.course_data)
self.course_data['display_name'] = 'Robot Super Course Two'
self.course_data['run'] = '2013_Summer'
self.assert_course_creation_failed('There is already a course defined with the same organization and course number. Please change at least one field to be unique.')
def test_create_course_with_bad_organization(self):
"""Test new course creation - error path for bad organization name"""
self.course_data['org'] = 'University of California, Berkeley'
self.assert_course_creation_failed(
"Unable to create course 'Robot Super Course'.\n\nInvalid characters in 'University of California, Berkeley'.")
def test_create_course_with_course_creation_disabled_staff(self):
"""Test new course creation -- course creation disabled, but staff access."""
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {'DISABLE_COURSE_CREATION': True}):
self.assert_created_course()
def test_create_course_with_course_creation_disabled_not_staff(self):
"""Test new course creation -- error path for course creation disabled, not staff access."""
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {'DISABLE_COURSE_CREATION': True}):
self.user.is_staff = False
self.user.save()
self.assert_course_permission_denied()
def test_create_course_no_course_creators_staff(self):
"""Test new course creation -- course creation group enabled, staff, group is empty."""
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_CREATOR_GROUP': True}):
self.assert_created_course()
def test_create_course_no_course_creators_not_staff(self):
"""Test new course creation -- error path for course creator group enabled, not staff, group is empty."""
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.user.is_staff = False
self.user.save()
self.assert_course_permission_denied()
def test_create_course_with_course_creator(self):
"""Test new course creation -- use course creator group"""
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {"ENABLE_CREATOR_GROUP": True}):
add_user_to_creator_group(self.user, self.user)
self.assert_created_course()
def assert_course_permission_denied(self):
"""
Checks that the course did not get created due to a PermissionError.
"""
resp = self.client.post(reverse('create_new_course'), self.course_data)
self.assertEqual(resp.status_code, 403)
def test_course_index_view_with_no_courses(self):
"""Test viewing the index page with no courses"""
        # No course is created; the index page should still render
resp = self.client.get(reverse('index'))
self.assertContains(
resp,
'<h1 class="page-header">My Courses</h1>',
status_code=200,
html=True
)
def test_course_factory(self):
"""Test that the course factory works correctly."""
course = CourseFactory.create()
self.assertIsInstance(course, CourseDescriptor)
def test_item_factory(self):
"""Test that the item factory works correctly."""
course = CourseFactory.create()
item = ItemFactory.create(parent_location=course.location)
self.assertIsInstance(item, SequenceDescriptor)
def test_course_index_view_with_course(self):
"""Test viewing the index page with an existing course"""
CourseFactory.create(display_name='Robot Super Educational Course')
resp = self.client.get(reverse('index'))
self.assertContains(
resp,
'<h3 class="course-title">Robot Super Educational Course</h3>',
status_code=200,
html=True
)
def test_course_overview_view_with_course(self):
"""Test viewing the course overview page with an existing course"""
CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
data = {
'org': 'MITx',
'course': '999',
'name': Location.clean('Robot Super Course'),
}
resp = self.client.get(reverse('course_index', kwargs=data))
self.assertContains(
resp,
'<article class="courseware-overview" data-course-id="i4x://MITx/999/course/Robot_Super_Course">',
status_code=200,
html=True
)
def test_create_item(self):
"""Test cloning an item. E.g. creating a new section"""
CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
section_data = {
'parent_location': 'i4x://MITx/999/course/Robot_Super_Course',
'category': 'chapter',
'display_name': 'Section One',
}
resp = self.client.post(reverse('create_item'), section_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertRegexpMatches(
data['id'],
r"^i4x://MITx/999/chapter/([0-9]|[a-f]){32}$"
)
def test_capa_module(self):
"""Test that a problem treats markdown specially."""
CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
problem_data = {
'parent_location': 'i4x://MITx/999/course/Robot_Super_Course',
'category': 'problem'
}
resp = self.client.post(reverse('create_item'), problem_data)
self.assertEqual(resp.status_code, 200)
payload = parse_json(resp)
problem_loc = Location(payload['id'])
problem = get_modulestore(problem_loc).get_item(problem_loc)
# should be a CapaDescriptor
self.assertIsInstance(problem, CapaDescriptor, "New problem is not a CapaDescriptor")
context = problem.get_context()
self.assertIn('markdown', context, "markdown is missing from context")
self.assertNotIn('markdown', problem.editable_metadata_fields, "Markdown slipped into the editable metadata fields")
def test_cms_imported_course_walkthrough(self):
"""
        Import and walk through some common URL endpoints. This only verifies
        that nothing returns a server error, so it is not a deep test
"""
import_from_xml(modulestore('direct'), 'common/test/data/', ['simple'])
loc = Location(['i4x', 'edX', 'simple', 'course', '2012_Fall', None])
resp = self.client.get(reverse('course_index',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, 'Chapter 2')
# go to various pages
# import page
resp = self.client.get(reverse('import_course',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
# export page
resp = self.client.get(reverse('export_course',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
# manage users
resp = self.client.get(reverse('manage_users',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
# course info
resp = self.client.get(reverse('course_info',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
# settings_details
resp = self.client.get(reverse('settings_details',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
        # settings_grading
resp = self.client.get(reverse('settings_grading',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
# static_pages
resp = self.client.get(reverse('static_pages',
kwargs={'org': loc.org,
'course': loc.course,
'coursename': loc.name}))
self.assertEqual(resp.status_code, 200)
        # asset_index
resp = self.client.get(reverse('asset_index',
kwargs={'org': loc.org,
'course': loc.course,
'name': loc.name}))
self.assertEqual(resp.status_code, 200)
# go look at a subsection page
subsection_location = loc.replace(category='sequential', name='test_sequence')
resp = self.client.get(reverse('edit_subsection',
kwargs={'location': subsection_location.url()}))
self.assertEqual(resp.status_code, 200)
# go look at the Edit page
unit_location = loc.replace(category='vertical', name='test_vertical')
resp = self.client.get(reverse('edit_unit',
kwargs={'location': unit_location.url()}))
self.assertEqual(resp.status_code, 200)
# delete a component
del_loc = loc.replace(category='html', name='test_html')
resp = self.client.post(reverse('delete_item'),
json.dumps({'id': del_loc.url()}), "application/json")
self.assertEqual(resp.status_code, 204)
# delete a unit
del_loc = loc.replace(category='vertical', name='test_vertical')
resp = self.client.post(reverse('delete_item'),
json.dumps({'id': del_loc.url()}), "application/json")
self.assertEqual(resp.status_code, 204)
        # delete a subsection
del_loc = loc.replace(category='sequential', name='test_sequence')
resp = self.client.post(reverse('delete_item'),
json.dumps({'id': del_loc.url()}), "application/json")
self.assertEqual(resp.status_code, 204)
# delete a chapter
del_loc = loc.replace(category='chapter', name='chapter_2')
resp = self.client.post(reverse('delete_item'),
json.dumps({'id': del_loc.url()}), "application/json")
self.assertEqual(resp.status_code, 204)
def test_import_into_new_course_id(self):
module_store = modulestore('direct')
target_location = Location(['i4x', 'MITx', '999', 'course', '2013_Spring'])
course_data = {
'org': target_location.org,
'number': target_location.course,
'display_name': 'Robot Super Course',
'run': target_location.name
}
target_course_id = '{0}/{1}/{2}'.format(target_location.org, target_location.course, target_location.name)
resp = self.client.post(reverse('create_new_course'), course_data)
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertEqual(data['id'], target_location.url())
import_from_xml(module_store, 'common/test/data/', ['toy'], target_location_namespace=target_location)
modules = module_store.get_items(Location([
target_location.tag, target_location.org, target_location.course, None, None, None]))
# we should have a number of modules in there
# we can't specify an exact number since it'll always be changing
self.assertGreater(len(modules), 10)
#
# test various re-namespacing elements
#
# first check PDF textbooks, to make sure the url paths got updated
course_module = module_store.get_instance(target_course_id, target_location)
self.assertEquals(len(course_module.pdf_textbooks), 1)
self.assertEquals(len(course_module.pdf_textbooks[0]["chapters"]), 2)
self.assertEquals(course_module.pdf_textbooks[0]["chapters"][0]["url"], '/c4x/MITx/999/asset/Chapter1.pdf')
self.assertEquals(course_module.pdf_textbooks[0]["chapters"][1]["url"], '/c4x/MITx/999/asset/Chapter2.pdf')
# check that URL slug got updated to new course slug
self.assertEquals(course_module.wiki_slug, '999')
def test_import_metadata_with_attempts_empty_string(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['simple'])
did_load_item = False
try:
module_store.get_item(Location(['i4x', 'edX', 'simple', 'problem', 'ps01-simple', None]))
did_load_item = True
except ItemNotFoundError:
pass
# make sure we found the item (e.g. it didn't error while loading)
self.assertTrue(did_load_item)
def test_forum_id_generation(self):
module_store = modulestore('direct')
CourseFactory.create(org='edX', course='999', display_name='Robot Super Course')
new_component_location = Location('i4x', 'edX', '999', 'discussion', 'new_component')
        # create a new discussion module
module_store.create_and_save_xmodule(new_component_location)
new_discussion_item = module_store.get_item(new_component_location)
self.assertNotEquals(new_discussion_item.discussion_id, '$$GUID$$')
def test_update_modulestore_signal_did_fire(self):
module_store = modulestore('direct')
CourseFactory.create(org='edX', course='999', display_name='Robot Super Course')
try:
module_store.modulestore_update_signal = Signal(providing_args=['modulestore', 'course_id', 'location'])
self.got_signal = False
            def _signal_handler(modulestore=None, course_id=None, location=None, **kwargs):
                self.got_signal = True
            module_store.modulestore_update_signal.connect(_signal_handler)
new_component_location = Location('i4x', 'edX', '999', 'html', 'new_component')
            # create a new module
module_store.create_and_save_xmodule(new_component_location)
finally:
module_store.modulestore_update_signal = None
self.assertTrue(self.got_signal)
def test_metadata_inheritance(self):
module_store = modulestore('direct')
import_from_xml(module_store, 'common/test/data/', ['toy'])
course = module_store.get_item(Location(['i4x', 'edX', 'toy', 'course', '2012_Fall', None]))
verticals = module_store.get_items(['i4x', 'edX', 'toy', 'vertical', None, None])
        # let's assert metadata inheritance on the existing verticals
for vertical in verticals:
self.assertEqual(course.lms.xqa_key, vertical.lms.xqa_key)
self.assertEqual(course.start, vertical.lms.start)
self.assertGreater(len(verticals), 0)
new_component_location = Location('i4x', 'edX', 'toy', 'html', 'new_component')
        # create a new module and add it as a child to a vertical
module_store.create_and_save_xmodule(new_component_location)
parent = verticals[0]
module_store.update_children(parent.location, parent.children + [new_component_location.url()])
# flush the cache
module_store.refresh_cached_metadata_inheritance_tree(new_component_location)
new_module = module_store.get_item(new_component_location)
# check for grace period definition which should be defined at the course level
self.assertEqual(parent.lms.graceperiod, new_module.lms.graceperiod)
self.assertEqual(parent.lms.start, new_module.lms.start)
self.assertEqual(course.start, new_module.lms.start)
self.assertEqual(course.lms.xqa_key, new_module.lms.xqa_key)
#
# now let's define an override at the leaf node level
#
new_module.lms.graceperiod = timedelta(1)
new_module.save()
module_store.update_metadata(new_module.location, own_metadata(new_module))
# flush the cache and refetch
module_store.refresh_cached_metadata_inheritance_tree(new_component_location)
new_module = module_store.get_item(new_component_location)
self.assertEqual(timedelta(1), new_module.lms.graceperiod)
def test_default_metadata_inheritance(self):
course = CourseFactory.create()
vertical = ItemFactory.create(parent_location=course.location)
course.children.append(vertical)
# in memory
self.assertIsNotNone(course.start)
self.assertEqual(course.start, vertical.lms.start)
self.assertEqual(course.textbooks, [])
self.assertIn('GRADER', course.grading_policy)
self.assertIn('GRADE_CUTOFFS', course.grading_policy)
self.assertGreaterEqual(len(course.checklists), 4)
# by fetching
module_store = modulestore('direct')
fetched_course = module_store.get_item(course.location)
fetched_item = module_store.get_item(vertical.location)
self.assertIsNotNone(fetched_course.start)
self.assertEqual(course.start, fetched_course.start)
self.assertEqual(fetched_course.start, fetched_item.lms.start)
self.assertEqual(course.textbooks, fetched_course.textbooks)
# is this test too strict? i.e., it requires the dicts to be ==
self.assertEqual(course.checklists, fetched_course.checklists)
def test_image_import(self):
"""Test backwards compatibilty of course image."""
module_store = modulestore('direct')
content_store = contentstore()
# Use conditional_and_poll, as it's got an image already
import_from_xml(
module_store,
'common/test/data/',
['conditional_and_poll'],
static_content_store=content_store
)
course = module_store.get_courses()[0]
# Make sure the course image is set to the right place
self.assertEqual(course.course_image, 'images_course_image.jpg')
# Ensure that the imported course image is present -- this shouldn't raise an exception
location = course.location._replace(tag='c4x', category='asset', name=course.course_image)
content_store.find(location)
@override_settings(MODULESTORE=TEST_MODULESTORE)
class MetadataSaveTestCase(ModuleStoreTestCase):
"""Test that metadata is correctly cached and decached."""
def setUp(self):
CourseFactory.create(
org='edX', course='999', display_name='Robot Super Course')
course_location = Location(
['i4x', 'edX', '999', 'course', 'Robot_Super_Course', None])
video_sample_xml = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/file.mp4"/>
<track src="http://www.example.com/track"/>
</video>
'''
self.video_descriptor = ItemFactory.create(
parent_location=course_location, category='video',
data={'data': video_sample_xml}
)
def test_metadata_not_persistence(self):
"""
Test that descriptors which set metadata fields in their
constructor are correctly deleted.
"""
self.assertIn('html5_sources', own_metadata(self.video_descriptor))
attrs_to_strip = {
'show_captions',
'youtube_id_1_0',
'youtube_id_0_75',
'youtube_id_1_25',
'youtube_id_1_5',
'start_time',
'end_time',
'source',
'html5_sources',
'track'
}
fields = self.video_descriptor.fields
location = self.video_descriptor.location
for field in fields:
if field.name in attrs_to_strip:
field.delete_from(self.video_descriptor)
self.assertNotIn('html5_sources', own_metadata(self.video_descriptor))
get_modulestore(location).update_metadata(
location,
own_metadata(self.video_descriptor)
)
module = get_modulestore(location).get_item(location)
self.assertNotIn('html5_sources', own_metadata(module))
def test_metadata_persistence(self):
# TODO: create the same test as `test_metadata_not_persistence`,
# but check persistence for some other module.
pass
|
agpl-3.0
| -9,173,351,805,722,796,000 | 43.140687 | 203 | 0.626826 | false |
ifiddes/pycbio
|
pycbio/sys/dbDict.py
|
1
|
3316
|
# Copyright 2006-2012 Mark Diekhans
# Copyright sebsauvage.net
"""
Code from:
http://sebsauvage.net/python/snyppets/index.html#dbdict
A dictionnary-like object for LARGE datasets
Python dictionnaries are very efficient objects for fast data access. But when data is too large to fit in memory, you're in trouble.
Here's a dictionnary-like object which uses a SQLite database and behaves like a dictionnary object:
- You can work on datasets which to not fit in memory. Size is not limited
by memory, but by disk. Can hold up to several tera-bytes of data (thanks
to SQLite).
- Behaves like a dictionnary (can be used in place of a dictionnary object
in most cases.)
- Data persists between program runs.
- ACID (data integrity): Storage file integrity is assured. No half-written
data. It's really hard to mess up data.
- Efficient: You do not have to re-write a whole 500 Gb file when changing
only one item. Only the relevant parts of the file are changed.
- You can mix several key types (you can do d["foo"]=bar and d[7]=5468)
(You can't to this with a standard dictionnary.)
- You can share this dictionnary with other languages and systems (SQLite
databases are portable, and the SQlite library is available on a wide
range of systems/languages, from mainframes to PDA/iPhone, from Python to
Java/C++/C#/perl...)
Modified by markd:
- renamed dbdict -> DbDict
- include key name in KeyError exceptions
- specify name of file, not the dictName in imported code that didn't allow
specifying the directory.
- added table option to allow storing multiple dictionaries in table
- add truncate constructor option
"""
import os, UserDict
from sqlite3 import dbapi2 as sqlite
class DbDict(UserDict.DictMixin):
    ''' DbDict, a dictionary-like object for large datasets (several
    terabytes) backed by an SQLite database'''
def __init__(self, db_filename, table="data", truncate=False):
self.db_filename = db_filename
self.table = table
self.con = sqlite.connect(self.db_filename)
if truncate:
self.con.execute("drop table if exists " + self.table)
self.con.execute("create table if not exists " + self.table + " (key PRIMARY KEY,value)")
def __getitem__(self, key):
row = self.con.execute("select value from " + self.table + " where key=?",(key,)).fetchone()
if not row:
raise KeyError(str(key))
return row[0]
def __setitem__(self, key, item):
if self.con.execute("select key from " + self.table + " where key=?",(key,)).fetchone():
self.con.execute("update " + self.table + " set value=? where key=?",(item,key))
else:
self.con.execute("insert into " + self.table + " (key,value) values (?,?)",(key, item))
self.con.commit()
def __delitem__(self, key):
if self.con.execute("select key from " + self.table + " where key=?",(key,)).fetchone():
self.con.execute("delete from " + self.table + " where key=?",(key,))
self.con.commit()
else:
raise KeyError(str(key))
def keys(self):
return [row[0] for row in self.con.execute("select key from " + self.table).fetchall()]
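
# Minimal usage sketch (not part of the original module). The database path,
# table name, and values below are assumptions chosen for illustration only.
if __name__ == "__main__":
    d = DbDict("/tmp/dbdict-demo.sqlite", table="demo", truncate=True)
    d["foo"] = "bar"    # string key
    d[7] = 5468         # integer key -- mixed key types are allowed
    print d["foo"], d[7]
    del d[7]
    print d.keys()      # only 'foo' remains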
|
bsd-3-clause
| -6,402,318,023,815,042,000 | 43.810811 | 133 | 0.658625 | false |
tdsimao/tt
|
algoritmos/algGenetico-backup.py
|
1
|
12029
|
# -*- coding: utf-8 -*-
from celulas.models import Celula, Encontro
from random import choice, random, sample
from time import time
from src.variaveisGlobais import pesos
class CelulaAlg(Celula):
"""
classe criada para acumular lista de encontros sem salva-los
e editar o metodo save() permitindo salvar os encontros
"""
encontros = {}
def __init__(self, *args, **kwargs):
        super(CelulaAlg, self).__init__(*args, **kwargs)
self.encontros = {}
def save(self, *args, **kwargs):
newCel = Celula()
newCel.grade = self.grade
newCel.fitness = self.fitness
if getattr(self, 'commit', True):
            # if saving is required
newCel.save()
            # also save all the meetings
for slot in self.encontros.values():
for encontro in slot:
encontro.celula = newCel
encontro.save()
return newCel
def avaliacao(celula,grade,turmas,aulas,totalAulasProfessor,slots,tipoRestricaoProfessorNoSlot):
    #=======================================================================
    # Check criteria
    #   Number of teacher clashes
    #   Number of lessons at a time infeasible for some teacher
    #   Number of lessons at a time undesirable for some teacher
    #   TODO Number of times blocks of the same subject occurred
    #   TODO Number of lessons taught in violation of the daily limit for a single subject
    #   Number of gaps (janelas) in the teachers' timetables
    #   Number of excess days for the teachers (totalDiasProfessor - totalAulasProfessor/grade.auladia)
    #   TODO Number of isolated lessons in the teachers' timetables
    #
    #
    # Generates the fitness of the solution.
    #=======================================================================
    # Penalties
choqueProfessores = 0
professorInviavel = 0
professorIndesejavel = 0
janelas = 0
professoresComExcessoDias = 0
professores = totalAulasProfessor.keys()
totalDiasProfessor = {}
for dia in range(grade.dias):
ultimaAula = {}
professoresNoDia = []
for horario in range(grade.auladia):
slot = slots[dia*grade.dias+horario]
professoresNoSlot = []
for encontro in celula.encontros[slot]:
professor = encontro.aula.professor
#
                # CHECK WHETHER THE TEACHER TEACHES MORE THAN ONE CLASS IN THE SLOT
#
if professor in professoresNoSlot:
choqueProfessores = choqueProfessores + 1
else:
professoresNoSlot.append(professor)
#
                # CHECK THE TEACHER'S AVAILABILITY IN THIS TIME SLOT
#
if tipoRestricaoProfessorNoSlot[professor][slot] == 0:
professorInviavel = professorInviavel + 1
elif tipoRestricaoProfessorNoSlot[professor][slot] == 1:
professorIndesejavel = professorIndesejavel + 1
if ultimaAula.get(professor) == 'Janela':
janelas = janelas + 1
ultimaAula[professor] = encontro.aula
for professor in ultimaAula.keys():
if not professor in professoresNoSlot:
ultimaAula[professor] = 'Janela'
professoresNoDia = list(set(professoresNoDia + professoresNoSlot))
for professor in professoresNoDia:
totalDiasProfessor[professor] = totalDiasProfessor.get(professor,0) + 1
for professor,quantidadeAulas in totalAulasProfessor.items():
professoresComExcessoDias = professoresComExcessoDias + (totalDiasProfessor[professor] - quantidadeAulas/grade.auladia)
totalPenalidades = pesos['choqueProfessores'] * choqueProfessores + pesos['professorInviavel'] * professorInviavel + pesos['professorIndesejavel'] * professorIndesejavel + pesos['janelas'] * janelas + pesos['professoresComExcessoDias'] * professoresComExcessoDias
celula.fitness = 1./(1.+totalPenalidades)
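    # Illustrative check (the weights here are made up, not the real values
    # from src.variaveisGlobais): with one teacher clash weighted 10 and two
    # undesirable slots weighted 2 each, totalPenalidades = 10 + 2*2 = 14 and
    # fitness = 1.0/(1.0 + 14) ~= 0.067; a penalty-free timetable scores 1.0.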
def criarCelula(grade,turmas,aulas,totalAulasProfessor,slots,tipoRestricaoProfessorNoSlot,opcoes):
    # creates a cell (celula) representing one solution for the timetable (grade);
    # it holds a set of meetings (encontros)
    # meetings are assigned at random
celula = CelulaAlg(grade)
celula.grade = grade
for slot in slots:
celula.encontros[slot] = []
    # create meetings by assigning an empty slot of the class group (turma) to each lesson (aula)
aulasNaoAtribuidas = {}
for turma in turmas:
aulasNaoAtribuidas[turma] = []
for aula in aulas[turma]:
for i in range(aula.quantidade):
aulasNaoAtribuidas[turma].append(aula)
    for slot in slots:
        # TODO (from the original note): instead of checking whether the
        # lessons run out, check that the timetable is valid
        for turma in turmas:
            if not aulasNaoAtribuidas[turma]:
                continue  # no lessons left for this class group
            aula = choice(aulasNaoAtribuidas[turma])
            aulasNaoAtribuidas[turma].remove(aula)
            encontro = Encontro()
            encontro.slot = slot
            encontro.aula = aula
            celula.encontros[slot].append(encontro)
return celula
def fimDoAlgoritmo(populacao,opcoes):
if populacao['geracao'] == opcoes['limiteGeracoes']:
        print 'Generation limit reached'
return True
return False
def selecao(populacao,opcoes):
pais = []
quantidadePais = (opcoes['populacao'] / 100) * opcoes['quantidadePais']
print '-=-=-=-=-=-=- ',quantidadePais
if opcoes['metodoSelecao'] == 'roleta':
fitnessTotal = 0
for celula in populacao['individuos']:
fitnessTotal = fitnessTotal + celula.fitness
        for i in range(quantidadePais):
somaFitness = 0
t = random() * fitnessTotal
for celula in populacao['individuos']:
if somaFitness > t and celula not in pais:
pais.append(celula)
break
somaFitness = somaFitness + celula.fitness
elif opcoes['metodoSelecao'] == 'torneio':
concorrentes = sample(populacao['individuos'],quantidadePais*2)
while concorrentes != []:
concorrente1 = choice(concorrentes)
concorrentes.remove(concorrente1)
concorrente2 = choice(concorrentes)
concorrentes.remove(concorrente2)
if concorrente1.fitness>concorrente2.fitness:
pais.append(concorrente1)
else:
pais.append(concorrente2)
elif opcoes['metodoSelecao'] == 'aleatorio':
        pais = sample(populacao['individuos'], quantidadePais)
return pais
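
# Note on the 'roleta' branch above (illustrative numbers, not from the
# source): each pick draws a fresh threshold t in [0, fitnessTotal) and walks
# the population accumulating fitness -- standard roulette-wheel selection.
# With fitnesses [0.5, 0.3, 0.2], fitnessTotal = 1.0, so the first cell is
# chosen with probability 0.5 on any single draw.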
def crossover(pais,opcoes):
    # TODO: crossover is not implemented yet; it currently produces no offspring
    filhos = []
    return filhos
def mutacao(populacao,opcoes):
probabilidadeMutacao = float(opcoes['probabilidadeMutacao'])/100
for celula in populacao['individuos']:
if random() < probabilidadeMutacao:
"""
Sorteia 2 slots
sorteia um encontro do primeiro slot
a seguir seleciona o encontro do segundo slot cuja aula é compatível com a turma do encontro selecionado
"""
slot1,slot2 = sample(celula.encontros.values(),2)
encontroA = choice(slot1)
for encontro in slot2:
if encontro.aula.turma == encontroA.aula.turma:
encontroB = encontro
            # swap the lessons of the selected meetings
aulaAux = encontroA.aula
encontroA.aula = encontroB.aula
encontroB.aula = aulaAux
def exportarEstadoAlgoritmo(statusList,grade):
from csv import DictWriter as csvDictWriter
f = open(str(grade)+str(grade.id)+'.csv','wb')
writer = csvDictWriter(f,statusList[0].keys())
writer.writeheader()
writer.writerows(statusList)
f.close()
def getStatus(populacao,opcoes):
status = {}
status['geracao'] = populacao['geracao']
status['melhorFitness'] = populacao['melhorFitness']
status['fitnessMedio'] = populacao['fitnessTotal']/opcoes['populacao']
return(status)
def criarHorario(grade, opcoes):
inicio = time()
    # LOAD INFORMATION FROM THE DATABASE INTO MEMORY
turmas = grade.turma_set.all()
aulas = {}
totalAulasProfessor = {}
for turma in turmas:
aulas[turma] = turma.aula_set.all()
for aula in turma.aula_set.all():
totalAulasProfessor[aula.professor] = totalAulasProfessor.get(aula.professor,0) + aula.quantidade
#for p,quantidade in totalAulasProfessor.items():
# print p,quantidade
slots = grade.slot_set.order_by('dia','horario').all()
#import pprint
#pprint.pprint(locals())
tipoRestricaoProfessorNoSlot = {}
for professor in totalAulasProfessor.keys():
tipoRestricaoProfessorNoSlot[professor] = {}
for slot in slots:
tipoRestricaoProfessorNoSlot[professor][slot] = professor.restricao_set.get(slot=slot).tipo
#TODO
    # CHECK WHETHER THE TIMETABLE IS VALID
if not(grade.is_valid()):
        print 'Invalid timetable'
return
    print 'Time to load data ',time()-inicio
inicio = time()
    # INITIAL POPULATION
populacao = {}
populacao['individuos'] = []
populacao['fitnessTotal'] = 0
populacao['melhorFitness'] = 0
for i in range(opcoes['populacao']):
celula = criarCelula(grade,turmas,aulas,totalAulasProfessor,slots,tipoRestricaoProfessorNoSlot,opcoes)
populacao['individuos'].append(celula)
populacao['geracao'] = 0
    print 'Time to create the initial population of size ',opcoes['populacao'],': ',time()-inicio
statusList = []
    # REPEAT WHILE THE STOPPING CRITERIA ARE NOT SATISFIED
while not(fimDoAlgoritmo(populacao,opcoes)):
        print 'Generation ', populacao['geracao'],
for celula in populacao['individuos']:
avaliacao(celula,grade,turmas,aulas,totalAulasProfessor,slots,tipoRestricaoProfessorNoSlot)
populacao['fitnessTotal'] += celula.fitness
if celula.fitness > populacao['melhorFitness']:
populacao['melhorFitness'] = celula.fitness
populacao['melhorIndividuo'] = celula
pais = selecao(populacao, opcoes)
mutacao(populacao, opcoes)
filhos = crossover(pais, opcoes)
statusList.append(getStatus(populacao,opcoes))
populacao['geracao'] += 1
exportarEstadoAlgoritmo(statusList,grade)
    print 'Time to evaluate a population of size',opcoes['populacao'],': ',time()-inicio
inicio = time()
    # FIND THE BEST CELL
melhorCelula = populacao['individuos'][0]
melhoresCelulas = []
melhoresCelulas.append(populacao['individuos'][0])
for celula in populacao['individuos']:
if celula.fitness > melhoresCelulas[0].fitness:
melhoresCelulas = []
melhoresCelulas.append(celula)
elif celula.fitness == melhoresCelulas[0].fitness:
melhoresCelulas.append(celula)
if celula.fitness > melhorCelula.fitness:
melhorCelula = celula
    print 'Time to select the best cell in a population of size ',opcoes['populacao'],': ',time()-inicio
    print 'best cells: ',melhoresCelulas
    # SAVE ONLY THE BEST CELL
    # debug output of the options used for this run
    import pprint
    pprint.pprint(opcoes)
    print str(opcoes)
melhorCelula.save()
|
gpl-2.0
| 9,191,960,471,433,302,000 | 32.157459 | 267 | 0.5994 | false |
specify/specify7
|
specifyweb/context/schema_localization.py
|
1
|
2364
|
from collections import defaultdict
import json
from django.conf import settings
from django.db import connection
from specifyweb.specify.models import (
Splocalecontainer as Container,
Splocalecontaineritem as Item,
Splocaleitemstr as SpString)
schema_localization_cache = {}
def get_schema_localization(collection, schematype):
disc = collection.discipline
if (disc, schematype) in schema_localization_cache:
return schema_localization_cache[(disc, schematype)]
lang = settings.SCHEMA_LANGUAGE
cursor = connection.cursor()
cursor.execute("""
select name, format, ishidden!=0, isuiformatter, picklistname, type, aggregator, defaultui, n.text, d.text
from splocalecontainer
left outer join splocaleitemstr n on n.splocalecontainernameid = splocalecontainerid and n.language = %s
left outer join splocaleitemstr d on d.splocalecontainerdescid = splocalecontainerid and d.language = %s
where schematype = %s and disciplineid = %s;
""", [lang, lang, schematype, disc.id])
cfields = ('format', 'ishidden', 'isuiformatter', 'picklistname', 'type', 'aggregator', 'defaultui', 'name', 'desc')
containers = {
row[0]: dict(items={}, **{field: row[i+1] for i, field in enumerate(cfields)})
for row in cursor.fetchall()
}
cursor.execute("""
select container.name, item.name,
item.format, item.ishidden!=0, item.isuiformatter, item.picklistname,
item.type, item.isrequired, item.weblinkname, n.text, d.text
from splocalecontainer container
inner join splocalecontaineritem item on item.splocalecontainerid = container.splocalecontainerid
left outer join splocaleitemstr n on n.splocalecontaineritemnameid = item.splocalecontaineritemid and n.language = %s
left outer join splocaleitemstr d on d.splocalecontaineritemdescid = item.splocalecontaineritemid and d.language = %s
where schematype = %s and disciplineid = %s;
""", [lang, lang, schematype, disc.id])
ifields = ('format', 'ishidden', 'isuiformatter', 'picklistname', 'type', 'isrequired', 'weblinkname', 'name', 'desc')
for row in cursor.fetchall():
containers[row[0]]['items'][row[1].lower()] = {field: row[i+2] for i, field in enumerate(ifields)}
sl = schema_localization_cache[(disc, schematype)] = json.dumps(containers)
return sl
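
# Illustrative usage (the collection object and container key are assumptions,
# not guaranteed by this module): the cached value is a JSON string, so a
# caller that needs the structure must decode it first, e.g.:
#
#   sl_json = get_schema_localization(collection, schematype=0)
#   containers = json.loads(sl_json)
#   label = containers['collectionobject']['name']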
|
gpl-2.0
| -2,763,643,266,562,375,700 | 41.981818 | 122 | 0.713198 | false |
Edraak/edraak-platform
|
cms/djangoapps/contentstore/storage.py
|
1
|
1226
|
"""
Storage backend for course import and export.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.files.storage import get_storage_class
from storages.backends.s3boto import S3BotoStorage
from storages.utils import setting
class ImportExportS3Storage(S3BotoStorage): # pylint: disable=abstract-method
"""
S3 backend for course import and export OLX files.
"""
def __init__(self):
bucket = setting('COURSE_IMPORT_EXPORT_BUCKET', settings.AWS_STORAGE_BUCKET_NAME)
super(ImportExportS3Storage, self).__init__(bucket=bucket, custom_domain=None, querystring_auth=True)
def get_course_import_export_storage():
"""
    Configures and returns a Django Storage instance that can be used
    to store course export/import tgz files.
This class is Edraak-specific to work with GCloud and any other provider.
"""
config = settings.COURSE_IMPORT_EXPORT_BACKEND
if config:
storage_class = get_storage_class(config['class'])
return storage_class(**config['options'])
# Backward compatibility if `COURSE_IMPORT_EXPORT_BACKEND` is not configured.
return get_storage_class(settings.COURSE_IMPORT_EXPORT_STORAGE)()
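
# A hypothetical settings snippet this helper expects. The key layout follows
# the code above; the backend class path and option values are illustrative:
#
# COURSE_IMPORT_EXPORT_BACKEND = {
#     'class': 'storages.backends.gcloud.GoogleCloudStorage',
#     'options': {
#         'bucket_name': 'course-import-export',
#     },
# }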
|
agpl-3.0
| 1,933,521,590,470,509,600 | 34.028571 | 109 | 0.729201 | false |
sedouard/scaleio-flocker-driver
|
scaleio_flocker_driver/__init__.py
|
1
|
1317
|
# Copyright 2015 EMC Corporation
from flocker.node import BackendDescription, DeployerType
from .emc_sio import (
scaleio_from_configuration, DEFAULT_STORAGE_POOL,
DEFAULT_PROTECTION_DOMAIN, DEFAULT_PORT, DEBUG
)
def api_factory(cluster_id, **kwargs):
protection_domain = DEFAULT_PROTECTION_DOMAIN
if "protection_domain" in kwargs:
protection_domain = kwargs[u"protection_domain"]
storage_pool = DEFAULT_STORAGE_POOL
if "storage_pool" in kwargs:
        storage_pool = kwargs[u"storage_pool"]
port = DEFAULT_PORT
if "port" in kwargs:
        port = kwargs[u"port"]
debug = DEBUG
if "debug" in kwargs:
debug = kwargs[u"debug"]
certificate = None
if "certificate" in kwargs:
        certificate = kwargs[u"certificate"]
return scaleio_from_configuration(cluster_id=cluster_id, username=kwargs[u"username"],
password=kwargs[u"password"], mdm_ip=kwargs[u"mdm"], port=port,
protection_domain=protection_domain, storage_pool=storage_pool,
certificate=certificate, ssl=kwargs[u"ssl"], debug=debug)
FLOCKER_BACKEND = BackendDescription(
name=u"scaleio_flocker_driver",
needs_reactor=False, needs_cluster_id=True,
api_factory=api_factory, deployer_type=DeployerType.block)
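
# Illustrative agent.yml dataset stanza (keys inferred from api_factory's
# keyword arguments above; every value is a placeholder, not a real host):
#
# dataset:
#     backend: "scaleio_flocker_driver"
#     username: "admin"
#     password: "password"
#     mdm: "192.0.2.10"
#     ssl: True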
|
apache-2.0
| -3,388,212,268,403,209,700 | 32.769231 | 90 | 0.678815 | false |
shlomif/patool
|
patoolib/programs/bzip2.py
|
1
|
1327
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2012 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Archive commands for the bzip2 program."""
from patoolib import util
from patoolib.programs import extract_singlefile_standard, \
test_singlefile_standard
extract_bzip2 = extract_singlefile_standard
test_bzip2 = test_singlefile_standard
def create_bzip2 (archive, compression, cmd, *args, **kwargs):
"""Create a BZIP2 archive."""
cmdlist = [util.shell_quote(cmd)]
if kwargs['verbose']:
cmdlist.append('-v')
cmdlist.extend(['-c', '-z', '--'])
cmdlist.extend([util.shell_quote(x) for x in args])
cmdlist.extend(['>', util.shell_quote(archive)])
return (cmdlist, {'shell': True})
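# For illustration (file names are hypothetical; shell quoting elided):
#
# create_bzip2('out.bz2', None, 'bzip2', 'data.txt', verbose=True)
#
# returns roughly
# (['bzip2', '-v', '-c', '-z', '--', 'data.txt', '>', 'out.bz2'], {'shell': True}),
# i.e. the compressed stream reaches the archive via shell redirection.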
|
gpl-3.0
| 3,623,195,511,221,700,000 | 40.46875 | 71 | 0.718161 | false |
larrylindsey/reconstructmergetool
|
build/lib.linux-x86_64-2.7/pyrecon/toolsgui/excelTool.py
|
1
|
4104
|
#!/usr/bin/env python
import sys, os
from pyrecon.tools import excelTool
from PySide import QtGui, QtCore
class excelToolWindow(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.parent = parent
self.setGeometry(0,0,500,200)
self.seriesPathLine = None
self.seriesPathBrowse = None
self.seriesPath = 'Enter or browse path to series'
self.savePathLine = None
self.savePathBrowse = None
self.savePath = 'Enter or browse path to save excel workbook'
self.goButton = None
# GUI Start Functions
self.functionalItems()
self.layout()
self.show()
def functionalItems(self):
self.seriesPathLine = QtGui.QLineEdit(self)
self.seriesPathLine.setText( self.seriesPath )
self.seriesPathLine.setAlignment( QtCore.Qt.AlignCenter )
self.seriesPathBrowse = QtGui.QPushButton(self)
self.seriesPathBrowse.clicked.connect( self.browse )
self.seriesPathBrowse.setIconSize(QtCore.QSize(25,25))
self.seriesPathBrowse.setText('Browse')
self.savePathLine = QtGui.QLineEdit(self)
self.savePathLine.setText( self.savePath )
self.savePathLine.setAlignment( QtCore.Qt.AlignCenter ) #===
self.savePathBrowse = QtGui.QPushButton(self)
self.savePathBrowse.clicked.connect( self.browse )
self.savePathBrowse.setIconSize(QtCore.QSize(25,25))
self.savePathBrowse.setText('Browse')
self.goButton = QtGui.QPushButton(self)
self.goButton.setText('Create Excel Workbook (.xlsx)')
self.goButton.clicked.connect( self.checkAndFinish )
def layout(self):
vbox = QtGui.QVBoxLayout()
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget( self.seriesPathLine )
hbox1.addWidget( self.seriesPathBrowse )
hbox1.insertSpacing(0,25)
hbox1.insertSpacing(-1,25)
hbox2 = QtGui.QHBoxLayout()
hbox2.addWidget( self.savePathLine )
hbox2.addWidget( self.savePathBrowse )
hbox2.insertSpacing(0,25)
hbox2.insertSpacing(-1,25)
hbox3 = QtGui.QHBoxLayout()
hbox3.insertSpacing(0,225)
hbox3.addWidget( self.goButton )
hbox3.insertSpacing(-1,225)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
self.setLayout(vbox)
def browse(self):
if self.sender() == self.seriesPathBrowse:
path = QtGui.QFileDialog.getOpenFileName(self,
'Load Series',
'/home/',
'Series File (*.ser)')
path = str(path[0])
if path != '':
self.seriesPathLine.setText(path)
elif self.sender() == self.savePathBrowse:
path = str( QtGui.QFileDialog.getExistingDirectory(self) )
if path != '':
self.savePathLine.setText(path)
def checkAndFinish(self):
self.seriesPath = self.seriesPathLine.text()
self.savePath = self.savePathLine.text()
if '.ser' not in self.seriesPath:
msg = QtGui.QMessageBox(self)
msg.setText('Invalid series file -- Please try again.')
msg.show()
if self.savePath == 'Enter or browse path to save excel workbook' or '/' not in self.savePath:
msg = QtGui.QMessageBox(self)
msg.setText('Invalid save path!')
msg.show()
else:
print('Continuing...')
print(self.seriesPath)
print(self.savePath)
excelTool.main(self.seriesPath, self.savePath)
self.close()
def main():
app = QtGui.QApplication(sys.argv)
t = excelToolWindow()
sys.exit( app.exec_() )
main()
|
gpl-3.0
| 3,869,149,202,501,233,000 | 35.327434 | 102 | 0.570906 | false |
sYnfo/samba
|
python/samba/getopt.py
|
1
|
11288
|
# Samba-specific bits for optparse
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Support for parsing Samba-related command-line options."""
from __future__ import absolute_import
from __future__ import print_function
__docformat__ = "restructuredText"
import optparse
import os
from samba.credentials import (
Credentials,
AUTO_USE_KERBEROS,
DONT_USE_KERBEROS,
MUST_USE_KERBEROS,
)
from samba.hostconfig import Hostconfig
import sys
class SambaOptions(optparse.OptionGroup):
"""General Samba-related command line options."""
def __init__(self, parser):
from samba.param import LoadParm
optparse.OptionGroup.__init__(self, parser, "Samba Common Options")
self.add_option("-s", "--configfile", action="callback",
type=str, metavar="FILE", help="Configuration file",
callback=self._load_configfile)
self.add_option("-d", "--debuglevel", action="callback",
type=int, metavar="DEBUGLEVEL", help="debug level",
callback=self._set_debuglevel)
self.add_option("--option", action="callback",
type=str, metavar="OPTION",
help="set smb.conf option from command line",
callback=self._set_option)
self.add_option("--realm", action="callback",
type=str, metavar="REALM", help="set the realm name",
callback=self._set_realm)
self._configfile = None
self._lp = LoadParm()
self.realm = None
def get_loadparm_path(self):
"""Return path to the smb.conf file specified on the command line."""
return self._configfile
def _load_configfile(self, option, opt_str, arg, parser):
self._configfile = arg
def _set_debuglevel(self, option, opt_str, arg, parser):
if arg < 0:
raise optparse.OptionValueError("invalid %s option value: %s" %
(opt_str, arg))
self._lp.set('debug level', str(arg))
def _set_realm(self, option, opt_str, arg, parser):
self._lp.set('realm', arg)
self.realm = arg
def _set_option(self, option, opt_str, arg, parser):
if arg.find('=') == -1:
raise optparse.OptionValueError(
"--option option takes a 'a=b' argument")
        a = arg.split('=', 1)
try:
self._lp.set(a[0], a[1])
except Exception as e:
raise optparse.OptionValueError(
"invalid --option option value %r: %s" % (arg, e))
def get_loadparm(self):
"""Return loadparm object with data specified on the command line."""
if self._configfile is not None:
self._lp.load(self._configfile)
elif os.getenv("SMB_CONF_PATH") is not None:
self._lp.load(os.getenv("SMB_CONF_PATH"))
else:
self._lp.load_default()
return self._lp
def get_hostconfig(self):
return Hostconfig(self.get_loadparm())
class VersionOptions(optparse.OptionGroup):
"""Command line option for printing Samba version."""
def __init__(self, parser):
optparse.OptionGroup.__init__(self, parser, "Version Options")
self.add_option("-V", "--version", action="callback",
callback=self._display_version,
help="Display version number")
def _display_version(self, option, opt_str, arg, parser):
import samba
print(samba.version)
sys.exit(0)
def parse_kerberos_arg(arg, opt_str):
if arg.lower() in ["yes", 'true', '1']:
return MUST_USE_KERBEROS
elif arg.lower() in ["no", 'false', '0']:
return DONT_USE_KERBEROS
elif arg.lower() in ["auto"]:
return AUTO_USE_KERBEROS
else:
raise optparse.OptionValueError("invalid %s option value: %s" %
(opt_str, arg))
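# Hedged doctest-style sketch of the mapping above:
#
# >>> parse_kerberos_arg("yes", "--kerberos") == MUST_USE_KERBEROS
# True
# >>> parse_kerberos_arg("auto", "--kerberos") == AUTO_USE_KERBEROS
# True
# >>> parse_kerberos_arg("maybe", "--kerberos")  # raises OptionValueError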
class CredentialsOptions(optparse.OptionGroup):
"""Command line options for specifying credentials."""
def __init__(self, parser, special_name=None):
self.special_name = special_name
if special_name is not None:
self.section = "Credentials Options (%s)" % special_name
else:
self.section = "Credentials Options"
self.ask_for_password = True
self.ipaddress = None
self.machine_pass = False
optparse.OptionGroup.__init__(self, parser, self.section)
self._add_option("--simple-bind-dn", metavar="DN", action="callback",
callback=self._set_simple_bind_dn, type=str,
help="DN to use for a simple bind")
self._add_option("--password", metavar="PASSWORD", action="callback",
help="Password", type=str, callback=self._set_password)
self._add_option("-U", "--username", metavar="USERNAME",
action="callback", type=str,
help="Username", callback=self._parse_username)
self._add_option("-W", "--workgroup", metavar="WORKGROUP",
action="callback", type=str,
help="Workgroup", callback=self._parse_workgroup)
self._add_option("-N", "--no-pass", action="callback",
help="Don't ask for a password",
callback=self._set_no_password)
self._add_option("-k", "--kerberos", metavar="KERBEROS",
action="callback", type=str,
help="Use Kerberos", callback=self._set_kerberos)
self._add_option("", "--ipaddress", metavar="IPADDRESS",
action="callback", type=str,
help="IP address of server",
callback=self._set_ipaddress)
self._add_option("-P", "--machine-pass",
action="callback",
help="Use stored machine account password",
callback=self._set_machine_pass)
self.creds = Credentials()
def _add_option(self, *args1, **kwargs):
if self.special_name is None:
return self.add_option(*args1, **kwargs)
args2 = ()
for a in args1:
if not a.startswith("--"):
continue
args2 += (a.replace("--", "--%s-" % self.special_name),)
self.add_option(*args2, **kwargs)
def _parse_username(self, option, opt_str, arg, parser):
self.creds.parse_string(arg)
self.machine_pass = False
def _parse_workgroup(self, option, opt_str, arg, parser):
self.creds.set_domain(arg)
def _set_password(self, option, opt_str, arg, parser):
self.creds.set_password(arg)
self.ask_for_password = False
self.machine_pass = False
def _set_no_password(self, option, opt_str, arg, parser):
self.ask_for_password = False
def _set_machine_pass(self, option, opt_str, arg, parser):
self.machine_pass = True
def _set_ipaddress(self, option, opt_str, arg, parser):
self.ipaddress = arg
def _set_kerberos(self, option, opt_str, arg, parser):
self.creds.set_kerberos_state(parse_kerberos_arg(arg, opt_str))
def _set_simple_bind_dn(self, option, opt_str, arg, parser):
self.creds.set_bind_dn(arg)
def get_credentials(self, lp, fallback_machine=False):
"""Obtain the credentials set on the command-line.
:param lp: Loadparm object to use.
:return: Credentials object
"""
self.creds.guess(lp)
if self.machine_pass:
self.creds.set_machine_account(lp)
elif self.ask_for_password:
self.creds.set_cmdline_callbacks()
# possibly fallback to using the machine account, if we have
# access to the secrets db
if fallback_machine and not self.creds.authentication_requested():
try:
self.creds.set_machine_account(lp)
except Exception:
pass
return self.creds
class CredentialsOptionsDouble(CredentialsOptions):
"""Command line options for specifying credentials of two servers."""
def __init__(self, parser):
CredentialsOptions.__init__(self, parser)
self.no_pass2 = True
self.add_option("--simple-bind-dn2", metavar="DN2", action="callback",
callback=self._set_simple_bind_dn2, type=str,
help="DN to use for a simple bind")
self.add_option("--password2", metavar="PASSWORD2", action="callback",
help="Password", type=str,
callback=self._set_password2)
self.add_option("--username2", metavar="USERNAME2",
action="callback", type=str,
help="Username for second server",
callback=self._parse_username2)
self.add_option("--workgroup2", metavar="WORKGROUP2",
action="callback", type=str,
help="Workgroup for second server",
callback=self._parse_workgroup2)
self.add_option("--no-pass2", action="store_true",
help="Don't ask for a password for the second server")
self.add_option("--kerberos2", metavar="KERBEROS2",
action="callback", type=str,
help="Use Kerberos", callback=self._set_kerberos2)
self.creds2 = Credentials()
def _parse_username2(self, option, opt_str, arg, parser):
self.creds2.parse_string(arg)
def _parse_workgroup2(self, option, opt_str, arg, parser):
self.creds2.set_domain(arg)
def _set_password2(self, option, opt_str, arg, parser):
self.creds2.set_password(arg)
self.no_pass2 = False
def _set_kerberos2(self, option, opt_str, arg, parser):
self.creds2.set_kerberos_state(parse_kerberos_arg(arg, opt_str))
def _set_simple_bind_dn2(self, option, opt_str, arg, parser):
self.creds2.set_bind_dn(arg)
def get_credentials2(self, lp, guess=True):
"""Obtain the credentials set on the command-line.
:param lp: Loadparm object to use.
        :param guess: Try to guess credentials from the environment
:return: Credentials object
"""
if guess:
self.creds2.guess(lp)
elif not self.creds2.get_username():
self.creds2.set_anonymous()
if self.no_pass2:
self.creds2.set_cmdline_callbacks()
return self.creds2
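# Minimal usage sketch (hypothetical command-line tool, not part of this
# module): wiring the option groups into a standard optparse parser.
#
# parser = optparse.OptionParser("demo [options]")
# sambaopts = SambaOptions(parser)
# parser.add_option_group(sambaopts)
# credopts = CredentialsOptions(parser)
# parser.add_option_group(credopts)
# opts, args = parser.parse_args()
# lp = sambaopts.get_loadparm()
# creds = credopts.get_credentials(lp)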
|
gpl-3.0
| 6,134,900,008,198,523,000 | 38.468531 | 79 | 0.582123 | false |
hjhsalo/reana-server
|
reana_server/api_client.py
|
1
|
1852
|
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017 CERN.
#
# REANA is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# REANA is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with REANA; if not, see <http://www.gnu.org/licenses>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
"""REST API client generator."""
import json
import os
import pkg_resources
from bravado.client import SwaggerClient
from .config import COMPONENTS_DATA
def get_spec(spec_file):
"""Get json specification from package data."""
spec_file_path = os.path.join(
pkg_resources.
resource_filename(
'reana_server',
'openapi_connections'),
spec_file)
with open(spec_file_path) as f:
json_spec = json.load(f)
return json_spec
def create_openapi_client(component):
"""Create a OpenAPI client for a given spec."""
try:
address, spec_file = COMPONENTS_DATA[component]
json_spec = get_spec(spec_file)
client = SwaggerClient.from_spec(
json_spec,
config={'also_return_response': True})
client.swagger_spec.api_url = address
return client
except KeyError:
        raise Exception('Unknown component {}'.format(component))
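# Hedged usage sketch -- the component name and operation below are
# assumptions; valid names come from COMPONENTS_DATA and the operations
# from the loaded OpenAPI spec:
#
# client = create_openapi_client('reana-workflow-controller')
# result, http_response = client.api.get_workflows().result()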
|
gpl-2.0
| 6,410,380,712,022,820,000 | 30.931034 | 79 | 0.692765 | false |
dontnod/weblate
|
weblate/accounts/migrations/0012_auto_20190805_1248.py
|
1
|
1517
|
# Generated by Django 2.2.3 on 2019-08-05 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("accounts", "0011_auto_20190721_1810")]
operations = [
migrations.AlterField(
model_name="subscription",
name="notification",
field=models.CharField(
choices=[
("MergeFailureNotification", "Merge failure"),
("ParseErrorNotification", "Parse error"),
("NewStringNotificaton", "New string"),
("NewContributorNotificaton", "New contributor"),
("NewSuggestionNotificaton", "New suggestion"),
("LastAuthorCommentNotificaton", "Comment on own translation"),
("MentionCommentNotificaton", "Mentioned in comment"),
("NewCommentNotificaton", "New comment"),
("ChangedStringNotificaton", "Changed string"),
("NewTranslationNotificaton", "New language"),
("NewComponentNotificaton", "New translation component"),
("NewWhiteboardMessageNotificaton", "New whiteboard message"),
("NewAlertNotificaton", "New alert"),
("PendingSuggestionsNotification", "Pending suggestions"),
("ToDoStringsNotification", "Strings needing action"),
],
max_length=100,
),
)
]
|
gpl-3.0
| -129,581,008,454,388,270 | 42.342857 | 83 | 0.54911 | false |
samuto/ladybug
|
src/Ladybug_Open EPW Weather File.py
|
1
|
1873
|
# Open Weather data file
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to open an .epw weather file from a location on your computer.
-
Provided by Ladybug 0.0.60
Args:
_open: Set Boolean to True to browse for a weather file on your system.
Returns:
readMe!: ...
epwFile: The file path of the selected epw file.
"""
ghenv.Component.Name = "Ladybug_Open EPW Weather File"
ghenv.Component.NickName = 'Open weather file'
ghenv.Component.Message = 'VER 0.0.60\nJUL_06_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "0 | Ladybug"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import rhinoscriptsyntax as rs
if _open == True:
filter = "EPW file (*.epw)|*.epw|All Files (*.*)|*.*||"
epwFile = rs.OpenFileName("Open .epw Weather File", filter)
print 'Done!'
else:
print 'Please set open to True'
|
gpl-3.0
| 6,968,332,342,942,576,000 | 33.339623 | 93 | 0.697811 | false |
taschik/ramcloud
|
scripts/log.py
|
1
|
2528
|
#!/usr/bin/env python
# Copyright (c) 2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
A collection of methods for managing log files for applications such as
cluster.py.
"""
import os
import glob
import time
__all__ = ['scan']
def createDir(top):
"""
Given a top-level log directory, create a subdirectory within that
directory to use for log files for a particular run of an application,
and make a symbolic link from "latest" to that subdirectory. Return the
path to the subdirectory.
"""
try:
os.mkdir(top)
except:
pass
datetime = time.strftime('%Y%m%d%H%M%S')
latest = '%s/latest' % top
subdir = '%s/%s' % (top, datetime)
os.mkdir(subdir)
try:
os.remove('%s/latest' % top)
except:
pass
os.symlink(datetime, latest)
return subdir
def scan(dir, strings, skip_strings=[]):
"""
Read all .log files in dir, searching for lines that contain any
strings in strings (and omitting lines that contain any string in
skip_strings). Return all of the matching lines, along with
info about which log file they were in.
"""
result = ""
for name in glob.iglob(dir + '/*.log'):
matchesThisFile = False
for line in open(name, 'r'):
for s in strings:
if line.find(s) >= 0:
skip = False
for skip_string in skip_strings:
if line.find(skip_string) >= 0:
skip = True
if skip:
continue
if not matchesThisFile:
result += '**** %s:\n' % os.path.basename(name)
matchesThisFile = True
result += line
                    break
return result
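# Example usage (paths and match strings are illustrative):
#
# subdir = createDir('logs')  # e.g. logs/20100101120000, with logs/latest -> it
# print(scan(subdir, ['ERROR', 'WARNING'], skip_strings=['harmless']))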
|
isc
| 2,893,362,188,098,615,300 | 32.706667 | 76 | 0.615902 | false |
lhfei/spark-in-action
|
spark-2.x/src/main/python/mllib/svd_example.py
|
1
|
1821
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
# $example on$
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonSVDExample")
# $example on$
rows = sc.parallelize([
Vectors.sparse(5, {1: 1.0, 3: 7.0}),
Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
])
mat = RowMatrix(rows)
# Compute the top 5 singular values and corresponding singular vectors.
svd = mat.computeSVD(5, computeU=True)
U = svd.U # The U factor is a RowMatrix.
s = svd.s # The singular values are stored in a local dense vector.
V = svd.V # The V factor is a local dense matrix.
# $example off$
collected = U.rows.collect()
print("U factor is:")
for vector in collected:
print(vector)
print("Singular values are: %s" % s)
print("V factor is:\n%s" % V)
sc.stop()
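# Optional sanity check (hedged sketch; would run before sc.stop() and
# assumes numpy is available on the driver): the factors should
# approximately reconstruct the input rows, U * diag(s) * V^T ~= mat.
#
# import numpy as np
# U_local = np.array([row.toArray() for row in collected])
# approx = U_local.dot(np.diag(s.toArray())).dot(np.array(V.toArray()).T)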
|
apache-2.0
| 8,269,495,635,788,406,000 | 35.9375 | 77 | 0.666118 | false |
JeroenZegers/Nabu-MSSS
|
nabu/neuralnetworks/loss_computers/pit_noise_loss.py
|
1
|
1142
|
"""@file pit_noise_loss.py
contains the PITNoiseLoss"""
import loss_computer
from nabu.neuralnetworks.components import ops
class PITNoiseLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
        Creates the operation to compute the Permutation Invariant Training loss, including a noise mask
        Args:
            targets: a dictionary of [batch_size x time x ...] tensors containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
multi_targets = targets['multi_targets']
mix_to_mask = targets['mix_to_mask']
seq_length = seq_length['bin_est']
bin_est = logits['bin_est']
noise_filter = logits['noise_filter']
loss, norm = ops.pit_noise_loss(
multi_targets, bin_est, noise_filter, mix_to_mask, seq_length, self.batch_size, activation='softmax')
return loss, norm
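# Hedged usage sketch (dict keys and tensor shapes follow the docstring
# above; construction of the loss computer happens elsewhere in Nabu):
#
# loss, norm = loss_computer(
#     targets={'multi_targets': multi_targets, 'mix_to_mask': mix_to_mask},
#     logits={'bin_est': bin_est, 'noise_filter': noise_filter},
#     seq_length={'bin_est': seq_length})
# normalized_loss = loss / norm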
|
mit
| -1,165,106,504,995,643,600 | 29.052632 | 104 | 0.71979 | false |
GongYiLiao/Python_Daily
|
2014/Sep/8/test_opencl_benchmark.py
|
1
|
3178
|
import pyopencl as cl
import numpy
import numpy.linalg as la
import scipy.special as ssf
import datetime
from time import time
import test_cython_32
def test_cython_0(zz):
time1 = time()
a, b, c = test_cython_32.test_0(zz)
run_time = time() - time1
print("Execution time of Cython on float32: ", run_time, "s")
return([a, b, c, run_time])
def test_opencl_0(zz, a, b, c_result, ref_run_time):
for platform in cl.get_platforms():
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
for device in platform.get_devices()[0:]:
print("---------------------------------------------------------------")
print("Device name:", device.name)
print("Device type:", cl.device_type.to_string(device.type))
print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
print("Device max clock speed:", device.max_clock_frequency, 'MHz')
print("Device compute units:", device.max_compute_units)
# Simnple speed test
ctx = cl.Context([device])
queue = cl.CommandQueue(ctx,
properties=cl.command_queue_properties.PROFILING_ENABLE)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
b_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b)
dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, b.nbytes)
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a,
__global float *b, __global float *c)
{
int loop;
int gid = get_global_id(0);
for(loop=0; loop<%s;loop++)
{
c[gid] = log(a[gid]) + exp(b[gid]);
c[gid] = c[gid] * (a[gid] + b[gid]);
c[gid] = c[gid] * cos(a[gid] / 2);
c[gid] = sin(c[gid]);
}
}
""" % (zz)).build()
exec_evt = prg.sum(queue, a.shape, None, a_buf, b_buf, dest_buf)
exec_evt.wait()
elapsed = 1e-9*(exec_evt.profile.end - exec_evt.profile.start)
print("Execution time of test: %g s" % elapsed)
print("OpenCL speedup factor: %g " % (ref_run_time / elapsed - 1))
c = numpy.empty_like(a)
cl.enqueue_read_buffer(queue, dest_buf, c).wait()
if max(abs(c - c_result)) > 1e-6:
print("Results doesn't match!!")
else:
print("Results OK")
print("Max error: %g " % max(abs(c - c_result)))
print("_______________________________________________________________")
a_0, b_0, c_0, t_0 = test_cython_0(1000)
test_opencl_0(1000, a_0, b_0, c_0, t_0)
|
mit
| -1,539,835,912,262,503,000 | 40.272727 | 92 | 0.465702 | false |
pylover/pymlconf
|
sphinx/conf.py
|
1
|
5697
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pymlconf documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 25 00:12:51 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pymlconf'
copyright = '2017, Vahid Mardani'
author = 'Vahid Mardani'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import os.path
import re
# reading package's version (same way sqlalchemy does)
with open(
os.path.join(os.path.dirname(__file__), '../pymlconf', '__init__.py')
) as v_file:
package_version = \
re.compile('.*__version__ = \'(.*?)\'', re.S)\
.match(v_file.read())\
.group(1)
# The short X.Y version.
version = '.'.join(package_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = package_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymlconfdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pymlconf.tex', 'pymlconf Documentation',
'Vahid Mardani', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pymlconf', 'pymlconf Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pymlconf', 'pymlconf Documentation',
author, 'pymlconf', 'A very micro HTTP framework.',
'Miscellaneous'),
]
autodoc_default_flags = [
'members',
'show-inheritance',
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# The theme to use for HTML and HTML Help pages
html_title = 'pymlconf Documentation'
html_theme = 'sphinx_rtd_theme'
# html_favicon = '../stuff/favicon.ico'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_theme_options = {
# 'logo_only': True
# }
html_show_sourcelink = False
# html_logo = '../stuff/logo.svg'
|
mit
| 4,386,085,715,522,529,000 | 28.827225 | 79 | 0.667544 | false |
cmusatyalab/opendiamond
|
opendiamond/blaster/cache.py
|
1
|
5375
|
#
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2011-2012 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
from future import standard_library
standard_library.install_aliases()
from builtins import object
from collections import Mapping
import pickle as pickle
from datetime import datetime
from hashlib import sha256
import logging
import os
import shutil
from tempfile import NamedTemporaryFile
import zipfile
import dateutil.parser
from dateutil.tz import tzutc
_log = logging.getLogger(__name__)
def _attr_key_to_member(key):
if key == '':
return '__OBJECT_DATA__'
return key
def _member_to_attr_key(name):
if name == '__OBJECT_DATA__':
return ''
return name
class SearchCacheLoadError(Exception):
pass
class _CachedSearchResult(Mapping):
def __init__(self, path): # pylint: disable=super-init-not-called
self._zip = zipfile.ZipFile(path, 'r')
def __len__(self):
return len(self._zip.namelist())
def __iter__(self):
return (_member_to_attr_key(n) for n in self._zip.namelist())
def __contains__(self, key):
return _attr_key_to_member(key) in self._zip.namelist()
def __getitem__(self, key):
return self._zip.read(_attr_key_to_member(key))
class SearchCache(object):
'''Assumes single-threaded, single-process access to the cache
(except for pruning).'''
def __init__(self, path):
if not os.path.exists(path):
os.makedirs(path)
self._basedir = path
def _search_dir_path(self, search_key):
return os.path.join(self._basedir, search_key)
def _search_path(self, search_key):
return os.path.join(self._search_dir_path(search_key), 'search')
def _search_expiration_path(self, search_key):
return os.path.join(self._search_dir_path(search_key), 'expires')
def _object_path(self, search_key, object_key):
return os.path.join(self._search_dir_path(search_key), object_key)
def _object_key(self, object_id):
return sha256(object_id).hexdigest()
def _hash_file(self, filh):
filh.seek(0)
hash = sha256()
while True:
buf = filh.read(131072)
            if not buf:
                break
hash.update(buf)
return hash.hexdigest()
def put_search(self, obj, expiration):
'''obj is an application-defined search object. expiration is a
timezone-aware datetime specifying when the search expires.'''
obj_fh = NamedTemporaryFile(dir=self._basedir, delete=False)
pickle.dump(obj, obj_fh, pickle.HIGHEST_PROTOCOL)
search_key = self._hash_file(obj_fh)
obj_fh.close()
exp_fh = NamedTemporaryFile(dir=self._basedir, delete=False)
exp_fh.write(expiration.isoformat() + '\n')
exp_fh.close()
dirpath = self._search_dir_path(search_key)
filepath = self._search_path(search_key)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if not os.path.exists(filepath):
os.rename(exp_fh.name, self._search_expiration_path(search_key))
os.rename(obj_fh.name, filepath)
else:
os.unlink(exp_fh.name)
os.unlink(obj_fh.name)
return search_key
def get_search(self, search_key):
try:
with open(self._search_path(search_key), 'rb') as fh:
return pickle.load(fh)
except IOError:
raise KeyError()
except Exception:
raise SearchCacheLoadError()
def put_search_result(self, search_key, object_id, result):
'''result is a dict of object attributes.'''
fh = NamedTemporaryFile(dir=self._basedir, delete=False)
zf = zipfile.ZipFile(fh, 'w', zipfile.ZIP_STORED, True)
for k, v in result.items():
zf.writestr(_attr_key_to_member(k), v)
zf.close()
fh.close()
object_key = self._object_key(object_id)
os.rename(fh.name, self._object_path(search_key, object_key))
return object_key
def get_search_result(self, search_key, object_key):
try:
return _CachedSearchResult(
self._object_path(search_key, object_key))
except IOError:
raise KeyError()
def prune(self):
'''May be run in a different thread.'''
now = datetime.now(tzutc())
expired = 0
for search_key in os.listdir(self._basedir):
# search_key may or may not be a search key; we have to check
# the filesystem to make sure
exp_path = self._search_expiration_path(search_key)
try:
with open(exp_path, 'r') as fh:
exp_time = dateutil.parser.parse(fh.read())
except IOError:
# No expiration file ==> not a search
continue
if exp_time < now:
shutil.rmtree(self._search_dir_path(search_key))
expired += 1
if expired:
_log.info('Expired %d searches', expired)
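# Illustrative usage (paths, keys and payloads are hypothetical; timedelta
# would come from the datetime module):
#
# cache = SearchCache('/var/cache/blaster')
# search_key = cache.put_search({'query': 'dogs'},
#                               datetime.now(tzutc()) + timedelta(hours=1))
# object_key = cache.put_search_result(search_key, 'object-id',
#                                      {'': b'object data'})
# attrs = cache.get_search_result(search_key, object_key)
# cache.prune()  # safe to call from another process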
|
epl-1.0
| 6,612,470,580,327,325,000 | 30.617647 | 76 | 0.611535 | false |
c4fcm/CivilServant
|
alembic/versions/960085fce39c_generating_migration_for_a_praw_object.py
|
1
|
2408
|
"""Generating migration for a PRAW object
Revision ID: 960085fce39c
Revises: 4d46b88366fc
Create Date: 2016-06-13 17:30:49.056215
"""
# revision identifiers, used by Alembic.
revision = '960085fce39c'
down_revision = '4d46b88366fc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.create_table('praw_keys',
sa.Column('id', sa.String(256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('access_token', sa.String(256), nullable=True),
sa.Column('scope', sa.String(256), nullable=True),
sa.Column('refresh_token', sa.String(256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('praw_keys')
### end Alembic commands ###
def upgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.create_table('praw_keys',
sa.Column('id', sa.String(256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('access_token', sa.String(256), nullable=True),
sa.Column('scope', sa.String(256), nullable=True),
sa.Column('refresh_token', sa.String(256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('praw_keys')
### end Alembic commands ###
def upgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.create_table('praw_keys',
sa.Column('id', sa.String(256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('access_token', sa.String(256), nullable=True),
sa.Column('scope', sa.String(256), nullable=True),
sa.Column('refresh_token', sa.String(256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('praw_keys')
### end Alembic commands ###
|
mit
| -8,745,198,961,622,810,000 | 27.329412 | 63 | 0.66113 | false |
TribeMedia/synapse
|
synapse/rest/media/v1/upload_resource.py
|
2
|
3506
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.http.server import respond_with_json, request_handler
from synapse.api.errors import SynapseError
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from twisted.web.resource import Resource
import logging
logger = logging.getLogger(__name__)
class UploadResource(Resource):
isLeaf = True
def __init__(self, hs, media_repo):
Resource.__init__(self)
self.media_repo = media_repo
self.filepaths = media_repo.filepaths
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.server_name = hs.hostname
self.auth = hs.get_auth()
self.max_upload_size = hs.config.max_upload_size
self.version_string = hs.version_string
self.clock = hs.get_clock()
def render_POST(self, request):
self._async_render_POST(request)
return NOT_DONE_YET
def render_OPTIONS(self, request):
respond_with_json(request, 200, {}, send_cors=True)
return NOT_DONE_YET
@request_handler()
@defer.inlineCallbacks
def _async_render_POST(self, request):
requester = yield self.auth.get_user_by_req(request)
# TODO: The checks here are a bit late. The content will have
# already been uploaded to a tmp file at this point
content_length = request.getHeader("Content-Length")
if content_length is None:
raise SynapseError(
msg="Request must specify a Content-Length", code=400
)
if int(content_length) > self.max_upload_size:
raise SynapseError(
msg="Upload request body is too large",
code=413,
)
upload_name = request.args.get("filename", None)
if upload_name:
try:
upload_name = upload_name[0].decode('UTF-8')
except UnicodeDecodeError:
raise SynapseError(
msg="Invalid UTF-8 filename parameter: %r" % (upload_name),
code=400,
)
headers = request.requestHeaders
if headers.hasHeader("Content-Type"):
media_type = headers.getRawHeaders("Content-Type")[0]
else:
raise SynapseError(
msg="Upload request missing 'Content-Type'",
code=400,
)
# if headers.hasHeader("Content-Disposition"):
# disposition = headers.getRawHeaders("Content-Disposition")[0]
        # TODO(markjh): parse content-disposition
content_uri = yield self.media_repo.create_content(
media_type, upload_name, request.content.read(),
content_length, requester.user
)
logger.info("Uploaded content with URI %r", content_uri)
respond_with_json(
request, 200, {"content_uri": content_uri}, send_cors=True
)
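# Illustrative client interaction (hedged sketch; the endpoint path is
# inferred from this resource's location in the media repository and all
# header values are hypothetical):
#
# POST /_matrix/media/v1/upload?filename=flower.jpg HTTP/1.1
# Content-Type: image/jpeg
# Content-Length: 31337
# <binary body>
#
# => 200 OK  {"content_uri": "mxc://example.com/abcdef123456"}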
|
apache-2.0
| 7,054,650,231,856,196,000 | 32.711538 | 79 | 0.625214 | false |
kata-csc/ckanext-oaipmh
|
docs/conf.py
|
1
|
8464
|
# -*- coding: utf-8 -*-
#
# Kata documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 15 10:01:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kata'
copyright = u'2014, CSC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
#"footerbgcolor": "red",
#"footertextcolor": "black",
#"bgcolor": "gray",
#"textcolor": "black",
#"sidebarbgcolor": "blue",
#"sidebartextcolor": "black"
"footerbg": "black",
"headerbg": "black",
"headercolor1": "black",
"headercolor2": "#311F4C",
"headerlinkcolor": "#B894FF",
"linkcolor": "#5C007A"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Kata metadata catalogue documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "Kata"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Katadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Kata.tex', u'Kata Documentation',
u'CSC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kata', u'Kata Documentation',
[u'CSC'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Kata', u'Kata Documentation',
u'CSC', 'Kata', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
agpl-3.0
| 1,165,127,676,307,662,300 | 29.890511 | 79 | 0.70345 | false |
cgart/photobooth
|
mainapp/preview.py
|
1
|
2172
|
import piggyphoto
from kivy.uix.widget import Widget
from kivy.properties import StringProperty
from kivy.properties import NumericProperty
from kivy.properties import ListProperty, ObjectProperty
from kivy.core.image import Image as CoreImage
from kivy.uix.image import Image
# ----------------------------------------------------------------------
# Preview widget - showing the current preview picture
# ----------------------------------------------------------------------
class Preview(Widget):
camera = None
preview_image = ObjectProperty()
image = Image()
alpha = NumericProperty()
preview_file = 'preview.jpg'
enable_preview = False
def __init__(self, **kwargs):
super(Preview, self).__init__(**kwargs)
self.alpha = 0
# ------------------------------------------------------------------
def setCamera(self, capturePreviewFile = 'preview.jpg', camera = None):
self.preview_file = capturePreviewFile
self.image = Image(source = capturePreviewFile)
self.camera = camera
self.alpha = 0
if camera != None:
self.camera.capture_preview(capturePreviewFile)
pass
# ------------------------------------------------------------------
def enablePreview(self):
self.enable_preview = True
# ------------------------------------------------------------------
def disablePreview(self):
self.enable_preview = False
# ------------------------------------------------------------------
def show(self):
self.alpha = 1.
# ------------------------------------------------------------------
def hide(self):
self.alpha = 0.
# ------------------------------------------------------------------
def updateFrame(self):
if self.alpha < 0.1 or self.enable_preview == False:
return
if self.camera != None:
self.camera.capture_preview(self.preview_file)
self.image.reload()
self.preview_image.texture = self.image.texture
pass
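# Hedged usage sketch (camera handle and refresh rate are assumptions;
# Clock would come from kivy.clock):
#
# preview = Preview()
# preview.setCamera('preview.jpg', camera=piggyphoto.camera())
# preview.enablePreview()
# preview.show()
# Clock.schedule_interval(lambda dt: preview.updateFrame(), 1 / 10.)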
|
mit
| -8,266,618,033,812,970,000 | 30.941176 | 75 | 0.445212 | false |
eseidel/native_client_patches
|
tests/tone/run.py
|
1
|
1783
|
#!/usr/bin/python
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../../common")
import nacl_util
NEXE = "tone.nexe"
DEFAULT_ARGS = None
if __name__ == '__main__':
nacl_util.LaunchSelLdr(nacl_util.GetExe(NEXE),
nacl_util.GetArgs(DEFAULT_ARGS))
|
bsd-3-clause
| -1,862,171,788,224,770,300 | 42.487805 | 72 | 0.757712 | false |
HaroldMills/Vesper
|
vesper/mpg_ranch/nfc_bounding_interval_annotator_1_0/annotator.py
|
1
|
7220
|
"""
Module containing NFC bounding interval annotator, version 1.0.
An NFC bounding interval annotator sets values for the `Call Start Index`
and `Call End Index` annotations for a clip containing a nocturnal flight
call (NFC). If the annotations already exist their values are overwritten,
and if they do not already exist they are created. The clip is assumed to
contain an NFC.
"""
from collections import defaultdict
import logging
import resampy
from vesper.command.annotator import Annotator as AnnotatorBase
from vesper.django.app.models import AnnotationInfo
from vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.inferrer \
import Inferrer
from vesper.singleton.clip_manager import clip_manager
import vesper.django.app.model_utils as model_utils
import vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.dataset_utils \
as dataset_utils
import vesper.util.open_mp_utils as open_mp_utils
_CLASSIFICATION_ANNOTATION_NAME = 'Classification'
_START_INDEX_ANNOTATION_NAME = 'Call Start Index'
_END_INDEX_ANNOTATION_NAME = 'Call End Index'
_MODEL_INFOS = {
# Tseep 14k
'Tseep':
(('Tseep_Start_2020-08-07_14.02.08', 30),
('Tseep_End_2020-08-07_15.10.03', 10)),
# Tseep 9.5k
# 'Tseep':
# ('Tseep_Start_2020-07-10_17.17.48', 'Tseep_End_2020-07-10_18.02.04'),
# Tseep 10k
# 'Tseep':
# ('Tseep_Start_2020-07-10_11.53.54', 'Tseep_End_2020-07-10_12.27.40'),
# Tseep 5k without dropout
# 'Tseep':
# ('Tseep_Start_2020-07-08_19.11.45', 'Tseep_End_2020-07-08_19.37.02'),
# Tseep 5k with dropout of .25 : performance worse than without
# 'Tseep':
# ('Tseep_Start_2020-07-08_20.36.20', 'Tseep_End_2020-07-09_11.00.19'),
}
class Annotator(AnnotatorBase):
extension_name = 'MPG Ranch NFC Bounding Interval Annotator 1.0'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
open_mp_utils.work_around_multiple_copies_issue()
# Suppress TensorFlow INFO and DEBUG log messages.
logging.getLogger('tensorflow').setLevel(logging.WARN)
self._inferrers = dict(
(t, _create_inferrer(t))
for t in ('Tseep',))
self._annotation_infos = _get_annotation_infos()
def annotate_clips(self, clips):
clip_lists = self._get_call_clip_lists(clips)
annotated_clip_count = 0
for clip_type, clips in clip_lists.items():
inferrer = self._inferrers.get(clip_type)
if inferrer is not None:
# have inferrer for this clip type
inference_sample_rate = inferrer.sample_rate
clips, waveform_dataset = \
self._get_clip_waveforms(clips, inference_sample_rate)
bounds = inferrer.get_call_bounds(waveform_dataset)
for clip, (start_index, end_index) in zip(clips, bounds):
self._annotate_clip(
clip, _START_INDEX_ANNOTATION_NAME, start_index,
inference_sample_rate)
self._annotate_clip(
clip, _END_INDEX_ANNOTATION_NAME, end_index,
inference_sample_rate)
annotated_clip_count += len(clips)
return annotated_clip_count
def _get_call_clip_lists(self, clips):
"""Gets a mapping from clip types to lists of call clips."""
# Get mapping from clip types to call clip lists.
clip_lists = defaultdict(list)
for clip in clips:
if _is_call_clip(clip):
clip_type = model_utils.get_clip_type(clip)
clip_lists[clip_type].append(clip)
return clip_lists
def _get_clip_waveforms(self, clips, inference_sample_rate):
result_clips = []
waveforms = []
for clip in clips:
try:
waveform = self._get_clip_samples(clip, inference_sample_rate)
except Exception as e:
logging.warning(
f'Could not annotate clip "{clip}", since its samples '
f'could not be obtained. Error message was: {str(e)}')
else:
# got clip samples
result_clips.append(clip)
waveforms.append(waveform)
waveforms = \
dataset_utils.create_waveform_dataset_from_tensors(waveforms)
return result_clips, waveforms
def _get_clip_samples(self, clip, inference_sample_rate):
# Get clip samples.
samples = clip_manager.get_samples(clip)
if clip.sample_rate != inference_sample_rate:
# need to resample
samples = resampy.resample(
samples, clip.sample_rate, inference_sample_rate)
return samples
def _annotate_clip(
self, clip, annotation_name, index, inference_sample_rate):
# If needed, modify index to account for difference between
# clip and inference sample rates.
if clip.sample_rate != inference_sample_rate:
factor = clip.sample_rate / inference_sample_rate
index = int(round(index * factor))
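        # Worked example (illustrative rates): with a clip sample rate of
        # 24000 Hz and an inference rate of 22050 Hz, inference index 441
        # maps to round(441 * 24000 / 22050) = 480 clip samples.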
# Make index a recording index rather than a clip index.
index += clip.start_index
annotation_info = self._annotation_infos[annotation_name]
annotation_value = str(index)
model_utils.annotate_clip(
clip, annotation_info, annotation_value,
creating_user=self._creating_user,
creating_job=self._creating_job,
creating_processor=self._creating_processor)
def _create_inferrer(clip_type):
model_infos = _MODEL_INFOS[clip_type]
return Inferrer(*model_infos)
def _get_annotation_infos():
return dict(
(name, _get_annotation_info(name))
for name in (_START_INDEX_ANNOTATION_NAME, _END_INDEX_ANNOTATION_NAME))
def _get_annotation_info(name):
try:
return AnnotationInfo.objects.get(name=name)
except AnnotationInfo.DoesNotExist:
raise ValueError(f'Unrecognized annotation "{name}".')
def _is_call_clip(clip):
annotations = model_utils.get_clip_annotations(clip)
classification = annotations.get(_CLASSIFICATION_ANNOTATION_NAME)
return classification is not None and classification.startswith('Call')
def _convert_clip_index_to_recording_index(
clip, clip_index, sample_rate):
if sample_rate != clip.sample_rate:
clip_index = int(round(clip_index * clip.sample_rate / sample_rate))
return clip.start_index + clip_index
|
mit
| -6,382,661,020,857,981,000 | 31.232143 | 79 | 0.571884 | false |
mlperf/training_results_v0.7
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/setup.py
|
1
|
11020
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.spawn import find_executable
from distutils import sysconfig, log
import setuptools
import setuptools.command.build_py
import setuptools.command.develop
import setuptools.command.build_ext
from collections import namedtuple
from contextlib import contextmanager
import glob
import os
import shlex
import subprocess
import sys
import struct
from textwrap import dedent
import multiprocessing
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
SRC_DIR = os.path.join(TOP_DIR, 'onnx')
TP_DIR = os.path.join(TOP_DIR, 'third_party')
CMAKE_BUILD_DIR = os.path.join(TOP_DIR, '.setuptools-cmake-build')
WINDOWS = (os.name == 'nt')
CMAKE = find_executable('cmake3') or find_executable('cmake')
MAKE = find_executable('make')
install_requires = []
setup_requires = []
tests_require = []
extras_require = {}
################################################################################
# Global variables for controlling the build variant
################################################################################
ONNX_ML = bool(os.getenv('ONNX_ML') == '1')
ONNX_NAMESPACE = os.getenv('ONNX_NAMESPACE', 'onnx')
ONNX_BUILD_TESTS = bool(os.getenv('ONNX_BUILD_TESTS') == '1')
DEBUG = bool(os.getenv('DEBUG'))
COVERAGE = bool(os.getenv('COVERAGE'))
################################################################################
# Version
################################################################################
try:
git_version = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
cwd=TOP_DIR).decode('ascii').strip()
except (OSError, subprocess.CalledProcessError):
git_version = None
with open(os.path.join(TOP_DIR, 'VERSION_NUMBER')) as version_file:
VersionInfo = namedtuple('VersionInfo', ['version', 'git_version'])(
version=version_file.read().strip(),
git_version=git_version
)
################################################################################
# Pre Check
################################################################################
assert CMAKE, 'Could not find "cmake" executable!'
################################################################################
# Utilities
################################################################################
@contextmanager
def cd(path):
if not os.path.isabs(path):
raise RuntimeError('Can only cd to absolute path, got: {}'.format(path))
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
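# Usage sketch: run a command from inside the build tree, restoring the
# original working directory afterwards even if the command raises:
#
#     with cd(CMAKE_BUILD_DIR):
#         subprocess.check_call([CMAKE, TOP_DIR])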
################################################################################
# Customized commands
################################################################################
class ONNXCommand(setuptools.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class create_version(ONNXCommand):
def run(self):
with open(os.path.join(SRC_DIR, 'version.py'), 'w') as f:
f.write(dedent('''\
# This file is generated by setup.py. DO NOT EDIT!
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
version = '{version}'
git_version = '{git_version}'
'''.format(**dict(VersionInfo._asdict()))))
class cmake_build(setuptools.Command):
"""
    Compiles everything when `python setup.py build` is run using cmake.
Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
environment variable.
The number of CPUs used by `make` can be specified by passing `-j<ncpus>`
to `setup.py build`. By default all CPUs are used.
"""
user_options = [
(str('jobs='), str('j'), str('Specifies the number of jobs to use with make'))
]
built = False
def initialize_options(self):
self.jobs = multiprocessing.cpu_count()
def finalize_options(self):
self.jobs = int(self.jobs)
def run(self):
if cmake_build.built:
return
cmake_build.built = True
if not os.path.exists(CMAKE_BUILD_DIR):
os.makedirs(CMAKE_BUILD_DIR)
with cd(CMAKE_BUILD_DIR):
# configure
cmake_args = [
CMAKE,
'-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_python_inc()),
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
'-DBUILD_ONNX_PYTHON=ON',
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DONNX_NAMESPACE={}'.format(ONNX_NAMESPACE),
'-DPY_EXT_SUFFIX={}'.format(sysconfig.get_config_var('EXT_SUFFIX') or ''),
]
if COVERAGE:
cmake_args.append('-DONNX_COVERAGE=ON')
if COVERAGE or DEBUG:
# in order to get accurate coverage information, the
# build needs to turn off optimizations
cmake_args.append('-DCMAKE_BUILD_TYPE=Debug')
if WINDOWS:
cmake_args.extend([
                    # we need to link with libpython on Windows, so
                    # pass the Python version to CMake so that it can
                    # find the matching Python installation
'-DPY_VERSION={}'.format('{0}.{1}'.format(*sys.version_info[:2])),
'-DONNX_USE_MSVC_STATIC_RUNTIME=ON',
])
if 8 * struct.calcsize("P") == 64:
# Temp fix for CI
# TODO: need a better way to determine generator
cmake_args.append('-DCMAKE_GENERATOR_PLATFORM=x64')
if ONNX_ML:
cmake_args.append('-DONNX_ML=1')
if ONNX_BUILD_TESTS:
cmake_args.append('-DONNX_BUILD_TESTS=ON')
if 'CMAKE_ARGS' in os.environ:
extra_cmake_args = shlex.split(os.environ['CMAKE_ARGS'])
# prevent crossfire with downstream scripts
del os.environ['CMAKE_ARGS']
log.info('Extra cmake args: {}'.format(extra_cmake_args))
cmake_args.extend(extra_cmake_args)
cmake_args.append(TOP_DIR)
subprocess.check_call(cmake_args)
build_args = [CMAKE, '--build', os.curdir]
if WINDOWS:
build_args.extend(['--', '/maxcpucount:{}'.format(self.jobs)])
else:
build_args.extend(['--', '-j', str(self.jobs)])
subprocess.check_call(build_args)
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.run_command('create_version')
self.run_command('cmake_build')
generated_python_files = \
glob.glob(os.path.join(CMAKE_BUILD_DIR, 'onnx', '*.py')) + \
glob.glob(os.path.join(CMAKE_BUILD_DIR, 'onnx', '*.pyi'))
for src in generated_python_files:
dst = os.path.join(
TOP_DIR, os.path.relpath(src, CMAKE_BUILD_DIR))
self.copy_file(src, dst)
return setuptools.command.build_py.build_py.run(self)
class develop(setuptools.command.develop.develop):
def run(self):
self.run_command('build_py')
setuptools.command.develop.develop.run(self)
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
self.run_command('cmake_build')
setuptools.command.build_ext.build_ext.run(self)
def build_extensions(self):
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = os.path.basename(self.get_ext_filename(fullname))
lib_path = CMAKE_BUILD_DIR
if os.name == 'nt':
debug_lib_dir = os.path.join(lib_path, "Debug")
release_lib_dir = os.path.join(lib_path, "Release")
if os.path.exists(debug_lib_dir):
lib_path = debug_lib_dir
elif os.path.exists(release_lib_dir):
lib_path = release_lib_dir
src = os.path.join(lib_path, filename)
dst = os.path.join(os.path.realpath(self.build_lib), "onnx", filename)
self.copy_file(src, dst)
class mypy_type_check(ONNXCommand):
description = 'Run MyPy type checker'
def run(self):
"""Run command."""
onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py"))
returncode = subprocess.call([sys.executable, onnx_script])
sys.exit(returncode)
cmdclass = {
'create_version': create_version,
'cmake_build': cmake_build,
'build_py': build_py,
'develop': develop,
'build_ext': build_ext,
'typecheck': mypy_type_check,
}
################################################################################
# Extensions
################################################################################
ext_modules = [
setuptools.Extension(
name=str('onnx.onnx_cpp2py_export'),
sources=[])
]
################################################################################
# Packages
################################################################################
# no need to do fancy stuff so far
packages = setuptools.find_packages()
install_requires.extend([
'protobuf',
'numpy',
'six',
'typing>=3.6.4',
'typing-extensions>=3.6.2.1',
])
################################################################################
# Test
################################################################################
setup_requires.append('pytest-runner')
tests_require.append('pytest')
tests_require.append('nbval')
tests_require.append('tabulate')
tests_require.append('typing')
tests_require.append('typing-extensions')
if sys.version_info[0] == 3:
# Mypy doesn't work with Python 2
extras_require['mypy'] = ['mypy==0.600']
################################################################################
# Final
################################################################################
setuptools.setup(
name="onnx",
version=VersionInfo.version,
description="Open Neural Network Exchange",
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
include_package_data=True,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require=extras_require,
author='bddppq',
author_email='jbai@fb.com',
url='https://github.com/onnx/onnx',
entry_points={
'console_scripts': [
'check-model = onnx.bin.checker:check_model',
'check-node = onnx.bin.checker:check_node',
'backend-test-tools = onnx.backend.test.cmd_tools:main',
]
},
)
|
apache-2.0
| -948,853,856,165,309,600 | 32.293051 | 118 | 0.522505 | false |
sherpya/archiver
|
setup_all.py
|
1
|
2040
|
#!/usr/bin/env python
# -*- Mode: Python; tab-width: 4 -*-
#
# Netfarm Mail Archiver - release 2
#
# Copyright (C) 2005-2007 Gianluigi Tiesi <sherpya@netfarm.it>
# Copyright (C) 2005-2007 NetFarm S.r.l. [http://www.netfarm.it]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# ======================================================================
## @file setup_all.py
## Netfarm Mail Archiver [py2exe]
import sys, os, glob
sys.path.append('.')
try:
import modulefinder
except ImportError:
pass
from distutils.core import setup
import py2exe
backends = [ 'backend_filesystem', 'backend_pgsql', 'backend_xmlrpc', 'backend_vfsimage' ]
psycopg = [ 'psycopg', 'mx.DateTime']
deps = backends + psycopg + ['lmtp'] + ['dbhash']
py2exe_options = dict(
    excludes = [],
    optimize = 2,
    compressed = 1,
    includes = deps
)
nma = dict(
company_name = 'Netfarm S.r.l.',
copyright = 'Copyright (C) 2007 Gianluigi Tiesi',
comments = 'Netfarm Mail Archiver',
icon_resources = [(1, 'nma.ico')],
modules = ['archiver_svc']
)
archiver = dict(
company_name = 'Netfarm S.r.l.',
copyright = 'Copyright (C) 2007 Gianluigi Tiesi',
comments = 'Netfarm Mail Archiver',
icon_resources = [(1, 'nma.ico')],
script = 'archiver.py'
)
if len(sys.argv)==1 or \
(len(sys.argv)==2 and sys.argv[1] in ['-q', '-n']):
sys.argv.append('py2exe')
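# With that default, plain `python setup_all.py` (optionally with -q or -n)
# behaves as if `py2exe` had been passed on the command line.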
setup(name = 'nma.py',
version = '2.1.0',
description = 'Netfarm Mail Archiver',
service = [nma],
console = [archiver],
options = {'py2exe' : py2exe_options},
zipfile = 'nma.zip',
)
|
gpl-2.0
| -6,462,490,321,913,439,000 | 27.732394 | 93 | 0.633333 | false |
ragupta-git/ImcSdk
|
imcsdk/mometa/storage/StorageLocalDiskEp.py
|
1
|
1913
|
"""This module contains the general information for StorageLocalDiskEp ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class StorageLocalDiskEpConsts:
pass
class StorageLocalDiskEp(ManagedObject):
"""This is StorageLocalDiskEp class."""
consts = StorageLocalDiskEpConsts()
naming_props = set([u'id'])
mo_meta = {
"modular": MoMeta("StorageLocalDiskEp", "storageLocalDiskEp", "diskRef-[id]", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'storageController'], [u'faultInst'], ["Get"])
}
prop_meta = {
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"health": MoPropertyMeta("health", "health", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"modular": {
"dn": "dn",
"health": "health",
"id": "id",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.health = None
self.status = None
ManagedObject.__init__(self, "StorageLocalDiskEp", parent_mo_or_dn, **kwargs)
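# A minimal construction sketch (hypothetical; per mo_meta the parent is a
# storageController MO and access is Get-only, so instances are normally
# retrieved from the IMC rather than created locally):
#
#     disk_ep = StorageLocalDiskEp(parent_mo_or_dn=controller_mo, id='1')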
|
apache-2.0
| 8,475,116,203,333,382,000 | 34.425926 | 215 | 0.592263 | false |
joharei/plasma-netctl
|
contents/code/netctl.py
|
1
|
2541
|
# -*- coding: utf-8 -*-
#
# Author: Johan Reitan <johan.reitan@gmail.com>
# Date: Sat Jan 18 2014, 17:03:40
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation; either version 2, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU Library General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Import essential modules
from PyKDE4.kdeui import KIconLoader, KIcon
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
#from __future__ import with_statement
import os
import subprocess
import re
from widget import Netctl
TIMEOUT = 5
class NetctlApplet(plasmascript.Applet):
# Constructor, forward initialization to its superclass
# Note: try to NOT modify this constructor; all the setup code
# should be placed in the init method.
def __init__(self, parent, args=None):
plasmascript.Applet.__init__(self, parent)
# init method
# Put here all the code needed to initialize our plasmoid
def init(self):
plasmascript.Applet.init(self)
# self.applet.setPopupIcon("network-wireless-0")
self.setHasConfigurationInterface(False)
self.setAspectRatioMode(Plasma.Square)
self.theme = Plasma.Svg(self)
# self.theme.setImagePath("widgets/background")
self.setBackgroundHints(Plasma.Applet.StandardBackground)
# self.layout = QGraphicsLinearLayout(Qt.Horizontal, self.applet)
# label = Plasma.Label(self.applet)
# label.setText(str(wpa_status()))
# self.layout.addItem(label)
# self.applet.setLayout(self.layout)
# self.resize(125, 125)
self.widget = Netctl(self)
self.widget.init()
self.setGraphicsWidget(self.widget)
self.applet.setPassivePopup(True)
self.setPopupIcon(KIcon("network-wireless-0"))
# self.update_text()
# self.updateIcon()
def CreateApplet(parent):
return NetctlApplet(parent)
|
gpl-3.0
| 5,458,483,675,457,180,000 | 32 | 73 | 0.706415 | false |
rudhir-upretee/Sumo17_With_Netsim
|
tools/net/batch0103to0110.py
|
1
|
1060
|
#!/usr/bin/python
"""
@file batch0103to0110.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007
@version $Id: batch0103to0110.py 13811 2013-05-01 20:31:43Z behrisch $
Applies the transformation on all nets in the given folder or
- if no folder is given - in the base folder (../..).
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, os.path, sys
r = "../../"
if len(sys.argv)>1:
r = sys.argv[1]
srcRoot = os.path.join(os.path.dirname(sys.argv[0]), r)
for root, dirs, files in os.walk(srcRoot):
for name in files:
if name.endswith(".net.xml") or name=="net.netconvert" or name=="net.netgen":
p = os.path.join(root, name)
print "Patching " + p + "..."
os.system("0103to0110.py " + p)
os.remove(p)
os.rename(p+".chg", p)
for ignoreDir in ['.svn', 'foreign']:
if ignoreDir in dirs:
dirs.remove(ignoreDir)
|
gpl-3.0
| -8,821,300,692,409,846,000 | 31.121212 | 85 | 0.616981 | false |