repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
mydongistiny/external_chromium_org
third_party/tlslite/tlslite/utils/asn1parser.py
206
1191
# Author: Trevor Perrin # Patch from Google adding getChildBytes() # # See the LICENSE file for legal information regarding use of this file. """Class for parsing ASN.1""" from .compat import * from .codec import * #Takes a byte array which has a DER TLV field at its head class ASN1Parser(object): def __init__(self, bytes): p = Parser(bytes) p.get(1) #skip Type #Get Length self.length = self._getASN1Length(p) #Get Value self.value = p.getFixBytes(self.length) #Assuming this is a sequence... def getChild(self, which): return ASN1Parser(self.getChildBytes(which)) def getChildBytes(self, which): p = Parser(self.value) for x in range(which+1): markIndex = p.index p.get(1) #skip Type length = self._getASN1Length(p) p.getFixBytes(length) return p.bytes[markIndex : p.index] #Decode the ASN.1 DER length field def _getASN1Length(self, p): firstLength = p.get(1) if firstLength<=127: return firstLength else: lengthLength = firstLength & 0x7F return p.get(lengthLength)
bsd-3-clause
vainglori0us/supreme-enigma
node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/manni.py
364
2374
# -*- coding: utf-8 -*- """ pygments.styles.manni ~~~~~~~~~~~~~~~~~~~~~ A colorful style, inspired by the terminal highlighting style. This is a port of the style used in the `php port`_ of pygments by Manni. The style is called 'default' there. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace class ManniStyle(Style): """ A colorful style, inspired by the terminal highlighting style. """ background_color = '#f0f3f3' styles = { Whitespace: '#bbbbbb', Comment: 'italic #0099FF', Comment.Preproc: 'noitalic #009999', Comment.Special: 'bold', Keyword: 'bold #006699', Keyword.Pseudo: 'nobold', Keyword.Type: '#007788', Operator: '#555555', Operator.Word: 'bold #000000', Name.Builtin: '#336666', Name.Function: '#CC00FF', Name.Class: 'bold #00AA88', Name.Namespace: 'bold #00CCFF', Name.Exception: 'bold #CC0000', Name.Variable: '#003333', Name.Constant: '#336600', Name.Label: '#9999FF', Name.Entity: 'bold #999999', Name.Attribute: '#330099', Name.Tag: 'bold #330099', Name.Decorator: '#9999FF', String: '#CC3300', String.Doc: 'italic', String.Interpol: '#AA0000', String.Escape: 'bold #CC3300', String.Regex: '#33AAAA', String.Symbol: '#FFCC33', String.Other: '#CC3300', Number: '#FF6600', Generic.Heading: 'bold #003300', Generic.Subheading: 'bold #003300', Generic.Deleted: 'border:#CC0000 bg:#FFCCCC', Generic.Inserted: 'border:#00CC00 bg:#CCFFCC', Generic.Error: '#FF0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: 'bold #000099', Generic.Output: '#AAAAAA', Generic.Traceback: '#99CC66', Error: 'bg:#FFAAAA #AA0000' }
mit
google/active-qa
px/nmt/train_combined.py
1
21724
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """For training NMT models in one joined graph for train/infer/eval.""" from __future__ import print_function import math import os import random import time import tensorflow as tf from third_party.nmt.utils import misc_utils as utils from px.nmt import attention_model from px.nmt import gnmt_model from px.nmt import inference from px.nmt import model as nmt_model from px.nmt import model_helper from px.nmt.utils import nmt_utils utils.check_tensorflow_version() __all__ = [ "run_sample_decode", "run_internal_eval", "run_external_eval", "run_full_eval", "init_stats", "update_stats", "print_step_info", "process_stats", "train" ] def run_sample_decode(infer_model, infer_sess, hparams, summary_writer, src_data, ctx_data, tgt_data, annot_data): """Sample decode a random sentence from src_data.""" with infer_model.graph.as_default(): global_step = model_helper.get_global_step(infer_model.model, infer_sess) _sample_decode(infer_model.model, global_step, infer_sess, hparams, infer_model.iterator, src_data, ctx_data, tgt_data, annot_data, infer_model.src_placeholder, infer_model.ctx_placeholder, infer_model.annot_placeholder, infer_model.batch_size_placeholder, summary_writer) def run_internal_eval(eval_model, sess, hparams, summary_writer, use_test_set=True): """Compute internal evaluation (perplexity) for both dev / test.""" utils.print_out( "Computing internal evaluation (perplexity) for both dev / test.") with eval_model.graph.as_default(): global_step = model_helper.get_global_step(eval_model.model, sess) dev_src_file = "%s.%s" % (hparams.dev_prefix, hparams.src) dev_tgt_file = "%s.%s" % (hparams.dev_prefix, hparams.tgt) dev_ctx_file = None if hparams.ctx is not None: dev_ctx_file = "%s.%s" % (hparams.dev_prefix, hparams.ctx) dev_iterator_feed_dict = { eval_model.src_file_placeholder: dev_src_file, eval_model.tgt_file_placeholder: dev_tgt_file, } if dev_ctx_file is not None: dev_iterator_feed_dict[eval_model.ctx_file_placeholder] = dev_ctx_file if hparams.dev_annotations is not None: dev_iterator_feed_dict[ eval_model.annot_file_placeholder] = hparams.dev_annotations dev_ppl = _internal_eval(hparams, eval_model.model, global_step, sess, eval_model.iterator, dev_iterator_feed_dict, summary_writer, "dev") test_ppl = None if use_test_set and hparams.test_prefix: test_src_file = "%s.%s" % (hparams.test_prefix, hparams.src) test_tgt_file = "%s.%s" % (hparams.test_prefix, hparams.tgt) test_ctx_file = None if hparams.ctx is not None: test_ctx_file = "%s.%s" % (hparams.test_prefix, hparams.ctx) test_iterator_feed_dict = { eval_model.src_file_placeholder: test_src_file, eval_model.tgt_file_placeholder: test_tgt_file, } if test_ctx_file is not None: test_iterator_feed_dict[eval_model.ctx_file_placeholder] = test_ctx_file if hparams.test_annotations is not None: test_iterator_feed_dict[ eval_model.annot_file_placeholder] = hparams.test_annotations test_ppl = _internal_eval(hparams, eval_model.model, global_step, sess, eval_model.iterator, test_iterator_feed_dict, summary_writer, "test") return dev_ppl, test_ppl def run_external_eval(infer_model, sess, hparams, summary_writer, save_best_dev=True, use_test_set=True): """Compute external evaluation (bleu, rouge, etc.) for both dev / test.""" with infer_model.graph.as_default(): global_step = model_helper.get_global_step(infer_model.model, sess) dev_src_file = "%s.%s" % (hparams.dev_prefix, hparams.src) dev_tgt_file = "%s.%s" % (hparams.dev_prefix, hparams.tgt) dev_ctx_file = None if hparams.ctx is not None: dev_ctx_file = "%s.%s" % (hparams.dev_prefix, hparams.ctx) dev_iterator_feed_dict = { infer_model.src_placeholder: inference.load_data(dev_src_file), infer_model.batch_size_placeholder: hparams.infer_batch_size } if dev_ctx_file is not None: dev_iterator_feed_dict[infer_model.ctx_placeholder] = inference.load_data( dev_ctx_file) if hparams.dev_annotations is not None: dev_iterator_feed_dict[infer_model.annot_placeholder] = inference.load_data( hparams.dev_annotations) dev_scores = _external_eval( infer_model.model, global_step, sess, hparams, infer_model.iterator, dev_iterator_feed_dict, dev_tgt_file, "dev", summary_writer, save_on_best=save_best_dev) test_scores = None if use_test_set and hparams.test_prefix: test_src_file = "%s.%s" % (hparams.test_prefix, hparams.src) test_tgt_file = "%s.%s" % (hparams.test_prefix, hparams.tgt) test_ctx_file = None if hparams.ctx is not None: test_ctx_file = "%s.%s" % (hparams.test_prefix, hparams.ctx) test_iterator_feed_dict = { infer_model.src_placeholder: inference.load_data(test_src_file), infer_model.batch_size_placeholder: hparams.infer_batch_size } if test_ctx_file is not None: test_iterator_feed_dict[ infer_model.ctx_placeholder] = inference.load_data(test_ctx_file) if hparams.test_annotations is not None: test_iterator_feed_dict[ infer_model.annot_file_placeholder] = inference.load_data( hparams.test_annotations) test_scores = _external_eval( infer_model.model, global_step, sess, hparams, infer_model.iterator, test_iterator_feed_dict, test_tgt_file, "test", summary_writer, save_on_best=False) return dev_scores, test_scores, global_step def run_full_eval(infer_model, infer_sess, eval_model, eval_sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data): """Wrapper for running sample_decode, internal_eval and external_eval.""" run_sample_decode(infer_model, infer_sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data) dev_ppl = None test_ppl = None # only evaluate perplexity when using supervised learning if not hparams.use_rl: dev_ppl, test_ppl = run_internal_eval(eval_model, eval_sess, hparams, summary_writer) dev_scores, test_scores, global_step = run_external_eval( infer_model, infer_sess, hparams, summary_writer) metrics = { "dev_ppl": dev_ppl, "test_ppl": test_ppl, "dev_scores": dev_scores, "test_scores": test_scores, } result_summary = _format_results("dev", dev_ppl, dev_scores, hparams.metrics) if hparams.test_prefix: result_summary += ", " + _format_results("test", test_ppl, test_scores, hparams.metrics) return result_summary, global_step, metrics def init_stats(): """Initialize statistics that we want to accumulate.""" return { "step_time": 0.0, "loss": 0.0, "predict_count": 0.0, "total_count": 0.0, "grad_norm": 0.0 } def update_stats(stats, start_time, step_result): """Update stats: write summary and accumulate statistics.""" (_, step_loss, step_reward, step_predict_count, step_summary, global_step, step_word_count, batch_size, grad_norm, learning_rate) = step_result # Update statistics stats["step_time"] += (time.time() - start_time) stats["loss"] += (step_loss * batch_size) stats["predict_count"] += step_predict_count stats["total_count"] += float(step_word_count) stats["grad_norm"] += grad_norm return global_step, learning_rate, step_summary def print_step_info(prefix, global_step, info, result_summary, log_f): """Print all info at the current global step.""" utils.print_out( "%sstep %d lr %g step-time %.2fs wps %.2fK ppl %.2f gN %.2f %s, %s" % (prefix, global_step, info["learning_rate"], info["avg_step_time"], info["speed"], info["train_ppl"], info["avg_grad_norm"], result_summary, time.ctime()), log_f) def process_stats(stats, info, global_step, steps_per_stats, log_f): """Update info and check for overflow.""" # Update info info["avg_step_time"] = stats["step_time"] / steps_per_stats info["avg_grad_norm"] = stats["grad_norm"] / steps_per_stats info["train_ppl"] = utils.safe_exp(stats["loss"] / stats["predict_count"]) info["speed"] = stats["total_count"] / (1000 * stats["step_time"]) # Check for overflow is_overflow = False train_ppl = info["train_ppl"] if math.isnan(train_ppl) or math.isinf(train_ppl) or train_ppl > 1e20: utils.print_out(" step %d overflow, stop early" % global_step, log_f) is_overflow = True return is_overflow def before_train(loaded_train_model, train_model, train_sess, global_step, hparams, log_f): """Misc tasks to do before training.""" stats = init_stats() info = { "train_ppl": 0.0, "speed": 0.0, "avg_step_time": 0.0, "avg_grad_norm": 0.0, "learning_rate": loaded_train_model.learning_rate.eval(session=train_sess) } start_train_time = time.time() utils.print_out( "# Start step %d, lr %g, %s" % (global_step, info["learning_rate"], time.ctime()), log_f) # Initialize all of the iterators skip_count = hparams.batch_size * hparams.epoch_step utils.print_out("# Init train iterator, skipping %d elements" % skip_count) train_sess.run( train_model.iterator.initializer, feed_dict={train_model.skip_count_placeholder: skip_count}) return stats, info, start_train_time def train(hparams, scope=None, target_session=""): """Train a translation model.""" log_device_placement = hparams.log_device_placement out_dir = hparams.out_dir num_train_steps = hparams.num_train_steps steps_per_stats = hparams.steps_per_stats steps_per_external_eval = hparams.steps_per_external_eval steps_per_eval = 10 * steps_per_stats if not steps_per_external_eval: steps_per_external_eval = 5 * steps_per_eval if not hparams.attention: model_creator = nmt_model.Model else: # Attention if (hparams.encoder_type == "gnmt" or hparams.attention_architecture in ["gnmt", "gnmt_v2"]): model_creator = gnmt_model.GNMTModel elif hparams.attention_architecture == "standard": model_creator = attention_model.AttentionModel else: raise ValueError( "Unknown attention architecture %s" % hparams.attention_architecture) combined_graph = tf.Graph() train_model = model_helper.create_train_model( model_creator, hparams, scope, graph=combined_graph) eval_model = model_helper.create_eval_model( model_creator, hparams, scope, graph=combined_graph) infer_model = model_helper.create_infer_model( model_creator, hparams, scope, graph=combined_graph) # Preload data for sample decoding. dev_src_file = "%s.%s" % (hparams.dev_prefix, hparams.src) dev_tgt_file = "%s.%s" % (hparams.dev_prefix, hparams.tgt) dev_ctx_file = None if hparams.ctx is not None: dev_ctx_file = "%s.%s" % (hparams.dev_prefix, hparams.ctx) sample_src_data = inference.load_data(dev_src_file) sample_tgt_data = inference.load_data(dev_tgt_file) sample_ctx_data = None if dev_ctx_file is not None: sample_ctx_data = inference.load_data(dev_ctx_file) sample_annot_data = None if hparams.dev_annotations is not None: sample_annot_data = inference.load_data(hparams.dev_annotations) summary_name = "train_log" model_dir = hparams.out_dir # Log and output files log_file = os.path.join(out_dir, "log_%d" % time.time()) log_f = tf.gfile.GFile(log_file, mode="a") utils.print_out("# log_file=%s" % log_file, log_f) # TensorFlow model config_proto = utils.get_config_proto( log_device_placement=log_device_placement, num_intra_threads=hparams.num_intra_threads, num_inter_threads=hparams.num_inter_threads) sess = tf.Session( target=target_session, config=config_proto, graph=combined_graph) with train_model.graph.as_default(): sess.run(tf.global_variables_initializer()) sess.run(tf.tables_initializer()) loaded_train_model, global_step = model_helper.create_or_load_model( train_model.model, model_dir, sess, "train") # Summary writer summary_writer = tf.summary.FileWriter( os.path.join(out_dir, summary_name), train_model.graph) # First evaluation run_full_eval(infer_model, sess, eval_model, sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data) last_stats_step = global_step last_eval_step = global_step last_external_eval_step = global_step # This is the training loop. stats, info, start_train_time = before_train( loaded_train_model, train_model, sess, global_step, hparams, log_f) while global_step < num_train_steps: ### Run a step ### start_time = time.time() try: step_result = loaded_train_model.train(sess) hparams.epoch_step += 1 except tf.errors.OutOfRangeError: # Finished going through the training dataset. Go to next epoch. hparams.epoch_step = 0 utils.print_out( "# Finished an epoch, step %d. Perform external evaluation" % global_step) run_sample_decode(infer_model, sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data) run_external_eval(infer_model, sess, hparams, summary_writer) sess.run( train_model.iterator.initializer, feed_dict={train_model.skip_count_placeholder: 0}) continue # Process step_result, accumulate stats, and write summary global_step, info["learning_rate"], step_summary = update_stats( stats, start_time, step_result) summary_writer.add_summary(step_summary, global_step) # Once in a while, we print statistics. if global_step - last_stats_step >= steps_per_stats: last_stats_step = global_step is_overflow = process_stats(stats, info, global_step, steps_per_stats, log_f) print_step_info(" ", global_step, info, _get_best_results(hparams), log_f) if is_overflow: break # Reset statistics stats = init_stats() if global_step - last_eval_step >= steps_per_eval: last_eval_step = global_step utils.print_out("# Save eval, global step %d" % global_step) utils.add_summary(summary_writer, global_step, "train_ppl", info["train_ppl"]) # Save checkpoint loaded_train_model.saver.save( sess, os.path.join(out_dir, "translate.ckpt"), global_step=global_step) # Evaluate on dev/test run_sample_decode(infer_model, sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data) dev_ppl, test_ppl = None, None # only evaluate perplexity when supervised learning if not hparams.use_rl: dev_ppl, test_ppl = run_internal_eval(eval_model, sess, hparams, summary_writer) if global_step - last_external_eval_step >= steps_per_external_eval: last_external_eval_step = global_step # Save checkpoint loaded_train_model.saver.save( sess, os.path.join(out_dir, "translate.ckpt"), global_step=global_step) run_sample_decode(infer_model, sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data) run_external_eval(infer_model, sess, hparams, summary_writer) # Done training loaded_train_model.saver.save( sess, os.path.join(out_dir, "translate.ckpt"), global_step=global_step) (result_summary, _, final_eval_metrics) = ( run_full_eval(infer_model, sess, eval_model, sess, hparams, summary_writer, sample_src_data, sample_ctx_data, sample_tgt_data, sample_annot_data)) print_step_info("# Final, ", global_step, info, result_summary, log_f) utils.print_time("# Done training!", start_train_time) summary_writer.close() return final_eval_metrics, global_step def _format_results(name, ppl, scores, metrics): """Format results.""" result_str = "" if ppl: result_str = "%s ppl %.2f" % (name, ppl) if scores: for metric in metrics: result_str += ", %s %s %.1f" % (name, metric, scores[metric]) return result_str def _get_best_results(hparams): """Summary of the current best results.""" tokens = [] for metric in hparams.metrics: tokens.append("%s %.2f" % (metric, getattr(hparams, "best_" + metric))) return ", ".join(tokens) def _internal_eval(hparams, model, global_step, sess, iterator, iterator_feed_dict, summary_writer, label): """Computing perplexity.""" utils.print_out( "# Internal evaluation (perplexity), global step %d" % global_step) sess.run(iterator.initializer, feed_dict=iterator_feed_dict) ppl = model_helper.compute_perplexity(hparams, model, sess, label) utils.add_summary(summary_writer, global_step, "%s_ppl" % label, ppl) return ppl def _sample_decode(model, global_step, sess, hparams, iterator, src_data, ctx_data, tgt_data, annot_data, iterator_src_placeholder, iterator_ctx_placeholder, iterator_annot_placeholder, iterator_batch_size_placeholder, summary_writer): """Pick a sentence and decode.""" if hparams.sample_id >= 0: assert hparams.sample_id < len(src_data), "sample_id too large" decode_id = hparams.sample_id else: decode_id = random.randint(0, len(src_data) - 1) utils.print_out(" # %d" % decode_id) iterator_feed_dict = { iterator_src_placeholder: [src_data[decode_id]], iterator_batch_size_placeholder: 1 } if ctx_data is not None: iterator_feed_dict[iterator_ctx_placeholder] = [ctx_data[decode_id]] if annot_data is not None: iterator_feed_dict[iterator_annot_placeholder] = [annot_data[decode_id]] sess.run(iterator.initializer, feed_dict=iterator_feed_dict) _, attention_summary, _, nmt_outputs, rewards = model.infer(sess) if hparams.beam_width > 0: # get the top translation. nmt_outputs = nmt_outputs[0] translation = nmt_utils.get_translation( nmt_outputs, sent_id=0, tgt_eos=hparams.eos, subword_option=hparams.subword_option) utils.print_out("Running a sample decode") utils.print_out(" src: %s" % src_data[decode_id]) if ctx_data is not None: utils.print_out(" ctx: %s" % ctx_data[decode_id]) if annot_data is not None: utils.print_out(" annot: %s" % annot_data[decode_id]) utils.print_out(" ref: %s" % tgt_data[decode_id]) utils.print_out(" nmt: %s" % translation) if hparams.use_rl: utils.print_out(" reward: %s" % rewards[0]) # Summary if attention_summary is not None: summary_writer.add_summary(attention_summary, global_step) def _external_eval(model, global_step, sess, hparams, iterator, iterator_feed_dict, tgt_file, label, summary_writer, save_on_best): """External evaluation such as BLEU and ROUGE scores.""" out_dir = hparams.out_dir utils.print_out("# External evaluation, global step %d" % global_step) sess.run(iterator.initializer, feed_dict=iterator_feed_dict) output = os.path.join(out_dir, "output_%s" % label) scores = nmt_utils.decode_and_evaluate( label, model, sess, output, ref_file=tgt_file, metrics=hparams.metrics, subword_option=hparams.subword_option, beam_width=hparams.beam_width, tgt_eos=hparams.eos, hparams=hparams, decode=True) # Save on best metrics if global_step > 0: for metric in hparams.metrics: best_metric_label = "best_" + metric utils.add_summary(summary_writer, global_step, "%s_%s" % (label, metric), scores[metric]) # metric: larger is better if save_on_best and scores[metric] > getattr(hparams, best_metric_label): setattr(hparams, best_metric_label, scores[metric]) model.saver.save( sess, os.path.join( getattr(hparams, "best_" + metric + "_dir"), "translate.ckpt"), global_step=model.global_step) utils.save_hparams(out_dir, hparams) return scores
apache-2.0
GRArmstrong/invenio-inspire-ops
modules/websubmit/lib/websubmitadmin_regression_tests.py
5
11942
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebSubmit Admin Regression Test Suite.""" __revision__ = "$Id$" import unittest from invenio.config import CFG_SITE_URL from invenio.testutils import make_test_suite, run_test_suite, \ test_web_page_content, merge_error_messages from invenio.websubmitadmincli import \ dump_submission, \ load_submission, \ remove_submission, \ diff_submission from invenio.dbquery import run_sql class WebSubmitAdminWebPagesAvailabilityTest(unittest.TestCase): """Check WebSubmit Admin web pages whether they are up or not.""" def test_websubmit_admin_interface_pages_availability(self): """websubmitadmin - availability of WebSubmit Admin interface pages""" baseurl = CFG_SITE_URL + '/admin/websubmit/websubmitadmin.py/' _exports = ['', 'showall', 'doctypelist', 'doctypeadd', 'doctyperemove', 'actionlist', 'jschecklist', 'elementlist', 'functionlist'] error_messages = [] for url in [baseurl + page for page in _exports]: # first try as guest: error_messages.extend(test_web_page_content(url, username='guest', expected_text= 'Authorization failure')) # then try as admin: error_messages.extend(test_web_page_content(url, username='admin')) if error_messages: self.fail(merge_error_messages(error_messages)) return def test_websubmit_admin_guide_availability(self): """websubmitadmin - availability of WebSubmit Admin guide pages""" url = CFG_SITE_URL + '/help/admin/websubmit-admin-guide' error_messages = test_web_page_content(url, expected_text="WebSubmit Admin Guide") if error_messages: self.fail(merge_error_messages(error_messages)) return class WebSubmitAdminCLITest(unittest.TestCase): """Test WebSubmit Admin CLI.""" dummy_submission_dump_1 = r""" DELETE FROM sbmFUNDESC WHERE function LIKE 'DUMMY1%'; DELETE FROM sbmFIELD WHERE subname LIKE '%DUMMY1'; DELETE FROM sbmFIELDDESC WHERE name LIKE 'DUMMY1%'; DELETE FROM sbmALLFUNCDESCR WHERE function LIKE 'DUMMY1%'; DELETE FROM sbmDOCTYPE WHERE sdocname='DUMMY1'; DELETE FROM sbmCATEGORIES WHERE doctype ='DUMMY1'; DELETE FROM sbmFUNCTIONS WHERE doctype='DUMMY1'; DELETE FROM sbmIMPLEMENT WHERE docname='DUMMY1'; DELETE FROM sbmPARAMETERS WHERE doctype='DUMMY1'; INSERT INTO sbmCATEGORIES VALUES ('DUMMY1','ARTICLE','Article',1); INSERT INTO sbmDOCTYPE VALUES ('Dummy test submission 1','DUMMY1','2008-03-06','2008-03-06','Dummy submission 1 for tests'); INSERT INTO sbmFIELD VALUES ('SBIDUMMY1',1,1,'DUMMY1_AU','<br /><br /><table width=\"100%\"><tr><td valign=\"top\"><span style=\"color: red;\">*</span>Author of the Document: <i>(one per line)</i><br />','M','Author(s)','','2008-03-07','2008-03-07',NULL,NULL); INSERT INTO sbmFIELD VALUES ('SBIDUMMY1',1,2,'DUMMY1_ABS','</td></tr></table><br /><span style=\"color: red;\">*</span>Abstract:<br />','M','Abstract','','2008-03-07','2008-03-07',NULL,NULL); INSERT INTO sbmFIELDDESC VALUES ('DUMMY1_ABS',NULL,'520__a','T',NULL,12,80,NULL,NULL,NULL,'2008-03-07','2008-03-07','<br />Abstract:<br />',NULL,0); INSERT INTO sbmFIELDDESC VALUES ('DUMMY1_AU',NULL,'100__a','T',NULL,6,60,NULL,NULL,NULL,'2008-03-07','2008-03-07','<br />Authors: <i>(one per line)</i><br />',NULL,0); INSERT INTO sbmFUNCTIONS VALUES ('SBI','DUMMY1','Create_Recid',10,1); INSERT INTO sbmFUNCTIONS VALUES ('SBI','DUMMY1','Insert_Record',40,1); INSERT INTO sbmFUNCTIONS VALUES ('SBI','DUMMY1','Mail_Submitter',60,1); INSERT INTO sbmFUNCTIONS VALUES ('SBI','DUMMY1','Make_Record',30,1); INSERT INTO sbmFUNCTIONS VALUES ('SBI','DUMMY1','Print_Success',50,1); INSERT INTO sbmFUNCTIONS VALUES ('SBI','DUMMY1','Report_Number_Generation',20,1); INSERT INTO sbmIMPLEMENT VALUES ('DUMMY1','SBI','Y','SBIDUMMY1',1,'2008-03-06','2008-03-07',1,'','',0,0,''); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','authorfile','DUMMY1_AU'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','autorngen','Y'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','counterpath','lastid_DUMMY1_<PA>categ</PA>_<PA>yy</PA>'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','createTemplate','DUMMY1create.tpl'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','documenttype','fulltext'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','edsrn','DUMMY1_RN'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','emailFile','SuE'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','fieldnameMBI','DUMMY1_CHANGE'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','iconsize','180'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','modifyTemplate','DUMMY1modify.tpl'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','newrnin',''); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','paths_and_suffixes','{\"DUMMY1_FILE\":\"\"}'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','rename','<PA>file:DUMMY1_RN</PA>'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','rnformat','DEMO-<PA>categ</PA>-<PA>yy</PA>'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','rnin','comboDUMMY1'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','sourceDoc','Textual Document'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','sourceTemplate','DUMMY1.tpl'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','status','ADDED'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','titleFile','DUMMY1_TITLE'); INSERT INTO sbmPARAMETERS VALUES ('DUMMY1','yeargen','AUTO');""" def test_load_submission(self): """websubmitadmin - test loading submission dump""" load_submission('DUMMY1', self.dummy_submission_dump_1, method="NAMES") dumped_submission = dump_submission("DUMMY1", 'NAMES', True) dummy_submission_dump_1_and_header = '-- Extra:NAMES (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables which are not specific to this submission, but that include keyword DUMMY1)' + \ self.dummy_submission_dump_1 diffed_submission = diff_submission(dumped_submission, dummy_submission_dump_1_and_header, verbose=2, ignore_dates=False, ignore_positions=False, ignore_pages=False) # Only the header should differ self.assertEqual('\n'.join(diffed_submission.splitlines()[1:]), "") def test_diff_submission(self): """websubmitadmin - test diffing submissions """ insert_submission_dump(self.dummy_submission_dump_1) dumped_submission = dump_submission("DUMMY1", 'NAMES', True) dummy_submission_dump_1_and_header = '-- Extra:NAMES (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables which are not specific to this submission, but that include keyword DUMMY1)' + \ self.dummy_submission_dump_1 diffed_submission = diff_submission(dumped_submission, dummy_submission_dump_1_and_header, verbose=2, ignore_dates=False, ignore_positions=False, ignore_pages=False) # Only the header should differ self.assertEqual('\n'.join(diffed_submission.splitlines()[1:]), "") def test_dump_submission_method_names(self): """websubmitadmin - test dumping submissions with --method=NAMES""" insert_submission_dump(self.dummy_submission_dump_1) dumped_submission = dump_submission("DUMMY1", 'NAMES', True) self.assert_(dumped_submission.startswith('-- DUMMY1 dump ')) # Submission will contain date/timestamps that we don't want to consider dumped_submission = '\n'.join(dumped_submission.splitlines()[1:]) expected_dump = """-- Extra:NAMES (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables which are not specific to this submission, but that include keyword DUMMY1) """ + self.dummy_submission_dump_1 self.assertEqual(dumped_submission, expected_dump) # Dump without delete statements dumped_submission = dump_submission("DUMMY1", 'NAMES', False) self.assert_("DELETE" not in dumped_submission.upper()) # Dump without "IGNORE" for duplicate insertions dumped_submission = dump_submission("DUMMY1", 'NAMES', False, True) self.assert_("IGNORE" in dumped_submission.upper()) # Test dumping demo submission dumped_submission = dump_submission("DEMOART", 'NAMES', True) self.assert_(dumped_submission.startswith('-- DEMOART dump ')) def test_dump_submission_method_relations(self): """websubmitadmin - test dumping submissions with --method=RELATIONS""" insert_submission_dump(self.dummy_submission_dump_1) dumped_submission = dump_submission("DUMMY1", 'RELATIONS', True) self.assert_(dumped_submission.startswith('-- DUMMY1 dump ')) # Submission will contain date/timestamps that we don't want to consider dumped_submission = '\n'.join(dumped_submission.splitlines()[1:]) self.assert_(dumped_submission.startswith("-- Extra:RELATIONS (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables that are not specific to doctype DUMMY1")) def test_remove_submission(self): """websubmitadmin - test removing submissions """ insert_submission_dump(self.dummy_submission_dump_1) remove_submission(doctype="DUMMY1", method="NAMES") self.assert_(len(run_sql('SELECT * FROM sbmDOCTYPE WHERE sdocname="DUMMY1"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmCATEGORIES WHERE doctype="DUMMY1"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmFUNCTIONS WHERE doctype="DUMMY1"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmIMPLEMENT WHERE docname="DUMMY1"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmPARAMETERS WHERE doctype="DUMMY1"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmFUNDESC WHERE function LIKE "DUMMY1%"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmFIELD WHERE subname LIKE "DUMMY1%"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmFIELDDESC WHERE name LIKE "DUMMY1%"')) == 0) self.assert_(len(run_sql('SELECT * FROM sbmALLFUNCDESCR WHERE function LIKE "DUMMY1%"')) == 0) def tearDown(self): insert_submission_dump('\n'.join([line for line in self.dummy_submission_dump_1.splitlines() if line.startswith('DELETE')])) def insert_submission_dump(dump): """Helper function to insert submisson dump via run_sql()""" for sql_statement in dump.replace(r'\"', '"').splitlines(): if sql_statement: run_sql(sql_statement) TEST_SUITE = make_test_suite(WebSubmitAdminWebPagesAvailabilityTest, WebSubmitAdminCLITest) if __name__ == "__main__": run_test_suite(TEST_SUITE, warn_user=True)
gpl-2.0
KevinOConnor/klipper
klippy/extras/thermistor.py
1
5754
# Temperature measurements with thermistors # # Copyright (C) 2016-2019 Kevin O'Connor <kevin@koconnor.net> # # This file may be distributed under the terms of the GNU GPLv3 license. import math, logging from . import adc_temperature KELVIN_TO_CELSIUS = -273.15 # Analog voltage to temperature converter for thermistors class Thermistor: def __init__(self, pullup, inline_resistor): self.pullup = pullup self.inline_resistor = inline_resistor self.c1 = self.c2 = self.c3 = 0. def setup_coefficients(self, t1, r1, t2, r2, t3, r3, name=""): # Calculate Steinhart-Hart coefficents from temp measurements. # Arrange samples as 3 linear equations and solve for c1, c2, and c3. inv_t1 = 1. / (t1 - KELVIN_TO_CELSIUS) inv_t2 = 1. / (t2 - KELVIN_TO_CELSIUS) inv_t3 = 1. / (t3 - KELVIN_TO_CELSIUS) ln_r1 = math.log(r1) ln_r2 = math.log(r2) ln_r3 = math.log(r3) ln3_r1, ln3_r2, ln3_r3 = ln_r1**3, ln_r2**3, ln_r3**3 inv_t12, inv_t13 = inv_t1 - inv_t2, inv_t1 - inv_t3 ln_r12, ln_r13 = ln_r1 - ln_r2, ln_r1 - ln_r3 ln3_r12, ln3_r13 = ln3_r1 - ln3_r2, ln3_r1 - ln3_r3 self.c3 = ((inv_t12 - inv_t13 * ln_r12 / ln_r13) / (ln3_r12 - ln3_r13 * ln_r12 / ln_r13)) if self.c3 <= 0.: beta = ln_r13 / inv_t13 logging.warn("Using thermistor beta %.3f in heater %s", beta, name) self.setup_coefficients_beta(t1, r1, beta) return self.c2 = (inv_t12 - self.c3 * ln3_r12) / ln_r12 self.c1 = inv_t1 - self.c2 * ln_r1 - self.c3 * ln3_r1 def setup_coefficients_beta(self, t1, r1, beta): # Calculate equivalent Steinhart-Hart coefficents from beta inv_t1 = 1. / (t1 - KELVIN_TO_CELSIUS) ln_r1 = math.log(r1) self.c3 = 0. self.c2 = 1. / beta self.c1 = inv_t1 - self.c2 * ln_r1 def calc_temp(self, adc): # Calculate temperature from adc adc = max(.00001, min(.99999, adc)) r = self.pullup * adc / (1.0 - adc) ln_r = math.log(r - self.inline_resistor) inv_t = self.c1 + self.c2 * ln_r + self.c3 * ln_r**3 return 1.0/inv_t + KELVIN_TO_CELSIUS def calc_adc(self, temp): # Calculate adc reading from a temperature if temp <= KELVIN_TO_CELSIUS: return 1. inv_t = 1. / (temp - KELVIN_TO_CELSIUS) if self.c3: # Solve for ln_r using Cardano's formula y = (self.c1 - inv_t) / (2. * self.c3) x = math.sqrt((self.c2 / (3. * self.c3))**3 + y**2) ln_r = math.pow(x - y, 1./3.) - math.pow(x + y, 1./3.) else: ln_r = (inv_t - self.c1) / self.c2 r = math.exp(ln_r) + self.inline_resistor return r / (self.pullup + r) # Create an ADC converter with a thermistor def PrinterThermistor(config, params): pullup = config.getfloat('pullup_resistor', 4700., above=0.) inline_resistor = config.getfloat('inline_resistor', 0., minval=0.) thermistor = Thermistor(pullup, inline_resistor) if 'beta' in params: thermistor.setup_coefficients_beta( params['t1'], params['r1'], params['beta']) else: thermistor.setup_coefficients( params['t1'], params['r1'], params['t2'], params['r2'], params['t3'], params['r3'], name=config.get_name()) return adc_temperature.PrinterADCtoTemperature(config, thermistor) # Custom defined thermistors from the config file class CustomThermistor: def __init__(self, config): self.name = " ".join(config.get_name().split()[1:]) t1 = config.getfloat("temperature1", minval=KELVIN_TO_CELSIUS) r1 = config.getfloat("resistance1", minval=0.) beta = config.getfloat("beta", None, above=0.) if beta is not None: self.params = {'t1': t1, 'r1': r1, 'beta': beta} return t2 = config.getfloat("temperature2", minval=KELVIN_TO_CELSIUS) r2 = config.getfloat("resistance2", minval=0.) t3 = config.getfloat("temperature3", minval=KELVIN_TO_CELSIUS) r3 = config.getfloat("resistance3", minval=0.) (t1, r1), (t2, r2), (t3, r3) = sorted([(t1, r1), (t2, r2), (t3, r3)]) self.params = {'t1': t1, 'r1': r1, 't2': t2, 'r2': r2, 't3': t3, 'r3': r3} def create(self, config): return PrinterThermistor(config, self.params) # Default sensors Sensors = { "EPCOS 100K B57560G104F": { 't1': 25., 'r1': 100000., 't2': 150., 'r2': 1641.9, 't3': 250., 'r3': 226.15 }, "ATC Semitec 104GT-2": { 't1': 20., 'r1': 126800., 't2': 150., 'r2': 1360., 't3': 300., 'r3': 80.65 }, "SliceEngineering 450": { 't1': 25., 'r1': 500000., 't2': 200., 'r2': 3734., 't3': 400., 'r3': 240. }, "TDK NTCG104LH104JT1": { 't1': 25., 'r1': 100000., 't2': 50., 'r2': 31230., 't3': 125., 'r3': 2066. }, "NTC 100K beta 3950": { 't1': 25., 'r1': 100000., 'beta': 3950. }, "Honeywell 100K 135-104LAG-J01": { 't1': 25., 'r1': 100000., 'beta': 3974.}, "NTC 100K MGB18-104F39050L32": { 't1': 25., 'r1': 100000., 'beta': 4100. }, } def load_config(config): # Register default thermistor types pheaters = config.get_printer().load_object(config, "heaters") for sensor_type, params in Sensors.items(): func = (lambda config, params=params: PrinterThermistor(config, params)) pheaters.add_sensor_factory(sensor_type, func) def load_config_prefix(config): thermistor = CustomThermistor(config) pheaters = config.get_printer().load_object(config, "heaters") pheaters.add_sensor_factory(thermistor.name, thermistor.create)
gpl-3.0
Russell-IO/ansible
lib/ansible/modules/network/nxos/nxos_pim_interface.py
16
18888
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_pim_interface extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages PIM interface configuration. description: - Manages PIM interface configuration settings. author: - Jason Edelman (@jedelman8) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - When C(state=default), supported params will be reset to a default state. These include C(dr_prio), C(hello_auth_key), C(hello_interval), C(jp_policy_out), C(jp_policy_in), C(jp_type_in), C(jp_type_out), C(border), C(neighbor_policy), C(neighbor_type). - The C(hello_auth_key) param is not idempotent. - C(hello_auth_key) only supports clear text passwords. - When C(state=absent), pim interface configuration will be set to defaults and pim-sm will be disabled on the interface. - PIM must be enabled on the device to use this module. - This module is for Layer 3 interfaces. options: interface: description: - Full name of the interface such as Ethernet1/33. required: true sparse: description: - Enable/disable sparse-mode on the interface. type: bool default: 'no' dr_prio: description: - Configures priority for PIM DR election on interface. hello_auth_key: description: - Authentication for hellos on this interface. hello_interval: description: - Hello interval in milliseconds for this interface. type: bool jp_policy_out: description: - Policy for join-prune messages (outbound). jp_policy_in: description: - Policy for join-prune messages (inbound). jp_type_out: description: - Type of policy mapped to C(jp_policy_out). choices: ['prefix', 'routemap'] jp_type_in: description: - Type of policy mapped to C(jp_policy_in). choices: ['prefix', 'routemap'] border: description: - Configures interface to be a boundary of a PIM domain. type: bool default: 'no' neighbor_policy: description: - Configures a neighbor policy for filtering adjacencies. neighbor_type: description: - Type of policy mapped to neighbor_policy. choices: ['prefix', 'routemap'] state: description: - Manages desired state of the resource. default: present choices: ['present', 'default'] ''' EXAMPLES = ''' # ensure PIM is not running on the interface - nxos_pim_interface: interface: eth1/33 state: absent # ensure the interface has pim-sm enabled with the appropriate priority and hello interval - nxos_pim_interface: interface: eth1/33 dr_prio: 10 hello_interval: 40 state: present # ensure join-prune policies exist - nxos_pim_interface: interface: eth1/33 jp_policy_in: JPIN jp_policy_out: JPOUT jp_type_in: routemap jp_type_out: routemap # ensure defaults are in place - nxos_pim_interface: interface: eth1/33 state: default ''' RETURN = ''' commands: description: command sent to the device returned: always type: list sample: ["interface eth1/33", "ip pim neighbor-policy test", "ip pim neighbor-policy test"] ''' import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.six import string_types PARAM_TO_COMMAND_KEYMAP = { 'interface': '', 'sparse': 'ip pim sparse-mode', 'dr_prio': 'ip pim dr-priority {0}', 'hello_interval': 'ip pim hello-interval {0}', 'hello_auth_key': 'ip pim hello-authentication ah-md5 {0}', 'border': 'ip pim border', 'jp_policy_out': 'ip pim jp-policy prefix-list {0} out', 'jp_policy_in': 'ip pim jp-policy prefix-list {0} in', 'jp_type_in': '', 'jp_type_out': '', 'neighbor_policy': 'ip pim neighbor-policy prefix-list {0}', 'neighbor_type': '', } PARAM_TO_DEFAULT_KEYMAP = { 'dr_prio': '1', 'hello_interval': '30000', 'sparse': False, 'border': False, 'hello_auth_key': False, } def execute_show_command(command, module, text=False): if text: cmds = [{ 'command': command, 'output': 'text' }] else: cmds = [{ 'command': command, 'output': 'json' }] return run_commands(module, cmds) def flatten_list(command_lists): flat_command_list = [] for command in command_lists: if isinstance(command, list): flat_command_list.extend(command) else: flat_command_list.append(command) return flat_command_list def local_existing(gexisting): jp_bidir = False isauth = False if gexisting: jp_bidir = gexisting.get('jp_bidir') isauth = gexisting.get('isauth') if jp_bidir and isauth: gexisting.pop('jp_bidir') gexisting.pop('isauth') return gexisting, jp_bidir, isauth def get_interface_type(interface): if interface.upper().startswith('ET'): return 'ethernet' elif interface.upper().startswith('VL'): return 'svi' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('MG'): return 'management' elif interface.upper().startswith('MA'): return 'management' elif interface.upper().startswith('PO'): return 'portchannel' else: return 'unknown' def get_interface_mode(interface, intf_type, module): mode = 'unknown' command = 'show interface {0}'.format(interface) body = execute_show_command(command, module) try: interface_table = body[0]['TABLE_interface']['ROW_interface'] except (KeyError, AttributeError, IndexError): return mode if intf_type in ['ethernet', 'portchannel']: mode = str(interface_table.get('eth_mode', 'layer3')) if mode in ['access', 'trunk']: mode = 'layer2' elif mode == 'routed': mode = 'layer3' elif intf_type in ['loopback', 'svi']: mode = 'layer3' return mode def get_pim_interface(module, interface): pim_interface = {} body = get_config(module, flags=['interface {0}'.format(interface)]) pim_interface['neighbor_type'] = None pim_interface['neighbor_policy'] = None pim_interface['jp_policy_in'] = None pim_interface['jp_policy_out'] = None pim_interface['jp_type_in'] = None pim_interface['jp_type_out'] = None pim_interface['jp_bidir'] = False pim_interface['isauth'] = False if body: all_lines = body.splitlines() for each in all_lines: if 'jp-policy' in each: policy_name = \ re.search(r'ip pim jp-policy(?: prefix-list)? (\S+)(?: \S+)?', each).group(1) if 'prefix-list' in each: ptype = 'prefix' else: ptype = 'routemap' if 'out' in each: pim_interface['jp_policy_out'] = policy_name pim_interface['jp_type_out'] = ptype elif 'in' in each: pim_interface['jp_policy_in'] = policy_name pim_interface['jp_type_in'] = ptype else: pim_interface['jp_policy_in'] = policy_name pim_interface['jp_policy_out'] = policy_name pim_interface['jp_bidir'] = True elif 'neighbor-policy' in each: pim_interface['neighbor_policy'] = \ re.search(r'ip pim neighbor-policy(?: prefix-list)? (\S+)', each).group(1) if 'prefix-list' in each: pim_interface['neighbor_type'] = 'prefix' else: pim_interface['neighbor_type'] = 'routemap' elif 'ah-md5' in each: pim_interface['isauth'] = True elif 'sparse-mode' in each: pim_interface['sparse'] = True elif 'border' in each: pim_interface['border'] = True elif 'hello-interval' in each: pim_interface['hello_interval'] = \ re.search(r'ip pim hello-interval (\d+)', body).group(1) elif 'dr-priority' in each: pim_interface['dr_prio'] = \ re.search(r'ip pim dr-priority (\d+)', body).group(1) return pim_interface def fix_delta(delta, existing): for key in list(delta): if key in ['dr_prio', 'hello_interval', 'sparse', 'border']: if delta.get(key) == PARAM_TO_DEFAULT_KEYMAP.get(key) and existing.get(key) is None: delta.pop(key) return delta def config_pim_interface(delta, existing, jp_bidir, isauth): command = None commands = [] delta = fix_delta(delta, existing) if jp_bidir: if delta.get('jp_policy_in') or delta.get('jp_policy_out'): if existing.get('jp_type_in') == 'prefix': command = 'no ip pim jp-policy prefix-list {0}'.format(existing.get('jp_policy_in')) else: command = 'no ip pim jp-policy {0}'.format(existing.get('jp_policy_in')) if command: commands.append(command) for k, v in delta.items(): if k in ['dr_prio', 'hello_interval', 'hello_auth_key', 'border', 'sparse']: if v: command = PARAM_TO_COMMAND_KEYMAP.get(k).format(v) elif k == 'hello_auth_key': if isauth: command = 'no ip pim hello-authentication ah-md5' else: command = 'no ' + PARAM_TO_COMMAND_KEYMAP.get(k).format(v) if command: commands.append(command) elif k in ['neighbor_policy', 'jp_policy_in', 'jp_policy_out', 'neighbor_type']: if k in ['neighbor_policy', 'neighbor_type']: temp = delta.get('neighbor_policy') or existing.get( 'neighbor_policy') if delta.get('neighbor_type') == 'prefix': command = PARAM_TO_COMMAND_KEYMAP.get(k).format(temp) elif delta.get('neighbor_type') == 'routemap': command = 'ip pim neighbor-policy {0}'.format(temp) elif existing.get('neighbor_type') == 'prefix': command = PARAM_TO_COMMAND_KEYMAP.get(k).format(temp) elif existing.get('neighbor_type') == 'routemap': command = 'ip pim neighbor-policy {0}'.format(temp) elif k in ['jp_policy_in', 'jp_type_in']: temp = delta.get('jp_policy_in') or existing.get( 'jp_policy_in') if delta.get('jp_type_in') == 'prefix': command = PARAM_TO_COMMAND_KEYMAP.get(k).format(temp) elif delta.get('jp_type_in') == 'routemap': command = 'ip pim jp-policy {0} in'.format(temp) elif existing.get('jp_type_in') == 'prefix': command = PARAM_TO_COMMAND_KEYMAP.get(k).format(temp) elif existing.get('jp_type_in') == 'routemap': command = 'ip pim jp-policy {0} in'.format(temp) elif k in ['jp_policy_out', 'jp_type_out']: temp = delta.get('jp_policy_out') or existing.get( 'jp_policy_out') if delta.get('jp_type_out') == 'prefix': command = PARAM_TO_COMMAND_KEYMAP.get(k).format(temp) elif delta.get('jp_type_out') == 'routemap': command = 'ip pim jp-policy {0} out'.format(temp) elif existing.get('jp_type_out') == 'prefix': command = PARAM_TO_COMMAND_KEYMAP.get(k).format(temp) elif existing.get('jp_type_out') == 'routemap': command = 'ip pim jp-policy {0} out'.format(temp) if command: commands.append(command) command = None return commands def get_pim_interface_defaults(): args = dict(dr_prio=PARAM_TO_DEFAULT_KEYMAP.get('dr_prio'), border=PARAM_TO_DEFAULT_KEYMAP.get('border'), sparse=PARAM_TO_DEFAULT_KEYMAP.get('sparse'), hello_interval=PARAM_TO_DEFAULT_KEYMAP.get('hello_interval'), hello_auth_key=PARAM_TO_DEFAULT_KEYMAP.get('hello_auth_key')) default = dict((param, value) for (param, value) in args.items() if value is not None) return default def default_pim_interface_policies(existing, jp_bidir): commands = [] if jp_bidir: if existing.get('jp_policy_in') or existing.get('jp_policy_out'): if existing.get('jp_type_in') == 'prefix': command = 'no ip pim jp-policy prefix-list {0}'.format(existing.get('jp_policy_in')) if command: commands.append(command) elif not jp_bidir: command = None for k in existing: if k == 'jp_policy_in': if existing.get('jp_policy_in'): if existing.get('jp_type_in') == 'prefix': command = 'no ip pim jp-policy prefix-list {0} in'.format( existing.get('jp_policy_in') ) else: command = 'no ip pim jp-policy {0} in'.format( existing.get('jp_policy_in') ) elif k == 'jp_policy_out': if existing.get('jp_policy_out'): if existing.get('jp_type_out') == 'prefix': command = 'no ip pim jp-policy prefix-list {0} out'.format( existing.get('jp_policy_out') ) else: command = 'no ip pim jp-policy {0} out'.format( existing.get('jp_policy_out') ) if command: commands.append(command) command = None if existing.get('neighbor_policy'): command = 'no ip pim neighbor-policy' commands.append(command) return commands def config_pim_interface_defaults(existing, jp_bidir, isauth): command = [] # returns a dict defaults = get_pim_interface_defaults() delta = dict(set(defaults.items()).difference( existing.items())) if delta: # returns a list command = config_pim_interface(delta, existing, jp_bidir, isauth) comm = default_pim_interface_policies(existing, jp_bidir) if comm: for each in comm: command.append(each) return command def main(): argument_spec = dict( interface=dict(required=True), sparse=dict(type='bool', default=False), dr_prio=dict(type='str'), hello_auth_key=dict(type='str'), hello_interval=dict(type='int'), jp_policy_out=dict(type='str'), jp_policy_in=dict(type='str'), jp_type_out=dict(choices=['prefix', 'routemap']), jp_type_in=dict(choices=['prefix', 'routemap']), border=dict(type='bool', default=False), neighbor_policy=dict(type='str'), neighbor_type=dict(choices=['prefix', 'routemap']), state=dict(choices=['present', 'absent', 'default'], default='present'), ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) results = {'changed': False, 'commands': [], 'warnings': warnings} state = module.params['state'] interface = module.params['interface'] jp_type_in = module.params['jp_type_in'] jp_type_out = module.params['jp_type_out'] jp_policy_in = module.params['jp_policy_in'] jp_policy_out = module.params['jp_policy_out'] neighbor_policy = module.params['neighbor_policy'] neighbor_type = module.params['neighbor_type'] hello_interval = module.params['hello_interval'] intf_type = get_interface_type(interface) if get_interface_mode(interface, intf_type, module) == 'layer2': module.fail_json(msg='this module only works on Layer 3 interfaces.') if jp_policy_in: if not jp_type_in: module.fail_json(msg='jp_type_in required when using jp_policy_in.') if jp_policy_out: if not jp_type_out: module.fail_json(msg='jp_type_out required when using jp_policy_out.') if neighbor_policy: if not neighbor_type: module.fail_json(msg='neighbor_type required when using neighbor_policy.') get_existing = get_pim_interface(module, interface) existing, jp_bidir, isauth = local_existing(get_existing) args = PARAM_TO_COMMAND_KEYMAP.keys() proposed = dict((k, v) for k, v in module.params.items() if v is not None and k in args) if hello_interval: proposed['hello_interval'] = str(proposed['hello_interval'] * 1000) delta = dict(set(proposed.items()).difference(existing.items())) commands = [] if state == 'present': if delta: command = config_pim_interface(delta, existing, jp_bidir, isauth) if command: commands.append(command) elif state == 'default' or state == 'absent': defaults = config_pim_interface_defaults(existing, jp_bidir, isauth) if defaults: commands.append(defaults) if commands: commands.insert(0, ['interface {0}'.format(interface)]) cmds = flatten_list(commands) if cmds: results['changed'] = True if not module.check_mode: load_config(module, cmds) if 'configure' in cmds: cmds.pop(0) results['commands'] = cmds module.exit_json(**results) if __name__ == '__main__': main()
gpl-3.0
Softmotions/edx-platform
common/lib/i18n/tests/test_extract_and_generate.py
121
4581
""" This test tests that i18n extraction (`paver i18n_extract -v`) works properly. """ from datetime import datetime, timedelta import os import random import re import sys import string import subprocess from unittest import TestCase from mock import patch from polib import pofile from pytz import UTC from i18n import extract from i18n import generate from i18n import dummy from i18n.config import CONFIGURATION class TestGenerate(TestCase): """ Tests functionality of i18n/generate.py """ generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po') @classmethod def setUpClass(cls): sys.stderr.write( "\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. " "If you experience failures, please check that all instances of `gettext` and " "`ngettext` are used correctly. You can also try running `paver i18n_extract -v` " "locally for more detail.\n" ) sys.stderr.write( "\nExtracting i18n strings and generating dummy translations; " "this may take a few minutes\n" ) sys.stderr.flush() extract.main(verbosity=0) dummy.main(verbosity=0) @classmethod def tearDownClass(cls): # Clear the Esperanto & RTL directories of any test artifacts cmd = "git checkout conf/locale/eo conf/locale/rtl" sys.stderr.write("Cleaning up dummy language directories: " + cmd) sys.stderr.flush() returncode = subprocess.call(cmd, shell=True) assert returncode == 0 super(TestGenerate, cls).tearDownClass() def setUp(self): # Subtract 1 second to help comparisons with file-modify time succeed, # since os.path.getmtime() is not millisecond-accurate self.start_time = datetime.now(UTC) - timedelta(seconds=1) def test_merge(self): """ Tests merge script on English source files. """ filename = os.path.join(CONFIGURATION.source_messages_dir, random_name()) generate.merge(CONFIGURATION.source_locale, target=filename) self.assertTrue(os.path.exists(filename)) os.remove(filename) # Patch dummy_locales to not have esperanto present @patch.object(CONFIGURATION, 'dummy_locales', ['fake2']) def test_main(self): """ Runs generate.main() which should merge source files, then compile all sources in all configured languages. Validates output by checking all .mo files in all configured languages. .mo files should exist, and be recently created (modified after start of test suite) """ generate.main(verbosity=0, strict=False) for locale in CONFIGURATION.translated_locales: for filename in ('django', 'djangojs'): mofile = filename + '.mo' path = os.path.join(CONFIGURATION.get_messages_dir(locale), mofile) exists = os.path.exists(path) self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile)) self.assertTrue( datetime.fromtimestamp(os.path.getmtime(path), UTC) >= self.start_time, msg='File not recently modified: %s' % path ) # Segmenting means that the merge headers don't work they way they # used to, so don't make this check for now. I'm not sure if we'll # get the merge header back eventually, or delete this code eventually. # self.assert_merge_headers(locale) def assert_merge_headers(self, locale): """ This is invoked by test_main to ensure that it runs after calling generate.main(). There should be exactly three merge comment headers in our merged .po file. This counts them to be sure. A merge comment looks like this: # #-#-#-#-# django-partial.po (0.1a) #-#-#-#-# """ path = os.path.join(CONFIGURATION.get_messages_dir(locale), 'django.po') pof = pofile(path) pattern = re.compile('^#-#-#-#-#', re.M) match = pattern.findall(pof.header) self.assertEqual( len(match), 3, msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path) ) def random_name(size=6): """Returns random filename as string, like test-4BZ81W""" chars = string.ascii_uppercase + string.digits return 'test-' + ''.join(random.choice(chars) for x in range(size))
agpl-3.0
tempbottle/kbengine
kbe/src/lib/python/Lib/test/test_codecencodings_iso2022.py
88
1464
# Codec encoding tests for ISO 2022 encodings. from test import support from test import multibytecodec_support import unittest COMMON_CODEC_TESTS = ( # invalid bytes (b'ab\xFFcd', 'replace', 'ab\uFFFDcd'), (b'ab\x1Bdef', 'replace', 'ab\x1Bdef'), (b'ab\x1B$def', 'replace', 'ab\uFFFD'), ) class Test_ISO2022_JP(multibytecodec_support.TestBase, unittest.TestCase): encoding = 'iso2022_jp' tstring = multibytecodec_support.load_teststring('iso2022_jp') codectests = COMMON_CODEC_TESTS + ( (b'ab\x1BNdef', 'replace', 'ab\x1BNdef'), ) class Test_ISO2022_JP2(multibytecodec_support.TestBase, unittest.TestCase): encoding = 'iso2022_jp_2' tstring = multibytecodec_support.load_teststring('iso2022_jp') codectests = COMMON_CODEC_TESTS + ( (b'ab\x1BNdef', 'replace', 'abdef'), ) class Test_ISO2022_KR(multibytecodec_support.TestBase, unittest.TestCase): encoding = 'iso2022_kr' tstring = multibytecodec_support.load_teststring('iso2022_kr') codectests = COMMON_CODEC_TESTS + ( (b'ab\x1BNdef', 'replace', 'ab\x1BNdef'), ) # iso2022_kr.txt cannot be used to test "chunk coding": the escape # sequence is only written on the first line @unittest.skip('iso2022_kr.txt cannot be used to test "chunk coding"') def test_chunkcoding(self): pass def test_main(): support.run_unittest(__name__) if __name__ == "__main__": test_main()
lgpl-3.0
GeoscienceAustralia/PF3D
testing/extract_windprofiles_for_guntur.py
1
1345
"""Test script for aim to extract wind profiles from NCEP1 data """ # Vent location in geographic coordinates (decimal degrees) of the Guntur crater vent_easting = 439423 vent_northing = 9167213 vent_zone = 49 vent_hemisphere = 'S' # Time to start extraction start_year = 2003 start_month = 10 start_day = 29 start_hour = 18 # Time to end extraction end_year = 2003 end_month = 10 end_day = 30 end_hour = 12 # Location of NCEP files NCEP_dir = '/model_area/tephra/3D_wind/NCEP1/indonesia/2003' # Location of generated windprofiles windfield_directory = '/model_area/tephra/3D_wind/NCEP1/merapi_single_scenario_2003' # Determine if generated wind profile should be used for hazard modelling # Options are # 'multiple' (hazard modelling) # 'merged' (scenario modelling) wind_field_type = 'merged' #wind_field_type = 'multiple' #-------------------------------------- if __name__ == '__main__': from aim import generate_wind_profiles_from_ncep if wind_field_type == 'multiple': generate_wind_profiles_from_ncep(__file__) elif wind_field_type == 'merged': from aim import join_wind_profiles generate_wind_profiles_from_ncep(__file__, update_timeblocks=True) join_wind_profiles(windfield_directory) else: print 'wind_field_type must be either \'multiple\' or \'merged\''
gpl-3.0
santisiri/popego
envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.5-py2.5.egg/sqlalchemy/ext/sqlsoup.py
3
19461
""" Introduction ============ SqlSoup provides a convenient way to access database tables without having to declare table or mapper classes ahead of time. Suppose we have a database with users, books, and loans tables (corresponding to the PyWebOff dataset, if you're curious). For testing purposes, we'll create this db as follows:: >>> from sqlalchemy import create_engine >>> e = create_engine('sqlite:///:memory:') >>> for sql in _testsql: e.execute(sql) #doctest: +ELLIPSIS <... Creating a SqlSoup gateway is just like creating an SQLAlchemy engine:: >>> from sqlalchemy.ext.sqlsoup import SqlSoup >>> db = SqlSoup('sqlite:///:memory:') or, you can re-use an existing metadata or engine:: >>> db = SqlSoup(MetaData(e)) You can optionally specify a schema within the database for your SqlSoup:: # >>> db.schema = myschemaname Loading objects =============== Loading objects is as easy as this:: >>> users = db.users.all() >>> users.sort() >>> users [MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0), MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)] Of course, letting the database do the sort is better:: >>> db.users.order_by(db.users.name).all() [MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1), MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0)] Field access is intuitive:: >>> users[0].email u'student@example.edu' Of course, you don't want to load all users very often. Let's add a WHERE clause. Let's also switch the order_by to DESC while we're at it:: >>> from sqlalchemy import or_, and_, desc >>> where = or_(db.users.name=='Bhargan Basepair', db.users.email=='student@example.edu') >>> db.users.filter(where).order_by(desc(db.users.name)).all() [MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0), MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)] You can also use .first() (to retrieve only the first object from a query) or .one() (like .first when you expect exactly one user -- it will raise an exception if more were returned):: >>> db.users.filter(db.users.name=='Bhargan Basepair').one() MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1) Since name is the primary key, this is equivalent to >>> db.users.get('Bhargan Basepair') MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1) This is also equivalent to >>> db.users.filter_by(name='Bhargan Basepair').one() MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1) filter_by is like filter, but takes kwargs instead of full clause expressions. This makes it more concise for simple queries like this, but you can't do complex queries like the or\_ above or non-equality based comparisons this way. Full query documentation ------------------------ Get, filter, filter_by, order_by, limit, and the rest of the query methods are explained in detail in the `SQLAlchemy documentation`__. __ http://www.sqlalchemy.org/docs/04/ormtutorial.html#datamapping_querying Modifying objects ================= Modifying objects is intuitive:: >>> user = _ >>> user.email = 'basepair+nospam@example.edu' >>> db.flush() (SqlSoup leverages the sophisticated SQLAlchemy unit-of-work code, so multiple updates to a single object will be turned into a single ``UPDATE`` statement when you flush.) To finish covering the basics, let's insert a new loan, then delete it:: >>> book_id = db.books.filter_by(title='Regional Variation in Moss').first().id >>> db.loans.insert(book_id=book_id, user_name=user.name) MappedLoans(book_id=2,user_name='Bhargan Basepair',loan_date=None) >>> db.flush() >>> loan = db.loans.filter_by(book_id=2, user_name='Bhargan Basepair').one() >>> db.delete(loan) >>> db.flush() You can also delete rows that have not been loaded as objects. Let's do our insert/delete cycle once more, this time using the loans table's delete method. (For SQLAlchemy experts: note that no flush() call is required since this delete acts at the SQL level, not at the Mapper level.) The same where-clause construction rules apply here as to the select methods. :: >>> db.loans.insert(book_id=book_id, user_name=user.name) MappedLoans(book_id=2,user_name='Bhargan Basepair',loan_date=None) >>> db.flush() >>> db.loans.delete(db.loans.book_id==2) You can similarly update multiple rows at once. This will change the book_id to 1 in all loans whose book_id is 2:: >>> db.loans.update(db.loans.book_id==2, book_id=1) >>> db.loans.filter_by(book_id=1).all() [MappedLoans(book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0))] Joins ===== Occasionally, you will want to pull out a lot of data from related tables all at once. In this situation, it is far more efficient to have the database perform the necessary join. (Here we do not have *a lot of data* but hopefully the concept is still clear.) SQLAlchemy is smart enough to recognize that loans has a foreign key to users, and uses that as the join condition automatically. :: >>> join1 = db.join(db.users, db.loans, isouter=True) >>> join1.filter_by(name='Joe Student').all() [MappedJoin(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0,book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0))] If you're unfortunate enough to be using MySQL with the default MyISAM storage engine, you'll have to specify the join condition manually, since MyISAM does not store foreign keys. Here's the same join again, with the join condition explicitly specified:: >>> db.join(db.users, db.loans, db.users.name==db.loans.user_name, isouter=True) <class 'sqlalchemy.ext.sqlsoup.MappedJoin'> You can compose arbitrarily complex joins by combining Join objects with tables or other joins. Here we combine our first join with the books table:: >>> join2 = db.join(join1, db.books) >>> join2.all() [MappedJoin(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0,book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0),id=1,title='Mustards I Have Known',published_year='1989',authors='Jones')] If you join tables that have an identical column name, wrap your join with `with_labels`, to disambiguate columns with their table name (.c is short for .columns):: >>> db.with_labels(join1).c.keys() [u'users_name', u'users_email', u'users_password', u'users_classname', u'users_admin', u'loans_book_id', u'loans_user_name', u'loans_loan_date'] You can also join directly to a labeled object:: >>> labeled_loans = db.with_labels(db.loans) >>> db.join(db.users, labeled_loans, isouter=True).c.keys() [u'name', u'email', u'password', u'classname', u'admin', u'loans_book_id', u'loans_user_name', u'loans_loan_date'] Relations ========= You can define relations on SqlSoup classes: >>> db.users.relate('loans', db.loans) These can then be used like a normal SA property: >>> db.users.get('Joe Student').loans [MappedLoans(book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0))] >>> db.users.filter(~db.users.loans.any()).all() [MappedUsers(name='Bhargan Basepair',email='basepair+nospam@example.edu',password='basepair',classname=None,admin=1)] relate can take any options that the relation function accepts in normal mapper definition: >>> del db._cache['users'] >>> db.users.relate('loans', db.loans, order_by=db.loans.loan_date, cascade='all, delete-orphan') Advanced Use ============ Accessing the Session --------------------- SqlSoup uses a SessionContext to provide thread-local sessions. You can get a reference to the current one like this:: >>> from sqlalchemy.ext.sqlsoup import objectstore >>> session = objectstore.current Now you have access to all the standard session-based SA features, such as transactions. (SqlSoup's ``flush()`` is normally transactionalized, but you can perform manual transaction management if you need a transaction to span multiple flushes.) Mapping arbitrary Selectables ----------------------------- SqlSoup can map any SQLAlchemy ``Selectable`` with the map method. Let's map a ``Select`` object that uses an aggregate function; we'll use the SQLAlchemy ``Table`` that SqlSoup introspected as the basis. (Since we're not mapping to a simple table or join, we need to tell SQLAlchemy how to find the *primary key* which just needs to be unique within the select, and not necessarily correspond to a *real* PK in the database.) :: >>> from sqlalchemy import select, func >>> b = db.books._table >>> s = select([b.c.published_year, func.count('*').label('n')], from_obj=[b], group_by=[b.c.published_year]) >>> s = s.alias('years_with_count') >>> years_with_count = db.map(s, primary_key=[s.c.published_year]) >>> years_with_count.filter_by(published_year='1989').all() [MappedBooks(published_year='1989',n=1)] Obviously if we just wanted to get a list of counts associated with book years once, raw SQL is going to be less work. The advantage of mapping a Select is reusability, both standalone and in Joins. (And if you go to full SQLAlchemy, you can perform mappings like this directly to your object models.) An easy way to save mapped selectables like this is to just hang them on your db object:: >>> db.years_with_count = years_with_count Python is flexible like that! Raw SQL ------- SqlSoup works fine with SQLAlchemy's `text block support`__. __ http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_text You can also access the SqlSoup's `engine` attribute to compose SQL directly. The engine's ``execute`` method corresponds to the one of a DBAPI cursor, and returns a ``ResultProxy`` that has ``fetch`` methods you would also see on a cursor:: >>> rp = db.bind.execute('select name, email from users order by name') >>> for name, email in rp.fetchall(): print name, email Bhargan Basepair basepair+nospam@example.edu Joe Student student@example.edu You can also pass this engine object to other SQLAlchemy constructs. Dynamic table names ------------------- You can load a table whose name is specified at runtime with the entity() method: >>> tablename = 'loans' >>> db.entity(tablename) == db.loans True Extra tests =========== Boring tests here. Nothing of real expository value. :: >>> db.users.filter_by(classname=None).order_by(db.users.name).all() [MappedUsers(name='Bhargan Basepair',email='basepair+nospam@example.edu',password='basepair',classname=None,admin=1), MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0)] >>> db.nopk Traceback (most recent call last): ... PKNotFoundError: table 'nopk' does not have a primary key defined [columns: i] >>> db.nosuchtable Traceback (most recent call last): ... NoSuchTableError: nosuchtable >>> years_with_count.insert(published_year='2007', n=1) Traceback (most recent call last): ... InvalidRequestError: SQLSoup can only modify mapped Tables (found: Alias) [tests clear()] >>> db.loans.count() 1 >>> _ = db.loans.insert(book_id=1, user_name='Bhargan Basepair') >>> db.clear() >>> db.flush() >>> db.loans.count() 1 """ from sqlalchemy import * from sqlalchemy import schema, sql from sqlalchemy.orm import * from sqlalchemy.ext.sessioncontext import SessionContext from sqlalchemy.exceptions import * from sqlalchemy.sql import expression _testsql = """ CREATE TABLE books ( id integer PRIMARY KEY, -- auto-increments in sqlite title text NOT NULL, published_year char(4) NOT NULL, authors text NOT NULL ); CREATE TABLE users ( name varchar(32) PRIMARY KEY, email varchar(128) NOT NULL, password varchar(128) NOT NULL, classname text, admin int NOT NULL -- 0 = false ); CREATE TABLE loans ( book_id int PRIMARY KEY REFERENCES books(id), user_name varchar(32) references users(name) ON DELETE SET NULL ON UPDATE CASCADE, loan_date datetime DEFAULT current_timestamp ); insert into users(name, email, password, admin) values('Bhargan Basepair', 'basepair@example.edu', 'basepair', 1); insert into users(name, email, password, admin) values('Joe Student', 'student@example.edu', 'student', 0); insert into books(title, published_year, authors) values('Mustards I Have Known', '1989', 'Jones'); insert into books(title, published_year, authors) values('Regional Variation in Moss', '1971', 'Flim and Flam'); insert into loans(book_id, user_name, loan_date) values ( (select min(id) from books), (select name from users where name like 'Joe%'), '2006-07-12 0:0:0') ; CREATE TABLE nopk ( i int ); """.split(';') __all__ = ['PKNotFoundError', 'SqlSoup'] # # thread local SessionContext # class Objectstore(SessionContext): def __getattr__(self, key): return getattr(self.current, key) def get_session(self): return self.current objectstore = Objectstore(create_session) class PKNotFoundError(SQLAlchemyError): pass def _ddl_error(cls): msg = 'SQLSoup can only modify mapped Tables (found: %s)' \ % cls._table.__class__.__name__ raise InvalidRequestError(msg) # metaclass is necessary to expose class methods with getattr, e.g. # we want to pass db.users.select through to users._mapper.select class SelectableClassType(type): def insert(cls, **kwargs): _ddl_error(cls) def delete(cls, *args, **kwargs): _ddl_error(cls) def update(cls, whereclause=None, values=None, **kwargs): _ddl_error(cls) def __selectable__(cls): return cls._table def __getattr__(cls, attr): if attr == '_query': # called during mapper init raise AttributeError() return getattr(cls._query, attr) class TableClassType(SelectableClassType): def insert(cls, **kwargs): o = cls() o.__dict__.update(kwargs) return o def delete(cls, *args, **kwargs): cls._table.delete(*args, **kwargs).execute() def update(cls, whereclause=None, values=None, **kwargs): cls._table.update(whereclause, values).execute(**kwargs) def relate(cls, propname, *args, **kwargs): class_mapper(cls)._compile_property(propname, relation(*args, **kwargs)) def _is_outer_join(selectable): if not isinstance(selectable, sql.Join): return False if selectable.isouter: return True return _is_outer_join(selectable.left) or _is_outer_join(selectable.right) def _selectable_name(selectable): if isinstance(selectable, sql.Alias): return _selectable_name(selectable.selectable) elif isinstance(selectable, sql.Select): return ''.join([_selectable_name(s) for s in selectable.froms]) elif isinstance(selectable, schema.Table): return selectable.name.capitalize() else: x = selectable.__class__.__name__ if x[0] == '_': x = x[1:] return x def class_for_table(selectable, **mapper_kwargs): selectable = expression._selectable(selectable) mapname = 'Mapped' + _selectable_name(selectable) if isinstance(selectable, Table): klass = TableClassType(mapname, (object,), {}) else: klass = SelectableClassType(mapname, (object,), {}) def __cmp__(self, o): L = self.__class__.c.keys() L.sort() t1 = [getattr(self, k) for k in L] try: t2 = [getattr(o, k) for k in L] except AttributeError: raise TypeError('unable to compare with %s' % o.__class__) return cmp(t1, t2) def __repr__(self): import locale encoding = locale.getdefaultlocale()[1] or 'ascii' L = [] for k in self.__class__.c.keys(): value = getattr(self, k, '') if isinstance(value, unicode): value = value.encode(encoding) L.append("%s=%r" % (k, value)) return '%s(%s)' % (self.__class__.__name__, ','.join(L)) for m in ['__cmp__', '__repr__']: setattr(klass, m, eval(m)) klass._table = selectable mappr = mapper(klass, selectable, extension=objectstore.mapper_extension, allow_null_pks=_is_outer_join(selectable), **mapper_kwargs) klass._query = Query(mappr) return klass class SqlSoup: def __init__(self, *args, **kwargs): """Initialize a new ``SqlSoup``. `args` may either be an ``SQLEngine`` or a set of arguments suitable for passing to ``create_engine``. """ # meh, sometimes having method overloading instead of kwargs would be easier if isinstance(args[0], MetaData): args = list(args) metadata = args.pop(0) if args or kwargs: raise ArgumentError('Extra arguments not allowed when metadata is given') else: metadata = MetaData(*args, **kwargs) self._metadata = metadata self._cache = {} self.schema = None def engine(self): return self._metadata.bind engine = property(engine) bind = engine def delete(self, *args, **kwargs): objectstore.delete(*args, **kwargs) def flush(self): objectstore.get_session().flush() def clear(self): objectstore.clear() def map(self, selectable, **kwargs): try: t = self._cache[selectable] except KeyError: t = class_for_table(selectable, **kwargs) self._cache[selectable] = t return t def with_labels(self, item): # TODO give meaningful aliases return self.map(expression._selectable(item).select(use_labels=True).alias('foo')) def join(self, *args, **kwargs): j = join(*args, **kwargs) return self.map(j) def entity(self, attr): try: t = self._cache[attr] except KeyError: table = Table(attr, self._metadata, autoload=True, schema=self.schema) if not table.primary_key.columns: raise PKNotFoundError('table %r does not have a primary key defined [columns: %s]' % (attr, ','.join(table.c.keys()))) if table.columns: t = class_for_table(table) else: t = None self._cache[attr] = t return t def __getattr__(self, attr): return self.entity(attr) def __repr__(self): return 'SqlSoup(%r)' % self._metadata if __name__ == '__main__': import logging logging.basicConfig() import doctest doctest.testmod()
bsd-3-clause
alshedivat/tensorflow
tensorflow/python/keras/layers/serialization.py
24
2693
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layer serialization/deserialization functions. """ # pylint: disable=wildcard-import # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.engine.input_layer import Input from tensorflow.python.keras.engine.input_layer import InputLayer from tensorflow.python.keras.layers.advanced_activations import * from tensorflow.python.keras.layers.convolutional import * from tensorflow.python.keras.layers.convolutional_recurrent import * from tensorflow.python.keras.layers.core import * from tensorflow.python.keras.layers.cudnn_recurrent import * from tensorflow.python.keras.layers.embeddings import * from tensorflow.python.keras.layers.local import * from tensorflow.python.keras.layers.merge import * from tensorflow.python.keras.layers.noise import * from tensorflow.python.keras.layers.normalization import * from tensorflow.python.keras.layers.pooling import * from tensorflow.python.keras.layers.recurrent import * from tensorflow.python.keras.layers.wrappers import * from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object def serialize(layer): return {'class_name': layer.__class__.__name__, 'config': layer.get_config()} def deserialize(config, custom_objects=None): """Instantiates a layer from a config dictionary. Arguments: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions Returns: Layer instance (may be Model, Sequential, Layer...) """ from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top globs = globals() # All layers. globs['Model'] = models.Model globs['Sequential'] = models.Sequential return deserialize_keras_object( config, module_objects=globs, custom_objects=custom_objects, printable_module_name='layer')
apache-2.0
python-cn/flask-social-blueprint
example/sqla/website/settings.py
5
2741
# coding=utf-8 # Created 2014 by Janusz Skonieczny import logging import os SRC_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) # ============================================================================ # a flask settings # http://flask.pocoo.org/docs/config/#configuring-from-files # ============================================================================ SECRET_KEY = '47e585de7f22984d5ee291c2f31412384bfc32d0' FLASH_MESSAGES = True # Flask-SQLAlchemy # http://pythonhosted.org/Flask-SQLAlchemy/config.html SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(SRC_DIR, "db.sqlite") SQLALCHEMY_ECHO = False # Doubles log statements, investigate # Flask-Login # https://flask-login.readthedocs.org/en/latest/#protecting-views LOGIN_DISABLED = False # Flask-Security # http://pythonhosted.org/Flask-Security/configuration.html SECURITY_PASSWORD_SALT = "abc" # SECURITY_PASSWORD_HASH = "bcrypt" # requires py-bcrypt # SECURITY_PASSWORD_HASH = "pbkdf2_sha512" SECURITY_PASSWORD_HASH = "plaintext" SECURITY_EMAIL_SENDER = "support@example.com" SECURITY_CONFIRMABLE = True SECURITY_REGISTERABLE = True SECURITY_RECOVERABLE = True SECURITY_CHANGEABLE = True SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05" SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47" SECURITY_DEFAULT_REMEMBER_ME = True # Set secret keys for CSRF protection CSRF_SESSION_KEY = '8a7474974efcf76896aa84eea9cbe016bbc08828' CSRF_ENABLED = True # Flask-Babel # http://pythonhosted.org/Flask-Babel/ BABEL_DEFAULT_LOCALE = "en" BABEL_DEFAULT_TIMEZONE = "UTC" # Flask-Mail # http://pythonhosted.org/Flask-Mail/ SERVER_EMAIL = 'Flask-SocialBlueprint <support@example.com>' # Flask-SocialBlueprint # https://github.com/wooyek/flask-social-blueprint SOCIAL_BLUEPRINT = { # https://developers.facebook.com/apps/ "flask_social_blueprint.providers.Facebook": { # App ID 'consumer_key': '197…', # App Secret 'consumer_secret': 'c956c1…' }, # https://apps.twitter.com/app/new "flask_social_blueprint.providers.Twitter": { # Your access token from API Keys tab 'consumer_key': 'bkp…', # access token secret 'consumer_secret': 'pHUx…' }, # https://console.developers.google.com/project "flask_social_blueprint.providers.Google": { # Client ID 'consumer_key': '797….apps.googleusercontent.com', # Client secret 'consumer_secret': 'bDG…' }, # https://github.com/settings/applications/new "flask_social_blueprint.providers.Github": { # Client ID 'consumer_key': '6f6…', # Client Secret 'consumer_secret': '1a9…' }, }
mit
31415us/trajectory
py/env/lib/python2.7/site-packages/pip/commands/search.py
84
4717
import sys import textwrap import pkg_resources import pip.download from pip.basecommand import Command, SUCCESS from pip.util import get_terminal_size from pip.log import logger from pip.backwardcompat import xmlrpclib, reduce, cmp from pip.exceptions import CommandError from pip.status_codes import NO_MATCHES_FOUND from distutils.version import StrictVersion, LooseVersion class SearchCommand(Command): """Search for PyPI packages whose name or summary contains <query>.""" name = 'search' usage = """ %prog [options] <query>""" summary = 'Search PyPI for packages.' def __init__(self, *args, **kw): super(SearchCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: raise CommandError('Missing required argument (search query).') query = args index_url = options.index pypi_hits = self.search(query, index_url) hits = transform_hits(pypi_hits) terminal_width = None if sys.stdout.isatty(): terminal_width = get_terminal_size()[0] print_results(hits, terminal_width=terminal_width) if pypi_hits: return SUCCESS return NO_MATCHES_FOUND def search(self, query, index_url): pypi = xmlrpclib.ServerProxy(index_url) hits = pypi.search({'name': query, 'summary': query}, 'or') return hits def transform_hits(hits): """ The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use. """ packages = {} for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] score = hit['_pypi_ordering'] if score is None: score = 0 if name not in packages.keys(): packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score} else: packages[name]['versions'].append(version) # if this is the highest version, replace summary and score if version == highest_version(packages[name]['versions']): packages[name]['summary'] = summary packages[name]['score'] = score # each record has a unique name now, so we will convert the dict into a list sorted by score package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True) return package_list def print_results(hits, name_column_width=25, terminal_width=None): installed_packages = [p.project_name for p in pkg_resources.working_set] for hit in hits: name = hit['name'] summary = hit['summary'] or '' if terminal_width is not None: # wrap and indent summary to fit terminal summary = textwrap.wrap(summary, terminal_width - name_column_width - 5) summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) line = '%s - %s' % (name.ljust(name_column_width), summary) try: logger.notify(line) if name in installed_packages: dist = pkg_resources.get_distribution(name) logger.indent += 2 try: latest = highest_version(hit['versions']) if dist.version == latest: logger.notify('INSTALLED: %s (latest)' % dist.version) else: logger.notify('INSTALLED: %s' % dist.version) logger.notify('LATEST: %s' % latest) finally: logger.indent -= 2 except UnicodeEncodeError: pass def compare_versions(version1, version2): try: return cmp(StrictVersion(version1), StrictVersion(version2)) # in case of abnormal version number, fall back to LooseVersion except ValueError: pass try: return cmp(LooseVersion(version1), LooseVersion(version2)) except TypeError: # certain LooseVersion comparions raise due to unorderable types, # fallback to string comparison return cmp([str(v) for v in LooseVersion(version1).version], [str(v) for v in LooseVersion(version2).version]) def highest_version(versions): return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
mit
rtucker-mozilla/mozilla_inventory
vendor-local/src/django-tastypie/tests/core/tests/throttle.py
13
6172
import time from django.core.cache import cache from django.test import TestCase from tastypie.models import ApiAccess from tastypie.throttle import BaseThrottle, CacheThrottle, CacheDBThrottle class NoThrottleTestCase(TestCase): def test_init(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.throttle_at, 150) self.assertEqual(throttle_1.timeframe, 3600) self.assertEqual(throttle_1.expiration, 604800) throttle_2 = BaseThrottle(throttle_at=50, timeframe=60*30, expiration=1) self.assertEqual(throttle_2.throttle_at, 50) self.assertEqual(throttle_2.timeframe, 1800) self.assertEqual(throttle_2.expiration, 1) def test_convert_identifier_to_key(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.convert_identifier_to_key(''), '_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('alnum10'), 'alnum10_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('Mr. Pants'), 'Mr.Pants_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('Mr_Pants'), 'Mr_Pants_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('%^@@$&!a'), 'a_accesses') def test_should_be_throttled(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.should_be_throttled('foobaz'), False) def test_accessed(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.accessed('foobaz'), None) class CacheThrottleTestCase(TestCase): def tearDown(self): cache.delete('daniel_accesses') cache.delete('cody_accesses') def test_throttling(self): throttle_1 = CacheThrottle(throttle_at=2, timeframe=5, expiration=2) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 1) self.assertEqual(cache.get('cody_accesses'), None) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.accessed('cody'), None) self.assertEqual(throttle_1.should_be_throttled('cody'), False) self.assertEqual(len(cache.get('daniel_accesses')), 2) self.assertEqual(len(cache.get('cody_accesses')), 1) # THROTTLE'D! self.assertEqual(throttle_1.should_be_throttled('daniel'), True) self.assertEqual(len(cache.get('daniel_accesses')), 2) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), True) self.assertEqual(len(cache.get('daniel_accesses')), 3) self.assertEqual(throttle_1.accessed('daniel'), None) # Should be no interplay. self.assertEqual(throttle_1.should_be_throttled('cody'), False) self.assertEqual(throttle_1.accessed('cody'), None) # Test the timeframe. time.sleep(3) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) class CacheDBThrottleTestCase(TestCase): def tearDown(self): cache.delete('daniel_accesses') cache.delete('cody_accesses') def test_throttling(self): throttle_1 = CacheDBThrottle(throttle_at=2, timeframe=5, expiration=2) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) self.assertEqual(ApiAccess.objects.count(), 0) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 0) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 1) self.assertEqual(cache.get('cody_accesses'), None) self.assertEqual(ApiAccess.objects.count(), 1) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 1) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.accessed('cody'), None) self.assertEqual(throttle_1.should_be_throttled('cody'), False) self.assertEqual(len(cache.get('daniel_accesses')), 2) self.assertEqual(len(cache.get('cody_accesses')), 1) self.assertEqual(ApiAccess.objects.count(), 3) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 2) self.assertEqual(throttle_1.accessed('cody'), None) # THROTTLE'D! self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), True) self.assertEqual(len(cache.get('daniel_accesses')), 3) self.assertEqual(ApiAccess.objects.count(), 5) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 3) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), True) self.assertEqual(len(cache.get('daniel_accesses')), 4) self.assertEqual(ApiAccess.objects.count(), 6) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 4) # Should be no interplay. self.assertEqual(throttle_1.should_be_throttled('cody'), True) self.assertEqual(throttle_1.accessed('cody'), None) self.assertEqual(ApiAccess.objects.count(), 7) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 4) # Test the timeframe. time.sleep(3) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) self.assertEqual(ApiAccess.objects.count(), 7) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 4)
bsd-3-clause
wileeam/airflow
tests/providers/google/cloud/operators/test_functions.py
4
29951
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from copy import deepcopy import mock from googleapiclient.errors import HttpError from parameterized import parameterized from airflow import AirflowException from airflow.providers.google.cloud.operators.functions import ( FUNCTION_NAME_PATTERN, CloudFunctionDeleteFunctionOperator, CloudFunctionDeployFunctionOperator, CloudFunctionInvokeFunctionOperator, ) from airflow.version import version EMPTY_CONTENT = b'' MOCK_RESP_404 = type('', (object,), {"status": 404})() GCP_PROJECT_ID = 'test_project_id' GCP_LOCATION = 'test_region' GCF_SOURCE_ARCHIVE_URL = 'gs://folder/file.zip' GCF_ENTRYPOINT = 'helloWorld' FUNCTION_NAME = 'projects/{}/locations/{}/functions/{}'.format(GCP_PROJECT_ID, GCP_LOCATION, GCF_ENTRYPOINT) GCF_RUNTIME = 'nodejs6' VALID_RUNTIMES = ['nodejs6', 'nodejs8', 'python37'] VALID_BODY = { "name": FUNCTION_NAME, "entryPoint": GCF_ENTRYPOINT, "runtime": GCF_RUNTIME, "httpsTrigger": {}, "sourceArchiveUrl": GCF_SOURCE_ARCHIVE_URL } def _prepare_test_bodies(): body_no_name = deepcopy(VALID_BODY) body_no_name.pop('name', None) body_empty_entry_point = deepcopy(VALID_BODY) body_empty_entry_point['entryPoint'] = '' body_empty_runtime = deepcopy(VALID_BODY) body_empty_runtime['runtime'] = '' body_values = [ ({}, "The required parameter 'body' is missing"), (body_no_name, "The required body field 'name' is missing"), (body_empty_entry_point, "The body field 'entryPoint' of value '' does not match"), (body_empty_runtime, "The body field 'runtime' of value '' does not match"), ] return body_values class TestGcfFunctionDeploy(unittest.TestCase): @parameterized.expand(_prepare_test_bodies()) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_body_empty_or_missing_fields(self, body, message, mock_hook): mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl' with self.assertRaises(AirflowException) as cm: op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) err = cm.exception self.assertIn(message, str(err)) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_deploy_execute(self, mock_hook): mock_hook.return_value.get_function.side_effect = mock.Mock( side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found')) mock_hook.return_value.create_new_function.return_value = True op = CloudFunctionDeployFunctionOperator( project_id=GCP_PROJECT_ID, location=GCP_LOCATION, body=deepcopy(VALID_BODY), task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.return_value.get_function.assert_called_once_with( 'projects/test_project_id/locations/test_region/functions/helloWorld' ) expected_body = deepcopy(VALID_BODY) expected_body['labels'] = { 'airflow-version': 'v' + version.replace('.', '-').replace('+', '-') } mock_hook.return_value.create_new_function.assert_called_once_with( project_id='test_project_id', location='test_region', body=expected_body ) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_update_function_if_exists(self, mock_hook): mock_hook.return_value.get_function.return_value = True mock_hook.return_value.update_function.return_value = True op = CloudFunctionDeployFunctionOperator( project_id=GCP_PROJECT_ID, location=GCP_LOCATION, body=deepcopy(VALID_BODY), task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.return_value.get_function.assert_called_once_with( 'projects/test_project_id/locations/test_region/functions/helloWorld' ) expected_body = deepcopy(VALID_BODY) expected_body['labels'] = { 'airflow-version': 'v' + version.replace('.', '-').replace('+', '-') } mock_hook.return_value.update_function.assert_called_once_with( 'projects/test_project_id/locations/test_region/functions/helloWorld', expected_body, expected_body.keys()) mock_hook.return_value.create_new_function.assert_not_called() @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_empty_project_id_is_ok(self, mock_hook): mock_hook.return_value.get_function.side_effect = \ HttpError(resp=MOCK_RESP_404, content=b'not found') operator = CloudFunctionDeployFunctionOperator( location="test_region", body=deepcopy(VALID_BODY), task_id="id" ) operator.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') new_body = deepcopy(VALID_BODY) new_body['labels'] = { 'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')} mock_hook.return_value.create_new_function.assert_called_once_with( project_id=None, location="test_region", body=new_body) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_empty_location(self, mock_hook): with self.assertRaises(AirflowException) as cm: CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="", body=None, task_id="id" ) err = cm.exception self.assertIn("The required parameter 'location' is missing", str(err)) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_empty_body(self, mock_hook): with self.assertRaises(AirflowException) as cm: CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=None, task_id="id" ) err = cm.exception self.assertIn("The required parameter 'body' is missing", str(err)) @parameterized.expand([ (runtime,) for runtime in VALID_RUNTIMES ]) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_correct_runtime_field(self, runtime, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body['runtime'] = runtime op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @parameterized.expand([ (network,) for network in [ "network-01", "n-0-2-3-4", "projects/PROJECT/global/networks/network-01" "projects/PRÓJECT/global/networks/netwórk-01" ] ]) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_valid_network_field(self, network, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body['network'] = network op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @parameterized.expand([ (labels,) for labels in [ {}, {"label": 'value-01'}, {"label_324234_a_b_c": 'value-01_93'}, ] ]) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_valid_labels_field(self, labels, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body['labels'] = labels op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_validation_disabled(self, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = { "name": "function_name", "some_invalid_body_field": "some_invalid_body_field_value" } op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, validate_body=False, task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_body_validation_simple(self, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body['name'] = '' with self.assertRaises(AirflowException) as cm: op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) err = cm.exception self.assertIn("The body field 'name' of value '' does not match", str(err)) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @parameterized.expand([ ('name', '', "The body field 'name' of value '' does not match"), ('description', '', "The body field 'description' of value '' does not match"), ('entryPoint', '', "The body field 'entryPoint' of value '' does not match"), ('availableMemoryMb', '0', "The available memory has to be greater than 0"), ('availableMemoryMb', '-1', "The available memory has to be greater than 0"), ('availableMemoryMb', 'ss', "invalid literal for int() with base 10: 'ss'"), ('network', '', "The body field 'network' of value '' does not match"), ('maxInstances', '0', "The max instances parameter has to be greater than 0"), ('maxInstances', '-1', "The max instances parameter has to be greater than 0"), ('maxInstances', 'ss', "invalid literal for int() with base 10: 'ss'"), ]) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_invalid_field_values(self, key, value, message, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body[key] = value with self.assertRaises(AirflowException) as cm: op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) err = cm.exception self.assertIn(message, str(err)) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @parameterized.expand([ ({'sourceArchiveUrl': ''}, "The body field 'source_code.sourceArchiveUrl' of value '' does not match"), ({'sourceArchiveUrl': '', 'zip_path': '/path/to/file'}, "Only one of 'sourceArchiveUrl' in body or 'zip_path' argument allowed."), ({'sourceArchiveUrl': 'gs://url', 'zip_path': '/path/to/file'}, "Only one of 'sourceArchiveUrl' in body or 'zip_path' argument allowed."), ({'sourceArchiveUrl': '', 'sourceUploadUrl': ''}, "Parameter 'sourceUploadUrl' is empty in the body and argument " "'zip_path' is missing or empty."), ({'sourceArchiveUrl': 'gs://adasda', 'sourceRepository': ''}, "The field 'source_code.sourceRepository' should be of dictionary type"), ({'sourceUploadUrl': '', 'sourceRepository': ''}, "Parameter 'sourceUploadUrl' is empty in the body and argument 'zip_path' " "is missing or empty."), ({'sourceArchiveUrl': '', 'sourceUploadUrl': '', 'sourceRepository': ''}, "Parameter 'sourceUploadUrl' is empty in the body and argument 'zip_path' " "is missing or empty."), ({'sourceArchiveUrl': 'gs://url', 'sourceUploadUrl': 'https://url'}, "The mutually exclusive fields 'sourceUploadUrl' and 'sourceArchiveUrl' " "belonging to the union 'source_code' are both present. Please remove one"), ({'sourceUploadUrl': 'https://url', 'zip_path': '/path/to/file'}, "Only one of 'sourceUploadUrl' in body " "or 'zip_path' argument allowed. Found both."), ({'sourceUploadUrl': ''}, "Parameter 'sourceUploadUrl' is empty in the body " "and argument 'zip_path' is missing or empty."), ({'sourceRepository': ''}, "The field 'source_code.sourceRepository' " "should be of dictionary type"), ({'sourceRepository': {}}, "The required body field " "'source_code.sourceRepository.url' is missing"), ({'sourceRepository': {'url': ''}}, "The body field 'source_code.sourceRepository.url' of value '' does not match"), ] ) def test_invalid_source_code_union_field(self, source_code, message): body = deepcopy(VALID_BODY) body.pop('sourceUploadUrl', None) body.pop('sourceArchiveUrl', None) zip_path = source_code.pop('zip_path', None) body.update(source_code) with self.assertRaises(AirflowException) as cm: op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id", zip_path=zip_path ) op.execute(None) err = cm.exception self.assertIn(message, str(err)) @parameterized.expand([ ({'sourceArchiveUrl': 'gs://url'}, 'test_project_id'), ({'zip_path': '/path/to/file', 'sourceUploadUrl': None}, 'test_project_id'), ({'zip_path': '/path/to/file', 'sourceUploadUrl': None}, None), ({'sourceUploadUrl': 'https://source.developers.google.com/projects/a/repos/b/revisions/c/paths/d'}, 'test_project_id'), ({'sourceRepository': {'url': 'https://source.developers.google.com/projects/a/' 'repos/b/revisions/c/paths/d'}}, 'test_project_id'), ]) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_valid_source_code_union_field(self, source_code, project_id, mock_hook): mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl' mock_hook.return_value.get_function.side_effect = mock.Mock( side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found')) mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body.pop('sourceUploadUrl', None) body.pop('sourceArchiveUrl', None) body.pop('sourceRepository', None) body.pop('sourceRepositoryUrl', None) zip_path = source_code.pop('zip_path', None) body.update(source_code) if project_id: op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id", zip_path=zip_path ) else: op = CloudFunctionDeployFunctionOperator( location="test_region", body=body, task_id="id", zip_path=zip_path ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') if zip_path: mock_hook.return_value.upload_function_zip.assert_called_once_with( project_id=project_id, location='test_region', zip_path='/path/to/file' ) mock_hook.return_value.get_function.assert_called_once_with( 'projects/test_project_id/locations/test_region/functions/helloWorld' ) mock_hook.return_value.create_new_function.assert_called_once_with( project_id=project_id, location='test_region', body=body ) mock_hook.reset_mock() @parameterized.expand([ ({'eventTrigger': {}}, "The required body field 'trigger.eventTrigger.eventType' is missing"), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b'}}, "The required body field 'trigger.eventTrigger.resource' is missing"), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': ''}}, "The body field 'trigger.eventTrigger.resource' of value '' does not match"), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': 'res', 'service': ''}}, "The body field 'trigger.eventTrigger.service' of value '' does not match"), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': 'res', 'service': 'service_name', 'failurePolicy': {'retry': ''}}}, "The field 'trigger.eventTrigger.failurePolicy.retry' " "should be of dictionary type") ] ) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_invalid_trigger_union_field(self, trigger, message, mock_hook): mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl' body = deepcopy(VALID_BODY) body.pop('httpsTrigger', None) body.pop('eventTrigger', None) body.update(trigger) with self.assertRaises(AirflowException) as cm: op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id", ) op.execute(None) err = cm.exception self.assertIn(message, str(err)) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() @parameterized.expand([ ({'httpsTrigger': {}},), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': 'res'}},), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': 'res', 'service': 'service_name'}},), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/ą.b', 'resource': 'reś', 'service': 'service_namę'}},), ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': 'res', 'service': 'service_name', 'failurePolicy': {'retry': {}}}},) ]) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_valid_trigger_union_field(self, trigger, mock_hook): mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl' mock_hook.return_value.get_function.side_effect = mock.Mock( side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found')) mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body.pop('httpsTrigger', None) body.pop('eventTrigger', None) body.update(trigger) op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id", ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.return_value.get_function.assert_called_once_with( 'projects/test_project_id/locations/test_region/functions/helloWorld' ) mock_hook.return_value.create_new_function.assert_called_once_with( project_id='test_project_id', location='test_region', body=body ) mock_hook.reset_mock() @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_extra_parameter(self, mock_hook): mock_hook.return_value.create_new_function.return_value = True body = deepcopy(VALID_BODY) body['extra_parameter'] = 'extra' op = CloudFunctionDeployFunctionOperator( project_id="test_project_id", location="test_region", body=body, task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.reset_mock() class TestGcfFunctionDelete(unittest.TestCase): _FUNCTION_NAME = 'projects/project_name/locations/project_location/functions' \ '/function_name' _DELETE_FUNCTION_EXPECTED = { '@type': 'type.googleapis.com/google.cloud.functions.v1.CloudFunction', 'name': _FUNCTION_NAME, 'sourceArchiveUrl': 'gs://functions/hello.zip', 'httpsTrigger': { 'url': 'https://project_location-project_name.cloudfunctions.net' '/function_name'}, 'status': 'ACTIVE', 'entryPoint': 'entry_point', 'timeout': '60s', 'availableMemoryMb': 256, 'serviceAccountEmail': 'project_name@appspot.gserviceaccount.com', 'updateTime': '2018-08-23T00:00:00Z', 'versionId': '1', 'runtime': 'nodejs6'} @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_delete_execute(self, mock_hook): mock_hook.return_value.delete_function.return_value = \ self._DELETE_FUNCTION_EXPECTED op = CloudFunctionDeleteFunctionOperator( name=self._FUNCTION_NAME, task_id="id" ) result = op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.return_value.delete_function.assert_called_once_with( 'projects/project_name/locations/project_location/functions/function_name' ) self.assertEqual(result['name'], self._FUNCTION_NAME) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_correct_name(self, mock_hook): op = CloudFunctionDeleteFunctionOperator( name="projects/project_name/locations/project_location/functions" "/function_name", task_id="id" ) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_invalid_name(self, mock_hook): with self.assertRaises(AttributeError) as cm: op = CloudFunctionDeleteFunctionOperator( name="invalid_name", task_id="id" ) op.execute(None) err = cm.exception self.assertEqual(str(err), 'Parameter name must match pattern: {}'.format( FUNCTION_NAME_PATTERN)) mock_hook.assert_not_called() @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_empty_name(self, mock_hook): mock_hook.return_value.delete_function.return_value = \ self._DELETE_FUNCTION_EXPECTED with self.assertRaises(AttributeError) as cm: CloudFunctionDeleteFunctionOperator( name="", task_id="id" ) err = cm.exception self.assertEqual(str(err), 'Empty parameter: name') mock_hook.assert_not_called() @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_gcf_error_silenced_when_function_doesnt_exist(self, mock_hook): op = CloudFunctionDeleteFunctionOperator( name=self._FUNCTION_NAME, task_id="id" ) mock_hook.return_value.delete_function.side_effect = mock.Mock( side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found')) op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.return_value.delete_function.assert_called_once_with( 'projects/project_name/locations/project_location/functions/function_name' ) @mock.patch('airflow.providers.google.cloud.operators.functions.CloudFunctionsHook') def test_non_404_gcf_error_bubbled_up(self, mock_hook): op = CloudFunctionDeleteFunctionOperator( name=self._FUNCTION_NAME, task_id="id" ) resp = type('', (object,), {"status": 500})() mock_hook.return_value.delete_function.side_effect = mock.Mock( side_effect=HttpError(resp=resp, content=b'error')) with self.assertRaises(HttpError): op.execute(None) mock_hook.assert_called_once_with(api_version='v1', gcp_conn_id='google_cloud_default') mock_hook.return_value.delete_function.assert_called_once_with( 'projects/project_name/locations/project_location/functions/function_name' ) class TestGcfFunctionInvokeOperator(unittest.TestCase): @mock.patch("airflow.providers.google.cloud.operators.functions.BaseOperator.xcom_push") @mock.patch("airflow.providers.google.cloud.operators.functions.CloudFunctionsHook") def test_execute(self, mock_gcf_hook, mock_xcom): exec_id = 'exec_id' mock_gcf_hook.return_value.call_function.return_value = {'executionId': exec_id} function_id = "test_function" payload = {'key': 'value'} api_version = 'test' gcp_conn_id = 'test_conn' op = CloudFunctionInvokeFunctionOperator( task_id='test', function_id=function_id, input_data=payload, location=GCP_LOCATION, project_id=GCP_PROJECT_ID, api_version=api_version, gcp_conn_id=gcp_conn_id ) op.execute(None) mock_gcf_hook.assert_called_once_with( api_version=api_version, gcp_conn_id=gcp_conn_id ) mock_gcf_hook.return_value.call_function.assert_called_once_with( function_id=function_id, input_data=payload, location=GCP_LOCATION, project_id=GCP_PROJECT_ID ) mock_xcom.assert_called_once_with( context=None, key='execution_id', value=exec_id )
apache-2.0
Branlala/docker-sickbeardfr
sickbeard/lib/requests/packages/chardet2/hebrewprober.py
63
13248
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Shy Shalom # Portions created by the Initial Developer are Copyright (C) 2005 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetprober import CharSetProber from . import constants # This prober doesn't actually recognize a language or a charset. # It is a helper prober for the use of the Hebrew model probers ### General ideas of the Hebrew charset recognition ### # # Four main charsets exist in Hebrew: # "ISO-8859-8" - Visual Hebrew # "windows-1255" - Logical Hebrew # "ISO-8859-8-I" - Logical Hebrew # "x-mac-hebrew" - ?? Logical Hebrew ?? # # Both "ISO" charsets use a completely identical set of code points, whereas # "windows-1255" and "x-mac-hebrew" are two different proper supersets of # these code points. windows-1255 defines additional characters in the range # 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific # diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6. # x-mac-hebrew defines similar additional code points but with a different # mapping. # # As far as an average Hebrew text with no diacritics is concerned, all four # charsets are identical with respect to code points. Meaning that for the # main Hebrew alphabet, all four map the same values to all 27 Hebrew letters # (including final letters). # # The dominant difference between these charsets is their directionality. # "Visual" directionality means that the text is ordered as if the renderer is # not aware of a BIDI rendering algorithm. The renderer sees the text and # draws it from left to right. The text itself when ordered naturally is read # backwards. A buffer of Visual Hebrew generally looks like so: # "[last word of first line spelled backwards] [whole line ordered backwards # and spelled backwards] [first word of first line spelled backwards] # [end of line] [last word of second line] ... etc' " # adding punctuation marks, numbers and English text to visual text is # naturally also "visual" and from left to right. # # "Logical" directionality means the text is ordered "naturally" according to # the order it is read. It is the responsibility of the renderer to display # the text from right to left. A BIDI algorithm is used to place general # punctuation marks, numbers and English text in the text. # # Texts in x-mac-hebrew are almost impossible to find on the Internet. From # what little evidence I could find, it seems that its general directionality # is Logical. # # To sum up all of the above, the Hebrew probing mechanism knows about two # charsets: # Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are # backwards while line order is natural. For charset recognition purposes # the line order is unimportant (In fact, for this implementation, even # word order is unimportant). # Logical Hebrew - "windows-1255" - normal, naturally ordered text. # # "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be # specifically identified. # "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew # that contain special punctuation marks or diacritics is displayed with # some unconverted characters showing as question marks. This problem might # be corrected using another model prober for x-mac-hebrew. Due to the fact # that x-mac-hebrew texts are so rare, writing another model prober isn't # worth the effort and performance hit. # #### The Prober #### # # The prober is divided between two SBCharSetProbers and a HebrewProber, # all of which are managed, created, fed data, inquired and deleted by the # SBCSGroupProber. The two SBCharSetProbers identify that the text is in # fact some kind of Hebrew, Logical or Visual. The final decision about which # one is it is made by the HebrewProber by combining final-letter scores # with the scores of the two SBCharSetProbers to produce a final answer. # # The SBCSGroupProber is responsible for stripping the original text of HTML # tags, English characters, numbers, low-ASCII punctuation characters, spaces # and new lines. It reduces any sequence of such characters to a single space. # The buffer fed to each prober in the SBCS group prober is pure text in # high-ASCII. # The two SBCharSetProbers (model probers) share the same language model: # Win1255Model. # The first SBCharSetProber uses the model normally as any other # SBCharSetProber does, to recognize windows-1255, upon which this model was # built. The second SBCharSetProber is told to make the pair-of-letter # lookup in the language model backwards. This in practice exactly simulates # a visual Hebrew model using the windows-1255 logical Hebrew model. # # The HebrewProber is not using any language model. All it does is look for # final-letter evidence suggesting the text is either logical Hebrew or visual # Hebrew. Disjointed from the model probers, the results of the HebrewProber # alone are meaningless. HebrewProber always returns 0.00 as confidence # since it never identifies a charset by itself. Instead, the pointer to the # HebrewProber is passed to the model probers as a helper "Name Prober". # When the Group prober receives a positive identification from any prober, # it asks for the name of the charset identified. If the prober queried is a # Hebrew model prober, the model prober forwards the call to the # HebrewProber to make the final decision. In the HebrewProber, the # decision is made according to the final-letters scores maintained and Both # model probers scores. The answer is returned in the form of the name of the # charset identified, either "windows-1255" or "ISO-8859-8". # windows-1255 / ISO-8859-8 code points of interest FINAL_KAF = '\xea' NORMAL_KAF = '\xeb' FINAL_MEM = '\xed' NORMAL_MEM = '\xee' FINAL_NUN = '\xef' NORMAL_NUN = '\xf0' FINAL_PE = '\xf3' NORMAL_PE = '\xf4' FINAL_TSADI = '\xf5' NORMAL_TSADI = '\xf6' # Minimum Visual vs Logical final letter score difference. # If the difference is below this, don't rely solely on the final letter score distance. MIN_FINAL_CHAR_DISTANCE = 5 # Minimum Visual vs Logical model score difference. # If the difference is below this, don't rely at all on the model score distance. MIN_MODEL_DISTANCE = 0.01 VISUAL_HEBREW_NAME = "ISO-8859-8" LOGICAL_HEBREW_NAME = "windows-1255" class HebrewProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) self._mLogicalProber = None self._mVisualProber = None self.reset() def reset(self): self._mFinalCharLogicalScore = 0 self._mFinalCharVisualScore = 0 # The two last characters seen in the previous buffer, # mPrev and mBeforePrev are initialized to space in order to simulate a word # delimiter at the beginning of the data self._mPrev = ' ' self._mBeforePrev = ' ' # These probers are owned by the group prober. def set_model_probers(self, logicalProber, visualProber): self._mLogicalProber = logicalProber self._mVisualProber = visualProber def is_final(self, c): return c in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE, FINAL_TSADI] def is_non_final(self, c): # The normal Tsadi is not a good Non-Final letter due to words like # 'lechotet' (to chat) containing an apostrophe after the tsadi. This # apostrophe is converted to a space in FilterWithoutEnglishLetters causing # the Non-Final tsadi to appear at an end of a word even though this is not # the case in the original text. # The letters Pe and Kaf rarely display a related behavior of not being a # good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' for # example legally end with a Non-Final Pe or Kaf. However, the benefit of # these letters as Non-Final letters outweighs the damage since these words # are quite rare. return c in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE] def feed(self, aBuf): # Final letter analysis for logical-visual decision. # Look for evidence that the received buffer is either logical Hebrew or # visual Hebrew. # The following cases are checked: # 1) A word longer than 1 letter, ending with a final letter. This is an # indication that the text is laid out "naturally" since the final letter # really appears at the end. +1 for logical score. # 2) A word longer than 1 letter, ending with a Non-Final letter. In normal # Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, should not end with # the Non-Final form of that letter. Exceptions to this rule are mentioned # above in isNonFinal(). This is an indication that the text is laid out # backwards. +1 for visual score # 3) A word longer than 1 letter, starting with a final letter. Final letters # should not appear at the beginning of a word. This is an indication that # the text is laid out backwards. +1 for visual score. # # The visual score and logical score are accumulated throughout the text and # are finally checked against each other in GetCharSetName(). # No checking for final letters in the middle of words is done since that case # is not an indication for either Logical or Visual text. # # We automatically filter out all 7-bit characters (replace them with spaces) # so the word boundary detection works properly. [MAP] if self.get_state() == constants.eNotMe: # Both model probers say it's not them. No reason to continue. return constants.eNotMe aBuf = self.filter_high_bit_only(aBuf) for cur in aBuf: if cur == ' ': # We stand on a space - a word just ended if self._mBeforePrev != ' ': # next-to-last char was not a space so self._mPrev is not a 1 letter word if self.is_final(self._mPrev): # case (1) [-2:not space][-1:final letter][cur:space] self._mFinalCharLogicalScore += 1 elif self.is_non_final(self._mPrev): # case (2) [-2:not space][-1:Non-Final letter][cur:space] self._mFinalCharVisualScore += 1 else: # Not standing on a space if (self._mBeforePrev == ' ') and (self.is_final(self._mPrev)) and (cur != ' '): # case (3) [-2:space][-1:final letter][cur:not space] self._mFinalCharVisualScore += 1 self._mBeforePrev = self._mPrev self._mPrev = cur # Forever detecting, till the end or until both model probers return eNotMe (handled above) return constants.eDetecting def get_charset_name(self): # Make the decision: is it Logical or Visual? # If the final letter score distance is dominant enough, rely on it. finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore if finalsub >= MIN_FINAL_CHAR_DISTANCE: return LOGICAL_HEBREW_NAME if finalsub <= -MIN_FINAL_CHAR_DISTANCE: return VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. modelsub = self._mLogicalProber.get_confidence() - self._mVisualProber.get_confidence() if modelsub > MIN_MODEL_DISTANCE: return LOGICAL_HEBREW_NAME if modelsub < -MIN_MODEL_DISTANCE: return VISUAL_HEBREW_NAME # Still no good, back to final letter distance, maybe it'll save the day. if finalsub < 0.0: return VISUAL_HEBREW_NAME # (finalsub > 0 - Logical) or (don't know what to do) default to Logical. return LOGICAL_HEBREW_NAME def get_state(self): # Remain active as long as any of the model probers are active. if (self._mLogicalProber.get_state() == constants.eNotMe) and \ (self._mVisualProber.get_state() == constants.eNotMe): return constants.eNotMe return constants.eDetecting
mit
curtisstpierre/django
django/contrib/gis/geos/collections.py
292
4986
""" This module houses the Geometry Collection objects: GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon """ import json from ctypes import byref, c_int, c_uint from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.geometry import ( GEOSGeometry, ProjectInterpolateMixin, ) from django.contrib.gis.geos.libgeos import get_pointer_arr from django.contrib.gis.geos.linestring import LinearRing, LineString from django.contrib.gis.geos.point import Point from django.contrib.gis.geos.polygon import Polygon from django.utils.six.moves import range class GeometryCollection(GEOSGeometry): _typeid = 7 def __init__(self, *args, **kwargs): "Initializes a Geometry Collection from a sequence of Geometry objects." # Checking the arguments if not args: raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__) if len(args) == 1: # If only one geometry provided or a list of geometries is provided # in the first argument. if isinstance(args[0], (tuple, list)): init_geoms = args[0] else: init_geoms = args else: init_geoms = args # Ensuring that only the permitted geometries are allowed in this collection # this is moved to list mixin super class self._check_allowed(init_geoms) # Creating the geometry pointer array. collection = self._create_collection(len(init_geoms), iter(init_geoms)) super(GeometryCollection, self).__init__(collection, **kwargs) def __iter__(self): "Iterates over each Geometry in the Collection." for i in range(len(self)): yield self[i] def __len__(self): "Returns the number of geometries in this Collection." return self.num_geom # ### Methods for compatibility with ListMixin ### def _create_collection(self, length, items): # Creating the geometry pointer array. geoms = get_pointer_arr(length) for i, g in enumerate(items): # this is a little sloppy, but makes life easier # allow GEOSGeometry types (python wrappers) or pointer types geoms[i] = capi.geom_clone(getattr(g, 'ptr', g)) return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length)) def _get_single_internal(self, index): return capi.get_geomn(self.ptr, index) def _get_single_external(self, index): "Returns the Geometry from this Collection at the given index (0-based)." # Checking the index and returning the corresponding GEOS geometry. return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid) def _set_list(self, length, items): "Create a new collection, and destroy the contents of the previous pointer." prev_ptr = self.ptr srid = self.srid self.ptr = self._create_collection(length, items) if srid: self.srid = srid capi.destroy_geom(prev_ptr) _set_single = GEOSGeometry._set_single_rebuild _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild @property def json(self): if self.__class__.__name__ == 'GeometryCollection': return json.dumps({ 'type': self.__class__.__name__, 'geometries': [ {'type': geom.__class__.__name__, 'coordinates': geom.coords} for geom in self ], }) return super(GeometryCollection, self).json geojson = json @property def kml(self): "Returns the KML for this Geometry Collection." return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self) @property def tuple(self): "Returns a tuple of all the coordinates in this Geometry Collection" return tuple(g.tuple for g in self) coords = tuple # MultiPoint, MultiLineString, and MultiPolygon class definitions. class MultiPoint(GeometryCollection): _allowed = Point _typeid = 4 class MultiLineString(ProjectInterpolateMixin, GeometryCollection): _allowed = (LineString, LinearRing) _typeid = 5 @property def merged(self): """ Returns a LineString representing the line merge of this MultiLineString. """ return self._topology(capi.geos_linemerge(self.ptr)) class MultiPolygon(GeometryCollection): _allowed = Polygon _typeid = 6 @property def cascaded_union(self): "Returns a cascaded union of this MultiPolygon." return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid) # Setting the allowed types here since GeometryCollection is defined before # its subclasses. GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
bsd-3-clause
TRESCLOUD/odoopub
openerp/addons/base/__openerp__.py
336
3703
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Base', 'version': '1.3', 'category': 'Hidden', 'description': """ The kernel of OpenERP, needed for all installation. =================================================== """, 'author': 'OpenERP SA', 'maintainer': 'OpenERP SA', 'website': 'http://www.openerp.com', 'depends': [], 'data': [ 'base_data.xml', 'res/res_currency_data.xml', 'res/res_country_data.xml', 'security/base_security.xml', 'base_menu.xml', 'res/res_config.xml', 'res/res.country.state.csv', 'ir/ir_actions.xml', 'ir/ir_config_parameter_view.xml', 'ir/ir_cron_view.xml', 'ir/ir_filters.xml', 'ir/ir_mail_server_view.xml', 'ir/ir_model_view.xml', 'ir/ir_attachment_view.xml', 'ir/ir_rule_view.xml', 'ir/ir_sequence_view.xml', 'ir/ir_translation_view.xml', 'ir/ir_ui_menu_view.xml', 'ir/ir_ui_view_view.xml', 'ir/ir_values_view.xml', 'ir/osv_memory_autovacuum.xml', 'ir/ir_model_report.xml', 'ir/ir_logging_view.xml', 'ir/ir_qweb.xml', 'workflow/workflow_view.xml', 'module/module_view.xml', 'module/module_data.xml', 'module/module_report.xml', 'module/wizard/base_module_update_view.xml', 'module/wizard/base_language_install_view.xml', 'module/wizard/base_import_language_view.xml', 'module/wizard/base_module_upgrade_view.xml', 'module/wizard/base_module_configuration_view.xml', 'module/wizard/base_export_language_view.xml', 'module/wizard/base_update_translations_view.xml', 'module/wizard/base_module_immediate_install.xml', 'res/res_company_view.xml', 'res/res_request_view.xml', 'res/res_lang_view.xml', 'res/res_partner_report.xml', 'res/res_partner_view.xml', 'res/res_bank_view.xml', 'res/res_country_view.xml', 'res/res_currency_view.xml', 'res/res_users_view.xml', 'res/res_partner_data.xml', 'res/ir_property_view.xml', 'res/res_security.xml', 'security/ir.model.access.csv', ], 'demo': [ 'base_demo.xml', 'res/res_partner_demo.xml', 'res/res_partner_demo.yml', 'res/res_partner_image_demo.xml', ], 'test': [ 'tests/base_test.yml', 'tests/test_osv_expression.yml', 'tests/test_ir_rule.yml', # <-- These tests modify/add/delete ir_rules. ], 'installable': True, 'auto_install': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
CanalTP/fabric_navitia
integration_tests/test_kraken/test_distributed.py
2
8607
# encoding: utf-8 import time from ..test_common import skipifdev from ..test_common.test_kraken import (_test_stop_restart_kraken, _test_stop_start_apache, _test_test_kraken_nowait_nofail, ) from ..utils import get_running_krakens SHOW_CALL_TRACKER_DATA = False instances_names = {'us-wa', 'fr-nw', 'fr-npdc', 'fr-ne-amiens', 'fr-idf', 'fr-cen'} nominal_krakens = {'host1': {'us-wa', 'fr-nw', 'fr-npdc'}, 'host2': {'fr-ne-amiens', 'fr-idf', 'fr-cen'}} krakens_after_stop = {'host1': {'fr-nw', 'fr-npdc'}, 'host2': {'fr-idf', 'fr-cen'}} # @skipifdev def test_kraken_setup(distributed): platform, fabric = distributed assert platform.path_exists('/var/log/kraken') for krak in nominal_krakens['host1']: assert platform.path_exists('/etc/init.d/kraken_{}'.format(krak), 'host1') assert platform.path_exists('/etc/init.d/kraken_{}'.format(krak), 'host2', negate=True) assert platform.path_exists('/etc/jormungandr.d/{}.json'.format(krak), 'host1') assert platform.path_exists('/etc/jormungandr.d/{}.json'.format(krak), 'host2', negate=True) assert platform.path_exists('/srv/kraken/{}/kraken.ini'.format(krak), 'host1') assert platform.path_exists('/srv/kraken/{}/kraken.ini'.format(krak), 'host2', negate=True) assert platform.path_exists('/var/log/kraken/{}.log'.format(krak), 'host1') assert platform.path_exists('/var/log/kraken/{}.log'.format(krak), 'host2', negate=True) for krak in nominal_krakens['host2']: assert platform.path_exists('/etc/init.d/kraken_{}'.format(krak), 'host2') assert platform.path_exists('/etc/init.d/kraken_{}'.format(krak), 'host1', negate=True) assert platform.path_exists('/etc/jormungandr.d/{}.json'.format(krak), 'host1') assert platform.path_exists('/etc/jormungandr.d/{}.json'.format(krak), 'host2', negate=True) assert platform.path_exists('/srv/kraken/{}/kraken.ini'.format(krak), 'host2') assert platform.path_exists('/srv/kraken/{}/kraken.ini'.format(krak), 'host1', negate=True) assert platform.path_exists('/var/log/kraken/{}.log'.format(krak), 'host2') assert platform.path_exists('/var/log/kraken/{}.log'.format(krak), 'host1', negate=True) @skipifdev def test_stop_restart_single_kraken(distributed): _test_stop_restart_kraken(distributed, map_start=nominal_krakens, map_stop=krakens_after_stop, stop_pat=('stop_kraken', ('us-wa', 'fr-ne-amiens')), start_pat=('component.kraken.restart_kraken', ('us-wa', 'fr-ne-amiens'), dict(test=False)) ) @skipifdev def test_restart_all_krakens(distributed): _test_stop_restart_kraken(distributed, map_start=nominal_krakens, map_stop=krakens_after_stop, stop_pat=('stop_kraken', ('us-wa', 'fr-ne-amiens')), start_pat=('restart_all_krakens', (), dict(wait=False)) ) @skipifdev def test_stop_require_start_kraken(distributed): _test_stop_restart_kraken(distributed, map_start=nominal_krakens, map_stop=krakens_after_stop, stop_pat=('stop_kraken', ('us-wa', 'fr-ne-amiens')), start_pat=('require_kraken_started', ('us-wa', 'fr-ne-amiens'), {}), ) @skipifdev def test_require_all_krakens_started(distributed): _test_stop_restart_kraken(distributed, map_start=nominal_krakens, map_stop=krakens_after_stop, stop_pat=('stop_kraken', ('us-wa', 'fr-ne-amiens')), start_pat=('require_all_krakens_started', (), {}), ) @skipifdev def test_stop_start_apache(distributed): time.sleep(2) _, fabric = distributed with fabric.set_call_tracker('component.kraken.require_kraken_started') as data: _test_stop_start_apache(distributed, ('host1', 'host2')) # task require_kraken_started is called for each instance assert set((x[0][0].name for x in data()['require_kraken_started'])) == instances_names @skipifdev def test_test_kraken_nowait_nofail(distributed, capsys): # wait for krakens to be fully started time.sleep(15) _test_test_kraken_nowait_nofail(distributed, capsys, map={'host1': {'us-wa'}, 'host2': {'fr-ne-amiens'}}, ret_val=False) # TODO https://ci.navitia.io/job/deploy_navitia_on_internal/35/console @skipifdev def test_get_no_data_instances(distributed, capsys): platform, fabric = distributed time.sleep(2) fabric.execute('component.kraken.get_no_data_instances') stdout, stderr = capsys.readouterr() assert stdout.count('NOTICE: ') == len(fabric.env.instances) for instance in fabric.env.instances: assert "NOTICE: no data for {}, append it to exclude list".format(instance) in stdout assert set(fabric.env.excluded_instances) == set(fabric.env.instances) @skipifdev def test_test_all_krakens_no_wait(distributed): platform, fabric = distributed # wait for krakens to be fully started time.sleep(15) value, exception, stdout, stderr = fabric.execute_forked('test_all_krakens') assert stdout.count('WARNING: ') == len(fabric.env.instances) for instance in fabric.env.instances: assert stdout.count("WARNING: instance {} has no loaded data".format(instance)) == 1 @skipifdev def test_check_dead_instances(distributed): platform, fabric = distributed value, exception, stdout, stderr = fabric.execute_forked('component.kraken.check_dead_instances') assert value is None assert isinstance(exception, SystemExit) assert 'The threshold of allowed dead instances is exceeded: ' \ 'Found 6 dead instances out of 6.' in stdout @skipifdev def test_create_remove_eng_instance(distributed): platform, fabric = distributed fabric.get_object('instance.add_instance')('toto', 'passwd', zmq_socket_port=30004, zmq_server=fabric.env.host1_ip) with fabric.set_call_tracker('component.kraken.update_eng_instance_conf') as data: value, exception, stdout, stderr = fabric.execute_forked('create_eng_instance', 'toto') if SHOW_CALL_TRACKER_DATA: from pprint import pprint pprint(dict(data())) # there is only one call to update_eng_instance_conf assert len(data()['update_eng_instance_conf']) == 1 host_string = 'root@{}'.format(platform.get_hosts()['host1']) # first parameter is the newly created instance assert data()['update_eng_instance_conf'][0][0][0].name == 'toto' # second parameter is the host string assert data()['update_eng_instance_conf'][0][0][1] == host_string # host string is also set assert data()['update_eng_instance_conf'][0][-1] == host_string time.sleep(2) assert 'INFO: kraken toto instance is starting on {}'.format(platform.get_hosts()['host1']) in stdout assert 'INFO: kraken toto instance is running on {}'.format(platform.get_hosts()['host1']) in stdout assert platform.path_exists('/srv/kraken/toto/kraken.ini', 'host1') assert platform.path_exists('/etc/init.d//kraken_toto', 'host1') assert platform.path_exists('/var/log/kraken/toto.log', 'host1') assert platform.path_exists('/srv/kraken/toto/kraken.ini', 'host2', negate=True) assert platform.path_exists('/etc/init.d//kraken_toto', 'host2', negate=True) assert platform.path_exists('/var/log/kraken/toto.log', 'host2', negate=True) # check that new kraken is running on host1 but not host2 assert set(get_running_krakens(platform, 'host1')) == {'toto'} | nominal_krakens['host1'] assert set(get_running_krakens(platform, 'host2')) == nominal_krakens['host2'] fabric.execute('remove_kraken_instance', 'toto', purge_logs=True) assert platform.path_exists('/srv/kraken/toto/kraken.ini', 'host1', negate=True) assert platform.path_exists('/etc/init.d//kraken_toto', 'host1', negate=True) assert platform.path_exists('/var/log/kraken/toto.log', 'host1', negate=True) assert set(get_running_krakens(platform, 'host1')) == nominal_krakens['host1'] assert set(get_running_krakens(platform, 'host2')) == nominal_krakens['host2']
agpl-3.0
vegetableman/phantomjs
src/qt/qtwebkit/Tools/QueueStatusServer/config/charts.py
122
2458
# Copyright (C) 2013 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. patch_log_limit = 500 # All units are represented numerically as seconds. one_minute = 60.0 one_hour = one_minute * 60.0 one_day = one_hour * 24.0 one_month = one_day * 30.0 # How far back to view the history, specified in seconds. view_range_choices = [ {"name": "1 day", "view_range": one_day}, {"name": "1 week", "view_range": one_day * 7}, {"name": "1 month", "view_range": one_month}, ] default_view_range = one_day _time_units = [ #(threshold, time unit, name) (0, one_hour, "hours"), (4 * one_day, one_day, "days"), (3 * one_month, one_month, "months"), ] def get_time_unit(view_range): current_threshold, current_time_unit, current_name = _time_units[0] for threshold, time_unit, name in _time_units[1:]: if view_range >= threshold: current_time_unit, current_name = time_unit, name else: break return current_time_unit, current_name
bsd-3-clause
dpmatthews/cylc
lib/parsec/tests/test_parsec.py
2
1416
#!/usr/bin/env python3 # THIS FILE IS PART OF THE CYLC SUITE ENGINE. # Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from parsec.exceptions import ParsecError class TestParsec(unittest.TestCase): def test_parsec_error_msg(self): parsec_error = ParsecError() self.assertEqual('', str(parsec_error)) parsec_error = ParsecError('foo') self.assertEqual('foo', str(parsec_error)) parsec_error = ParsecError('foo', 'bar', 'baz') self.assertEqual('foo bar baz', str(parsec_error)) def test_parsec_error_str(self): msg = 'Turbulence!' parsec_error = ParsecError(msg) self.assertEqual(msg, str(parsec_error)) if __name__ == '__main__': unittest.main()
gpl-3.0
southpawtech/TACTIC-DEV
src/pyasm/biz/hierarchical_status_attr.py
6
3607
########################################################### # # Copyright (c) 2005, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # __all__ = ["HierarchicalStatusAttr"] import string, types from pyasm.common import * from pyasm.search import SObjectAttr from pipeline import Pipeline from status_attr import * class HierarchicalStatusAttr(SObjectAttr): '''A more complex attribute that uses any number of pipelines''' def __init__(my, name, sobject): SObjectAttr.__init__(my, name, sobject) def init(my): my.pipelines = [] pipeline_types = my.get_option("pipeline").split(",") for pipeline_type in pipeline_types: pipeline = Pipeline.get_by_name(pipeline_type) if pipeline == None: raise SetupException("Pipeline '%s' does not exist" % \ pipeline_type ) my.pipelines.append(pipeline) my.level = 1 def set_level(my, level): my.level = level def _get_seq_values(my): num_pipelines = len(my.pipelines) value = my.sobject.get_value(my.name) values = value.split(",") for i in range(len(values), num_pipelines): values.append("") return values def get_value(my): values = my._get_seq_values() return values[my.level] def set_value(my, value): values = my._get_seq_values() # when a child level is done, the parent level is pushed up one. processes = ['roughDesign','finalColor','colorKey','flashReady'] if my.level != 0: if value == "publish": values[my.level-1] = processes[1] value = "artist" # when a child level is at the first level, the parent is # pushed down elif value == "artist": values[my.level-1] = processes[0] value = "check" values[my.level] = value current_value = ",".join(values) my.sobject.set_value(my.name, current_value) def get_web_display(my): values = my._get_seq_values() return values def get_pipeline(my): return my.pipelines[my.level] def set_status(my, status): my.set_value(status) def get_current_process(my): '''The current process is the process that we are currently at depending what level we are looking at''' pipeline = my.get_pipeline() if my.get_value() == "": return pipeline.get_processes()[0] else: return pipeline.get_process(my.get_value() ) def get_completion(my): '''finds the completion of this status. Returns a number from 0 to 1''' context = my.get_current_process() completion = context.get_completion() if completion != "": return float(completion)/100 # calculate the completion of the asset if there are no percentages pipeline = my.get_pipeline() processes = pipeline.get_processes() if len(processes) == 1: return 1 percent = 0.0 for process in processes: if context == process: break percent += 1.0 percent = percent/(len(processes)-1) return percent def get_percent_completion(my): return int(my.get_completion() * 100)
epl-1.0
dnozay/lettuce
tests/integration/lib/Django-1.2.5/django/utils/timesince.py
319
2698
import datetime import time from django.utils.tzinfo import LocalTimezone from django.utils.translation import ungettext, ugettext def timesince(d, now=None): """ Takes two datetime objects and returns the time between d and now as a nicely formatted string, e.g. "10 minutes". If d occurs after now, then "0 minutes" is returned. Units used are years, months, weeks, days, hours, and minutes. Seconds and microseconds are ignored. Up to two adjacent units will be displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not. Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since """ chunks = ( (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)), (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)), (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)), (60 * 60 * 24, lambda n : ungettext('day', 'days', n)), (60 * 60, lambda n: ungettext('hour', 'hours', n)), (60, lambda n: ungettext('minute', 'minutes', n)) ) # Convert datetime.date to datetime.datetime for comparison. if not isinstance(d, datetime.datetime): d = datetime.datetime(d.year, d.month, d.day) if now and not isinstance(now, datetime.datetime): now = datetime.datetime(now.year, now.month, now.day) if not now: if d.tzinfo: now = datetime.datetime.now(LocalTimezone(d)) else: now = datetime.datetime.now() # ignore microsecond part of 'd' since we removed it from 'now' delta = now - (d - datetime.timedelta(0, 0, d.microsecond)) since = delta.days * 24 * 60 * 60 + delta.seconds if since <= 0: # d is in the future compared to now, stop processing. return u'0 ' + ugettext('minutes') for i, (seconds, name) in enumerate(chunks): count = since // seconds if count != 0: break s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)} if i + 1 < len(chunks): # Now get the second item seconds2, name2 = chunks[i + 1] count2 = (since - (seconds * count)) // seconds2 if count2 != 0: s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)} return s def timeuntil(d, now=None): """ Like timesince, but returns a string measuring the time until the given time. """ if not now: if getattr(d, 'tzinfo', None): now = datetime.datetime.now(LocalTimezone(d)) else: now = datetime.datetime.now() return timesince(now, d)
gpl-3.0
jackru/pybrain
pybrain/rl/environments/mazes/tasks/mdp.py
31
1145
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de' from pybrain.rl.environments import Task from scipy import array class MDPMazeTask(Task): """ This is a MDP task for the MazeEnvironment. The state is fully observable, giving the agent the current position of perseus. Reward is given on reaching the goal, otherwise no reward. """ def getReward(self): """ compute and return the current reward (i.e. corresponding to the last action performed) """ if self.env.goal == self.env.perseus: self.env.reset() reward = 1. else: reward = 0. return reward def performAction(self, action): """ The action vector is stripped and the only element is cast to integer and given to the super class. """ Task.performAction(self, int(action[0])) def getObservation(self): """ The agent receives its position in the maze, to make this a fully observable MDP problem. """ obs = array([self.env.perseus[0] * self.env.mazeTable.shape[0] + self.env.perseus[1]]) return obs
bsd-3-clause
TravisCG/SI_scripts
vcfrevange.py
1
1407
#!/usr/bin/python # This script is a very complicated one, because I need # to use another script which is not suitable for a task # but there is no better solution. # # So this hack helps me to run another hack. import sys import os table = open(sys.argv[1]) table.readline() for i in table: fields = i.rstrip().split("\t") samplename = fields[1] normalname = fields[11].replace(".bam", "") os.symlink(fields[5], "runstuff3/"+samplename+".cave.annot.vcf.gz") os.symlink(fields[6], "runstuff3/"+normalname+".bam") # Exome bam os.symlink(fields[7], "runstuff3/"+normalname+".bam.bai") # Exome bam index file os.symlink(fields[9], "runstuff3/"+samplename+".bam") # RNA-seq os.symlink(fields[10],"runstuff3/"+samplename+".bam.bai") # RNA-seq index file ini = open("runstuff3/simple.ini", "w") ini.write("[111]\n") ini.write(samplename + "=" + samplename + "\n\n") ini.write("[genome_build]\ngenome=human\nbuild=37\n") ini.close() os.system("/software/CGP/projects/vcfCommons/perl/bin/mergeAndPileup.pl -i runstuff3/simple.ini -d runstuff3 -a snp -s 1 -o output3 -f cave_java") os.remove("runstuff3/"+samplename+".cave.annot.vcf.gz") os.remove("runstuff3/"+normalname+".bam") os.remove("runstuff3/"+normalname+".bam.bai") os.remove("runstuff3/"+samplename+".bam") os.remove("runstuff3/"+samplename+".bam.bai") table.close()
gpl-3.0
b1-systems/kiwi
test/unit/storage_subformat_vdi_test.py
2
1223
from mock import patch import mock from kiwi.storage.subformat.vdi import DiskFormatVdi class TestDiskFormatVdi(object): @patch('platform.machine') def setup(self, mock_machine): mock_machine.return_value = 'x86_64' xml_data = mock.Mock() xml_data.get_name = mock.Mock( return_value='some-disk-image' ) self.xml_state = mock.Mock() self.xml_state.xml_data = xml_data self.xml_state.get_image_version = mock.Mock( return_value='1.2.3' ) self.disk_format = DiskFormatVdi( self.xml_state, 'root_dir', 'target_dir' ) def test_post_init(self): self.disk_format.post_init({'option': 'value'}) assert self.disk_format.options == ['-o', 'option=value'] @patch('kiwi.storage.subformat.vdi.Command.run') def test_create_image_format(self, mock_command): self.disk_format.create_image_format() mock_command.assert_called_once_with( [ 'qemu-img', 'convert', '-f', 'raw', 'target_dir/some-disk-image.x86_64-1.2.3.raw', '-O', 'vdi', 'target_dir/some-disk-image.x86_64-1.2.3.vdi' ] )
gpl-3.0
blueyed/pip
pip/_vendor/requests/packages/urllib3/poolmanager.py
550
8977
# urllib3/poolmanager.py # Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import logging try: # Python 3 from urllib.parse import urljoin except ImportError: from urlparse import urljoin from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import port_by_scheme from .request import RequestMethods from .util import parse_url __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] pool_classes_by_scheme = { 'http': HTTPConnectionPool, 'https': HTTPSConnectionPool, } log = logging.getLogger(__name__) SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', 'ssl_version') class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: :: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) def _new_pool(self, scheme, host, port): """ Create a new :class:`ConnectionPool` based on host, port and scheme. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = pool_classes_by_scheme[scheme] kwargs = self.connection_pool_kw if scheme == 'http': kwargs = self.connection_pool_kw.copy() for kw in SSL_KEYWORDS: kwargs.pop(kw, None) return pool_cls(host, port, **kwargs) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme='http'): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. """ scheme = scheme or 'http' port = port or port_by_scheme.get(scheme, 80) pool_key = (scheme, host, port) with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type pool = self._new_pool(scheme, host, port) self.pools[pool_key] = pool return pool def connection_from_url(self, url): """ Similar to :func:`urllib3.connectionpool.connection_from_url` but doesn't pass any additional parameters to the :class:`urllib3.connectionpool.ConnectionPool` constructor. Additional parameters are taken from the :class:`.PoolManager` constructor. """ u = parse_url(url) return self.connection_from_host(u.host, port=u.port, scheme=u.scheme) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw['assert_same_host'] = False kw['redirect'] = False if 'headers' not in kw: kw['headers'] = self.headers if self.proxy is not None and u.scheme == "http": response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) # RFC 2616, Section 10.3.4 if response.status == 303: method = 'GET' log.info("Redirecting %s -> %s" % (url, redirect_location)) kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown kw['redirect'] = redirect return self.urlopen(method, redirect_location, **kw) class ProxyManager(PoolManager): """ Behaves just like :class:`PoolManager`, but sends all requests through the defined proxy, using the CONNECT method for HTTPS URLs. :param proxy_url: The URL of the proxy to be used. :param proxy_headers: A dictionary contaning headers that will be sent to the proxy. In case of HTTP they are being sent with each request, while in the HTTPS/CONNECT case they are sent only once. Could be used for proxy authentication. Example: >>> proxy = urllib3.ProxyManager('http://localhost:3128/') >>> r1 = proxy.request('GET', 'http://google.com/') >>> r2 = proxy.request('GET', 'http://httpbin.org/') >>> len(proxy.pools) 1 >>> r3 = proxy.request('GET', 'https://httpbin.org/') >>> r4 = proxy.request('GET', 'https://twitter.com/') >>> len(proxy.pools) 3 """ def __init__(self, proxy_url, num_pools=10, headers=None, proxy_headers=None, **connection_pool_kw): if isinstance(proxy_url, HTTPConnectionPool): proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, proxy_url.port) proxy = parse_url(proxy_url) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) proxy = proxy._replace(port=port) self.proxy = proxy self.proxy_headers = proxy_headers or {} assert self.proxy.scheme in ("http", "https"), \ 'Not supported proxy scheme %s' % self.proxy.scheme connection_pool_kw['_proxy'] = self.proxy connection_pool_kw['_proxy_headers'] = self.proxy_headers super(ProxyManager, self).__init__( num_pools, headers, **connection_pool_kw) def connection_from_host(self, host, port=None, scheme='http'): if scheme == "https": return super(ProxyManager, self).connection_from_host( host, port, scheme) return super(ProxyManager, self).connection_from_host( self.proxy.host, self.proxy.port, self.proxy.scheme) def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ headers_ = {'Accept': '*/*'} netloc = parse_url(url).netloc if netloc: headers_['Host'] = netloc if headers: headers_.update(headers) return headers_ def urlopen(self, method, url, redirect=True, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." u = parse_url(url) if u.scheme == "http": # For proxied HTTPS requests, httplib sets the necessary headers # on the CONNECT to the proxy. For HTTP, we'll definitely # need to set 'Host' at the very least. kw['headers'] = self._set_proxy_headers(url, kw.get('headers', self.headers)) return super(ProxyManager, self).urlopen(method, url, redirect, **kw) def proxy_from_url(url, **kw): return ProxyManager(proxy_url=url, **kw)
mit
thedrow/django
tests/select_related_onetoone/tests.py
301
10516
from __future__ import unicode_literals import unittest from django.core.exceptions import FieldError from django.test import SimpleTestCase, TestCase from .models import ( AdvancedUserStat, Child1, Child2, Child3, Child4, Image, Parent1, Parent2, Product, StatDetails, User, UserProfile, UserStat, UserStatResult, ) class ReverseSelectRelatedTestCase(TestCase): def setUp(self): user = User.objects.create(username="test") UserProfile.objects.create(user=user, state="KS", city="Lawrence") results = UserStatResult.objects.create(results='first results') userstat = UserStat.objects.create(user=user, posts=150, results=results) StatDetails.objects.create(base_stats=userstat, comments=259) user2 = User.objects.create(username="bob") results2 = UserStatResult.objects.create(results='moar results') advstat = AdvancedUserStat.objects.create(user=user2, posts=200, karma=5, results=results2) StatDetails.objects.create(base_stats=advstat, comments=250) p1 = Parent1(name1="Only Parent1") p1.save() c1 = Child1(name1="Child1 Parent1", name2="Child1 Parent2", value=1) c1.save() p2 = Parent2(name2="Child2 Parent2") p2.save() c2 = Child2(name1="Child2 Parent1", parent2=p2, value=2) c2.save() def test_basic(self): with self.assertNumQueries(1): u = User.objects.select_related("userprofile").get(username="test") self.assertEqual(u.userprofile.state, "KS") def test_follow_next_level(self): with self.assertNumQueries(1): u = User.objects.select_related("userstat__results").get(username="test") self.assertEqual(u.userstat.posts, 150) self.assertEqual(u.userstat.results.results, 'first results') def test_follow_two(self): with self.assertNumQueries(1): u = User.objects.select_related("userprofile", "userstat").get(username="test") self.assertEqual(u.userprofile.state, "KS") self.assertEqual(u.userstat.posts, 150) def test_follow_two_next_level(self): with self.assertNumQueries(1): u = User.objects.select_related("userstat__results", "userstat__statdetails").get(username="test") self.assertEqual(u.userstat.results.results, 'first results') self.assertEqual(u.userstat.statdetails.comments, 259) def test_forward_and_back(self): with self.assertNumQueries(1): stat = UserStat.objects.select_related("user__userprofile").get(user__username="test") self.assertEqual(stat.user.userprofile.state, 'KS') self.assertEqual(stat.user.userstat.posts, 150) def test_back_and_forward(self): with self.assertNumQueries(1): u = User.objects.select_related("userstat").get(username="test") self.assertEqual(u.userstat.user.username, 'test') def test_not_followed_by_default(self): with self.assertNumQueries(2): u = User.objects.select_related().get(username="test") self.assertEqual(u.userstat.posts, 150) def test_follow_from_child_class(self): with self.assertNumQueries(1): stat = AdvancedUserStat.objects.select_related('user', 'statdetails').get(posts=200) self.assertEqual(stat.statdetails.comments, 250) self.assertEqual(stat.user.username, 'bob') def test_follow_inheritance(self): with self.assertNumQueries(1): stat = UserStat.objects.select_related('user', 'advanceduserstat').get(posts=200) self.assertEqual(stat.advanceduserstat.posts, 200) self.assertEqual(stat.user.username, 'bob') with self.assertNumQueries(1): self.assertEqual(stat.advanceduserstat.user.username, 'bob') def test_nullable_relation(self): im = Image.objects.create(name="imag1") p1 = Product.objects.create(name="Django Plushie", image=im) p2 = Product.objects.create(name="Talking Django Plushie") with self.assertNumQueries(1): result = sorted(Product.objects.select_related("image"), key=lambda x: x.name) self.assertEqual([p.name for p in result], ["Django Plushie", "Talking Django Plushie"]) self.assertEqual(p1.image, im) # Check for ticket #13839 self.assertIsNone(p2.image) def test_missing_reverse(self): """ Ticket #13839: select_related() should NOT cache None for missing objects on a reverse 1-1 relation. """ with self.assertNumQueries(1): user = User.objects.select_related('userprofile').get(username='bob') with self.assertRaises(UserProfile.DoesNotExist): user.userprofile def test_nullable_missing_reverse(self): """ Ticket #13839: select_related() should NOT cache None for missing objects on a reverse 0-1 relation. """ Image.objects.create(name="imag1") with self.assertNumQueries(1): image = Image.objects.select_related('product').get() with self.assertRaises(Product.DoesNotExist): image.product def test_parent_only(self): with self.assertNumQueries(1): p = Parent1.objects.select_related('child1').get(name1="Only Parent1") with self.assertNumQueries(0): with self.assertRaises(Child1.DoesNotExist): p.child1 def test_multiple_subclass(self): with self.assertNumQueries(1): p = Parent1.objects.select_related('child1').get(name1="Child1 Parent1") self.assertEqual(p.child1.name2, 'Child1 Parent2') def test_onetoone_with_subclass(self): with self.assertNumQueries(1): p = Parent2.objects.select_related('child2').get(name2="Child2 Parent2") self.assertEqual(p.child2.name1, 'Child2 Parent1') def test_onetoone_with_two_subclasses(self): with self.assertNumQueries(1): p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child2 Parent2") self.assertEqual(p.child2.name1, 'Child2 Parent1') with self.assertRaises(Child3.DoesNotExist): p.child2.child3 p3 = Parent2(name2="Child3 Parent2") p3.save() c2 = Child3(name1="Child3 Parent1", parent2=p3, value=2, value3=3) c2.save() with self.assertNumQueries(1): p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child3 Parent2") self.assertEqual(p.child2.name1, 'Child3 Parent1') self.assertEqual(p.child2.child3.value3, 3) self.assertEqual(p.child2.child3.value, p.child2.value) self.assertEqual(p.child2.name1, p.child2.child3.name1) def test_multiinheritance_two_subclasses(self): with self.assertNumQueries(1): p = Parent1.objects.select_related('child1', 'child1__child4').get(name1="Child1 Parent1") self.assertEqual(p.child1.name2, 'Child1 Parent2') self.assertEqual(p.child1.name1, p.name1) with self.assertRaises(Child4.DoesNotExist): p.child1.child4 Child4(name1='n1', name2='n2', value=1, value4=4).save() with self.assertNumQueries(1): p = Parent2.objects.select_related('child1', 'child1__child4').get(name2="n2") self.assertEqual(p.name2, 'n2') self.assertEqual(p.child1.name1, 'n1') self.assertEqual(p.child1.name2, p.name2) self.assertEqual(p.child1.value, 1) self.assertEqual(p.child1.child4.name1, p.child1.name1) self.assertEqual(p.child1.child4.name2, p.child1.name2) self.assertEqual(p.child1.child4.value, p.child1.value) self.assertEqual(p.child1.child4.value4, 4) @unittest.expectedFailure def test_inheritance_deferred(self): c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4) with self.assertNumQueries(1): p = Parent2.objects.select_related('child1').only( 'id2', 'child1__value').get(name2="n2") self.assertEqual(p.id2, c.id2) self.assertEqual(p.child1.value, 1) p = Parent2.objects.select_related('child1').only( 'id2', 'child1__value').get(name2="n2") with self.assertNumQueries(1): self.assertEqual(p.name2, 'n2') p = Parent2.objects.select_related('child1').only( 'id2', 'child1__value').get(name2="n2") with self.assertNumQueries(1): self.assertEqual(p.child1.name2, 'n2') @unittest.expectedFailure def test_inheritance_deferred2(self): c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4) qs = Parent2.objects.select_related('child1', 'child4').only( 'id2', 'child1__value', 'child1__child4__value4') with self.assertNumQueries(1): p = qs.get(name2="n2") self.assertEqual(p.id2, c.id2) self.assertEqual(p.child1.value, 1) self.assertEqual(p.child1.child4.value4, 4) self.assertEqual(p.child1.child4.id2, c.id2) p = qs.get(name2="n2") with self.assertNumQueries(1): self.assertEqual(p.child1.name2, 'n2') p = qs.get(name2="n2") with self.assertNumQueries(1): self.assertEqual(p.child1.name1, 'n1') with self.assertNumQueries(1): self.assertEqual(p.child1.child4.name1, 'n1') class ReverseSelectRelatedValidationTests(SimpleTestCase): """ Rverse related fields should be listed in the validation message when an invalid field is given in select_related(). """ non_relational_error = "Non-relational field given in select_related: '%s'. Choices are: %s" invalid_error = "Invalid field name(s) given in select_related: '%s'. Choices are: %s" def test_reverse_related_validation(self): fields = 'userprofile, userstat' with self.assertRaisesMessage(FieldError, self.invalid_error % ('foobar', fields)): list(User.objects.select_related('foobar')) with self.assertRaisesMessage(FieldError, self.non_relational_error % ('username', fields)): list(User.objects.select_related('username'))
bsd-3-clause
leoliujie/odoo
addons/mail/ir_attachment.py
378
5643
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014-TODAY OpenERP SA (http://www.openerp.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import os.path class IrAttachment(osv.Model): """ Update partner to add a field about notification preferences """ _name = "ir.attachment" _inherit = 'ir.attachment' _fileext_to_type = { '7z': 'archive', 'aac': 'audio', 'ace': 'archive', 'ai': 'vector', 'aiff': 'audio', 'apk': 'archive', 'app': 'binary', 'as': 'script', 'asf': 'video', 'ass': 'text', 'avi': 'video', 'bat': 'script', 'bin': 'binary', 'bmp': 'image', 'bzip2': 'archive', 'c': 'script', 'cab': 'archive', 'cc': 'script', 'ccd': 'disk', 'cdi': 'disk', 'cdr': 'vector', 'cer': 'certificate', 'cgm': 'vector', 'cmd': 'script', 'coffee': 'script', 'com': 'binary', 'cpp': 'script', 'crl': 'certificate', 'crt': 'certificate', 'cs': 'script', 'csr': 'certificate', 'css': 'html', 'csv': 'spreadsheet', 'cue': 'disk', 'd': 'script', 'dds': 'image', 'deb': 'archive', 'der': 'certificate', 'djvu': 'image', 'dmg': 'archive', 'dng': 'image', 'doc': 'document', 'docx': 'document', 'dvi': 'print', 'eot': 'font', 'eps': 'vector', 'exe': 'binary', 'exr': 'image', 'flac': 'audio', 'flv': 'video', 'gif': 'webimage', 'gz': 'archive', 'gzip': 'archive', 'h': 'script', 'htm': 'html', 'html': 'html', 'ico': 'image', 'icon': 'image', 'img': 'disk', 'iso': 'disk', 'jar': 'archive', 'java': 'script', 'jp2': 'image', 'jpe': 'webimage', 'jpeg': 'webimage', 'jpg': 'webimage', 'jpx': 'image', 'js': 'script', 'key': 'presentation', 'keynote': 'presentation', 'lisp': 'script', 'lz': 'archive', 'lzip': 'archive', 'm': 'script', 'm4a': 'audio', 'm4v': 'video', 'mds': 'disk', 'mdx': 'disk', 'mid': 'audio', 'midi': 'audio', 'mkv': 'video', 'mng': 'image', 'mp2': 'audio', 'mp3': 'audio', 'mp4': 'video', 'mpe': 'video', 'mpeg': 'video', 'mpg': 'video', 'nrg': 'disk', 'numbers': 'spreadsheet', 'odg': 'vector', 'odm': 'document', 'odp': 'presentation', 'ods': 'spreadsheet', 'odt': 'document', 'ogg': 'audio', 'ogm': 'video', 'otf': 'font', 'p12': 'certificate', 'pak': 'archive', 'pbm': 'image', 'pdf': 'print', 'pem': 'certificate', 'pfx': 'certificate', 'pgf': 'image', 'pgm': 'image', 'pk3': 'archive', 'pk4': 'archive', 'pl': 'script', 'png': 'webimage', 'pnm': 'image', 'ppm': 'image', 'pps': 'presentation', 'ppt': 'presentation', 'ps': 'print', 'psd': 'image', 'psp': 'image', 'py': 'script', 'r': 'script', 'ra': 'audio', 'rar': 'archive', 'rb': 'script', 'rpm': 'archive', 'rtf': 'text', 'sh': 'script', 'sub': 'disk', 'svg': 'vector', 'sxc': 'spreadsheet', 'sxd': 'vector', 'tar': 'archive', 'tga': 'image', 'tif': 'image', 'tiff': 'image', 'ttf': 'font', 'txt': 'text', 'vbs': 'script', 'vc': 'spreadsheet', 'vml': 'vector', 'wav': 'audio', 'webp': 'image', 'wma': 'audio', 'wmv': 'video', 'woff': 'font', 'xar': 'vector', 'xbm': 'image', 'xcf': 'image', 'xhtml': 'html', 'xls': 'spreadsheet', 'xlsx': 'spreadsheet', 'xml': 'html', 'zip': 'archive' } def get_attachment_type(self, cr, uid, ids, name, args, context=None): result = {} for attachment in self.browse(cr, uid, ids, context=context): fileext = os.path.splitext(attachment.datas_fname or '')[1].lower()[1:] result[attachment.id] = self._fileext_to_type.get(fileext, 'unknown') return result _columns = { 'file_type_icon': fields.function(get_attachment_type, type='char', string='File Type Icon'), 'file_type': fields.related('file_type_icon', type='char'), # FIXME remove in trunk }
agpl-3.0
sjdines/mezzanine
mezzanine/galleries/migrations/0001_initial.py
40
1837
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import mezzanine.core.fields class Migration(migrations.Migration): dependencies = [ ('pages', '__first__'), ] operations = [ migrations.CreateModel( name='Gallery', fields=[ ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')), ('content', mezzanine.core.fields.RichTextField(verbose_name='Content')), ('zip_import', models.FileField(help_text="Upload a zip file containing images, and they'll be imported into this gallery.", upload_to='galleries', verbose_name='Zip import', blank=True)), ], options={ 'ordering': ('_order',), 'verbose_name': 'Gallery', 'verbose_name_plural': 'Galleries', }, bases=('pages.page', models.Model), ), migrations.CreateModel( name='GalleryImage', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('_order', models.IntegerField(null=True, verbose_name='Order')), ('file', mezzanine.core.fields.FileField(max_length=200, verbose_name='File')), ('description', models.CharField(max_length=1000, verbose_name='Description', blank=True)), ('gallery', models.ForeignKey(related_name='images', to='galleries.Gallery')), ], options={ 'ordering': ('_order',), 'verbose_name': 'Image', 'verbose_name_plural': 'Images', }, bases=(models.Model,), ), ]
bsd-2-clause
inspirehep/invenio
modules/bibformat/lib/elements/bfe_record_url.py
11
1270
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - Prints the record URL. """ __revision__ = "$Id$" from invenio.config import \ CFG_SITE_URL, \ CFG_SITE_RECORD def format_element(bfo, with_ln="yes"): """ Prints the record URL. @param with_ln: if "yes" include "ln" attribute in the URL """ url = CFG_SITE_URL + "/" + CFG_SITE_RECORD + "/" + bfo.control_field('001') if with_ln.lower() == "yes": url += "?ln=" + bfo.lang return url
gpl-2.0
inspirehep/invenio
modules/oairepository/lib/oai_repository_webinterface.py
3
6039
## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Invenio OAI provider interface, compliant with OAI-PMH/2.0""" __revision__ = "$Id$" import os import time import cStringIO from invenio import webinterface_handler_config as apache from invenio import oai_repository_server from invenio.errorlib import register_exception from invenio.config import CFG_CACHEDIR, CFG_OAI_SLEEP, CFG_DEVEL_SITE, \ CFG_ETCDIR from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory CFG_VALIDATE_RESPONSES = False OAI_PMH_VALIDATOR = None if CFG_DEVEL_SITE: try: from lxml import etree OAI_PMH_VALIDATOR = etree.XMLSchema(etree.parse(open(os.path.join(CFG_ETCDIR, 'oairepository', 'OAI-PMH.xsd')))) CFG_VALIDATE_RESPONSES = True except ImportError: pass class WebInterfaceOAIProviderPages(WebInterfaceDirectory): """Defines the set of /oai2d OAI provider pages.""" _exports = [''] def __call__(self, req, form): """OAI repository interface""" # Clean input arguments. The protocol specifies that an error # has to be returned if the same argument is specified several # times. Eg: # oai2d?verb=ListIdentifiers&metadataPrefix=marcxml&metadataPrefix=marcxml # So keep the arguments as list for now so that check_argd can # return an error if needed (check_argd also transforms these # lists into strings) argd = wash_urlargd(form, {'verb': (list, []), 'metadataPrefix': (list, []), 'from': (list, []), 'until': (list, []), 'set': (list, []), 'identifier': (list, []), 'resumptionToken': (list, []), }) if CFG_VALIDATE_RESPONSES: req.track_writings = True ## wash_urlargd(..) function cleaned everything, but also added ## unwanted parameters. Remove them now for param in argd.keys(): if not param in form and param != 'verb': del argd[param] ## wash_urlargd(..) function also removed unknown parameters ## that we would like to keep in order to send back an error ## as required by the protocol. But we do not need that value, ## so set it to empty string. for param in form.keys(): if param not in argd.keys(): argd[param] = '' ## But still remove 'ln' parameter that was automatically added. if argd.has_key('ln'): del argd['ln'] ## check request for OAI compliancy ## also transform all the list arguments into string oai_errors = oai_repository_server.check_argd(argd) ## check availability (OAI requests for Identify, ListSets and ## ListMetadataFormats are served immediately, otherwise we ## shall wait for CFG_OAI_SLEEP seconds between requests): if os.path.exists("%s/RTdata/RTdata" % CFG_CACHEDIR) and (argd['verb'] not in ["Identify", "ListMetadataFormats", "ListSets"] and not argd.get('resumptionToken')): time_gap = int(time.time() - os.path.getmtime("%s/RTdata/RTdata" % CFG_CACHEDIR)) if(time_gap < CFG_OAI_SLEEP): req.headers_out["Status-Code"] = "503" req.headers_out["Retry-After"] = "%d" % (CFG_OAI_SLEEP - time_gap) req.status = apache.HTTP_SERVICE_UNAVAILABLE return "Retry after %d seconds" % (CFG_OAI_SLEEP - time_gap) command = "touch %s/RTdata/RTdata" % CFG_CACHEDIR os.system(command) ## create OAI response req.content_type = "text/xml; charset=utf-8" req.encoding = "UTF-8" req.send_http_header() if not oai_errors: ## OAI Identify if argd['verb'] == "Identify": req.write(oai_repository_server.oai_identify(argd)) ## OAI ListSets elif argd['verb'] == "ListSets": req.write(oai_repository_server.oai_list_sets(argd)) ## OAI ListIdentifiers or OAI ListRecords elif argd['verb'] in ("ListIdentifiers", "ListRecords"): oai_repository_server.oai_list_records_or_identifiers(req, argd) ## OAI GetRecord elif argd['verb'] == "GetRecord": req.write(oai_repository_server.oai_get_record(argd)) ## OAI ListMetadataFormats elif argd['verb'] == "ListMetadataFormats": req.write(oai_repository_server.oai_list_metadata_formats(argd)) ## Unknown verb ## OAI error else: req.write(oai_repository_server.oai_error(argd, oai_errors)) if CFG_VALIDATE_RESPONSES: req.track_writings = False try: OAI_PMH_VALIDATOR.assertValid(etree.parse(cStringIO.StringIO(req.what_was_written))) except etree.DocumentInvalid: register_exception(req=req, alert_admin=True) raise return "\n" ## Return the same page wether we ask for /oai2d?verb or /oai2d/?verb index = __call__
gpl-2.0
yvaucher/stock-logistics-barcode
__unported__/tr_barcode_on_product/__openerp__.py
3
1774
# -*- coding: utf-8 -*- ################################################################################# # # OpenERP, Open Source Management Solution # Copyright (C) 2012 Julius Network Solutions SARL <contact@julius.fr> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################# { "name": "Barcode for product", "version": "1.1", "author": "Julius Network Solutions", "website": "http://www.julius.fr", "category": "Warehouse Management", "depends": [ "product", "tr_barcode_config", "tr_barcode_field", ], "description": """ This module provides a product.product model deriving from barcode_osv, which will manage the population of the x_barcode_id column. It is still necessary to configure the model to specify which field is used to generate the barcode. """, "demo": [], "data": [ 'data/model_data.xml', 'data/config_data.xml', 'res_config_view.xml', ], "active": False, 'installable': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
doormon/doormon-server
server/lib/jinja2/testsuite/ext.py
402
18086
# -*- coding: utf-8 -*- """ jinja2.testsuite.ext ~~~~~~~~~~~~~~~~~~~~ Tests for the extensions. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import re import unittest from jinja2.testsuite import JinjaTestCase from jinja2 import Environment, DictLoader, contextfunction, nodes from jinja2.exceptions import TemplateAssertionError from jinja2.ext import Extension from jinja2.lexer import Token, count_newlines from jinja2._compat import next, BytesIO, itervalues, text_type importable_object = 23 _gettext_re = re.compile(r'_\((.*?)\)(?s)') i18n_templates = { 'master.html': '<title>{{ page_title|default(_("missing")) }}</title>' '{% block body %}{% endblock %}', 'child.html': '{% extends "master.html" %}{% block body %}' '{% trans %}watch out{% endtrans %}{% endblock %}', 'plural.html': '{% trans user_count %}One user online{% pluralize %}' '{{ user_count }} users online{% endtrans %}', 'plural2.html': '{% trans user_count=get_user_count() %}{{ user_count }}s' '{% pluralize %}{{ user_count }}p{% endtrans %}', 'stringformat.html': '{{ _("User: %(num)s")|format(num=user_count) }}' } newstyle_i18n_templates = { 'master.html': '<title>{{ page_title|default(_("missing")) }}</title>' '{% block body %}{% endblock %}', 'child.html': '{% extends "master.html" %}{% block body %}' '{% trans %}watch out{% endtrans %}{% endblock %}', 'plural.html': '{% trans user_count %}One user online{% pluralize %}' '{{ user_count }} users online{% endtrans %}', 'stringformat.html': '{{ _("User: %(num)s", num=user_count) }}', 'ngettext.html': '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}', 'ngettext_long.html': '{% trans num=apples %}{{ num }} apple{% pluralize %}' '{{ num }} apples{% endtrans %}', 'transvars1.html': '{% trans %}User: {{ num }}{% endtrans %}', 'transvars2.html': '{% trans num=count %}User: {{ num }}{% endtrans %}', 'transvars3.html': '{% trans count=num %}User: {{ count }}{% endtrans %}', 'novars.html': '{% trans %}%(hello)s{% endtrans %}', 'vars.html': '{% trans %}{{ foo }}%(foo)s{% endtrans %}', 'explicitvars.html': '{% trans foo="42" %}%(foo)s{% endtrans %}' } languages = { 'de': { 'missing': u'fehlend', 'watch out': u'pass auf', 'One user online': u'Ein Benutzer online', '%(user_count)s users online': u'%(user_count)s Benutzer online', 'User: %(num)s': u'Benutzer: %(num)s', 'User: %(count)s': u'Benutzer: %(count)s', '%(num)s apple': u'%(num)s Apfel', '%(num)s apples': u'%(num)s Äpfel' } } @contextfunction def gettext(context, string): language = context.get('LANGUAGE', 'en') return languages.get(language, {}).get(string, string) @contextfunction def ngettext(context, s, p, n): language = context.get('LANGUAGE', 'en') if n != 1: return languages.get(language, {}).get(p, p) return languages.get(language, {}).get(s, s) i18n_env = Environment( loader=DictLoader(i18n_templates), extensions=['jinja2.ext.i18n'] ) i18n_env.globals.update({ '_': gettext, 'gettext': gettext, 'ngettext': ngettext }) newstyle_i18n_env = Environment( loader=DictLoader(newstyle_i18n_templates), extensions=['jinja2.ext.i18n'] ) newstyle_i18n_env.install_gettext_callables(gettext, ngettext, newstyle=True) class TestExtension(Extension): tags = set(['test']) ext_attr = 42 def parse(self, parser): return nodes.Output([self.call_method('_dump', [ nodes.EnvironmentAttribute('sandboxed'), self.attr('ext_attr'), nodes.ImportedName(__name__ + '.importable_object'), nodes.ContextReference() ])]).set_lineno(next(parser.stream).lineno) def _dump(self, sandboxed, ext_attr, imported_object, context): return '%s|%s|%s|%s' % ( sandboxed, ext_attr, imported_object, context.blocks ) class PreprocessorExtension(Extension): def preprocess(self, source, name, filename=None): return source.replace('[[TEST]]', '({{ foo }})') class StreamFilterExtension(Extension): def filter_stream(self, stream): for token in stream: if token.type == 'data': for t in self.interpolate(token): yield t else: yield token def interpolate(self, token): pos = 0 end = len(token.value) lineno = token.lineno while 1: match = _gettext_re.search(token.value, pos) if match is None: break value = token.value[pos:match.start()] if value: yield Token(lineno, 'data', value) lineno += count_newlines(token.value) yield Token(lineno, 'variable_begin', None) yield Token(lineno, 'name', 'gettext') yield Token(lineno, 'lparen', None) yield Token(lineno, 'string', match.group(1)) yield Token(lineno, 'rparen', None) yield Token(lineno, 'variable_end', None) pos = match.end() if pos < end: yield Token(lineno, 'data', token.value[pos:]) class ExtensionsTestCase(JinjaTestCase): def test_extend_late(self): env = Environment() env.add_extension('jinja2.ext.autoescape') t = env.from_string('{% autoescape true %}{{ "<test>" }}{% endautoescape %}') assert t.render() == '&lt;test&gt;' def test_loop_controls(self): env = Environment(extensions=['jinja2.ext.loopcontrols']) tmpl = env.from_string(''' {%- for item in [1, 2, 3, 4] %} {%- if item % 2 == 0 %}{% continue %}{% endif -%} {{ item }} {%- endfor %}''') assert tmpl.render() == '13' tmpl = env.from_string(''' {%- for item in [1, 2, 3, 4] %} {%- if item > 2 %}{% break %}{% endif -%} {{ item }} {%- endfor %}''') assert tmpl.render() == '12' def test_do(self): env = Environment(extensions=['jinja2.ext.do']) tmpl = env.from_string(''' {%- set items = [] %} {%- for char in "foo" %} {%- do items.append(loop.index0 ~ char) %} {%- endfor %}{{ items|join(', ') }}''') assert tmpl.render() == '0f, 1o, 2o' def test_with(self): env = Environment(extensions=['jinja2.ext.with_']) tmpl = env.from_string('''\ {% with a=42, b=23 -%} {{ a }} = {{ b }} {% endwith -%} {{ a }} = {{ b }}\ ''') assert [x.strip() for x in tmpl.render(a=1, b=2).splitlines()] \ == ['42 = 23', '1 = 2'] def test_extension_nodes(self): env = Environment(extensions=[TestExtension]) tmpl = env.from_string('{% test %}') assert tmpl.render() == 'False|42|23|{}' def test_identifier(self): assert TestExtension.identifier == __name__ + '.TestExtension' def test_rebinding(self): original = Environment(extensions=[TestExtension]) overlay = original.overlay() for env in original, overlay: for ext in itervalues(env.extensions): assert ext.environment is env def test_preprocessor_extension(self): env = Environment(extensions=[PreprocessorExtension]) tmpl = env.from_string('{[[TEST]]}') assert tmpl.render(foo=42) == '{(42)}' def test_streamfilter_extension(self): env = Environment(extensions=[StreamFilterExtension]) env.globals['gettext'] = lambda x: x.upper() tmpl = env.from_string('Foo _(bar) Baz') out = tmpl.render() assert out == 'Foo BAR Baz' def test_extension_ordering(self): class T1(Extension): priority = 1 class T2(Extension): priority = 2 env = Environment(extensions=[T1, T2]) ext = list(env.iter_extensions()) assert ext[0].__class__ is T1 assert ext[1].__class__ is T2 class InternationalizationTestCase(JinjaTestCase): def test_trans(self): tmpl = i18n_env.get_template('child.html') assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf' def test_trans_plural(self): tmpl = i18n_env.get_template('plural.html') assert tmpl.render(LANGUAGE='de', user_count=1) == 'Ein Benutzer online' assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online' def test_trans_plural_with_functions(self): tmpl = i18n_env.get_template('plural2.html') def get_user_count(): get_user_count.called += 1 return 1 get_user_count.called = 0 assert tmpl.render(LANGUAGE='de', get_user_count=get_user_count) == '1s' assert get_user_count.called == 1 def test_complex_plural(self): tmpl = i18n_env.from_string('{% trans foo=42, count=2 %}{{ count }} item{% ' 'pluralize count %}{{ count }} items{% endtrans %}') assert tmpl.render() == '2 items' self.assert_raises(TemplateAssertionError, i18n_env.from_string, '{% trans foo %}...{% pluralize bar %}...{% endtrans %}') def test_trans_stringformatting(self): tmpl = i18n_env.get_template('stringformat.html') assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5' def test_extract(self): from jinja2.ext import babel_extract source = BytesIO(''' {{ gettext('Hello World') }} {% trans %}Hello World{% endtrans %} {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %} '''.encode('ascii')) # make python 3 happy assert list(babel_extract(source, ('gettext', 'ngettext', '_'), [], {})) == [ (2, 'gettext', u'Hello World', []), (3, 'gettext', u'Hello World', []), (4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), []) ] def test_comment_extract(self): from jinja2.ext import babel_extract source = BytesIO(''' {# trans first #} {{ gettext('Hello World') }} {% trans %}Hello World{% endtrans %}{# trans second #} {#: third #} {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %} '''.encode('utf-8')) # make python 3 happy assert list(babel_extract(source, ('gettext', 'ngettext', '_'), ['trans', ':'], {})) == [ (3, 'gettext', u'Hello World', ['first']), (4, 'gettext', u'Hello World', ['second']), (6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), ['third']) ] class NewstyleInternationalizationTestCase(JinjaTestCase): def test_trans(self): tmpl = newstyle_i18n_env.get_template('child.html') assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf' def test_trans_plural(self): tmpl = newstyle_i18n_env.get_template('plural.html') assert tmpl.render(LANGUAGE='de', user_count=1) == 'Ein Benutzer online' assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online' def test_complex_plural(self): tmpl = newstyle_i18n_env.from_string('{% trans foo=42, count=2 %}{{ count }} item{% ' 'pluralize count %}{{ count }} items{% endtrans %}') assert tmpl.render() == '2 items' self.assert_raises(TemplateAssertionError, i18n_env.from_string, '{% trans foo %}...{% pluralize bar %}...{% endtrans %}') def test_trans_stringformatting(self): tmpl = newstyle_i18n_env.get_template('stringformat.html') assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5' def test_newstyle_plural(self): tmpl = newstyle_i18n_env.get_template('ngettext.html') assert tmpl.render(LANGUAGE='de', apples=1) == '1 Apfel' assert tmpl.render(LANGUAGE='de', apples=5) == u'5 Äpfel' def test_autoescape_support(self): env = Environment(extensions=['jinja2.ext.autoescape', 'jinja2.ext.i18n']) env.install_gettext_callables(lambda x: u'<strong>Wert: %(name)s</strong>', lambda s, p, n: s, newstyle=True) t = env.from_string('{% autoescape ae %}{{ gettext("foo", name=' '"<test>") }}{% endautoescape %}') assert t.render(ae=True) == '<strong>Wert: &lt;test&gt;</strong>' assert t.render(ae=False) == '<strong>Wert: <test></strong>' def test_num_used_twice(self): tmpl = newstyle_i18n_env.get_template('ngettext_long.html') assert tmpl.render(apples=5, LANGUAGE='de') == u'5 Äpfel' def test_num_called_num(self): source = newstyle_i18n_env.compile(''' {% trans num=3 %}{{ num }} apple{% pluralize %}{{ num }} apples{% endtrans %} ''', raw=True) # quite hacky, but the only way to properly test that. The idea is # that the generated code does not pass num twice (although that # would work) for better performance. This only works on the # newstyle gettext of course assert re.search(r"l_ngettext, u?'\%\(num\)s apple', u?'\%\(num\)s " r"apples', 3", source) is not None def test_trans_vars(self): t1 = newstyle_i18n_env.get_template('transvars1.html') t2 = newstyle_i18n_env.get_template('transvars2.html') t3 = newstyle_i18n_env.get_template('transvars3.html') assert t1.render(num=1, LANGUAGE='de') == 'Benutzer: 1' assert t2.render(count=23, LANGUAGE='de') == 'Benutzer: 23' assert t3.render(num=42, LANGUAGE='de') == 'Benutzer: 42' def test_novars_vars_escaping(self): t = newstyle_i18n_env.get_template('novars.html') assert t.render() == '%(hello)s' t = newstyle_i18n_env.get_template('vars.html') assert t.render(foo='42') == '42%(foo)s' t = newstyle_i18n_env.get_template('explicitvars.html') assert t.render() == '%(foo)s' class AutoEscapeTestCase(JinjaTestCase): def test_scoped_setting(self): env = Environment(extensions=['jinja2.ext.autoescape'], autoescape=True) tmpl = env.from_string(''' {{ "<HelloWorld>" }} {% autoescape false %} {{ "<HelloWorld>" }} {% endautoescape %} {{ "<HelloWorld>" }} ''') assert tmpl.render().split() == \ [u'&lt;HelloWorld&gt;', u'<HelloWorld>', u'&lt;HelloWorld&gt;'] env = Environment(extensions=['jinja2.ext.autoescape'], autoescape=False) tmpl = env.from_string(''' {{ "<HelloWorld>" }} {% autoescape true %} {{ "<HelloWorld>" }} {% endautoescape %} {{ "<HelloWorld>" }} ''') assert tmpl.render().split() == \ [u'<HelloWorld>', u'&lt;HelloWorld&gt;', u'<HelloWorld>'] def test_nonvolatile(self): env = Environment(extensions=['jinja2.ext.autoescape'], autoescape=True) tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}') assert tmpl.render() == ' foo="&lt;test&gt;"' tmpl = env.from_string('{% autoescape false %}{{ {"foo": "<test>"}' '|xmlattr|escape }}{% endautoescape %}') assert tmpl.render() == ' foo=&#34;&amp;lt;test&amp;gt;&#34;' def test_volatile(self): env = Environment(extensions=['jinja2.ext.autoescape'], autoescape=True) tmpl = env.from_string('{% autoescape foo %}{{ {"foo": "<test>"}' '|xmlattr|escape }}{% endautoescape %}') assert tmpl.render(foo=False) == ' foo=&#34;&amp;lt;test&amp;gt;&#34;' assert tmpl.render(foo=True) == ' foo="&lt;test&gt;"' def test_scoping(self): env = Environment(extensions=['jinja2.ext.autoescape']) tmpl = env.from_string('{% autoescape true %}{% set x = "<x>" %}{{ x }}' '{% endautoescape %}{{ x }}{{ "<y>" }}') assert tmpl.render(x=1) == '&lt;x&gt;1<y>' def test_volatile_scoping(self): env = Environment(extensions=['jinja2.ext.autoescape']) tmplsource = ''' {% autoescape val %} {% macro foo(x) %} [{{ x }}] {% endmacro %} {{ foo().__class__.__name__ }} {% endautoescape %} {{ '<testing>' }} ''' tmpl = env.from_string(tmplsource) assert tmpl.render(val=True).split()[0] == 'Markup' assert tmpl.render(val=False).split()[0] == text_type.__name__ # looking at the source we should see <testing> there in raw # (and then escaped as well) env = Environment(extensions=['jinja2.ext.autoescape']) pysource = env.compile(tmplsource, raw=True) assert '<testing>\\n' in pysource env = Environment(extensions=['jinja2.ext.autoescape'], autoescape=True) pysource = env.compile(tmplsource, raw=True) assert '&lt;testing&gt;\\n' in pysource def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(ExtensionsTestCase)) suite.addTest(unittest.makeSuite(InternationalizationTestCase)) suite.addTest(unittest.makeSuite(NewstyleInternationalizationTestCase)) suite.addTest(unittest.makeSuite(AutoEscapeTestCase)) return suite
gpl-2.0
stkubr/zipline
zipline/finance/commission.py
33
5126
# # Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from six import iteritems from zipline.utils.serialization_utils import ( VERSION_LABEL ) class PerShare(object): """ Calculates a commission for a transaction based on a per share cost with an optional minimum cost per trade. """ def __init__(self, cost=0.03, min_trade_cost=None): """ Cost parameter is the cost of a trade per-share. $0.03 means three cents per share, which is a very conservative (quite high) for per share costs. min_trade_cost parameter is the minimum trade cost regardless of the number of shares traded (e.g. $1.00). """ self.cost = float(cost) self.min_trade_cost = None if min_trade_cost is None\ else float(min_trade_cost) def __repr__(self): return "{class_name}(cost={cost}, min trade cost={min_trade_cost})"\ .format(class_name=self.__class__.__name__, cost=self.cost, min_trade_cost=self.min_trade_cost) def calculate(self, transaction): """ returns a tuple of: (per share commission, total transaction commission) """ commission = abs(transaction.amount * self.cost) if self.min_trade_cost is None: return self.cost, commission else: commission = max(commission, self.min_trade_cost) return abs(commission / transaction.amount), commission def __getstate__(self): state_dict = \ {k: v for k, v in iteritems(self.__dict__) if not k.startswith('_')} STATE_VERSION = 1 state_dict[VERSION_LABEL] = STATE_VERSION return state_dict def __setstate__(self, state): OLDEST_SUPPORTED_STATE = 1 version = state.pop(VERSION_LABEL) if version < OLDEST_SUPPORTED_STATE: raise BaseException("PerShare saved state is too old.") self.__dict__.update(state) class PerTrade(object): """ Calculates a commission for a transaction based on a per trade cost. """ def __init__(self, cost=5.0): """ Cost parameter is the cost of a trade, regardless of share count. $5.00 per trade is fairly typical of discount brokers. """ # Cost needs to be floating point so that calculation using division # logic does not floor to an integer. self.cost = float(cost) def calculate(self, transaction): """ returns a tuple of: (per share commission, total transaction commission) """ if transaction.amount == 0: return 0.0, 0.0 return abs(self.cost / transaction.amount), self.cost def __getstate__(self): state_dict = \ {k: v for k, v in iteritems(self.__dict__) if not k.startswith('_')} STATE_VERSION = 1 state_dict[VERSION_LABEL] = STATE_VERSION return state_dict def __setstate__(self, state): OLDEST_SUPPORTED_STATE = 1 version = state.pop(VERSION_LABEL) if version < OLDEST_SUPPORTED_STATE: raise BaseException("PerTrade saved state is too old.") self.__dict__.update(state) class PerDollar(object): """ Calculates a commission for a transaction based on a per dollar cost. """ def __init__(self, cost=0.0015): """ Cost parameter is the cost of a trade per-dollar. 0.0015 on $1 million means $1,500 commission (=1,000,000 x 0.0015) """ self.cost = float(cost) def __repr__(self): return "{class_name}(cost={cost})".format( class_name=self.__class__.__name__, cost=self.cost) def calculate(self, transaction): """ returns a tuple of: (per share commission, total transaction commission) """ cost_per_share = transaction.price * self.cost return cost_per_share, abs(transaction.amount) * cost_per_share def __getstate__(self): state_dict = \ {k: v for k, v in iteritems(self.__dict__) if not k.startswith('_')} STATE_VERSION = 1 state_dict[VERSION_LABEL] = STATE_VERSION return state_dict def __setstate__(self, state): OLDEST_SUPPORTED_STATE = 1 version = state.pop(VERSION_LABEL) if version < OLDEST_SUPPORTED_STATE: raise BaseException("PerDollar saved state is too old.") self.__dict__.update(state)
apache-2.0
robbi/pyload
module/plugins/hoster/FileStoreTo.py
6
2003
# -*- coding: utf-8 -*- import re from ..internal.SimpleHoster import SimpleHoster class FileStoreTo(SimpleHoster): __name__ = "FileStoreTo" __type__ = "hoster" __version__ = "0.12" __status__ = "testing" __pattern__ = r'http://(?:www\.)?filestore\.to/\?d=(?P<ID>\w+)' __config__ = [("activated", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ("fallback", "bool", "Fallback to free download if premium fails", True), ("chk_filesize", "bool", "Check file size", True), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)] __description__ = """FileStore.to hoster plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "vuolter@gmail.com"), ("stickell", "l.stickell@yahoo.it"), ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")] NAME_PATTERN = r'<div class="file">(?P<N>.+?)</div>' SIZE_PATTERN = r'<div class="size">(?P<S>[\d.,]+) (?P<U>[\w^_]+)</div>' OFFLINE_PATTERN = r'>Download-Datei wurde nicht gefunden<' TEMP_OFFLINE_PATTERN = r'>Der Download ist nicht bereit !<' WAIT_PATTERN = r'data-wait="(\d+?)"' LINK_PATTERN = r'klicke <a href="(.+?)">hier<' def setup(self): self.resume_download = True self.multiDL = True def handle_free(self, pyfile): self.data = self.load(pyfile.url, post={'Aktion': "Download"}) self.check_errors() m = re.search(r'name="DID" value="(.+?)"', self.data) if m is None: self.fail(_("DID pattern not found")) self.data = self.load(pyfile.url, post={'DID': m.group(1), 'Aktion': "Downloading"}) self.check_errors() m = re.search(self.LINK_PATTERN, self.data) if m is not None: self.link = m.group(1)
gpl-3.0
arunkgupta/gramps
gramps/plugins/drawreport/descendtree.py
1
66162
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2007-2008 Brian G. Matherly # Copyright (C) 2010 Jakim Friant # Copyright (C) 2009-2010 Craig J. Anderson # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """ Reports/Graphical Reports/Familial Tree Reports/Graphical Reports/Personal Tree """ #------------------------------------------------------------------------ # # GRAMPS modules # #------------------------------------------------------------------------ from gramps.gen.ggettext import sgettext as _ from gramps.gen.errors import ReportError from gramps.gen.plug.menu import TextOption from gramps.gen.plug.menu import NumberOption from gramps.gen.plug.menu import EnumeratedListOption from gramps.gen.plug.menu import StringOption from gramps.gen.plug.menu import BooleanOption from gramps.gen.plug.menu import PersonOption from gramps.gen.plug.menu import FamilyOption from gramps.gen.plug.report import Report from gramps.gen.plug.report import utils as ReportUtils from gramps.gen.plug.report import MenuReportOptions PT2CM = ReportUtils.pt2cm #------------------------------------------------------------------------ # # Constants # #------------------------------------------------------------------------ _BORN = _('short for born|b.') _DIED = _('short for died|d.') _MARR = _('short for married|m.') _RPT_NAME = 'descend_chart' from gramps.plugins.lib.libtreebase import * #------------------------------------------------------------------------ # # Box classes # #------------------------------------------------------------------------ class DescendantBoxBase(BoxBase): """ Base for all descendant boxes. Set the boxstr and some new attributes that are needed """ def __init__(self, boxstr): BoxBase.__init__(self) self.boxstr = boxstr self.next = None self.father = None def calc_text(self, database, person, family): """ A single place to calculate box text """ gui = GuiConnect() calc = gui.calc_lines(database) self.text = calc.calc_lines(person, family, gui.working_lines(self)) class PersonBox(DescendantBoxBase): """ Calculates information about the box that will print on a page """ def __init__(self, level, boldable = 0): DescendantBoxBase.__init__(self, "CG2-box") self.level = level def set_bold(self): """ update me to a bolded box """ self.boxstr = "CG2b-box" class FamilyBox(DescendantBoxBase): """ Calculates information about the box that will print on a page """ def __init__(self, level): DescendantBoxBase.__init__(self, "CG2-fam-box") self.level = level class PlaceHolderBox(BoxBase): """ I am a box that does not print. I am used to make sure information does not run over areas that we don't want information (boxes) """ def __init__(self, level): BoxBase.__init__(self) self.boxstr = "None" self.level = level self.line_to = None self.next = None def calc_text(self, database, person, family): """ move along. Nothing to see here """ return #------------------------------------------------------------------------ # # Titles Class(es) # #------------------------------------------------------------------------ class DescendantTitleBase(TitleBox): def __init__(self, dbase, doc, boxstr = "CG2-Title"): TitleBox.__init__(self, doc, boxstr) self.database = dbase def descendant_print(self, person_list, person_list2 = []): """ calculate the Descendant title Person_list will always be passed If in the Family reports and there are two families, person_list2 will be used. """ if len(person_list) == len(person_list2) == 1: person_list = person_list + person_list2 person_list2 = [] names = self._get_names(person_list) if person_list2: names2 = self._get_names(person_list2) if len(names) + len(names2) == 3: if len(names) == 1: title = _("Descendant Chart for %(person)s and " "%(father1)s, %(mother1)s") % \ {'person': names[0], 'father1': names2[0], 'mother1': names2[1], } else: # Should be 2 items in names list title = _("Descendant Chart for %(person)s, %(father1)s " "and %(mother1)s") % \ {'father1': names[0], 'mother1': names[1], 'person': names2[0], } else: # Should be 2 items in both names and names2 lists title = _("Descendant Chart for %(father1)s, %(father2)s " "and %(mother1)s, %(mother2)s") % \ {'father1': names[0], 'mother1': names[1], 'father2': names2[0], 'mother2': names2[1], } else: # No person_list2: Just one family if len(names) == 1: title = _("Descendant Chart for %(person)s") % \ {'person': names[0]} else: # Should be two items in names list title = _("Descendant Chart for %(father)s and %(mother)s") % \ {'father': names[0], 'mother': names[1], } return title def get_parents(self, family_id): """ For a family_id, return the father and mother """ family1 = self.database.get_family_from_gramps_id(family_id) father_h = family1.get_father_handle() mother_h = family1.get_mother_handle() parents = [self.database.get_person_from_handle(handle) for handle in [father_h, mother_h] if handle] return parents class TitleNone(TitleNoDisplay): """Family Chart Title class for the report """ def __init__(self, dbase, doc): TitleNoDisplay.__init__(self, doc, "CG2-Title") def calc_title(self, persons): """Calculate the title of the report""" self.text = 'Descendant Graph' class TitleDPY(DescendantTitleBase): """Descendant (Person yes start with parents) Chart Title class for the report """ def __init__(self, dbase, doc): DescendantTitleBase.__init__(self, dbase, doc) def calc_title(self, person_id): """Calculate the title of the report""" center = self.database.get_person_from_gramps_id(person_id) family2_h = center.get_main_parents_family_handle() family2 = self.database.get_family_from_handle(family2_h) person_list = None if family2: father2_h = family2.get_father_handle() mother2_h = family2.get_mother_handle() person_list = [self.database.get_person_from_handle(handle) for handle in [father2_h, mother2_h] if handle] if not person_list: person_list = [center] self.text = self.descendant_print(person_list) self.set_box_height_width() class TitleDPN(DescendantTitleBase): """Descendant (Person no start with parents) Chart Title class for the report """ def __init__(self, dbase, doc): DescendantTitleBase.__init__(self, dbase, doc) def calc_title(self, person_id): """Calculate the title of the report""" center = self.database.get_person_from_gramps_id(person_id) title = self.descendant_print([center]) self.text = title self.set_box_height_width() class TitleDFY(DescendantTitleBase): """Descendant (Family yes start with parents) Chart Title class for the report """ def __init__(self, dbase, doc): DescendantTitleBase.__init__(self, dbase, doc) def get_parent_list(self, person): """ return a list of my parents. If none, return me """ if not person: return None parent_list = None family_h = person.get_main_parents_family_handle() family = self.database.get_family_from_handle(family_h) if family: #family = fathers parents father_h = family.get_father_handle() mother_h = family.get_mother_handle() parent_list = [self.database.get_person_from_handle(handle) for handle in [father_h, mother_h] if handle] return parent_list or [person] def calc_title(self, family_id): """Calculate the title of the report""" my_parents = self.get_parents(family_id) dad_parents = self.get_parent_list(my_parents[0]) mom_parents = [] if len(my_parents) > 1: if not dad_parents: dad_parents = self.get_parent_list(my_parents[1]) else: mom_parents = self.get_parent_list(my_parents[1]) self.text = self.descendant_print(dad_parents, mom_parents) self.set_box_height_width() class TitleDFN(DescendantTitleBase): """Descendant (Family no start with parents) Chart Title class for the report """ def __init__(self, dbase, doc): DescendantTitleBase.__init__(self, dbase, doc) def calc_title(self, family_id): """Calculate the title of the report""" self.text = self.descendant_print( self.get_parents(family_id) ) self.set_box_height_width() class TitleF(DescendantTitleBase): """Family Chart Title class for the report """ def __init__(self, dbase, doc): DescendantTitleBase.__init__(self, dbase, doc) def calc_title(self, family_id): """Calculate the title of the report""" parents = self.get_parents(family_id) names = self._get_names(parents) if len(parents) == 1: title = _("Family Chart for %(person)s") % {'person': names[0] } elif len(parents) == 2: title = _("Family Chart for %(father1)s and %(mother1)s") % \ {'father1': names[0], 'mother1': names[1] } #else: # title = str(tmp) + " " + str(len(tmp)) self.text = title self.set_box_height_width() class TitleC(DescendantTitleBase): """Cousin Chart Title class for the report """ def __init__(self, dbase, doc): DescendantTitleBase.__init__(self, dbase, doc) def calc_title(self, family_id): """Calculate the title of the report""" family = self.database.get_family_from_gramps_id(family_id) kids = [self.database.get_person_from_handle(kid.ref) for kid in family.get_child_ref_list()] #ok we have the children. Make a title off of them tmp = self._get_names(kids) self.text = _("Cousin Chart for " + ", ".join(self._get_names(kids))) self.set_box_height_width() #------------------------------------------------------------------------ # # Class RecurseDown # #------------------------------------------------------------------------ class RecurseDown: """ The main recursive functions that will use add_person to make the tree of people (Descendants) to be included within the report. """ def __init__(self, dbase, canvas): self.database = dbase self.canvas = canvas self.families_seen = set() self.cols = [] self.__last_direct = [] gui = GuiConnect() self.do_parents = gui.get_val('show_parents') self.max_generations = gui.get_val('maxgen') self.max_spouses = gui.get_val('maxspouse') self.inlc_marr = gui.get_val("inc_marr") if not self.max_spouses: self.inlc_marr = False #is the option even available? self.bold_direct = gui.get_val('bolddirect') #can we bold direct descendants? #bold_now will have only three values #0 - no bolding #1 - Only bold the first person #2 - Bold all direct descendants self.bold_now = 0 gui = None def add_to_col(self, box): """ Add the box to a column on the canvas. we will do these things: set the .next attrib for the boxs in this col get the height and width of this box and set it no the column also we set the .x_cm to any s_level (indentation) here we will calculate the real .x_cm later (with indentation) """ level = box.level[0] #make the column list of people while len(self.cols) <= level: self.cols.append(None) self.__last_direct.append(None) if self.cols[level]: #if (not the first box in this column) last_box = self.cols[level] last_box.next = box #calculate the .y_cm for this box. box.y_cm = last_box.y_cm box.y_cm += last_box.height if last_box.boxstr in ["CG2-box", "CG2b-box"]: box.y_cm += self.canvas.report_opts.box_shadow if box.boxstr in ["CG2-box", "CG2b-box"]: box.y_cm += self.canvas.report_opts.box_pgap else: box.y_cm += self.canvas.report_opts.box_mgap if box.level[1] == 0 and self.__last_direct[level]: #ok, a new direct descendant. #print level, box.father is not None, self.__last_direct[level].father is not None, box.text[0], \ # self.__last_direct[level].text[0] if box.father != self.__last_direct[level].father and \ box.father != self.__last_direct[level]: box.y_cm += self.canvas.report_opts.box_pgap self.cols[level] = box if box.level[1] == 0: self.__last_direct[level] = box box.x_cm = self.canvas.report_opts.spouse_offset * box.level[1] self.canvas.set_box_height_width(box) def add_person_box(self, level, indi_handle, fams_handle, father): """ Makes a person box and add that person into the Canvas. """ myself = PersonBox(level) myself.father = father if myself.level[1] == 0 and self.bold_direct and self.bold_now: if self.bold_now == 1: self.bold_now = 0 myself.set_bold() if level[1] == 0 and father and myself.level[0] != father.level[0]: #I am a child if father.line_to: line = father.line_to else: line = LineBase(father) father.line_to = line #self.canvas.add_line(line) line.end.append(myself) #calculate the text. myself.calc_text(self.database, indi_handle, fams_handle) myself.add_mark(self.database, self.database.get_person_from_handle(indi_handle)) self.add_to_col(myself) self.canvas.add_box(myself) return myself def add_marriage_box(self, level, indi_handle, fams_handle, father): """ Makes a marriage box and add that person into the Canvas. """ myself = FamilyBox(level) #if father is not None: # myself.father = father #calculate the text. myself.calc_text(self.database, indi_handle, fams_handle) self.add_to_col(myself) self.canvas.add_box(myself) return myself def recurse(self, person_handle, x_level, s_level, father): """traverse the ancestors recursively until either the end of a line is found, or until we reach the maximum number of generations or we reach the max number of spouses that we want to deal with""" if not person_handle: return if x_level > self.max_generations: return if s_level > 0 and s_level == self.max_spouses: return if person_handle in self.families_seen: return myself = None person = self.database.get_person_from_handle(person_handle) family_handles = person.get_family_handle_list() if s_level == 0: val = family_handles[0] if family_handles else None myself = self.add_person_box( (x_level, s_level), person_handle, val, father) marr = None spouse = None if s_level == 1: tmp_bold = self.bold_now self.bold_now = 0 for family_handle in family_handles: if family_handle not in self.families_seen: self.families_seen.add(family_handle) family = self.database.get_family_from_handle(family_handle) #Marriage box if the option is there. if self.inlc_marr and self.max_spouses > 0: marr = self.add_marriage_box((x_level, s_level+1), person_handle, family_handle, father if s_level else myself) spouse_handle = ReportUtils.find_spouse(person, family) if self.max_spouses > s_level and \ spouse_handle not in self.families_seen: def _spouse_box(who): return self.add_person_box((x_level, s_level+1), spouse_handle, family_handle, who) if s_level > 0: spouse = _spouse_box(father) elif self.inlc_marr: spouse = _spouse_box(marr) else: spouse = _spouse_box(myself) mykids = [kid.ref for kid in family.get_child_ref_list()] def _child_recurse(who): self.recurse(child_ref, x_level+1, 0, who) for child_ref in mykids: if self.inlc_marr and self.max_spouses > 0: _child_recurse(marr) elif spouse: _child_recurse(spouse) else: _child_recurse(myself) if self.max_spouses > s_level and \ spouse_handle not in self.families_seen: #spouse_handle = ReportUtils.find_spouse(person,family) self.recurse(spouse_handle, x_level, s_level+1, spouse) if s_level == 1: self.bold_now = tmp_bold def add_family(self, level, family, father2): """ Adds a family into the canvas. only will be used for my direct grandparents, and my parents only. """ family_h = family.get_handle() father_h = family.get_father_handle() mother_h = family.get_mother_handle() self.bold_now = 2 if father_h: father_b = self.add_person_box( (level, 0), father_h, family_h, father2) else: father_b = self.add_person_box( (level, 0), None, None, father2) retrn = [father_b] if self.inlc_marr: family_b = self.add_marriage_box( (level, 1), father_h, family_h, father_b) retrn.append(family_b) self.families_seen.add(family_h) if mother_h: mother_b = self.add_person_box( (level, 0), mother_h, family_h, father_b) else: mother_b = self.add_person_box( (level, 0), None, None, father_b) retrn.append(mother_b) family_line = family_b if self.inlc_marr else father_b for child_ref in family.get_child_ref_list(): self.recurse(child_ref.ref, level+1, 0, family_line) self.bold_now = 0 #Set up the lines for the family if not family_line.line_to: #no children. family_line.line_to = LineBase(family_line) if self.inlc_marr: family_line.line_to.start.append(father_b) family_line.line_to.start.append(mother_b) return retrn def has_children(self, person_handle): """ Quickly check to see if this person has children still we want to respect the FamiliesSeen list """ if not person_handle or person_handle in self.families_seen: return False person = self.database.get_person_from_handle(person_handle) for family_handle in person.get_family_handle_list(): if family_handle not in self.families_seen: family = self.database.get_family_from_handle(family_handle) if family.get_child_ref_list(): return True return False def recurse_if(self, person_handle, level): """ Quickly check to see if we want to continue recursion still we want to respect the FamiliesSeen list """ person = self.database.get_person_from_handle(person_handle) show = False myfams = person.get_family_handle_list() if len(myfams) > 1: #and self.max_spouses > 0 show = True if not self.inlc_marr: #if the condition is true, we only want to show #this parent again IF s/he has other children show = self.has_children(person_handle) #if self.max_spouses == 0 and not self.has_children(person_handle): # self.families_seen.add(person_handle) # show = False if show: self.bold_now = 1 self.recurse(person_handle, level, 0, None) #------------------------------------------------------------------------ # # Class MakePersonTree (Personal Descendant Tree option) # #------------------------------------------------------------------------ class MakePersonTree(RecurseDown): """ The main procedure to use recursion to make the tree based off of a person. order of people inserted into Persons is important. makes sure that order is done correctly. """ def __init__(self, dbase, canvas): RecurseDown.__init__(self, dbase, canvas) self.max_generations -= 1 def start(self, person_id): """follow the steps to make a tree off of a person""" persons = [] center1 = self.database.get_person_from_gramps_id(person_id) if center1 is None: raise ReportError(_("Person %s is not in the Database") % person_id) center1_h = center1.get_handle() #could be mom too. family2 = family2_h = None if self.do_parents: family2_h = center1.get_main_parents_family_handle() family2 = self.database.get_family_from_handle(family2_h) mother2_h = father2_h = None if family2: father2_h = family2.get_father_handle() mother2_h = family2.get_mother_handle() ####################### #don't do center person's parents family. if family2_h: self.families_seen.add(family2_h) ####################### #Center person's Fathers OTHER wives ####################### #update to only run if he HAD other wives! if father2_h: self.recurse_if(father2_h, 0) ####################### #Center persons parents only! ####################### #now it will ONLY be my fathers parents if family2: self.add_family( 0, family2, None ) else: self.bold_now = 2 self.recurse(center1_h, 0, 0, None) self.bold_now = 0 ####################### #Center person's mothers OTHER husbands ####################### #update to only run if she HAD other husbands! if mother2_h: self.recurse_if(mother2_h, 0) return persons #------------------------------------------------------------------------ # # Class MakeFamilyTree (Familial Descendant Tree option) # #------------------------------------------------------------------------ class MakeFamilyTree(RecurseDown): """ The main procedure to use recursion to make the tree based off of a family. order of people inserted into Persons is important. makes sure that order is done correctly. """ def __init__(self, dbase, canvas): RecurseDown.__init__(self, dbase, canvas) def start(self, family_id): """follow the steps to make a tree off of a family""" ## (my) referes to the children of family_id # Step 1 print out my fathers, fathers, # other wives families first (if needed) family1 = self.database.get_family_from_gramps_id(family_id) if family1 is None: raise ReportError(_("Family %s is not in the Database") % family_id) family1_h = family1.get_handle() ####################### #Initial setup of variables ####################### father1_h = family1.get_father_handle() mother1_h = family1.get_mother_handle() father1 = mother1 = family2 = family2_h = None if father1_h: father1 = self.database.get_person_from_handle(father1_h) if self.do_parents: #b3 - remove grandparents? family2_h = father1.get_main_parents_family_handle() family2 = self.database.get_family_from_handle(family2_h) if mother1_h: mother1 = self.database.get_person_from_handle(mother1_h) mother2_h = father2_h = None if family2: #family2 = fathers parents mother2_h = family2.get_mother_handle() mother2 = self.database.get_person_from_handle(mother2_h) father2_h = family2.get_father_handle() father2 = self.database.get_person_from_handle(father2_h) #Helper variables. Assigned in one section, used in another. father2_id = family2_id = None mother1_id = None ####################### #don't do my fathers parents family. will be done later if family2_h: self.families_seen.add(family2_h) ####################### #my father mothers OTHER husbands ####################### #update to only run if she HAD other husbands! if mother2_h: self.recurse_if(mother2_h, 0) ####################### #father Fathers OTHER wives ####################### #update to only run if he HAD other wives! if father2_h: self.recurse_if(father2_h, 0) ####################### #don't do my parents family in recurse. will be done later self.families_seen.add(family1_h) ##If dad has no other children from other marriages. remove him if self.max_spouses == 0 and not self.has_children(father1_h): self.families_seen.add(father1_h) ####################### #my fathers parents! ####################### #now it will ONLY be my fathers parents #will print dads parents. dad's other wifes will also print if family2: myfams = father1.get_family_handle_list() show = False if len(myfams) > 1: show = True if not self.inlc_marr and self.max_spouses == 0: #if the condition is true, we only want to show #this parent again IF s/he has children show = self.has_children(father1_h) if not show: self.families_seen.add(father1_h) family2_l = self.add_family( 0, family2, None ) elif father1: ####################### #my father other wives (if all of the above does nothing) #if my father does not have parents (he is the highest) ####################### #do his OTHER wives first. self.recurse_if(father1_h, 1) ####################### #my father, marriage info, mother, siblings, me ####################### if family2: #We need to add dad to the family family2_line = family2_l[1] if self.inlc_marr else family2_l[0] else: family2_line = None family1_l = self.add_family(1, family1, family2_line) mother1_b = family1_l[-1] #Mom's Box #make sure there is at least one child in this family. #if not put in a placeholder family1_line = family1_l[1] if self.inlc_marr else family1_l[0] if family1_line.line_to.end == []: box = PlaceHolderBox((mother1_b.level[0]+1, 0)) box.father = family1_l[0] self.add_to_col(box) family1_line.line_to.end = [box] ####################### ####################### #Lower half #This will be quite like the first half. #Just on the mothers side... #Mom has already been printed with the family ####################### ####################### ####################### #Initial setup of variables ####################### mother1_h = family1.get_mother_handle() family2_h = mother1 = family2 = None if mother1_h: mother1 = self.database.get_person_from_handle(mother1_h) if self.do_parents: #b3 - remove grandparents? family2_h = mother1.get_main_parents_family_handle() family2 = self.database.get_family_from_handle(family2_h) mother2_h = father2_h = None if family2: mother2_h = family2.get_mother_handle() mother2 = self.database.get_person_from_handle(mother2_h) father2_h = family2.get_father_handle() father2 = self.database.get_person_from_handle(father2_h) ####################### #don't do my parents family. self.families_seen = set([family1_h] ) ##If mom has no other children from other marriages. remove her if self.max_spouses == 0 and not self.has_children(mother1_h): self.families_seen.add(mother1_h) if mother1_h: myfams = mother1.get_family_handle_list() if len(myfams) < 2: #If mom didn't have any other families, don't even do her #she is already here with dad and will be added later self.families_seen.add(mother1_h) ####################### #my mother other spouses (if no parents) ####################### #if my mother does not have parents (she is the highest) #Then do her OTHER spouses. if not family2 and mother1: self.recurse_if(mother1_h, 1) ####################### #my mothers parents! ####################### if family2: family2_l = self.add_family( 0, family2, None ) family2_line = family2_l[1] if self.inlc_marr else family2_l[0] family2_line = family2_line.line_to if family2_line.end != []: family2_line.end.insert(0, mother1_b) else: family2_line.end = [mother1_b] #fix me. Moms other siblings have been given an extra space #Because Moms-father is not siblings-father right now. mother1_b.father = family2_line ####################### #my mother mothers OTHER husbands ####################### #update to only run if she HAD other husbands! if mother2_h: self.recurse_if(mother2_h, 0) ####################### #mother Fathers OTHER wives ####################### #update to only run if he HAD other wives! if father2_h: self.recurse_if(father2_h, 0) #------------------------------------------------------------------------ # # Class MakeReport # #------------------------------------------------------------------------ class MakeReport(object): """ Make a report out of a list of people. The list of people is already made. Use this information to find where people will be placed on the canvas. """ def __init__(self, dbase, canvas, ind_spouse, compress_tree): self.database = dbase self.canvas = canvas gui = GuiConnect() self.do_parents = gui.get_val('show_parents') self.inlc_marr = gui.get_val("inc_marr") self.max_spouses = gui.get_val('maxspouse') gui = None self.ind_spouse = ind_spouse self.compress_tree = compress_tree self.cols = [[]] #self.max_generations = 0 #already done in recurse, #Some of this code needs to be moved up to RecurseDown.add_to_col() def calc_box(self, box): """ calculate the max_box_width and max_box_height for the report """ width = box.x_cm + box.width if width > self.canvas.report_opts.max_box_width: self.canvas.report_opts.max_box_width = width if box.height > self.canvas.report_opts.max_box_height: self.canvas.report_opts.max_box_height = box.height while len(self.cols) <= box.level[0]: self.cols.append([]) self.cols[box.level[0]].append(box) #tmp = box.level[0] #if tmp > self.max_generations: # self.max_generations = tmp def __move_col_from_here_down(self, box, amount): """Move me and everyone below me in this column only down""" while box: box.y_cm += amount box = box.next def __move_next_cols_from_here_down(self, box, amount): """Move me, everyone below me in this column, and all of our children (and childrens children) down.""" col = [box] while col: if len(col) == 1 and col[0].line_to: col.append(col[0].line_to.end[0]) col[0].y_cm += amount col[0] = col[0].next if col[0] is None: col.pop(0) def __next_family_group(self, box): """ a helper function. Assume box is at the start of a family block. get this family block. """ while box: left_group = [] line = None #Form the parental (left) group. #am I a direct descendant? if box.level[1] == 0: #I am the father/mother. left_group.append(box) if box.line_to: line = box.line_to box = box.next if box and box.level[1] != 0 and self.inlc_marr: #add/start with the marriage box left_group.append(box) if box.line_to: line = box.line_to box = box.next if box and box.level[1] != 0 and self.max_spouses > 0: #add/start with the spousal box left_group.append(box) if box.line_to: line = box.line_to box = box.next if line: if len(line.start) > 1 and line.start[-1].level[1] == 0: #a dad and mom family from RecurseDown.add_family. add mom left_group.append(line.start[-1]) box = box.next #we now have everyone we want return left_group, line.end #else # no children, so no family. go again until we find one to return. return None, None def __reverse_family_group(self): """ go through the n-1 to 0 cols of boxes looking for families (parents with children) that may need to be moved. """ for x_col in range(len(self.cols)-1, -1, -1): box = self.cols[x_col][0] #The first person in this col while box: left_group, right_group = self.__next_family_group(box) if not left_group: box = None #we found the end of this col else: yield left_group, right_group box = left_group[-1].next def __calc_movements(self, left_group, right_group): """ for a family group, see if parents or children need to be moved down so everyone is the the right/left of each other. return a right y_cm and a left y_cm. these points will be used to move parents/children down. """ left_up = left_group[0].y_cm right_up = right_group[0].y_cm left_center = left_up right_center = right_up if self.compress_tree: #calculate a new left and right move points for left_line in left_group: if left_line.line_to: break left_center = left_line.y_cm + (left_line.height /2) left_down = left_group[-1].y_cm + left_group[-1].height right_down = right_group[-1].y_cm + right_group[-1].height #Lazy. Move down either side only as much as we NEED to. if left_center < right_up: right_center = right_group[0].y_cm elif left_up == right_up: left_center = left_up #Lets keep it. top line. elif left_center > right_down: right_center = right_down else: right_center = left_center return right_center, left_center def Make_report(self): """ Everyone on the page is as far up as they can go. Move them down to where they belong. We are going to go through everyone from right to left top to bottom moving everyone down as needed to make the report. """ seen_parents = False for left_group, right_group in self.__reverse_family_group(): right_y_cm, left_y_cm = self.__calc_movements(left_group, right_group) #1. Are my children too high? if so move then down! if right_y_cm < left_y_cm: #we have to push our kids (and their kids) down. #We also need to push down all the kids (under) #these kids (in their column) amt = (left_y_cm - right_y_cm) self.__move_next_cols_from_here_down(right_group[0], amt) #2. Am I (and spouses) too high? if so move us down! elif left_y_cm < right_y_cm: #Ok, I am too high. Move me down amt = (right_y_cm - left_y_cm) self.__move_col_from_here_down(left_group[0], amt) #6. now check to see if we are working with dad and mom. #if so we need to move down marriage information #and mom! left_line = left_group[0].line_to if not left_line: left_line = left_group[1].line_to #left_line = left_line.start if len(left_line.start) > 1 and not seen_parents: #only do Dad and Mom. len(left_line) > 1 seen_parents = True mom_cm = left_group[-1].y_cm + left_group[-1].height/2 last_child_cm = right_group[-1].y_cm if not self.compress_tree: last_child_cm += right_group[-1].height/2 move_amt = last_child_cm - mom_cm #if the moms height is less than the last childs height #The 0.2 is to see if this is even worth it. if move_amt > 0.2: #our children take up more space than us parents. #so space mom out! self.__move_col_from_here_down(left_group[-1], move_amt) #move marriage info if self.inlc_marr: left_group[1].y_cm += move_amt/2 if left_line.end[0].boxstr == 'None': left_line.end = [] def start(self): """Make the report""" #for person in self.persons.depth_first_gen(): for box in self.canvas.boxes: self.calc_box(box) #At this point we know everything we need to make the report. #Width of each column of people - self.rept_opt.box_width #width of each column (or row) of lines - self.rept_opt.col_width if not self.cols[0]: #We wanted to print parents of starting person/family but #there were none! #remove column 0 and move everyone back one level self.cols.pop(0) for box in self.canvas.boxes: box.level = (box.level[0] - 1, box.level[1]) #go ahead and set it now. width = self.canvas.report_opts.max_box_width for box in self.canvas.boxes: box.width = width - box.x_cm box.x_cm += self.canvas.report_opts.littleoffset box.x_cm += (box.level[0] * (self.canvas.report_opts.col_width + self.canvas.report_opts.max_box_width)) box.y_cm += self.canvas.report_opts.littleoffset box.y_cm += self.canvas.title.height self.Make_report() class GuiConnect(): """ This is a BORG object. There is ONLY one. This give some common routines that EVERYONE can use like get the value from a GUI variable """ __shared_state = {} def __init__(self): #We are BORG! self.__dict__ = self.__shared_state def set__opts(self, options, which): self._opts = options self._which_report = which.split(",")[0] def get_val(self, val): """ Get a GUI value. """ value = self._opts.get_option_by_name(val) if value: return value.get_value() else: False def Title_class(self, database, doc): Title_type = self.get_val('report_title') if Title_type == 0: #None return TitleNone(database, doc) if Title_type == 1: #Descendant Chart if self._which_report == _RPT_NAME: if self.get_val('show_parents'): return TitleDPY(database, doc) else: return TitleDPN(database, doc) else: if self.get_val('show_parents'): return TitleDFY(database, doc) else: return TitleDFN(database, doc) if Title_type == 2: return TitleF(database, doc) else: #Title_type == 3 return TitleC(database, doc) def Make_Tree(self, database, canvas): if self._which_report == _RPT_NAME: return MakePersonTree(database, canvas) else: return MakeFamilyTree(database, canvas) def calc_lines(self, database): #calculate the printed lines for each box display_repl = self.get_val("replace_list") #str = "" #if self.get_val('miss_val'): # str = "_____" return CalcLines(database, display_repl) def working_lines(self, box): display = self.get_val("descend_disp") #if self.get_val('diffspouse'): display_spou = self.get_val("spouse_disp") #else: # display_spou = display display_marr = [self.get_val("marr_disp")] if box.boxstr == "CG2-fam-box": #((((( workinglines = display_marr elif box.level[1] > 0 or (box.level[0] == 0 and box.father): workinglines = display_spou else: workinglines = display return workinglines #------------------------------------------------------------------------ # # DescendTree # #------------------------------------------------------------------------ class DescendTree(Report): def __init__(self, database, options, user): """ Create DescendTree object that produces the report. The arguments are: database - the GRAMPS database instance options - instance of the Options class for this report user - a gen.user.User() instance """ Report.__init__(self, database, options, user) self.options = options self.database = database """ make the report in its full size and pages to print on scale one or both as needed/desired. """ database = self.database self.Connect = GuiConnect() self.Connect.set__opts(self.options.menu, self.options.name) style_sheet = self.doc.get_style_sheet() font_normal = style_sheet.get_paragraph_style("CG2-Normal").get_font() #The canvas that we will put our report on and print off of self.canvas = Canvas(self.doc, ReportOptions(self.doc, font_normal, "CG2-line")) self.canvas.report_opts.box_shadow *= \ self.Connect.get_val('shadowscale') self.canvas.report_opts.box_pgap *= self.Connect.get_val('box_Yscale') self.canvas.report_opts.box_mgap *= self.Connect.get_val('box_Yscale') center_id = self.Connect.get_val('pid') #make the tree tree = self.Connect.Make_Tree(database, self.canvas) tree.start(center_id) tree = None #Title title = self.Connect.Title_class(database, self.doc) title.calc_title(center_id) self.canvas.add_title(title) #make the report as big as it wants to be. ind_spouse = self.Connect.get_val("ind_spouse") compress_tree = self.Connect.get_val('compress_tree') report = MakeReport(database, self.canvas, ind_spouse, compress_tree) report.start() report = None #note? if self.Connect.get_val("inc_note"): note_box = NoteBox(self.doc, "CG2-note-box", self.Connect.get_val("note_place")) subst = SubstKeywords(self.database, None, None) note_box.text = subst.replace_and_clean( self.Connect.get_val('note_disp')) self.canvas.add_note(note_box) #Now we have the report in its full size. #Do we want to scale the report? one_page = self.Connect.get_val("resize_page") scale_report = self.Connect.get_val("scale_tree") scale = self.canvas.scale_report(one_page, scale_report != 0, scale_report == 2) if scale != 1 or self.Connect.get_val('shadowscale') != 1.0: self.scale_styles(scale) def write_report(self): """ Canvas now has everyone ready to print. Get some misc stuff together and print. """ one_page = self.Connect.get_val("resize_page") scale_report = self.Connect.get_val("scale_tree") #Inlc_marr = self.Connect.get_val("inc_marr") inc_border = self.Connect.get_val('inc_border') incblank = self.Connect.get_val("inc_blank") prnnum = self.Connect.get_val("inc_pagenum") #ind_spouse = self.Connect.get_val("ind_spouse") lines = self.Connect.get_val('note_disp') ##################### #Setup page information colsperpage = self.doc.get_usable_width() colsperpage += self.canvas.report_opts.col_width tmp = self.canvas.report_opts.max_box_width tmp += self.canvas.report_opts.col_width colsperpage = int(colsperpage / tmp) colsperpage = colsperpage or 1 ##################### #Vars #p = self.doc.get_style_sheet().get_paragraph_style("CG2-Normal") #font = p.get_font() if prnnum: page_num_box = PageNumberBox(self.doc, 'CG2-box') ##################### #ok, everyone is now ready to print on the canvas. Paginate? self.canvas.sort_boxes_on_y_cm() self.canvas.paginate(colsperpage, one_page) ##################### #Yeah!!! #lets finally make some pages!!! ##################### for page in self.canvas.page_iter_gen(incblank): self.doc.start_page() #do we need to print a border? if inc_border: page.draw_border('CG2-line') #Do we need to print the page number? if prnnum: page_num_box.display(page) page.display() self.doc.end_page() def scale_styles(self, amount): """ Scale the styles for this report. This must be done in the constructor. """ style_sheet = self.doc.get_style_sheet() graph_style = style_sheet.get_draw_style("CG2-fam-box") graph_style.set_shadow(graph_style.get_shadow(), 0) graph_style.set_line_width(graph_style.get_line_width() * amount) style_sheet.add_draw_style("CG2-fam-box", graph_style) graph_style = style_sheet.get_draw_style("CG2-box") graph_style.set_shadow(graph_style.get_shadow(), self.canvas.report_opts.box_shadow * amount) graph_style.set_line_width(graph_style.get_line_width() * amount) style_sheet.add_draw_style("CG2-box", graph_style) graph_style = style_sheet.get_draw_style("CG2b-box") graph_style.set_shadow(graph_style.get_shadow(), self.canvas.report_opts.box_shadow * amount) graph_style.set_line_width(graph_style.get_line_width() * amount) style_sheet.add_draw_style("CG2b-box", graph_style) graph_style = style_sheet.get_draw_style("CG2-note-box") graph_style.set_shadow(graph_style.get_shadow(), 0) graph_style.set_line_width(graph_style.get_line_width() * amount) style_sheet.add_draw_style("CG2-note-box", graph_style) para_style = style_sheet.get_paragraph_style("CG2-Title") font = para_style.get_font() font.set_size(font.get_size() * amount) para_style.set_font(font) style_sheet.add_paragraph_style("CG2-Title", para_style) para_style = style_sheet.get_paragraph_style("CG2-Normal") font = para_style.get_font() font.set_size(font.get_size() * amount) para_style.set_font(font) style_sheet.add_paragraph_style("CG2-Normal", para_style) para_style = style_sheet.get_paragraph_style("CG2-Bold") font = para_style.get_font() font.set_bold(True) font.set_size(font.get_size() * amount) para_style.set_font(font) style_sheet.add_paragraph_style("CG2-Bold", para_style) para_style = style_sheet.get_paragraph_style("CG2-Note") font = para_style.get_font() font.set_size(font.get_size() * amount) para_style.set_font(font) style_sheet.add_paragraph_style("CG2-Note", para_style) self.doc.set_style_sheet(style_sheet) #------------------------------------------------------------------------ # # DescendTreeOptions # #------------------------------------------------------------------------ class DescendTreeOptions(MenuReportOptions): """ Defines options and provides handling interface. """ def __init__(self, name, dbase): self.__pid = None self.__onepage = None self.__inc_title = None self.__title = None self.__blank = None self.scale = None self.__db = dbase self.name = name self.box_Y_sf = None self.box_shadow_sf = None MenuReportOptions.__init__(self, name, dbase) def add_menu_options(self, menu): """ Add options to the menu for the descendant report. """ ################## category_name = _("Tree Options") if self.name.split(",")[0] == _RPT_NAME: self.__pid = PersonOption(_("Report for")) self.__pid.set_help(_("The main person for the report")) menu.add_option(category_name, "pid", self.__pid) else: #if self.name == "familial_descend_tree": self.__pid = FamilyOption(_("Report for")) self.__pid.set_help(_("The main family for the report")) menu.add_option(category_name, "pid", self.__pid) self.showparents = BooleanOption( _('Start with the parent(s) of the selected first'), False) self.showparents.set_help( _("Will show the parents, brother and sisters of the " "selected person.") ) menu.add_option(category_name, "show_parents", self.showparents) max_gen = NumberOption(_("Generations"), 10, 1, 50) max_gen.set_help(_("The number of generations to include in the tree")) menu.add_option(category_name, "maxgen", max_gen) max_spouse = NumberOption(_("Level of Spouses"), 1, 0, 10) max_spouse.set_help(_("0=no Spouses, 1=include Spouses, 2=include " "Spouses of the spouse, etc")) menu.add_option(category_name, "maxspouse", max_spouse) compresst = BooleanOption(_('Co_mpress tree'), False) compresst.set_help(_("Whether to move people up, where possible, " "resulting in a smaller tree")) menu.add_option(category_name, "compress_tree", compresst) ################## category_name = _("Display") disp = TextOption(_("Descendant\nDisplay Format"), ["$n", "%s $b" %_BORN, "{%s $d}" %_DIED]) disp.set_help(_("Display format for a descendant.")) menu.add_option(category_name, "descend_disp", disp) bold = BooleanOption(_('Bold direct descendants'), True) bold.set_help( _("Whether to bold those people that are direct " "(not step or half) descendants.") ) menu.add_option(category_name, "bolddirect", bold) #bug 4767 #diffspouse = BooleanOption( # _("Use separate display format for spouses"), # True) #diffspouse.set_help(_("Whether spouses can have a different format.")) #menu.add_option(category_name, "diffspouse", diffspouse) indspouce = BooleanOption(_('Indent Spouses'), True) indspouce.set_help(_("Whether to indent the spouses in the tree.")) menu.add_option(category_name, "ind_spouse", indspouce) sdisp = TextOption(_("Spousal\nDisplay Format"), ["$n", "%s $b" %_BORN, "{%s $d}" %_DIED]) sdisp.set_help(_("Display format for a spouse.")) menu.add_option(category_name, "spouse_disp", sdisp) incmarr = BooleanOption(_('Include Marriage box'), True) incmarr.set_help( _("Whether to include a separate marital box in the report")) menu.add_option(category_name, "inc_marr", incmarr) marrdisp = StringOption(_("Marriage\nDisplay Format"), "%s $m" % _MARR) marrdisp.set_help(_("Display format for the marital box.")) menu.add_option(category_name, "marr_disp", marrdisp) ################## category_name = _("Replace") repldisp = TextOption( _("Replace Display Format:\n'Replace this'/' with this'"), []) repldisp.set_help(_("i.e.\nUnited States of America/U.S.A")) menu.add_option(category_name, "replace_list", repldisp) ################## category_name = _("Size") self.scale = EnumeratedListOption(_("Scale tree to fit"), 0) self.scale.add_item( 0, _("Do not scale tree")) self.scale.add_item( 1, _("Scale tree to fit page width only")) self.scale.add_item( 2, _("Scale tree to fit the size of the page")) self.scale.set_help( _("Whether to scale the tree to fit a specific paper size") ) menu.add_option(category_name, "scale_tree", self.scale) self.scale.connect('value-changed', self.__check_blank) if "BKI" not in self.name.split(","): self.__onepage = BooleanOption(_("Resize Page to Fit Tree size\n" "\n" "Note: Overrides options in the 'Paper Option' tab" ), False) self.__onepage.set_help( _("Whether to resize the page to fit the size \n" "of the tree. Note: the page will have a \n" "non standard size.\n" "\n" "With this option selected, the following will happen:\n" "\n" "With the 'Do not scale tree' option the page\n" " is resized to the height/width of the tree\n" "\n" "With 'Scale tree to fit page width only' the height of\n" " the page is resized to the height of the tree\n" "\n" "With 'Scale tree to fit the size of the page' the page\n" " is resized to remove any gap in either height or width" )) menu.add_option(category_name, "resize_page", self.__onepage) self.__onepage.connect('value-changed', self.__check_blank) else: self.__onepage = None self.box_Y_sf = NumberOption(_("inter-box Y scale factor"), 1.00, 0.10, 2.00, 0.01) self.box_Y_sf.set_help(_("Make the inter-box Y bigger or smaller")) menu.add_option(category_name, "box_Yscale", self.box_Y_sf) self.box_shadow_sf = NumberOption(_("box shadow scale factor"), 1.00, 0.00, 2.00, 0.01) # down to 0 self.box_shadow_sf.set_help(_("Make the box shadow bigger or smaller")) menu.add_option(category_name, "shadowscale", self.box_shadow_sf) ################## category_name = _("Include") self.title = EnumeratedListOption(_("Report Title"), 0) self.title.add_item( 0, _("Do not include a title")) self.title.add_item( 1, _("Descendant Chart for [selected person(s)]")) self.title.set_help(_("Choose a title for the report")) menu.add_option(category_name, "report_title", self.title) self.showparents.connect('value-changed', self.__Title_enum) border = BooleanOption(_('Include a border'), False) border.set_help(_("Whether to make a border around the report.")) menu.add_option(category_name, "inc_border", border) prnnum = BooleanOption(_('Include Page Numbers'), False) prnnum.set_help(_("Whether to include page numbers on each page.")) menu.add_option(category_name, "inc_pagenum", prnnum) self.__blank = BooleanOption(_('Include Blank Pages'), True) self.__blank.set_help(_("Whether to include pages that are blank.")) menu.add_option(category_name, "inc_blank", self.__blank) #category_name = _("Notes") self.usenote = BooleanOption(_('Include a note'), False) self.usenote.set_help( _("Whether to include a note on the report.") ) menu.add_option(category_name, "inc_note", self.usenote) self.notedisp = TextOption(_("Note"),[]) self.notedisp.set_help(_("Add a note" "\n\n$T inserts today's date")) menu.add_option(category_name, "note_disp", self.notedisp) locals = NoteType(0) notelocal = EnumeratedListOption(_("Note Location"), 2) for num, text in locals.note_locals(): notelocal.add_item( num, text ) notelocal.set_help(_("Where to place the note.")) menu.add_option(category_name, "note_place", notelocal) def __check_blank(self): """dis/enables the 'print blank pages' checkbox""" if self.__onepage: value = not self.__onepage.get_value() else: value = True off = value and (self.scale.get_value() != 2) self.__blank.set_available( off ) def __Title_enum(self): item_list = [ [0, _("Do not include a title") ], [1, _("Descendant Chart for [selected person(s)]") ], ] if self.name.split(",")[0] != _RPT_NAME: item_list.append( [2, _("Family Chart for [names of chosen family]") ] ) if self.showparents.get_value(): item_list.append( [3, _("Cousin Chart for [names of children]") ] ) self.title.set_items(item_list) def make_default_style(self, default_style): """Make the default output style for the Descendant Tree.""" from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle, FONT_SANS_SERIF, PARA_ALIGN_CENTER) ## Paragraph Styles: font = FontStyle() font.set_size(16) font.set_type_face(FONT_SANS_SERIF) para_style = ParagraphStyle() para_style.set_font(font) para_style.set_alignment(PARA_ALIGN_CENTER) para_style.set_description( _("The basic style used for the title display.") ) default_style.add_paragraph_style("CG2-Title", para_style) font = FontStyle() font.set_size(9) font.set_type_face(FONT_SANS_SERIF) para_style = ParagraphStyle() para_style.set_font(font) para_style.set_description( _('The basic style used for the text display.') ) default_style.add_paragraph_style("CG2-Normal", para_style) #Set the size of the shadow based on the font size! Much better #will be set later too. box_shadow = PT2CM(font.get_size()) * .6 font.set_bold(True) para_style = ParagraphStyle() para_style.set_font(font) para_style.set_description( _('The bold style used for the text display.') ) default_style.add_paragraph_style("CG2-Bold", para_style) font = FontStyle() font.set_size(9) font.set_type_face(FONT_SANS_SERIF) para_style = ParagraphStyle() para_style.set_font(font) para_style.set_description( _('The basic style used for the note display.') ) default_style.add_paragraph_style("CG2-Note", para_style) graph_style = GraphicsStyle() graph_style.set_paragraph_style("CG2-Title") graph_style.set_color((0, 0, 0)) graph_style.set_fill_color((255, 255, 255)) graph_style.set_line_width(0) default_style.add_draw_style("CG2-Title", graph_style) ## Draw styles graph_style = GraphicsStyle() graph_style.set_paragraph_style("CG2-Normal") graph_style.set_fill_color((255, 255, 255)) default_style.add_draw_style("CG2-fam-box", graph_style) graph_style = GraphicsStyle() graph_style.set_paragraph_style("CG2-Normal") graph_style.set_shadow(1, box_shadow) graph_style.set_fill_color((255, 255, 255)) default_style.add_draw_style("CG2-box", graph_style) graph_style = GraphicsStyle() graph_style.set_paragraph_style("CG2-Bold") graph_style.set_shadow(1, box_shadow) graph_style.set_fill_color((255, 255, 255)) default_style.add_draw_style("CG2b-box", graph_style) graph_style = GraphicsStyle() graph_style.set_paragraph_style("CG2-Note") graph_style.set_fill_color((255, 255, 255)) default_style.add_draw_style("CG2-note-box", graph_style) graph_style = GraphicsStyle() default_style.add_draw_style("CG2-line", graph_style) #===================================== #So do not fear, for I am with you; do not be dismayed, #for I am your God. I will strengthen you and help you; #I will uphold you with my righteous right hand. #Isaiah 41:10
gpl-2.0
LubyRuffy/spiderfoot
ext/stem/descriptor/server_descriptor.py
11
30404
# Copyright 2012-2015, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Parsing for Tor server descriptors, which contains the infrequently changing information about a Tor relay (contact information, exit policy, public keys, etc). This information is provided from a few sources... * The control port via 'GETINFO desc/\*' queries. * The 'cached-descriptors' file in Tor's data directory. * Archived descriptors provided by CollecTor (https://collector.torproject.org/). * Directory authorities and mirrors via their DirPort. **Module Overview:** :: ServerDescriptor - Tor server descriptor. |- RelayDescriptor - Server descriptor for a relay. | |- BridgeDescriptor - Scrubbed server descriptor for a bridge. | |- is_scrubbed - checks if our content has been properly scrubbed | +- get_scrubbing_issues - description of issues with our scrubbing | |- digest - calculates the upper-case hex digest value for our content |- get_annotations - dictionary of content prior to the descriptor entry +- get_annotation_lines - lines that provided the annotations """ import functools import hashlib import re import stem.descriptor.extrainfo_descriptor import stem.exit_policy import stem.prereq import stem.util.connection import stem.util.str_tools import stem.util.tor_tools import stem.version from stem import str_type from stem.descriptor import ( PGP_BLOCK_END, Descriptor, _get_descriptor_components, _read_until_keywords, _bytes_for_block, _value, _values, _parse_simple_line, _parse_bytes_line, _parse_timestamp_line, _parse_forty_character_hex, _parse_key_block, ) try: # added in python 3.2 from functools import lru_cache except ImportError: from stem.util.lru_cache import lru_cache # relay descriptors must have exactly one of the following REQUIRED_FIELDS = ( 'router', 'bandwidth', 'published', 'onion-key', 'signing-key', 'router-signature', ) # optional entries that can appear at most once SINGLE_FIELDS = ( 'platform', 'fingerprint', 'hibernating', 'uptime', 'contact', 'read-history', 'write-history', 'eventdns', 'family', 'caches-extra-info', 'extra-info-digest', 'hidden-service-dir', 'protocols', 'allow-single-hop-exits', 'ntor-onion-key', ) DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535') REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*') def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs): """ Iterates over the server descriptors in a file. :param file descriptor_file: file with descriptor content :param bool is_bridge: parses the file as being a bridge descriptor :param bool validate: checks the validity of the descriptor's content if **True**, skips these checks otherwise :param dict kwargs: additional arguments for the descriptor constructor :returns: iterator for ServerDescriptor instances in the file :raises: * **ValueError** if the contents is malformed and validate is True * **IOError** if the file can't be read """ # Handler for relay descriptors # # Cached descriptors consist of annotations followed by the descriptor # itself. For instance... # # @downloaded-at 2012-03-14 16:31:05 # @source "145.53.65.130" # router caerSidi 71.35.143.157 9001 0 0 # platform Tor 0.2.1.30 on Linux x86_64 # <rest of the descriptor content> # router-signature # -----BEGIN SIGNATURE----- # <signature for the above descriptor> # -----END SIGNATURE----- # # Metrics descriptor files are the same, but lack any annotations. The # following simply does the following... # # - parse as annotations until we get to 'router' # - parse as descriptor content until we get to 'router-signature' followed # by the end of the signature block # - construct a descriptor and provide it back to the caller # # Any annotations after the last server descriptor is ignored (never provided # to the caller). while True: annotations = _read_until_keywords('router', descriptor_file) if not is_bridge: descriptor_content = _read_until_keywords('router-signature', descriptor_file) # we've reached the 'router-signature', now include the pgp style block block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0] descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True) else: descriptor_content = _read_until_keywords('router-digest', descriptor_file, True) if descriptor_content: if descriptor_content[0].startswith(b'@type'): descriptor_content = descriptor_content[1:] # strip newlines from annotations annotations = list(map(bytes.strip, annotations)) descriptor_text = bytes.join(b'', descriptor_content) if is_bridge: yield BridgeDescriptor(descriptor_text, validate, annotations, **kwargs) else: yield RelayDescriptor(descriptor_text, validate, annotations, **kwargs) else: if validate and annotations: orphaned_annotations = stem.util.str_tools._to_unicode(b'\n'.join(annotations)) raise ValueError('Content conform to being a server descriptor:\n%s' % orphaned_annotations) break # done parsing descriptors def _parse_router_line(descriptor, entries): # "router" nickname address ORPort SocksPort DirPort value = _value('router', entries) router_comp = value.split() if len(router_comp) < 5: raise ValueError('Router line must have five values: router %s' % value) elif not stem.util.tor_tools.is_valid_nickname(router_comp[0]): raise ValueError("Router line entry isn't a valid nickname: %s" % router_comp[0]) elif not stem.util.connection.is_valid_ipv4_address(router_comp[1]): raise ValueError("Router line entry isn't a valid IPv4 address: %s" % router_comp[1]) elif not stem.util.connection.is_valid_port(router_comp[2], allow_zero = True): raise ValueError("Router line's ORPort is invalid: %s" % router_comp[2]) elif not stem.util.connection.is_valid_port(router_comp[3], allow_zero = True): raise ValueError("Router line's SocksPort is invalid: %s" % router_comp[3]) elif not stem.util.connection.is_valid_port(router_comp[4], allow_zero = True): raise ValueError("Router line's DirPort is invalid: %s" % router_comp[4]) descriptor.nickname = router_comp[0] descriptor.address = router_comp[1] descriptor.or_port = int(router_comp[2]) descriptor.socks_port = None if router_comp[3] == '0' else int(router_comp[3]) descriptor.dir_port = None if router_comp[4] == '0' else int(router_comp[4]) def _parse_bandwidth_line(descriptor, entries): # "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed value = _value('bandwidth', entries) bandwidth_comp = value.split() if len(bandwidth_comp) < 3: raise ValueError('Bandwidth line must have three values: bandwidth %s' % value) elif not bandwidth_comp[0].isdigit(): raise ValueError("Bandwidth line's average rate isn't numeric: %s" % bandwidth_comp[0]) elif not bandwidth_comp[1].isdigit(): raise ValueError("Bandwidth line's burst rate isn't numeric: %s" % bandwidth_comp[1]) elif not bandwidth_comp[2].isdigit(): raise ValueError("Bandwidth line's observed rate isn't numeric: %s" % bandwidth_comp[2]) descriptor.average_bandwidth = int(bandwidth_comp[0]) descriptor.burst_bandwidth = int(bandwidth_comp[1]) descriptor.observed_bandwidth = int(bandwidth_comp[2]) def _parse_platform_line(descriptor, entries): # "platform" string _parse_bytes_line('platform', 'platform')(descriptor, entries) # The platform attribute was set earlier. This line can contain any # arbitrary data, but tor seems to report its version followed by the # os like the following... # # platform Tor 0.2.2.35 (git-73ff13ab3cc9570d) on Linux x86_64 # # There's no guarantee that we'll be able to pick these out the # version, but might as well try to save our caller the effort. value = _value('platform', entries) platform_match = re.match('^(?:node-)?Tor (\S*).* on (.*)$', value) if platform_match: version_str, descriptor.operating_system = platform_match.groups() try: descriptor.tor_version = stem.version._get_version(version_str) except ValueError: pass def _parse_fingerprint_line(descriptor, entries): # This is forty hex digits split into space separated groups of four. # Checking that we match this pattern. value = _value('fingerprint', entries) fingerprint = value.replace(' ', '') for grouping in value.split(' '): if len(grouping) != 4: raise ValueError('Fingerprint line should have groupings of four hex digits: %s' % value) if not stem.util.tor_tools.is_valid_fingerprint(fingerprint): raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value) descriptor.fingerprint = fingerprint def _parse_hibernating_line(descriptor, entries): # "hibernating" 0|1 (in practice only set if one) value = _value('hibernating', entries) if value not in ('0', '1'): raise ValueError('Hibernating line had an invalid value, must be zero or one: %s' % value) descriptor.hibernating = value == '1' def _parse_hidden_service_dir_line(descriptor, entries): value = _value('hidden-service-dir', entries) if value: descriptor.hidden_service_dir = value.split(' ') else: descriptor.hidden_service_dir = ['2'] def _parse_uptime_line(descriptor, entries): # We need to be tolerant of negative uptimes to accommodate a past tor # bug... # # Changes in version 0.1.2.7-alpha - 2007-02-06 # - If our system clock jumps back in time, don't publish a negative # uptime in the descriptor. Also, don't let the global rate limiting # buckets go absurdly negative. # # After parsing all of the attributes we'll double check that negative # uptimes only occurred prior to this fix. value = _value('uptime', entries) try: descriptor.uptime = int(value) except ValueError: raise ValueError('Uptime line must have an integer value: %s' % value) def _parse_protocols_line(descriptor, entries): value = _value('protocols', entries) protocols_match = re.match('^Link (.*) Circuit (.*)$', value) if not protocols_match: raise ValueError('Protocols line did not match the expected pattern: protocols %s' % value) link_versions, circuit_versions = protocols_match.groups() descriptor.link_protocols = link_versions.split(' ') descriptor.circuit_protocols = circuit_versions.split(' ') def _parse_or_address_line(descriptor, entries): all_values = _values('or-address', entries) or_addresses = [] for entry in all_values: line = 'or-address %s' % entry if ':' not in entry: raise ValueError('or-address line missing a colon: %s' % line) address, port = entry.rsplit(':', 1) is_ipv6 = address.startswith('[') and address.endswith(']') if is_ipv6: address = address[1:-1] # remove brackets if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))): raise ValueError('or-address line has a malformed address: %s' % line) if not stem.util.connection.is_valid_port(port): raise ValueError('or-address line has a malformed port: %s' % line) or_addresses.append((address, int(port), is_ipv6)) descriptor.or_addresses = or_addresses def _parse_history_line(keyword, history_end_attribute, history_interval_attribute, history_values_attribute, descriptor, entries): value = _value(keyword, entries) timestamp, interval, remainder = stem.descriptor.extrainfo_descriptor._parse_timestamp_and_interval(keyword, value) try: if remainder: history_values = [int(entry) for entry in remainder.split(',')] else: history_values = [] except ValueError: raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value)) setattr(descriptor, history_end_attribute, timestamp) setattr(descriptor, history_interval_attribute, interval) setattr(descriptor, history_values_attribute, history_values) def _parse_exit_policy(descriptor, entries): if hasattr(descriptor, '_unparsed_exit_policy'): if descriptor._unparsed_exit_policy == [str_type('reject *:*')]: descriptor.exit_policy = REJECT_ALL_POLICY else: descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy) del descriptor._unparsed_exit_policy _parse_contact_line = _parse_bytes_line('contact', 'contact') _parse_published_line = _parse_timestamp_line('published', 'published') _parse_extrainfo_digest_line = _parse_forty_character_hex('extra-info-digest', 'extra_info_digest') _parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values') _parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values') _parse_ipv6_policy_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('ipv6-policy', entries))) _parse_allow_single_hop_exits_line = lambda descriptor, entries: setattr(descriptor, 'allow_single_hop_exits', 'allow_single_hop_exits' in entries) _parse_caches_extra_info_line = lambda descriptor, entries: setattr(descriptor, 'extra_info_cache', 'extra_info_cache' in entries) _parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', set(_value('family', entries).split(' '))) _parse_eventdns_line = lambda descriptor, entries: setattr(descriptor, 'eventdns', _value('eventdns', entries) == '1') _parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY') _parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY') _parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE') _parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key') _parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest') class ServerDescriptor(Descriptor): """ Common parent for server descriptors. :var str nickname: **\*** relay's nickname :var str fingerprint: identity key fingerprint :var datetime published: **\*** time in UTC when this descriptor was made :var str address: **\*** IPv4 address of the relay :var int or_port: **\*** port used for relaying :var int socks_port: **\*** port used as client (deprecated, always **None**) :var int dir_port: **\*** port used for descriptor mirroring :var bytes platform: line with operating system and tor version :var stem.version.Version tor_version: version of tor :var str operating_system: operating system :var int uptime: uptime when published in seconds :var bytes contact: contact information :var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6 :var set family: **\*** nicknames or fingerprints of declared family :var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s :var int burst_bandwidth: **\*** burst rate it's willing to relay in bytes/s :var int observed_bandwidth: **\*** estimated capacity based on usage in bytes/s :var list link_protocols: link protocols supported by the relay :var list circuit_protocols: circuit protocols supported by the relay :var bool hibernating: **\*** hibernating when published :var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed :var bool extra_info_cache: **\*** flag if a mirror for extra-info documents :var str extra_info_digest: upper-case hex encoded digest of our extra-info document :var bool eventdns: flag for evdns backend (deprecated, always unset) :var list or_addresses: **\*** alternative for our address/or_port attributes, each entry is a tuple of the form (address (**str**), port (**int**), is_ipv6 (**bool**)) Deprecated, moved to extra-info descriptor... :var datetime read_history_end: end of the sampling interval :var int read_history_interval: seconds per interval :var list read_history_values: bytes read during each interval :var datetime write_history_end: end of the sampling interval :var int write_history_interval: seconds per interval :var list write_history_values: bytes written during each interval **\*** attribute is either required when we're parsed with validation or has a default value, others are left as **None** if undefined """ ATTRIBUTES = { 'nickname': (None, _parse_router_line), 'fingerprint': (None, _parse_fingerprint_line), 'contact': (None, _parse_contact_line), 'published': (None, _parse_published_line), 'exit_policy': (None, _parse_exit_policy), 'address': (None, _parse_router_line), 'or_port': (None, _parse_router_line), 'socks_port': (None, _parse_router_line), 'dir_port': (None, _parse_router_line), 'platform': (None, _parse_platform_line), 'tor_version': (None, _parse_platform_line), 'operating_system': (None, _parse_platform_line), 'uptime': (None, _parse_uptime_line), 'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line), 'family': (set(), _parse_family_line), 'average_bandwidth': (None, _parse_bandwidth_line), 'burst_bandwidth': (None, _parse_bandwidth_line), 'observed_bandwidth': (None, _parse_bandwidth_line), 'link_protocols': (None, _parse_protocols_line), 'circuit_protocols': (None, _parse_protocols_line), 'hibernating': (False, _parse_hibernating_line), 'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line), 'extra_info_cache': (False, _parse_caches_extra_info_line), 'extra_info_digest': (None, _parse_extrainfo_digest_line), 'hidden_service_dir': (None, _parse_hidden_service_dir_line), 'eventdns': (None, _parse_eventdns_line), 'or_addresses': ([], _parse_or_address_line), 'read_history_end': (None, _parse_read_history_line), 'read_history_interval': (None, _parse_read_history_line), 'read_history_values': (None, _parse_read_history_line), 'write_history_end': (None, _parse_write_history_line), 'write_history_interval': (None, _parse_write_history_line), 'write_history_values': (None, _parse_write_history_line), } PARSER_FOR_LINE = { 'router': _parse_router_line, 'bandwidth': _parse_bandwidth_line, 'platform': _parse_platform_line, 'published': _parse_published_line, 'fingerprint': _parse_fingerprint_line, 'contact': _parse_contact_line, 'hibernating': _parse_hibernating_line, 'extra-info-digest': _parse_extrainfo_digest_line, 'hidden-service-dir': _parse_hidden_service_dir_line, 'uptime': _parse_uptime_line, 'protocols': _parse_protocols_line, 'or-address': _parse_or_address_line, 'read-history': _parse_read_history_line, 'write-history': _parse_write_history_line, 'ipv6-policy': _parse_ipv6_policy_line, 'allow-single-hop-exits': _parse_allow_single_hop_exits_line, 'caches-extra-info': _parse_caches_extra_info_line, 'family': _parse_family_line, 'eventdns': _parse_eventdns_line, } def __init__(self, raw_contents, validate = False, annotations = None): """ Server descriptor constructor, created from an individual relay's descriptor content (as provided by 'GETINFO desc/*', cached descriptors, and metrics). By default this validates the descriptor's content as it's parsed. This validation can be disables to either improve performance or be accepting of malformed data. :param str raw_contents: descriptor content provided by the relay :param bool validate: checks the validity of the descriptor's content if **True**, skips these checks otherwise :param list annotations: lines that appeared prior to the descriptor :raises: **ValueError** if the contents is malformed and validate is True """ super(ServerDescriptor, self).__init__(raw_contents, lazy_load = not validate) self._annotation_lines = annotations if annotations else [] # A descriptor contains a series of 'keyword lines' which are simply a # keyword followed by an optional value. Lines can also be followed by a # signature block. # # We care about the ordering of 'accept' and 'reject' entries because this # influences the resulting exit policy, but for everything else the order # does not matter so breaking it into key / value pairs. entries, self._unparsed_exit_policy = _get_descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, ('accept', 'reject')) if validate: self._parse(entries, validate) _parse_exit_policy(self, entries) # if we have a negative uptime and a tor version that shouldn't exhibit # this bug then fail validation if validate and self.uptime and self.tor_version: if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'): raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime)) self._check_constraints(entries) else: self._entries = entries def digest(self): """ Provides the hex encoded sha1 of our content. This value is part of the network status entry for this relay. :returns: **unicode** with the upper-case hex digest value for this server descriptor """ raise NotImplementedError('Unsupported Operation: this should be implemented by the ServerDescriptor subclass') @lru_cache() def get_annotations(self): """ Provides content that appeared prior to the descriptor. If this comes from the cached-descriptors file then this commonly contains content like... :: @downloaded-at 2012-03-18 21:18:29 @source "173.254.216.66" :returns: **dict** with the key/value pairs in our annotations """ annotation_dict = {} for line in self._annotation_lines: if b' ' in line: key, value = line.split(b' ', 1) annotation_dict[key] = value else: annotation_dict[line] = None return annotation_dict def get_annotation_lines(self): """ Provides the lines of content that appeared prior to the descriptor. This is the same as the :func:`~stem.descriptor.server_descriptor.ServerDescriptor.get_annotations` results, but with the unparsed lines and ordering retained. :returns: **list** with the lines of annotation that came before this descriptor """ return self._annotation_lines def _check_constraints(self, entries): """ Does a basic check that the entries conform to this descriptor type's constraints. :param dict entries: keyword => (value, pgp key) entries :raises: **ValueError** if an issue arises in validation """ for keyword in self._required_fields(): if keyword not in entries: raise ValueError("Descriptor must have a '%s' entry" % keyword) for keyword in self._single_fields(): if keyword in entries and len(entries[keyword]) > 1: raise ValueError("The '%s' entry can only appear once in a descriptor" % keyword) expected_first_keyword = self._first_keyword() if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]: raise ValueError("Descriptor must start with a '%s' entry" % expected_first_keyword) expected_last_keyword = self._last_keyword() if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]: raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword) if not self.exit_policy: raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry") # Constraints that the descriptor must meet to be valid. These can be None if # not applicable. def _required_fields(self): return REQUIRED_FIELDS def _single_fields(self): return REQUIRED_FIELDS + SINGLE_FIELDS def _first_keyword(self): return 'router' def _last_keyword(self): return 'router-signature' class RelayDescriptor(ServerDescriptor): """ Server descriptor (`descriptor specification <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_) :var str onion_key: **\*** key used to encrypt EXTEND cells :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol :var str signing_key: **\*** relay's long-term identity key :var str signature: **\*** signature for this descriptor **\*** attribute is required when we're parsed with validation """ ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{ 'onion_key': (None, _parse_onion_key_line), 'ntor_onion_key': (None, _parse_ntor_onion_key_line), 'signing_key': (None, _parse_signing_key_line), 'signature': (None, _parse_router_signature_line), }) PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{ 'onion-key': _parse_onion_key_line, 'ntor-onion-key': _parse_ntor_onion_key_line, 'signing-key': _parse_signing_key_line, 'router-signature': _parse_router_signature_line, }) def __init__(self, raw_contents, validate = False, annotations = None): super(RelayDescriptor, self).__init__(raw_contents, validate, annotations) if validate: if self.fingerprint: key_hash = hashlib.sha1(_bytes_for_block(self.signing_key)).hexdigest() if key_hash != self.fingerprint.lower(): raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash)) if stem.prereq.is_crypto_available(): signed_digest = self._digest_for_signature(self.signing_key, self.signature) if signed_digest != self.digest(): raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest())) @lru_cache() def digest(self): """ Provides the digest of our descriptor's content. :returns: the digest string encoded in uppercase hex :raises: ValueError if the digest canot be calculated """ return self._digest_for_content(b'router ', b'\nrouter-signature\n') def _compare(self, other, method): if not isinstance(other, RelayDescriptor): return False return method(str(self).strip(), str(other).strip()) def __hash__(self): return hash(str(self).strip()) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) class BridgeDescriptor(ServerDescriptor): """ Bridge descriptor (`bridge descriptor specification <https://collector.torproject.org/formats.html#bridge-descriptors>`_) """ ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{ '_digest': (None, _parse_router_digest_line), }) PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{ 'router-digest': _parse_router_digest_line, }) def digest(self): return self._digest def is_scrubbed(self): """ Checks if we've been properly scrubbed in accordance with the `bridge descriptor specification <https://collector.torproject.org/formats.html#bridge-descriptors>`_. Validation is a moving target so this may not be fully up to date. :returns: **True** if we're scrubbed, **False** otherwise """ return self.get_scrubbing_issues() == [] @lru_cache() def get_scrubbing_issues(self): """ Provides issues with our scrubbing. :returns: **list** of strings which describe issues we have with our scrubbing, this list is empty if we're properly scrubbed """ issues = [] if not self.address.startswith('10.'): issues.append("Router line's address should be scrubbed to be '10.x.x.x': %s" % self.address) if self.contact and self.contact != 'somebody': issues.append("Contact line should be scrubbed to be 'somebody', but instead had '%s'" % self.contact) for address, _, is_ipv6 in self.or_addresses: if not is_ipv6 and not address.startswith('10.'): issues.append("or-address line's address should be scrubbed to be '10.x.x.x': %s" % address) elif is_ipv6 and not address.startswith('fd9f:2e19:3bcf::'): # TODO: this check isn't quite right because we aren't checking that # the next grouping of hex digits contains 1-2 digits issues.append("or-address line's address should be scrubbed to be 'fd9f:2e19:3bcf::xx:xxxx': %s" % address) for line in self.get_unrecognized_lines(): if line.startswith('onion-key '): issues.append('Bridge descriptors should have their onion-key scrubbed: %s' % line) elif line.startswith('signing-key '): issues.append('Bridge descriptors should have their signing-key scrubbed: %s' % line) elif line.startswith('router-signature '): issues.append('Bridge descriptors should have their signature scrubbed: %s' % line) return issues def _required_fields(self): # bridge required fields are the same as a relay descriptor, minus items # excluded according to the format page excluded_fields = [ 'onion-key', 'signing-key', 'router-signature', ] included_fields = [ 'router-digest', ] return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields]) def _single_fields(self): return self._required_fields() + SINGLE_FIELDS def _last_keyword(self): return None def _compare(self, other, method): if not isinstance(other, BridgeDescriptor): return False return method(str(self).strip(), str(other).strip()) def __hash__(self): return hash(str(self).strip()) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o)
gpl-2.0
ChristosChristofidis/bokeh
bokeh/models/formatters.py
18
12319
""" Models for controlling the text and visual formatting of tick labels on Bokeh plot axes. """ from __future__ import absolute_import from .tickers import Ticker from ..plot_object import PlotObject from ..properties import Bool, Int, String, Enum, Auto, List, Dict, Either, Instance from ..enums import DatetimeUnits, RoundingFunction, NumeralLanguage class TickFormatter(PlotObject): """ A base class for all tick formatter types. ``TickFormatter`` is not generally useful to instantiate on its own. """ pass class BasicTickFormatter(TickFormatter): """ Display tick values from continuous ranges as "basic numbers", using scientific notation when appropriate by default. """ precision = Either(Auto, Int, help=""" How many digits of precision to display in tick labels. """) use_scientific = Bool(True, help=""" Whether to ever display scientific notation. If ``True``, then when to use scientific notation is controlled by ``power_limit_low`` and ``power_limit_high``. """) power_limit_high = Int(5, help=""" Limit the use of scientific notation to when:: log(x) >= power_limit_high """) power_limit_low = Int(-3, help=""" Limit the use of scientific notation to when:: log(x) <= power_limit_low """) class NumeralTickFormatter(TickFormatter): """ Tick formatter based on a human-readable format string. """ format = String("0,0", help=""" The number format, as defined in the following tables: **NUMBERS**: ============ ============== =============== Number Format String ============ ============== =============== 10000 '0,0.0000' 10,000.0000 10000.23 '0,0' 10,000 10000.23 '+0,0' +10,000 -10000 '0,0.0' -10,000.0 10000.1234 '0.000' 10000.123 10000.1234 '0[.]00000' 10000.12340 -10000 '(0,0.0000)' (10,000.0000) -0.23 '.00' -.23 -0.23 '(.00)' (.23) 0.23 '0.00000' 0.23000 0.23 '0.0[0000]' 0.23 1230974 '0.0a' 1.2m 1460 '0 a' 1 k -104000 '0a' -104k 1 '0o' 1st 52 '0o' 52nd 23 '0o' 23rd 100 '0o' 100th ============ ============== =============== **CURRENCY**: =========== =============== ============= Number Format String =========== =============== ============= 1000.234 '$0,0.00' $1,000.23 1000.2 '0,0[.]00 $' 1,000.20 $ 1001 '$ 0,0[.]00' $ 1,001 -1000.234 '($0,0)' ($1,000) -1000.234 '$0.00' -$1000.23 1230974 '($ 0.00 a)' $ 1.23 m =========== =============== ============= **BYTES**: =============== =========== ============ Number Format String =============== =========== ============ 100 '0b' 100B 2048 '0 b' 2 KB 7884486213 '0.0b' 7.3GB 3467479682787 '0.000 b' 3.154 TB =============== =========== ============ **PERCENTAGES**: ============= ============= =========== Number Format String ============= ============= =========== 1 '0%' 100% 0.974878234 '0.000%' 97.488% -0.43 '0 %' -43 % 0.43 '(0.000 %)' 43.000 % ============= ============= =========== **TIME**: ============ ============== ============ Number Format String ============ ============== ============ 25 '00:00:00' 0:00:25 238 '00:00:00' 0:03:58 63846 '00:00:00' 17:44:06 ============ ============== ============ """) language = Enum(NumeralLanguage, default="en", help=""" The language to use for formatting language-specific features (e.g. thousands separator). """) rounding = Enum(RoundingFunction, help=""" Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup). """) class PrintfTickFormatter(TickFormatter): """ Tick formatter based on a printf-style format string. """ format = String("%s", help=""" The numer format, as defined as follows: the placeholder in the format string is marked by % and is followed by one or more of these elements, in this order: * An optional ``+`` sign Causes the result to be preceded with a plus or minus sign on numeric values. By default, only the ``-`` sign is used on negative numbers. * An optional padding specifier Specifies what (if any) character to use for padding. Possible values are 0 or any other character precedeed by a ``'`` (single quote). The default is to pad with spaces. * An optional ``-`` sign Causes sprintf to left-align the result of this placeholder. The default is to right-align the result. * An optional number Specifies how many characters the result should have. If the value to be returned is shorter than this number, the result will be padded. * An optional precision modifier Consists of a ``.`` (dot) followed by a number, specifies how many digits should be displayed for floating point numbers. When used on a string, it causes the result to be truncated. * A type specifier Can be any of: - ``%`` --- yields a literal ``%`` character - ``b`` --- yields an integer as a binary number - ``c`` --- yields an integer as the character with that ASCII value - ``d`` or ``i`` --- yields an integer as a signed decimal number - ``e`` --- yields a float using scientific notation - ``u`` --- yields an integer as an unsigned decimal number - ``f`` --- yields a float as is - ``o`` --- yields an integer as an octal number - ``s`` --- yields a string as is - ``x`` --- yields an integer as a hexadecimal number (lower-case) - ``X`` --- yields an integer as a hexadecimal number (upper-case) """) class LogTickFormatter(TickFormatter): """ Display tick values from continuous ranges as powers of some base. Most often useful in conjunction with a ``LogTicker``. """ ticker = Instance(Ticker, help=""" The corresponding ``LogTicker``, used to determine the correct base to use. If unset, the formatter will use base 10 as a default. """) class CategoricalTickFormatter(TickFormatter): """ Display tick values from categorical ranges as string values. """ pass class DatetimeTickFormatter(TickFormatter): """ Display tick values from a continuous range as formatted datetimes. """ formats = Dict(Enum(DatetimeUnits), List(String), help=""" User defined formats for displaying datetime values. The enum values correspond roughly to different "time scales". The corresponding value is a list of `strftime`_ formats to use for formatting datetime values that fall in in that "time scale". This list of supported `strftime`_ formats is reproduced below. .. warning:: The client library BokehJS uses the `timezone`_ library to format datetimes. The inclusion of the list below is based on the claim that `timezone`_ makes to support "the full compliment of GNU date format specifiers." However, this claim has not been tested exhaustively against this list. If you find formats that do not function as expected, please submit a `github issue`, so that the documentation can be updated appropriately. %a The abbreviated name of the day of the week according to the current locale. %A The full name of the day of the week according to the current locale. %b The abbreviated month name according to the current locale. %B The full month name according to the current locale. %c The preferred date and time representation for the current locale. %C The century number (year/100) as a 2-digit integer. %d The day of the month as a decimal number (range 01 to 31). %D Equivalent to %m/%d/%y. (Americans should note that in many other countries %d/%m/%y is rather common. This means that in international context this format is ambiguous and should not be used.) %e Like %d, the day of the month as a decimal number, but a leading zero is replaced by a space. %F Equivalent to %Y-%m-%d (the ISO 8601 date format). %G The ISO 8601 week-based year with century as a decimal number. The 4-digit year corresponding to the ISO week number (see %V). This has the same format and value as %Y, except that if the ISO week number belongs to the previous or next year, that year is used instead. %g Like %G, but without century, that is, with a 2-digit year (00-99). %h Equivalent to %b. %H The hour as a decimal number using a 24-hour clock (range 00 to 23). %I The hour as a decimal number using a 12-hour clock (range 01 to 12). %j The day of the year as a decimal number (range 001 to 366). %k The hour (24-hour clock) as a decimal number (range 0 to 23). Single digits are preceded by a blank. (See also %H.) %l The hour (12-hour clock) as a decimal number (range 1 to 12). Single digits are preceded by a blank. (See also %I.) (TZ) %m The month as a decimal number (range 01 to 12). %M The minute as a decimal number (range 00 to 59). %n A newline character. %p Either "AM" or "PM" according to the given time value, or the corresponding strings for the current locale. Noon is treated as "PM" and midnight as "AM". %P Like %p but in lowercase: "am" or "pm" or a corresponding string for the current locale. %r The time in a.m. or p.m. notation. In the POSIX locale this is equivalent to %I:%M:%S %p. %R The time in 24-hour notation (%H:%M). For a version including the seconds, see %T below. %s The number of seconds since the Epoch, 1970-01-01 00:00:00 +0000 (UTC). %S The second as a decimal number (range 00 to 60). (The range is up to 60 to allow for occasional leap seconds.) %t A tab character. %T The time in 24-hour notation (%H:%M:%S). %u The day of the week as a decimal, range 1 to 7, Monday being 1. See also %w. %U The week number of the current year as a decimal number, range 00 to 53, starting with the first Sunday as the first day of week 01. See also %V and %W. %V The ISO 8601 week number (see NOTES) of the current year as a decimal number, range 01 to 53, where week 1 is the first week that has at least 4 days in the new year. See also %U and %W. %w The day of the week as a decimal, range 0 to 6, Sunday being 0. See also %u. %W The week number of the current year as a decimal number, range 00 to 53, starting with the first Monday as the first day of week 01. %x The preferred date representation for the current locale without the time. %X The preferred time representation for the current locale without the date. %y The year as a decimal number without a century (range 00 to 99). %Y The year as a decimal number including the century. %z The +hhmm or -hhmm numeric timezone (that is, the hour and minute offset from UTC). %Z The timezone name or abbreviation. %% A literal '%' character. .. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html .. _timezone: http://bigeasy.github.io/timezone/ .. _github issue: https://github.com/bokeh/bokeh/issues """)
bsd-3-clause
mikewiebe-ansible/ansible
lib/ansible/modules/cloud/google/gcp_compute_instance.py
10
70672
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_instance description: - An instance is a virtual machine (VM) hosted on Google's infrastructure. short_description: Creates a GCP Instance version_added: '2.6' author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: - present - absent default: present type: str can_ip_forward: description: - Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. required: false type: bool aliases: - ip_forward deletion_protection: description: - Whether the resource should be protected against deletion. required: false type: bool version_added: '2.9' disks: description: - An array of disks that are associated with the instances that are created from this template. required: false type: list suboptions: auto_delete: description: - Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). - 'Tip: Disks should be set to autoDelete=true so that leftover disks are not left behind on machine deletion.' required: false type: bool boot: description: - Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. required: false type: bool device_name: description: - Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. required: false type: str disk_encryption_key: description: - Encrypts or decrypts a disk using a customer-supplied encryption key. required: false type: dict suboptions: raw_key: description: - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. required: false type: str rsa_encrypted_key: description: - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. required: false type: str index: description: - Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value. required: false type: int initialize_params: description: - Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. required: false type: dict suboptions: disk_name: description: - Specifies the disk name. If not specified, the default is to use the name of the instance. required: false type: str disk_size_gb: description: - Specifies the size of the disk in base-2 GB. required: false type: int disk_type: description: - Reference to a disk type. - Specifies the disk type to use to create the instance. - If not specified, the default is pd-standard. required: false type: str source_image: description: - The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required. To create a disk with one of the public operating system images, specify the image by its family name. required: false type: str aliases: - image - image_family source_image_encryption_key: description: - The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. - Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. required: false type: dict suboptions: raw_key: description: - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. required: false type: str interface: description: - Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. - Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. - 'Some valid choices include: "SCSI", "NVME"' required: false type: str mode: description: - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. - 'Some valid choices include: "READ_WRITE", "READ_ONLY"' required: false type: str source: description: - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required. - If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. - 'This field represents a link to a Disk resource in GCP. It can be specified in two ways. First, you can place a dictionary with key ''selfLink'' and value of your resource''s selfLink Alternatively, you can add `register: name-of-resource` to a gcp_compute_disk task and then set this source field to "{{ name-of-resource }}"' required: false type: dict type: description: - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. - 'Some valid choices include: "SCRATCH", "PERSISTENT"' required: false type: str guest_accelerators: description: - List of the type and count of accelerator cards attached to the instance . required: false type: list suboptions: accelerator_count: description: - The number of the guest accelerator cards exposed to this instance. required: false type: int accelerator_type: description: - Full or partial URL of the accelerator type resource to expose to this instance. required: false type: str hostname: description: - The hostname of the instance to be created. The specified hostname must be RFC1035 compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS. required: false type: str version_added: '2.9' labels: description: - Labels to apply to this instance. A list of key->value pairs. required: false type: dict version_added: '2.9' metadata: description: - The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. required: false type: dict machine_type: description: - A reference to a machine type which defines VM kind. required: false type: str min_cpu_platform: description: - Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms . required: false type: str name: description: - The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. required: false type: str network_interfaces: description: - An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one network interface is supported per instance. required: false type: list suboptions: access_configs: description: - An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. required: false type: list suboptions: name: description: - The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access. required: true type: str nat_ip: description: - Reference to an address. - An external IP address associated with this instance. - Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. - 'This field represents a link to a Address resource in GCP. It can be specified in two ways. First, you can place a dictionary with key ''address'' and value of your resource''s address Alternatively, you can add `register: name-of-resource` to a gcp_compute_address task and then set this nat_ip field to "{{ name-of-resource }}"' required: false type: dict type: description: - The type of configuration. The default and only option is ONE_TO_ONE_NAT. - 'Some valid choices include: "ONE_TO_ONE_NAT"' required: true type: str set_public_ptr: description: - Specifies whether a public DNS PTR record should be created to map the external IP address of the instance to a DNS domain name. required: false type: bool version_added: '2.10' public_ptr_domain_name: description: - The DNS domain name for the public PTR record. You can set this field only if the setPublicPtr field is enabled. required: false type: str version_added: '2.10' network_tier: description: - This signifies the networking tier used for configuring this access configuration. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - 'Some valid choices include: "PREMIUM", "STANDARD"' required: false type: str version_added: '2.10' alias_ip_ranges: description: - An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks. required: false type: list suboptions: ip_cidr_range: description: - The IP CIDR range represented by this alias IP range. - This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). required: false type: str subnetwork_range_name: description: - Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used. required: false type: str network: description: - Specifies the title of an existing network. Not setting the network title will select the default network interface, which could have SSH already configured . - 'This field represents a link to a Network resource in GCP. It can be specified in two ways. First, you can place a dictionary with key ''selfLink'' and value of your resource''s selfLink Alternatively, you can add `register: name-of-resource` to a gcp_compute_network task and then set this network field to "{{ name-of-resource }}"' required: false type: dict network_ip: description: - An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. required: false type: str subnetwork: description: - Reference to a VPC network. - If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. - 'This field represents a link to a Subnetwork resource in GCP. It can be specified in two ways. First, you can place a dictionary with key ''selfLink'' and value of your resource''s selfLink Alternatively, you can add `register: name-of-resource` to a gcp_compute_subnetwork task and then set this subnetwork field to "{{ name-of-resource }}"' required: false type: dict scheduling: description: - Sets the scheduling options for this instance. required: false type: dict suboptions: automatic_restart: description: - Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). - You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. required: false type: bool on_host_maintenance: description: - Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. - For more information, see Setting Instance Scheduling Options. required: false type: str preemptible: description: - Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created. required: false type: bool service_accounts: description: - A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported. required: false type: list suboptions: email: description: - Email address of the service account. required: false type: str scopes: description: - The list of scopes to be made available for this service account. required: false type: list shielded_instance_config: description: - Configuration for various parameters related to shielded instances. required: false type: dict version_added: '2.9' suboptions: enable_secure_boot: description: - Defines whether the instance has Secure Boot enabled. required: false type: bool enable_vtpm: description: - Defines whether the instance has the vTPM enabled. required: false type: bool enable_integrity_monitoring: description: - Defines whether the instance has integrity monitoring enabled. required: false type: bool status: description: - 'The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.' - As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine off . - 'Some valid choices include: "PROVISIONING", "STAGING", "RUNNING", "STOPPING", "SUSPENDING", "SUSPENDED", "TERMINATED"' required: false type: str version_added: '2.8' tags: description: - A list of tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035. required: false type: dict suboptions: fingerprint: description: - Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. - The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata. required: false type: str items: description: - An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. required: false type: list zone: description: - A reference to the zone where the machine resides. required: true type: str project: description: - The Google Cloud Platform project to use. type: str auth_kind: description: - The type of credential used. type: str required: true choices: - application - machineaccount - serviceaccount service_account_contents: description: - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it. type: jsonarg service_account_file: description: - The path of a Service Account JSON file if serviceaccount is selected as type. type: path service_account_email: description: - An optional service account email address if machineaccount is selected and the user does not wish to use the default email. type: str scopes: description: - Array of scopes to be used type: list env_type: description: - Specifies which Ansible environment you're running this module within. - This should not be set unless you know what you're doing. - This only alters the User Agent string for any API requests. type: str ''' EXAMPLES = ''' - name: create a disk gcp_compute_disk: name: disk-instance size_gb: 50 source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts zone: us-central1-a project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: disk - name: create a network gcp_compute_network: name: network-instance project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: network - name: create a address gcp_compute_address: name: address-instance region: us-central1 project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: address - name: create a instance gcp_compute_instance: name: test_object machine_type: n1-standard-1 disks: - auto_delete: 'true' boot: 'true' source: "{{ disk }}" - auto_delete: 'true' interface: NVME type: SCRATCH initialize_params: disk_type: local-ssd metadata: startup-script-url: gs:://graphite-playground/bootstrap.sh cost-center: '12345' labels: environment: production network_interfaces: - network: "{{ network }}" access_configs: - name: External NAT nat_ip: "{{ address }}" type: ONE_TO_ONE_NAT zone: us-central1-a project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' canIpForward: description: - Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. returned: success type: bool cpuPlatform: description: - The CPU platform used by this instance. returned: success type: str creationTimestamp: description: - Creation timestamp in RFC3339 text format. returned: success type: str deletionProtection: description: - Whether the resource should be protected against deletion. returned: success type: bool disks: description: - An array of disks that are associated with the instances that are created from this template. returned: success type: complex contains: autoDelete: description: - Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). - 'Tip: Disks should be set to autoDelete=true so that leftover disks are not left behind on machine deletion.' returned: success type: bool boot: description: - Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. returned: success type: bool deviceName: description: - Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. returned: success type: str diskEncryptionKey: description: - Encrypts or decrypts a disk using a customer-supplied encryption key. returned: success type: complex contains: rawKey: description: - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. returned: success type: str rsaEncryptedKey: description: - Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. returned: success type: str sha256: description: - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. returned: success type: str index: description: - Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value. returned: success type: int initializeParams: description: - Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. returned: success type: complex contains: diskName: description: - Specifies the disk name. If not specified, the default is to use the name of the instance. returned: success type: str diskSizeGb: description: - Specifies the size of the disk in base-2 GB. returned: success type: int diskType: description: - Reference to a disk type. - Specifies the disk type to use to create the instance. - If not specified, the default is pd-standard. returned: success type: str sourceImage: description: - The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required. To create a disk with one of the public operating system images, specify the image by its family name. returned: success type: str sourceImageEncryptionKey: description: - The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. - Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. returned: success type: complex contains: rawKey: description: - Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. returned: success type: str sha256: description: - The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. returned: success type: str interface: description: - Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. - Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. returned: success type: str mode: description: - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. returned: success type: str source: description: - Reference to a disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required. - If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. returned: success type: dict type: description: - Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. returned: success type: str guestAccelerators: description: - List of the type and count of accelerator cards attached to the instance . returned: success type: complex contains: acceleratorCount: description: - The number of the guest accelerator cards exposed to this instance. returned: success type: int acceleratorType: description: - Full or partial URL of the accelerator type resource to expose to this instance. returned: success type: str hostname: description: - The hostname of the instance to be created. The specified hostname must be RFC1035 compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS. returned: success type: str id: description: - The unique identifier for the resource. This identifier is defined by the server. returned: success type: int labelFingerprint: description: - The fingerprint used for optimistic locking of this resource. Used internally during updates. returned: success type: str labels: description: - Labels to apply to this instance. A list of key->value pairs. returned: success type: dict metadata: description: - The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. returned: success type: dict machineType: description: - A reference to a machine type which defines VM kind. returned: success type: str minCpuPlatform: description: - Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms . returned: success type: str name: description: - The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. returned: success type: str networkInterfaces: description: - An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one network interface is supported per instance. returned: success type: complex contains: accessConfigs: description: - An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. returned: success type: complex contains: name: description: - The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access. returned: success type: str natIP: description: - Reference to an address. - An external IP address associated with this instance. - Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. returned: success type: dict type: description: - The type of configuration. The default and only option is ONE_TO_ONE_NAT. returned: success type: str setPublicPtr: description: - Specifies whether a public DNS PTR record should be created to map the external IP address of the instance to a DNS domain name. returned: success type: bool publicPtrDomainName: description: - The DNS domain name for the public PTR record. You can set this field only if the setPublicPtr field is enabled. returned: success type: str networkTier: description: - This signifies the networking tier used for configuring this access configuration. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. returned: success type: str aliasIpRanges: description: - An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks. returned: success type: complex contains: ipCidrRange: description: - The IP CIDR range represented by this alias IP range. - This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). returned: success type: str subnetworkRangeName: description: - Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used. returned: success type: str name: description: - The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc . returned: success type: str network: description: - Specifies the title of an existing network. Not setting the network title will select the default network interface, which could have SSH already configured . returned: success type: dict networkIP: description: - An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. returned: success type: str subnetwork: description: - Reference to a VPC network. - If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. returned: success type: dict scheduling: description: - Sets the scheduling options for this instance. returned: success type: complex contains: automaticRestart: description: - Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). - You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. returned: success type: bool onHostMaintenance: description: - Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. - For more information, see Setting Instance Scheduling Options. returned: success type: str preemptible: description: - Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created. returned: success type: bool serviceAccounts: description: - A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported. returned: success type: complex contains: email: description: - Email address of the service account. returned: success type: str scopes: description: - The list of scopes to be made available for this service account. returned: success type: list shieldedInstanceConfig: description: - Configuration for various parameters related to shielded instances. returned: success type: complex contains: enableSecureBoot: description: - Defines whether the instance has Secure Boot enabled. returned: success type: bool enableVtpm: description: - Defines whether the instance has the vTPM enabled. returned: success type: bool enableIntegrityMonitoring: description: - Defines whether the instance has integrity monitoring enabled. returned: success type: bool status: description: - 'The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.' - As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine off . returned: success type: str statusMessage: description: - An optional, human-readable explanation of the status. returned: success type: str tags: description: - A list of tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035. returned: success type: complex contains: fingerprint: description: - Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. - The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata. returned: success type: str items: description: - An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. returned: success type: list zone: description: - A reference to the zone where the machine resides. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict import json import re import time ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), can_ip_forward=dict(type='bool', aliases=['ip_forward']), deletion_protection=dict(type='bool'), disks=dict( type='list', elements='dict', options=dict( auto_delete=dict(type='bool'), boot=dict(type='bool'), device_name=dict(type='str'), disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'), rsa_encrypted_key=dict(type='str'))), index=dict(type='int'), initialize_params=dict( type='dict', options=dict( disk_name=dict(type='str'), disk_size_gb=dict(type='int'), disk_type=dict(type='str'), source_image=dict(type='str', aliases=['image', 'image_family']), source_image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))), ), ), interface=dict(type='str'), mode=dict(type='str'), source=dict(type='dict'), type=dict(type='str'), ), ), guest_accelerators=dict(type='list', elements='dict', options=dict(accelerator_count=dict(type='int'), accelerator_type=dict(type='str'))), hostname=dict(type='str'), labels=dict(type='dict'), metadata=dict(type='dict'), machine_type=dict(type='str'), min_cpu_platform=dict(type='str'), name=dict(type='str'), network_interfaces=dict( type='list', elements='dict', options=dict( access_configs=dict( type='list', elements='dict', options=dict( name=dict(required=True, type='str'), nat_ip=dict(type='dict'), type=dict(required=True, type='str'), set_public_ptr=dict(type='bool'), public_ptr_domain_name=dict(type='str'), network_tier=dict(type='str'), ), ), alias_ip_ranges=dict(type='list', elements='dict', options=dict(ip_cidr_range=dict(type='str'), subnetwork_range_name=dict(type='str'))), network=dict(type='dict'), network_ip=dict(type='str'), subnetwork=dict(type='dict'), ), ), scheduling=dict( type='dict', options=dict(automatic_restart=dict(type='bool'), on_host_maintenance=dict(type='str'), preemptible=dict(type='bool')) ), service_accounts=dict(type='list', elements='dict', options=dict(email=dict(type='str'), scopes=dict(type='list', elements='str'))), shielded_instance_config=dict( type='dict', options=dict(enable_secure_boot=dict(type='bool'), enable_vtpm=dict(type='bool'), enable_integrity_monitoring=dict(type='bool')) ), status=dict(type='str'), tags=dict(type='dict', options=dict(fingerprint=dict(type='str'), items=dict(type='list', elements='str'))), zone=dict(required=True, type='str'), ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] state = module.params['state'] kind = 'compute#instance' fetch = fetch_resource(module, self_link(module), kind) changed = False if fetch: if state == 'present': if is_different(module, fetch): update(module, self_link(module), kind, fetch) fetch = fetch_resource(module, self_link(module), kind) changed = True else: delete(module, self_link(module), kind) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} if fetch: instance = InstancePower(module, fetch.get('status')) instance.run() if module.params.get('status'): fetch.update({'status': module.params['status']}) fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.post(link, resource_to_request(module))) def update(module, link, kind, fetch): update_fields(module, resource_to_request(module), response_to_hash(module, fetch)) return fetch_resource(module, self_link(module), kind) def update_fields(module, request, response): if response.get('deletionProtection') != request.get('deletionProtection'): deletion_protection_update(module, request, response) if response.get('labels') != request.get('labels'): label_fingerprint_update(module, request, response) if response.get('machineType') != request.get('machineType'): machine_type_update(module, request, response) if response.get('shieldedInstanceConfig') != request.get('shieldedInstanceConfig'): shielded_instance_config_update(module, request, response) def label_fingerprint_update(module, request, response): auth = GcpSession(module, 'compute') auth.post( ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/setLabels"]).format(**module.params), {u'labelFingerprint': response.get('labelFingerprint'), u'labels': module.params.get('labels')}, ) def machine_type_update(module, request, response): auth = GcpSession(module, 'compute') auth.post( ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/setMachineType"]).format(**module.params), {u'machineType': machine_type_selflink(module.params.get('machine_type'), module.params)}, ) def delete(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.delete(link)) def resource_to_request(module): request = { u'kind': 'compute#instance', u'canIpForward': module.params.get('can_ip_forward'), u'deletionProtection': module.params.get('deletion_protection'), u'disks': InstanceDisksArray(module.params.get('disks', []), module).to_request(), u'guestAccelerators': InstanceGuestacceleratorsArray(module.params.get('guest_accelerators', []), module).to_request(), u'hostname': module.params.get('hostname'), u'labels': module.params.get('labels'), u'metadata': module.params.get('metadata'), u'machineType': machine_type_selflink(module.params.get('machine_type'), module.params), u'minCpuPlatform': module.params.get('min_cpu_platform'), u'name': module.params.get('name'), u'networkInterfaces': InstanceNetworkinterfacesArray(module.params.get('network_interfaces', []), module).to_request(), u'scheduling': InstanceScheduling(module.params.get('scheduling', {}), module).to_request(), u'serviceAccounts': InstanceServiceaccountsArray(module.params.get('service_accounts', []), module).to_request(), u'shieldedInstanceConfig': InstanceShieldedinstanceconfig(module.params.get('shielded_instance_config', {}), module).to_request(), u'status': module.params.get('status'), u'tags': InstanceTags(module.params.get('tags', {}), module).to_request(), } request = encode_request(request, module) return_vals = {} for k, v in request.items(): if v or v is False: return_vals[k] = v return return_vals def fetch_resource(module, link, kind, allow_not_found=True): auth = GcpSession(module, 'compute') return return_if_object(module, auth.get(link), kind, allow_not_found) def self_link(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}".format(**module.params) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances".format(**module.params) def return_if_object(module, response, kind, allow_not_found=False): # If not found, return nothing. if allow_not_found and response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError): module.fail_json(msg="Invalid JSON response with error: %s" % response.text) result = decode_response(result, module) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result def is_different(module, response): request = resource_to_request(module) response = response_to_hash(module, response) request = decode_response(request, module) # Remove all output-only from response. response_vals = {} for k, v in response.items(): if k in request: response_vals[k] = v request_vals = {} for k, v in request.items(): if k in response: request_vals[k] = v return GcpRequest(request_vals) != GcpRequest(response_vals) # Remove unnecessary properties from the response. # This is for doing comparisons with Ansible's current parameters. def response_to_hash(module, response): return { u'canIpForward': response.get(u'canIpForward'), u'cpuPlatform': response.get(u'cpuPlatform'), u'creationTimestamp': response.get(u'creationTimestamp'), u'deletionProtection': response.get(u'deletionProtection'), u'disks': InstanceDisksArray(module.params.get('disks', []), module).to_request(), u'guestAccelerators': InstanceGuestacceleratorsArray(response.get(u'guestAccelerators', []), module).from_response(), u'hostname': response.get(u'hostname'), u'id': response.get(u'id'), u'labelFingerprint': response.get(u'labelFingerprint'), u'labels': response.get(u'labels'), u'metadata': response.get(u'metadata'), u'machineType': response.get(u'machineType'), u'minCpuPlatform': response.get(u'minCpuPlatform'), u'name': response.get(u'name'), u'networkInterfaces': InstanceNetworkinterfacesArray(response.get(u'networkInterfaces', []), module).from_response(), u'scheduling': InstanceScheduling(response.get(u'scheduling', {}), module).from_response(), u'serviceAccounts': InstanceServiceaccountsArray(response.get(u'serviceAccounts', []), module).from_response(), u'shieldedInstanceConfig': InstanceShieldedinstanceconfig(response.get(u'shieldedInstanceConfig', {}), module).from_response(), u'status': response.get(u'status'), u'statusMessage': response.get(u'statusMessage'), u'tags': InstanceTags(response.get(u'tags', {}), module).from_response(), } def disk_type_selflink(name, params): if name is None: return url = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*/diskTypes/.*" if not re.match(url, name): name = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/diskTypes/%s".format(**params) % name return name def machine_type_selflink(name, params): if name is None: return url = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*/machineTypes/.*" if not re.match(url, name): name = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/machineTypes/%s".format(**params) % name return name def async_op_url(module, extra_data=None): if extra_data is None: extra_data = {} url = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" combined = extra_data.copy() combined.update(module.params) return url.format(**combined) def wait_for_operation(module, response): op_result = return_if_object(module, response, 'compute#operation') if op_result is None: return {} status = navigate_hash(op_result, ['status']) wait_done = wait_for_completion(status, op_result, module) response = fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instance') if response: return decode_response(response, module) else: return {} def wait_for_completion(status, op_result, module): op_id = navigate_hash(op_result, ['name']) op_uri = async_op_url(module, {'op_id': op_id}) while status != 'DONE': raise_if_errors(op_result, ['error', 'errors'], module) time.sleep(1.0) op_result = fetch_resource(module, op_uri, 'compute#operation', False) status = navigate_hash(op_result, ['status']) return op_result def raise_if_errors(response, err_path, module): errors = navigate_hash(response, err_path) if errors is not None: module.fail_json(msg=errors) def encode_request(request, module): if 'metadata' in request and request['metadata'] is not None: request['metadata'] = metadata_encoder(request['metadata']) return request def decode_response(response, module): if 'metadata' in response and response['metadata'] is not None: response['metadata'] = metadata_decoder(response['metadata']) return response # TODO(alexstephen): Implement updating metadata on existing resources. # Expose instance 'metadata' as a simple name/value pair hash. However the API # defines metadata as a NestedObject with the following layout: # # metadata { # fingerprint: 'hash-of-last-metadata' # items: [ # { # key: 'metadata1-key' # value: 'metadata1-value' # }, # ... # ] # } # def metadata_encoder(metadata): metadata_new = [] for key in metadata: value = metadata[key] metadata_new.append({"key": key, "value": value}) return {'items': metadata_new} # Map metadata.items[]{key:,value:} => metadata[key]=value def metadata_decoder(metadata): items = {} if 'items' in metadata: metadata_items = metadata['items'] for item in metadata_items: items[item['key']] = item['value'] return items class InstancePower(object): def __init__(self, module, current_status): self.module = module self.current_status = current_status self.desired_status = self.module.params.get('status') def run(self): # GcpRequest handles unicode text handling if GcpRequest({'status': self.current_status}) == GcpRequest({'status': self.desired_status}): return elif self.desired_status == 'RUNNING': self.start() elif self.desired_status == 'TERMINATED': self.stop() elif self.desired_status == 'SUSPENDED': self.module.fail_json(msg="Instances cannot be suspended using Ansible") def start(self): auth = GcpSession(self.module, 'compute') wait_for_operation(self.module, auth.post(self._start_url())) def stop(self): auth = GcpSession(self.module, 'compute') wait_for_operation(self.module, auth.post(self._stop_url())) def _start_url(self): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/start".format(**self.module.params) def _stop_url(self): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/stop".format(**self.module.params) def deletion_protection_update(module, request, response): auth = GcpSession(module, 'compute') auth.post( ''.join( [ "https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/setDeletionProtection?deletionProtection={deletionProtection}", ] ).format(**module.params), {}, ) def shielded_instance_config_update(module, request, response): auth = GcpSession(module, 'compute') auth.post( ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/updateShieldedInstanceConfig"]).format( **module.params ), { u'enableSecureBoot': navigate_hash(module.params, ['shielded_instance_config', 'enable_secure_boot']), u'enableVtpm': navigate_hash(module.params, ['shielded_instance_config', 'enable_vtpm']), u'enableIntegrityMonitoring': navigate_hash(module.params, ['shielded_instance_config', 'enable_integrity_monitoring']), }, ) class InstanceDisksArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'autoDelete': item.get('auto_delete'), u'boot': item.get('boot'), u'deviceName': item.get('device_name'), u'diskEncryptionKey': InstanceDiskencryptionkey(item.get('disk_encryption_key', {}), self.module).to_request(), u'index': item.get('index'), u'initializeParams': InstanceInitializeparams(item.get('initialize_params', {}), self.module).to_request(), u'interface': item.get('interface'), u'mode': item.get('mode'), u'source': replace_resource_dict(item.get(u'source', {}), 'selfLink'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'autoDelete': item.get(u'autoDelete'), u'boot': item.get(u'boot'), u'deviceName': item.get(u'deviceName'), u'diskEncryptionKey': InstanceDiskencryptionkey(item.get(u'diskEncryptionKey', {}), self.module).from_response(), u'index': item.get(u'index'), u'initializeParams': InstanceInitializeparams(self.module.params.get('initialize_params', {}), self.module).to_request(), u'interface': item.get(u'interface'), u'mode': item.get(u'mode'), u'source': item.get(u'source'), u'type': item.get(u'type'), } ) class InstanceDiskencryptionkey(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'rsaEncryptedKey': self.request.get('rsa_encrypted_key')}) def from_response(self): return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'rsaEncryptedKey': self.request.get(u'rsaEncryptedKey')}) class InstanceInitializeparams(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'diskName': self.request.get('disk_name'), u'diskSizeGb': self.request.get('disk_size_gb'), u'diskType': disk_type_selflink(self.request.get('disk_type'), self.module.params), u'sourceImage': self.request.get('source_image'), u'sourceImageEncryptionKey': InstanceSourceimageencryptionkey(self.request.get('source_image_encryption_key', {}), self.module).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'diskName': self.request.get(u'diskName'), u'diskSizeGb': self.request.get(u'diskSizeGb'), u'diskType': self.request.get(u'diskType'), u'sourceImage': self.request.get(u'sourceImage'), u'sourceImageEncryptionKey': InstanceSourceimageencryptionkey(self.request.get(u'sourceImageEncryptionKey', {}), self.module).from_response(), } ) class InstanceSourceimageencryptionkey(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')}) def from_response(self): return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')}) class InstanceGuestacceleratorsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({u'acceleratorCount': item.get('accelerator_count'), u'acceleratorType': item.get('accelerator_type')}) def _response_from_item(self, item): return remove_nones_from_dict({u'acceleratorCount': item.get(u'acceleratorCount'), u'acceleratorType': item.get(u'acceleratorType')}) class InstanceNetworkinterfacesArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'accessConfigs': InstanceAccessconfigsArray(item.get('access_configs', []), self.module).to_request(), u'aliasIpRanges': InstanceAliasiprangesArray(item.get('alias_ip_ranges', []), self.module).to_request(), u'network': replace_resource_dict(item.get(u'network', {}), 'selfLink'), u'networkIP': item.get('network_ip'), u'subnetwork': replace_resource_dict(item.get(u'subnetwork', {}), 'selfLink'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'accessConfigs': InstanceAccessconfigsArray(item.get(u'accessConfigs', []), self.module).from_response(), u'aliasIpRanges': InstanceAliasiprangesArray(item.get(u'aliasIpRanges', []), self.module).from_response(), u'network': item.get(u'network'), u'networkIP': item.get(u'networkIP'), u'subnetwork': item.get(u'subnetwork'), } ) class InstanceAccessconfigsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'name': item.get('name'), u'natIP': replace_resource_dict(item.get(u'nat_ip', {}), 'address'), u'type': item.get('type'), u'setPublicPtr': item.get('set_public_ptr'), u'publicPtrDomainName': item.get('public_ptr_domain_name'), u'networkTier': item.get('network_tier'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'name': item.get(u'name'), u'natIP': item.get(u'natIP'), u'type': item.get(u'type'), u'setPublicPtr': item.get(u'setPublicPtr'), u'publicPtrDomainName': item.get(u'publicPtrDomainName'), u'networkTier': item.get(u'networkTier'), } ) class InstanceAliasiprangesArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({u'ipCidrRange': item.get('ip_cidr_range'), u'subnetworkRangeName': item.get('subnetwork_range_name')}) def _response_from_item(self, item): return remove_nones_from_dict({u'ipCidrRange': item.get(u'ipCidrRange'), u'subnetworkRangeName': item.get(u'subnetworkRangeName')}) class InstanceScheduling(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'automaticRestart': self.request.get('automatic_restart'), u'onHostMaintenance': self.request.get('on_host_maintenance'), u'preemptible': self.request.get('preemptible'), } ) def from_response(self): return remove_nones_from_dict( { u'automaticRestart': self.request.get(u'automaticRestart'), u'onHostMaintenance': self.request.get(u'onHostMaintenance'), u'preemptible': self.request.get(u'preemptible'), } ) class InstanceServiceaccountsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({u'email': item.get('email'), u'scopes': item.get('scopes')}) def _response_from_item(self, item): return remove_nones_from_dict({u'email': item.get(u'email'), u'scopes': item.get(u'scopes')}) class InstanceShieldedinstanceconfig(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'enableSecureBoot': self.request.get('enable_secure_boot'), u'enableVtpm': self.request.get('enable_vtpm'), u'enableIntegrityMonitoring': self.request.get('enable_integrity_monitoring'), } ) def from_response(self): return remove_nones_from_dict( { u'enableSecureBoot': self.request.get(u'enableSecureBoot'), u'enableVtpm': self.request.get(u'enableVtpm'), u'enableIntegrityMonitoring': self.request.get(u'enableIntegrityMonitoring'), } ) class InstanceTags(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'fingerprint': self.request.get('fingerprint'), u'items': self.request.get('items')}) def from_response(self): return remove_nones_from_dict({u'fingerprint': self.request.get(u'fingerprint'), u'items': self.request.get(u'items')}) if __name__ == '__main__': main()
gpl-3.0
Fizzixnerd/pydel
del.py
1
7908
#! /usr/bin/python # This script shouldn't be dependant on the version of python, as long # as it's at least, like, 2.6 or something like that. Will work with # python3. # February 24ish, 2012 (1.0): # * initial version # February 27, 2012 (1.0.1): # * changed version info # * removed "copyright" notice # September 13, 2012: # * Fixed some spelling errors, etc. # * Began work on undeletion framework import argparse import logging import os import shutil import sys version = "1.0.1" trash_folder = os.path.expanduser(os.getenv("TRASH", default="~/.local/share/Trash/files/")) class DelError(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return self.msg.__repr__() class TrashDoesNotExistError(DelError): def __init__(self, msg): DelError.__init__(self, msg) class TrashIsNotFolderError(DelError): def __init__(self, msg): DelError.__init__(self, msg) class FileNotFoundError(DelError): def __init__(self, msg): DelError.__init__(self, msg) class FilenameConflictError(DelError): def __init__(self, msg): DelError.__init__(self, msg) def init_parser(): """Return a parser that is set up with the appropriate options for del.py.""" # TODO: Refactor this, subclassing argparse.ArgumentParser instead # of having this function. parser = argparse.ArgumentParser(description="Moves files to the TRASH.") parser.add_argument("-o", "--overwrite", action="store_true", help="Overwrite files with identical names already present in the TRASH. Default behavior is to append a number to the end of the filename before moving it to the trash.") parser.add_argument("-c", "--complain", action="store_true", help="Skip and print a complaint to stderr if a file with identical name is already present in the TRASH; raises exception if -b is present.") parser.add_argument("-v", "--verbose", action="store_true", help="Display helpful messages. Useful when used interactively.") parser.add_argument("-vv", "--debug", "--very-verbose", action="store_true", help="Display debugging information.") parser.add_argument("-b", "--brittle", action="store_true", help="Immediately raise an exception and exit on any error.") parser.add_argument("--version", action="store_true", help="Print name and version info and then exit.") parser.add_argument("-t", "--trash-folder", action="store", nargs=1, default=trash_folder, help="Specify the TRASH folder, where del should move trash to. Defaults to %s." % (trash_folder)) parser.add_argument("files", metavar="FILE", nargs="+", action="store", help="Path(s) to the FILE(S) which you wish to move to the TRASH.") return parser def trash_is_okay(trash_path): """Raise appropriate exceptions if there are problems with the filepath referenced by trash_path. Returns True if everything is a-okay.""" if not os.path.exists(trash_path): msg = "trash folder '%s' does not exist." % (trash_path) logging.critical(msg) raise TrashDoesNotExistError("ERROR: " + msg) elif not os.path.isdir(trash_path): msg = "trash folder '%s' is not a folder." % (trash_path) logging.critical(msg) raise TrashIsNotFolderError("ERROR: " + msg) else: return True def resolve_name_conflict(filepath, parsed_args): """Resolve a conflict with a file of the same name already existing in the trash, according to the flags in parsed_args. Default behavior is to append an integer to the end of filename to uniquify it""" filename = os.path.basename(filepath) msg = "A file with the name '%s' already exists in the trash." % \ (filename) if parsed_args.complain: logging.warning(msg + " Skipping.") if parsed_args.brittle: raise FilenameConflictError("ERROR: " + msg) elif parsed_args.overwrite: logging.info(msg + " Overwriting.") problem_filepath = os.path.join(parsed_args.trash_folder, filename) shutil.rmtree(problem_filepath) logging.debug("%s has been removed." % (problem_filepath)) shutil.move(filepath, parsed_args.trash_folder) logging.debug("%s has been moved to the trash." % (filepath)) else: # Default behavior; append an integer to filename to uniquify # it in the TRASH folder, then move file to TRASH as the new # name. logging.info(msg) ii = 0 new_filename = filename + str(ii) while os.path.exists(os.path.join(parsed_args.trash_folder, new_filename)): ii += 1 new_filename = filename + str(ii) # Now new_filename is unique in the trash . logging.info("Moving %s to trash as %s" % (filepath, new_filename)) unique_destination_path = os.path.join(parsed_args.trash_folder, new_filename) shutil.move(filepath, unique_destination_path) logging.debug("%s has been moved to %s." % \ (filepath, unique_destination_path)) def get_logging_level(parsed_args): """Return the appropriate logging level, given the commandline arguments parsed by init_parser.parse_args().""" # Default logging level is WARNING logging_level = logging.WARNING if parsed_args.verbose: logging_level = logging.INFO if parsed_args.debug: logging_level = logging.DEBUG return logging_level if __name__ == "__main__": # HACK: Check to see if --version is in the args given. parser # complains if there are no files given along with --version, so I # have to check directly. if "--version" in sys.argv: version_info = "del.py version %s\nLovingly crafted in 2012 by Matt Walker <matt.g.d.walker@gmail.com>\nLicensed under the GNU GPLv2 as published by the Free Software Foundation." % (version) print (version_info) exit (0) # Initialize the parser and logger. parsed_args = init_parser().parse_args() logging.basicConfig(level=get_logging_level(parsed_args)) # logging.exit_code keeps track to see if there were any errors logging.exit_code = 0 logging.debug("The parsed_args Namespace is the following: %s" % \ (parsed_args.__repr__())) # Make sure trash_folder actually exist and is a folder. if trash_is_okay(parsed_args.trash_folder): logging.debug("Trash folder is okay.") for filepath in parsed_args.files: logging.debug("Now operating on: %s" % (filepath)) # TODO: should refactor this: should be if os.path.exists: do # stuff and having non-existence as the else clause. if not os.path.exists(filepath): msg = "The file '%s' does not exist." % (filepath) logging.error(msg) # note that there was an error; set logging.exit_code = 1 logging.exit_code = 1 if parsed_args.brittle: raise FileNotFoundError(msg) else: # filepath exists filename = os.path.basename(filepath) if os.path.exists(os.path.join(parsed_args.trash_folder, filename)): # filename already exists in the trash as well resolve_name_conflict(filepath, parsed_args) else: # filename doesn't exist in the trash shutil.move(filepath, parsed_args.trash_folder) logging.debug("moved %s to trash." % (filepath)) # zero if everything was peachy; nonzero otherwise exit (logging.exit_code)
gpl-3.0
twiest/openshift-tools
openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_openshift/library/oc_obj.py
6
57367
#!/usr/bin/env python # pylint: disable=missing-docstring # flake8: noqa: T001 # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| # # Copyright 2016 Red Hat, Inc. and/or its affiliates # and other contributors as indicated by the @author tags. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- ''' OpenShiftCLI class that wraps the oc commands in a subprocess ''' # pylint: disable=too-many-lines from __future__ import print_function import atexit import copy import json import os import re import shutil import subprocess import tempfile # pylint: disable=import-error try: import ruamel.yaml as yaml except ImportError: import yaml from ansible.module_utils.basic import AnsibleModule # -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- # -*- -*- -*- Begin included fragment: doc/obj -*- -*- -*- DOCUMENTATION = ''' --- module: oc_obj short_description: Generic interface to openshift objects description: - Manage openshift objects programmatically. options: state: description: - Currently present is only supported state. required: true default: present choices: ["present", "absent", "list"] aliases: [] kubeconfig: description: - The path for the kubeconfig file to use for authentication required: false default: /etc/origin/master/admin.kubeconfig aliases: [] debug: description: - Turn on debug output. required: false default: False aliases: [] name: description: - Name of the object that is being queried. required: false default: None aliases: [] namespace: description: - The namespace where the object lives. required: false default: str aliases: [] all_namespaces: description: - Search in all namespaces for the object. required: false default: false aliases: [] kind: description: - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc". required: True default: None aliases: [] files: description: - A list of files provided for object required: false default: None aliases: [] delete_after: description: - Whether or not to delete the files after processing them. required: false default: false aliases: [] content: description: - Content of the object being managed. required: false default: None aliases: [] force: description: - Whether or not to force the operation required: false default: None aliases: [] selector: description: - Selector that gets added to the query. required: false default: None aliases: [] author: - "Kenny Woodson <kwoodson@redhat.com>" extends_documentation_fragment: [] ''' EXAMPLES = ''' oc_obj: kind: dc name: router namespace: default register: router_output ''' # -*- -*- -*- End included fragment: doc/obj -*- -*- -*- # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False): self.content = content self._separator = separator self.filename = filename self.__yaml_dict = content self.content_type = content_type self.backup = backup self.load(content_type=self.content_type) if self.__yaml_dict is None: self.__yaml_dict = {} @property def separator(self): ''' getter method for separator ''' return self._separator @separator.setter def separator(self, inc_sep): ''' setter method for separator ''' self._separator = inc_sep @property def yaml_dict(self): ''' getter method for yaml_dict ''' return self.__yaml_dict @yaml_dict.setter def yaml_dict(self, value): ''' setter method for yaml_dict ''' self.__yaml_dict = value @staticmethod def parse_key(key, sep='.'): '''parse the key allowing the appropriate separator''' common_separators = list(Yedit.com_sep - set([sep])) return re.findall(Yedit.re_key.format(''.join(common_separators)), key) @staticmethod def valid_key(key, sep='.'): '''validate the incoming key''' common_separators = list(Yedit.com_sep - set([sep])) if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key): return False return True @staticmethod def remove_entry(data, key, sep='.'): ''' remove data at location key ''' if key == '' and isinstance(data, dict): data.clear() return True elif key == '' and isinstance(data, list): del data[:] return True if not (key and Yedit.valid_key(key, sep)) and \ isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None # process last index for remove # expected list entry if key_indexes[-1][0]: if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 del data[int(key_indexes[-1][0])] return True # expected dict entry elif key_indexes[-1][1]: if isinstance(data, dict): del data[key_indexes[-1][1]] return True @staticmethod def add_entry(data, key, item=None, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a#b return c ''' if key == '': pass elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key: if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501 data = data[dict_key] continue elif data and not isinstance(data, dict): raise YeditException("Unexpected item type found while going through key " + "path: {} (at key: {})".format(key, dict_key)) data[dict_key] = {} data = data[dict_key] elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: raise YeditException("Unexpected item type found while going through key path: {}".format(key)) if key == '': data = item # process last index for add # expected list entry elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 data[int(key_indexes[-1][0])] = item # expected dict entry elif key_indexes[-1][1] and isinstance(data, dict): data[key_indexes[-1][1]] = item # didn't add/update to an existing list, nor add/update key to a dict # so we must have been provided some syntax like a.b.c[<int>] = "data" for a # non-existent array else: raise YeditException("Error adding to object at path: {}".format(key)) return data @staticmethod def get_entry(data, key, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a.b return c ''' if key == '': pass elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None return data @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' tmp_filename = filename + '.yedit' with open(tmp_filename, 'w') as yfd: yfd.write(contents) os.rename(tmp_filename, filename) def write(self): ''' write to file ''' if not self.filename: raise YeditException('Please specify a filename.') if self.backup and self.file_exists(): shutil.copy(self.filename, self.filename + '.orig') # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripDumper if supported. try: Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) except AttributeError: Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) return (True, self.yaml_dict) def read(self): ''' read from file ''' # check if it exists if self.filename is None or not self.file_exists(): return None contents = None with open(self.filename) as yfd: contents = yfd.read() return contents def file_exists(self): ''' return whether file exists ''' if os.path.exists(self.filename): return True return False def load(self, content_type='yaml'): ''' return yaml file ''' contents = self.read() if not contents and not self.content: return None if self.content: if isinstance(self.content, dict): self.yaml_dict = self.content return self.yaml_dict elif isinstance(self.content, str): contents = self.content # check if it is yaml try: if content_type == 'yaml' and contents: # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripLoader if supported. try: self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) except AttributeError: self.yaml_dict = yaml.safe_load(contents) # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass elif content_type == 'json' and contents: self.yaml_dict = json.loads(contents) except yaml.YAMLError as err: # Error loading yaml or json raise YeditException('Problem with loading yaml file. {}'.format(err)) return self.yaml_dict def get(self, key): ''' get a specified key''' try: entry = Yedit.get_entry(self.yaml_dict, key, self.separator) except KeyError: entry = None return entry def pop(self, path, key_or_item): ''' remove a key, value pair from a dict or an item for a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: return (False, self.yaml_dict) if isinstance(entry, dict): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if key_or_item in entry: entry.pop(key_or_item) return (True, self.yaml_dict) return (False, self.yaml_dict) elif isinstance(entry, list): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member ind = None try: ind = entry.index(key_or_item) except ValueError: return (False, self.yaml_dict) entry.pop(ind) return (True, self.yaml_dict) return (False, self.yaml_dict) def delete(self, path): ''' remove path from a dict''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: return (False, self.yaml_dict) result = Yedit.remove_entry(self.yaml_dict, path, self.separator) if not result: return (False, self.yaml_dict) return (True, self.yaml_dict) def exists(self, path, value): ''' check if value exists at path''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if isinstance(entry, list): if value in entry: return True return False elif isinstance(entry, dict): if isinstance(value, dict): rval = False for key, val in value.items(): if entry[key] != val: rval = False break else: rval = True return rval return value in entry return entry == value def append(self, path, value): '''append value to a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: self.put(path, []) entry = Yedit.get_entry(self.yaml_dict, path, self.separator) if not isinstance(entry, list): return (False, self.yaml_dict) # AUDIT:maybe-no-member makes sense due to loading data from # a serialized format. # pylint: disable=maybe-no-member entry.append(value) return (True, self.yaml_dict) # pylint: disable=too-many-arguments def update(self, path, value, index=None, curr_value=None): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if isinstance(entry, dict): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if not isinstance(value, dict): raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' + 'value=[{}] type=[{}]'.format(value, type(value))) entry.update(value) return (True, self.yaml_dict) elif isinstance(entry, list): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member ind = None if curr_value: try: ind = entry.index(curr_value) except ValueError: return (False, self.yaml_dict) elif index is not None: ind = index if ind is not None and entry[ind] != value: entry[ind] = value return (True, self.yaml_dict) # see if it exists in the list try: ind = entry.index(value) except ValueError: # doesn't exist, append it entry.append(value) return (True, self.yaml_dict) # already exists, return if ind is not None: return (False, self.yaml_dict) return (False, self.yaml_dict) def put(self, path, value): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry == value: return (False, self.yaml_dict) # deepcopy didn't work # Try to use ruamel.yaml and fallback to pyyaml try: tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) except AttributeError: tmp_copy = copy.deepcopy(self.yaml_dict) # set the format attributes if available try: tmp_copy.fa.set_block_style() except AttributeError: pass result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result is None: return (False, self.yaml_dict) # When path equals "" it is a special case. # "" refers to the root of the document # Only update the root path (entire document) when its a list or dict if path == '': if isinstance(result, list) or isinstance(result, dict): self.yaml_dict = result return (True, self.yaml_dict) return (False, self.yaml_dict) self.yaml_dict = tmp_copy return (True, self.yaml_dict) def create(self, path, value): ''' create a yaml file ''' if not self.file_exists(): # deepcopy didn't work # Try to use ruamel.yaml and fallback to pyyaml try: tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) except AttributeError: tmp_copy = copy.deepcopy(self.yaml_dict) # set the format attributes if available try: tmp_copy.fa.set_block_style() except AttributeError: pass result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result is not None: self.yaml_dict = tmp_copy return (True, self.yaml_dict) return (False, self.yaml_dict) @staticmethod def get_curr_value(invalue, val_type): '''return the current value''' if invalue is None: return None curr_value = invalue if val_type == 'yaml': curr_value = yaml.load(invalue) elif val_type == 'json': curr_value = json.loads(invalue) return curr_value @staticmethod def parse_value(inc_value, vtype=''): '''determine value type passed''' true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', 'on', 'On', 'ON', ] false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', 'off', 'Off', 'OFF'] # It came in as a string but you didn't specify value_type as string # we will convert to bool if it matches any of the above cases if isinstance(inc_value, str) and 'bool' in vtype: if inc_value not in true_bools and inc_value not in false_bools: raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype)) elif isinstance(inc_value, bool) and 'str' in vtype: inc_value = str(inc_value) # There is a special case where '' will turn into None after yaml loading it so skip if isinstance(inc_value, str) and inc_value == '': pass # If vtype is not str then go ahead and attempt to yaml load it. elif isinstance(inc_value, str) and 'str' not in vtype: try: inc_value = yaml.safe_load(inc_value) except Exception: raise YeditException('Could not determine type of incoming value. ' + 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype)) return inc_value @staticmethod def process_edits(edits, yamlfile): '''run through a list of edits and process them one-by-one''' results = [] for edit in edits: value = Yedit.parse_value(edit['value'], edit.get('value_type', '')) if edit.get('action') == 'update': # pylint: disable=line-too-long curr_value = Yedit.get_curr_value( Yedit.parse_value(edit.get('curr_value')), edit.get('curr_value_format')) rval = yamlfile.update(edit['key'], value, edit.get('index'), curr_value) elif edit.get('action') == 'append': rval = yamlfile.append(edit['key'], value) else: rval = yamlfile.put(edit['key'], value) if rval[0]: results.append({'key': edit['key'], 'edit': rval[1]}) return {'changed': len(results) > 0, 'results': results} # pylint: disable=too-many-return-statements,too-many-branches @staticmethod def run_ansible(params): '''perform the idempotent crud operations''' yamlfile = Yedit(filename=params['src'], backup=params['backup'], separator=params['separator']) state = params['state'] if params['src']: rval = yamlfile.load() if yamlfile.yaml_dict is None and state != 'present': return {'failed': True, 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) + 'file exists, that it is has correct permissions, and is valid yaml.'} if state == 'list': if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) yamlfile.yaml_dict = content if params['key']: rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} elif state == 'absent': if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) yamlfile.yaml_dict = content if params['update']: rval = yamlfile.pop(params['key'], params['value']) else: rval = yamlfile.delete(params['key']) if rval[0] and params['src']: yamlfile.write() return {'changed': rval[0], 'result': rval[1], 'state': state} elif state == 'present': # check if content is different than what is in the file if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) # We had no edits to make and the contents are the same if yamlfile.yaml_dict == content and \ params['value'] is None: return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} yamlfile.yaml_dict = content # If we were passed a key, value then # we enapsulate it in a list and process it # Key, Value passed to the module : Converted to Edits list # edits = [] _edit = {} if params['value'] is not None: _edit['value'] = params['value'] _edit['value_type'] = params['value_type'] _edit['key'] = params['key'] if params['update']: _edit['action'] = 'update' _edit['curr_value'] = params['curr_value'] _edit['curr_value_format'] = params['curr_value_format'] _edit['index'] = params['index'] elif params['append']: _edit['action'] = 'append' edits.append(_edit) elif params['edits'] is not None: edits = params['edits'] if edits: results = Yedit.process_edits(edits, yamlfile) # if there were changes and a src provided to us we need to write if results['changed'] and params['src']: yamlfile.write() return {'changed': results['changed'], 'result': results['results'], 'state': state} # no edits to make if params['src']: # pylint: disable=redefined-variable-type rval = yamlfile.write() return {'changed': rval[0], 'result': rval[1], 'state': state} # We were passed content but no src, key or value, or edits. Return contents in memory return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} return {'failed': True, 'msg': 'Unkown state passed'} # -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- # -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- # pylint: disable=too-many-lines # noqa: E301,E302,E303,T001 class OpenShiftCLIError(Exception): '''Exception class for openshiftcli''' pass ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary # pylint: disable=too-few-public-methods class OpenShiftCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, namespace, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftCLI ''' self.namespace = namespace self.verbose = verbose self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) self.all_namespaces = all_namespaces self.oc_binary = locate_oc_binary() # Pylint allows only 5 arguments to be passed. # pylint: disable=too-many-arguments def _replace_content(self, resource, rname, content, force=False, sep='.'): ''' replace the current object with the content ''' res = self._get(resource, rname) if not res['results']: return res fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, res['results'][0], separator=sep) changes = [] for key, value in content.items(): changes.append(yed.put(key, value)) if any([change[0] for change in changes]): yed.write() atexit.register(Utils.cleanup, [fname]) return self._replace(fname, force) return {'returncode': 0, 'updated': False} def _replace(self, fname, force=False): '''replace the current object with oc replace''' # We are removing the 'resourceVersion' to handle # a race condition when modifying oc objects yed = Yedit(fname) results = yed.delete('metadata.resourceVersion') if results[0]: yed.write() cmd = ['replace', '-f', fname] if force: cmd.append('--force') return self.openshift_cmd(cmd) def _create_from_content(self, rname, content): '''create a temporary file and then call oc create on it''' fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, content=content) yed.write() atexit.register(Utils.cleanup, [fname]) return self._create(fname) def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' cmd = ['delete', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) else: raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501 '''process a template template_name: the name of the template to process create: whether to send to oc create after processing params: the parameters for the template template_data: the incoming template's data; instead of a file ''' cmd = ['process'] if template_data: cmd.extend(['-f', '-']) else: cmd.append(template_name) if params: param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) results = self.openshift_cmd(cmd, output=True, input_data=template_data) if results['returncode'] != 0 or not create: return results fname = Utils.create_tmpfile(template_name + '-') yed = Yedit(fname, results['results']) yed.write() atexit.register(Utils.cleanup, [fname]) return self.openshift_cmd(['create', '-f', fname]) def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) cmd.extend(['-o', 'json']) rval = self.openshift_cmd(cmd, output=True) # Ensure results are retuned in an array if 'items' in rval: rval['results'] = rval['items'] elif not isinstance(rval['results'], list): rval['results'] = [rval['results']] return rval def _schedulable(self, node=None, selector=None, schedulable=True): ''' perform oadm manage-node scheduable ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 def _list_pods(self, node=None, selector=None, pod_selector=None): ''' perform oadm list pods node: the node in which to list pods selector: the label selector filter if provided pod_selector: the pod selector filter if provided ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # pylint: disable=too-many-arguments def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): ''' perform oadm manage-node evacuate ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') cmd.append('--evacuate') return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') def _version(self): ''' return the openshift version''' return self.openshift_cmd(['version'], output=True, output_type='raw') def _import_image(self, url=None, name=None, tag=None): ''' perform image import ''' cmd = ['import-image'] image = '{0}'.format(name) if tag: image += ':{0}'.format(tag) cmd.append(image) if url: cmd.append('--from={0}/{1}'.format(url, image)) cmd.append('-n{0}'.format(self.namespace)) cmd.append('--confirm') return self.openshift_cmd(cmd) def _run(self, cmds, input_data): ''' Actually executes the command. This makes mocking easier. ''' curr_env = os.environ.copy() curr_env.update({'KUBECONFIG': self.kubeconfig}) proc = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=curr_env) stdout, stderr = proc.communicate(input_data) return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): '''Base command for oc ''' cmds = [self.oc_binary] if oadm: cmds.append('adm') cmds.extend(cmd) if self.all_namespaces: cmds.extend(['--all-namespaces']) elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) if self.verbose: print(' '.join(cmds)) try: returncode, stdout, stderr = self._run(cmds, input_data) except OSError as ex: returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, "cmd": ' '.join(cmds)} if output_type == 'json': rval['results'] = {} if output and stdout: try: rval['results'] = json.loads(stdout) except ValueError as verr: if "No JSON object could be decoded" in verr.args: rval['err'] = verr.args elif output_type == 'raw': rval['results'] = stdout if output else '' if self.verbose: print("STDOUT: {0}".format(stdout)) print("STDERR: {0}".format(stderr)) if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, "stdout": stdout}) return rval class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: sfd.write(contents) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): ''' create a file in tmp with name and contents''' tmp = Utils.create_tmpfile(prefix=rname) if ftype == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripDumper'): Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) else: Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) elif ftype == 'json': Utils._write(tmp, json.dumps(data)) else: Utils._write(tmp, data) # Register cleanup when module is done atexit.register(Utils.cleanup, [tmp]) return tmp @staticmethod def create_tmpfile_copy(inc_file): '''create a temporary copy of a file''' tmpfile = Utils.create_tmpfile('lib_openshift-') Utils._write(tmpfile, open(inc_file).read()) # Cleanup the tmpfile atexit.register(Utils.cleanup, [tmpfile]) return tmpfile @staticmethod def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name @staticmethod def create_tmp_files_from_contents(content, content_type=None): '''Turn an array of dict: filename, content into a files array''' if not isinstance(content, list): content = [content] files = [] for item in content: path = Utils.create_tmp_file_from_contents(item['path'] + '-', item['data'], ftype=content_type) files.append({'name': os.path.basename(item['path']), 'path': path}) return files @staticmethod def cleanup(files): '''Clean up on exit ''' for sfile in files: if os.path.exists(sfile): if os.path.isdir(sfile): shutil.rmtree(sfile) elif os.path.isfile(sfile): os.remove(sfile) @staticmethod def exists(results, _name): ''' Check to see if the results include the name ''' if not results: return False if Utils.find_result(results, _name): return True return False @staticmethod def find_result(results, _name): ''' Find the specified result by name''' rval = None for result in results: if 'metadata' in result and result['metadata']['name'] == _name: rval = result break return rval @staticmethod def get_resource_file(sfile, sfile_type='yaml'): ''' return the service file ''' contents = None with open(sfile) as sfd: contents = sfd.read() if sfile_type == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripLoader'): contents = yaml.load(contents, yaml.RoundTripLoader) else: contents = yaml.safe_load(contents) elif sfile_type == 'json': contents = json.loads(contents) return contents @staticmethod def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict @staticmethod def add_custom_versions(versions): ''' create custom versions strings ''' versions_dict = {} for tech, version in versions.items(): # clean up "-" from version if "-" in version: version = version.split("-")[0] if version.startswith('v'): versions_dict[tech + '_numeric'] = version[1:].split('+')[0] # "v3.3.0.33" is what we have, we want "3.3" versions_dict[tech + '_short'] = version[1:4] return versions_dict @staticmethod def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0 # Disabling too-many-branches. This is a yaml dictionary comparison function # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements @staticmethod def check_def_equal(user_def, result_def, skip_keys=None, debug=False): ''' Given a user defined definition, compare it with the results given back by our query. ''' # Currently these values are autogenerated and we do not need to check them skip = ['metadata', 'status'] if skip_keys: skip.extend(skip_keys) for key, value in result_def.items(): if key in skip: continue # Both are lists if isinstance(value, list): if key not in user_def: if debug: print('User data does not have key [%s]' % key) print('User data: %s' % user_def) return False if not isinstance(user_def[key], list): if debug: print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) return False if len(user_def[key]) != len(value): if debug: print("List lengths are not equal.") print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) print("user_def: %s" % user_def[key]) print("value: %s" % value) return False for values in zip(user_def[key], value): if isinstance(values[0], dict) and isinstance(values[1], dict): if debug: print('sending list - list') print(type(values[0])) print(type(values[1])) result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) if not result: print('list compare returned false') return False elif value != user_def[key]: if debug: print('value should be identical') print(user_def[key]) print(value) return False # recurse on a dictionary elif isinstance(value, dict): if key not in user_def: if debug: print("user_def does not have key [%s]" % key) return False if not isinstance(user_def[key], dict): if debug: print("dict returned false: not instance of dict") return False # before passing ensure keys match api_values = set(value.keys()) - set(skip) user_values = set(user_def[key].keys()) - set(skip) if api_values != user_values: if debug: print("keys are not equal in dict") print(user_values) print(api_values) return False result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) if not result: if debug: print("dict returned false") print(result) return False # Verify each key, value pair is the same else: if key not in user_def or value != user_def[key]: if debug: print("value not equal; user_def does not have key") print(key) print(value) if key in user_def: print(user_def[key]) return False if debug: print('returning true') return True class OpenShiftCLIConfig(object): '''Generic Config''' def __init__(self, rname, namespace, kubeconfig, options): self.kubeconfig = kubeconfig self.name = rname self.namespace = namespace self._options = options @property def config_options(self): ''' return config options ''' return self._options def to_option_list(self, ascommalist=''): '''return all options as a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs''' return self.stringify(ascommalist) def stringify(self, ascommalist=''): ''' return the options hash as cli params in a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs ''' rval = [] for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: val = data['value'] rval.append('--{}={}'.format(key.replace('_', '-'), val)) return rval # -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- # -*- -*- -*- Begin included fragment: class/oc_obj.py -*- -*- -*- # pylint: disable=too-many-instance-attributes class OCObject(OpenShiftCLI): ''' Class to wrap the oc command line tools ''' # pylint allows 5. we need 6 # pylint: disable=too-many-arguments def __init__(self, kind, namespace, name=None, selector=None, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftOC ''' super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose, all_namespaces=all_namespaces) self.kind = kind self.name = name self.selector = selector def get(self): '''return a kind by name ''' results = self._get(self.kind, name=self.name, selector=self.selector) if (results['returncode'] != 0 and 'stderr' in results and '\"{}\" not found'.format(self.name) in results['stderr']): results['returncode'] = 0 return results def delete(self): '''delete the object''' results = self._delete(self.kind, name=self.name, selector=self.selector) if (results['returncode'] != 0 and 'stderr' in results and '\"{}\" not found'.format(self.name) in results['stderr']): results['returncode'] = 0 return results def create(self, files=None, content=None): ''' Create a config NOTE: This creates the first file OR the first conent. TODO: Handle all files and content passed in ''' if files: return self._create(files[0]) # pylint: disable=no-member # The purpose of this change is twofold: # - we need a check to only use the ruamel specific dumper if ruamel is loaded # - the dumper or the flow style change is needed so openshift is able to parse # the resulting yaml, at least until gopkg.in/yaml.v2 is updated if hasattr(yaml, 'RoundTripDumper'): content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper) else: content['data'] = yaml.safe_dump(content['data'], default_flow_style=False) content_file = Utils.create_tmp_files_from_contents(content)[0] return self._create(content_file['path']) # pylint: disable=too-many-function-args def update(self, files=None, content=None, force=False): '''update a current openshift object This receives a list of file names or content and takes the first and calls replace. TODO: take an entire list ''' if files: return self._replace(files[0], force) if content and 'data' in content: content = content['data'] return self.update_content(content, force) def update_content(self, content, force=False): '''update an object through using the content param''' return self._replace_content(self.kind, self.name, content, force=force) def needs_update(self, files=None, content=None, content_type='yaml'): ''' check to see if we need to update ''' objects = self.get() if objects['returncode'] != 0: return objects data = None if files: data = Utils.get_resource_file(files[0], content_type) elif content and 'data' in content: data = content['data'] else: data = content # if equal then no need. So not equal is True return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False) # pylint: disable=too-many-return-statements,too-many-branches @staticmethod def run_ansible(params, check_mode=False): '''perform the ansible idempotent code''' ocobj = OCObject(params['kind'], params['namespace'], params['name'], params['selector'], kubeconfig=params['kubeconfig'], verbose=params['debug'], all_namespaces=params['all_namespaces']) state = params['state'] api_rval = ocobj.get() ##### # Get ##### if state == 'list': return {'changed': False, 'results': api_rval, 'state': state} ######## # Delete ######## if state == 'absent': # verify its not in our results if (params['name'] is not None or params['selector'] is not None) and \ (len(api_rval['results']) == 0 or \ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)): return {'changed': False, 'state': state} if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'} api_rval = ocobj.delete() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} return {'changed': True, 'results': api_rval, 'state': state} # create/update: Must define a name beyond this point if not params['name']: return {'failed': True, 'msg': 'Please specify a name when state is present.'} if state == 'present': ######## # Create ######## if not Utils.exists(api_rval['results'], params['name']): if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'} # Create it here api_rval = ocobj.create(params['files'], params['content']) if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} # return the created object api_rval = ocobj.get() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} # Remove files if params['files'] and params['delete_after']: Utils.cleanup(params['files']) return {'changed': True, 'results': api_rval, 'state': state} ######## # Update ######## # if a file path is passed, use it. update = ocobj.needs_update(params['files'], params['content']) if not isinstance(update, bool): return {'failed': True, 'msg': update} # No changes if not update: if params['files'] and params['delete_after']: Utils.cleanup(params['files']) return {'changed': False, 'results': api_rval['results'][0], 'state': state} if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} api_rval = ocobj.update(params['files'], params['content'], params['force']) if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} # return the created object api_rval = ocobj.get() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} return {'changed': True, 'results': api_rval, 'state': state} # -*- -*- -*- End included fragment: class/oc_obj.py -*- -*- -*- # -*- -*- -*- Begin included fragment: ansible/oc_obj.py -*- -*- -*- # pylint: disable=too-many-branches def main(): ''' ansible oc module for services ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), namespace=dict(default='default', type='str'), all_namespaces=dict(defaul=False, type='bool'), name=dict(default=None, type='str'), files=dict(default=None, type='list'), kind=dict(required=True, type='str'), delete_after=dict(default=False, type='bool'), content=dict(default=None, type='dict'), force=dict(default=False, type='bool'), selector=dict(default=None, type='str'), ), mutually_exclusive=[["content", "files"], ["selector", "name"]], supports_check_mode=True, ) rval = OCObject.run_ansible(module.params, module.check_mode) if 'failed' in rval: module.fail_json(**rval) module.exit_json(**rval) if __name__ == '__main__': main() # -*- -*- -*- End included fragment: ansible/oc_obj.py -*- -*- -*-
apache-2.0
saradbowman/osf.io
osf/utils/tokens/handlers.py
3
4246
from rest_framework import status as http_status from flask import redirect, request import markupsafe from framework.auth.decorators import must_be_logged_in from framework.exceptions import HTTPError, PermissionsError from framework import status from osf.exceptions import UnsupportedSanctionHandlerKind, TokenError def registration_approval_handler(action, registration, registered_from): # TODO: Unnecessary and duplicated dictionary. status.push_status_message({ 'approve': 'Your registration approval has been accepted.', 'reject': 'Your disapproval has been accepted and the registration has been cancelled.', }[action], kind='success', trust=False) # Allow decorated view function to return response return None def embargo_handler(action, registration, registered_from): status.push_status_message({ 'approve': 'Your embargo approval has been accepted.', 'reject': 'Your disapproval has been accepted and the embargo has been cancelled.', }[action], kind='success', trust=False) # Allow decorated view function to return response return None def embargo_termination_handler(action, registration, registered_from): status.push_status_message({ 'approve': 'Your approval to make this embargo public has been accepted.', 'reject': 'Your disapproval has been accepted and this embargo will not be made public.', }[action], kind='success', trust=False) # Allow decorated view function to return response return None def retraction_handler(action, registration, registered_from): status.push_status_message({ 'approve': 'Your withdrawal approval has been accepted.', 'reject': 'Your disapproval has been accepted and the withdrawal has been cancelled.' }[action], kind='success', trust=False) # Allow decorated view function to return response return None @must_be_logged_in def sanction_handler(kind, action, payload, encoded_token, auth, **kwargs): from osf.models import ( Embargo, EmbargoTerminationApproval, RegistrationApproval, Retraction ) Model = { 'registration': RegistrationApproval, 'embargo': Embargo, 'embargo_termination_approval': EmbargoTerminationApproval, 'retraction': Retraction }.get(kind, None) if not Model: raise UnsupportedSanctionHandlerKind sanction_id = payload.get('sanction_id', None) sanction = Model.load(sanction_id) err_code = None err_message = None if not sanction: err_code = http_status.HTTP_400_BAD_REQUEST err_message = 'There is no {0} associated with this token.'.format( markupsafe.escape(Model.DISPLAY_NAME)) elif sanction.is_approved: # Simply strip query params and redirect if already approved return redirect(request.base_url) elif sanction.is_rejected: err_code = http_status.HTTP_410_GONE if kind in ['registration', 'embargo'] else http_status.HTTP_400_BAD_REQUEST err_message = 'This registration {0} has been rejected.'.format( markupsafe.escape(sanction.DISPLAY_NAME)) if err_code: raise HTTPError(err_code, data=dict( message_long=err_message )) do_action = getattr(sanction, action, None) if do_action: registration = sanction.registrations.get() registered_from = registration.registered_from try: do_action(auth.user, encoded_token) except TokenError as e: raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={ 'message_short': e.message_short, 'message_long': str(e) }) except PermissionsError as e: raise HTTPError(http_status.HTTP_401_UNAUTHORIZED, data={ 'message_short': 'Unauthorized access', 'message_long': str(e) }) sanction.save() return { 'registration': registration_approval_handler, 'embargo': embargo_handler, 'embargo_termination_approval': embargo_termination_handler, 'retraction': retraction_handler, }[kind](action, registration, registered_from)
apache-2.0
mrquim/repository.mrquim
script.module.youtube.dl/lib/youtube_dl/extractor/spike.py
34
2296
from __future__ import unicode_literals import re from .mtv import MTVServicesInfoExtractor class SpikeIE(MTVServicesInfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?spike\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)' _TESTS = [{ 'url': 'http://www.spike.com/video-clips/lhtu8m/auction-hunters-can-allen-ride-a-hundred-year-old-motorcycle', 'md5': '1a9265f32b0c375793d6c4ce45255256', 'info_dict': { 'id': 'b9c8221a-4e50-479a-b86d-3333323e38ba', 'ext': 'mp4', 'title': 'Auction Hunters|December 27, 2013|4|414|Can Allen Ride A Hundred Year-Old Motorcycle?', 'description': 'md5:fbed7e82ed5fad493615b3094a9499cb', 'timestamp': 1388120400, 'upload_date': '20131227', }, }, { 'url': 'http://www.spike.com/full-episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-209', 'md5': 'b25c6f16418aefb9ad5a6cae2559321f', 'info_dict': { 'id': '37ace3a8-1df6-48be-85b8-38df8229e241', 'ext': 'mp4', 'title': 'Lip Sync Battle|April 28, 2016|2|209|Joel McHale Vs. Jim Rash|Act 1', 'description': 'md5:a739ca8f978a7802f67f8016d27ce114', }, }, { 'url': 'http://www.spike.com/video-clips/lhtu8m/', 'only_matching': True, }, { 'url': 'http://www.spike.com/video-clips/lhtu8m', 'only_matching': True, }, { 'url': 'http://bellator.spike.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg', 'only_matching': True, }, { 'url': 'http://bellator.spike.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page', 'only_matching': True, }] _FEED_URL = 'http://www.spike.com/feeds/mrss/' _MOBILE_TEMPLATE = 'http://m.spike.com/videos/video.rbml?id=%s' _CUSTOM_URL_REGEX = re.compile(r'spikenetworkapp://([^/]+/[-a-fA-F0-9]+)') def _extract_mgid(self, webpage): mgid = super(SpikeIE, self)._extract_mgid(webpage) if mgid is None: url_parts = self._search_regex(self._CUSTOM_URL_REGEX, webpage, 'episode_id') video_type, episode_id = url_parts.split('/', 1) mgid = 'mgid:arc:{0}:spike.com:{1}'.format(video_type, episode_id) return mgid
gpl-2.0
wangxinxi/litecoin
test/functional/pruning.py
10
21280
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the pruning code. WARNING: This test uses 4GB of disk space. This test takes 30 mins or more (up to 2 hours) """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time import os MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def calc_usage(blockdir): return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], ["-prune=550"]] def setup_network(self): self.setup_nodes() self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 2) connect_nodes(self.nodes[2], 0) connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[0], 4) sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args, timewait=900) self.start_nodes() def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data for i in range(645): mine_large_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(self.prunedir+"blk00000.dat"): raise AssertionError("blk00000.dat is missing, pruning too early") self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir)) self.log.info("Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this for i in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) waitstart = time.time() while os.path.isfile(self.prunedir+"blk00000.dat"): time.sleep(0.1) if time.time() - waitstart > 30: raise AssertionError("blk00000.dat not pruned when it should be") self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: %d" % usage) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine self.stop_node(0) self.start_node(0, extra_args=self.full_node_default_args) # Mine 24 blocks in node 1 for i in range(24): if j == 0: mine_large_block(self.nodes[1], self.utxo_cache_1) else: # Add node1's wallet transactions back to the mempool, to # avoid the mined blocks from being too small. self.nodes[1].resendwallettransactions() self.nodes[1].generate(1) #tx's already in mempool from previous disconnects # Reorg back with 25 block chain from node 0 for i in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) # Create connections in the order so both nodes can see the reorg at the same time connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 0) sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir)) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain # Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) self.stop_node(1) self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"]) height = self.nodes[1].getblockcount() self.log.info("Current block height: %d" % height) invalidheight = height-287 badhash = self.nodes[1].getblockhash(invalidheight) self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight)) self.nodes[1].invalidateblock(badhash) # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(invalidheight - 1) assert(self.nodes[1].getblockcount() == invalidheight - 1) self.log.info("New best height: %d" % self.nodes[1].getblockcount()) # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[2], 1) sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount()) self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir)) self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") # Get node0's wallet transactions back in its mempool, to avoid the # mined blocks from being too small. self.nodes[0].resendwallettransactions() for i in range(22): # This can be slow, so do this in multiple RPC calls to avoid # RPC timeouts. self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects sync_blocks(self.nodes[0:3], timeout=300) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: %d" % usage) if (usage > 550): raise AssertionError("Pruning target not being met") return invalidheight,badhash def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) self.log.info("Will need to redownload block %d" % self.forkheight) # Verify that we have enough history to reorg back to the fork point # Although this is more than 288 blocks, because this chain was written more recently # and only its other 299 small and 220 large block are in the block files after it, # its expected to still be retained self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to the original chain created in # create_chain_with_stale_blocks because it doesn't know of any peer thats on that chain from which to # redownload its missing blocks. # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain # because it has all the block data. # However it must mine enough blocks to have a more work chain than the reorg_test chain in order # to trigger node 2's block download logic. # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine) self.nodes[0].invalidateblock(curchainhash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") waitstart = time.time() while self.nodes[2].getblockcount() < goalbestheight: time.sleep(0.1) if time.time() - waitstart > 900: raise AssertionError("Node 2 didn't reorg to proper height") assert(self.nodes[2].getbestblockhash() == goalbesthash) # Verify we can now have the data for a block previously pruned assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height(index)) # Check the return value. When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000) assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) if not has_block(0): raise AssertionError("blk00000.dat is missing when should still be there") # Does nothing node.pruneblockchain(height(0)) if not has_block(0): raise AssertionError("blk00000.dat is missing when should still be there") # height=500 should prune first file prune(500) if has_block(0): raise AssertionError("blk00000.dat is still there, should be pruned by now") if not has_block(1): raise AssertionError("blk00001.dat is missing when should still be there") # height=650 should prune second file prune(650) if has_block(1): raise AssertionError("blk00001.dat is still there, should be pruned by now") # height=1000 should not prune anything more, because tip-288 is in blk00002.dat. prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) if not has_block(2): raise AssertionError("blk00002.dat is still there, should be pruned by now") # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) if has_block(2): raise AssertionError("blk00002.dat is still there, should be pruned by now") if has_block(3): raise AssertionError("blk00003.dat is still there, should be pruned by now") # stop node, start back up with auto-prune at 550MB, make sure still runs self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.stop_node(2) self.start_node(2, extra_args=["-prune=550"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], 5) nds = [self.nodes[0], self.nodes[5]] sync_blocks(nds, wait=5, timeout=300) self.stop_node(5) #stop and start to trigger rescan self.start_node(5, extra_args=["-prune=550"]) self.log.info("Success") def run_test(self): self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") self.log.info("Mining a big blockchain of 995 blocks") # Determine default relay fee self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Cache for utxos, as the listunspent may take a long time later in the test self.utxo_cache_0 = [] self.utxo_cache_1 = [] self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() #1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can survive a 288 block reorg still") (self.forkheight,self.forkhash) = self.reorg_test() #(1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. # # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main()
mit
pratyushanand/linux
tools/perf/python/twatch.py
625
2726
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(context_switch = 0, thread = -1): cpus = perf.cpu_map() threads = perf.thread_map(thread) evsel = perf.evsel(type = perf.TYPE_SOFTWARE, config = perf.COUNT_SW_DUMMY, task = 1, comm = 1, mmap = 0, freq = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, context_switch = context_switch, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU) """What we want are just the PERF_RECORD_ lifetime events for threads, using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1 (the default), makes perf reenable irq_vectors:local_timer_entry, when disabling nohz, not good for some use cases where all we want is to get threads comes and goes... So use (perf.TYPE_SOFTWARE, perf_COUNT_SW_DUMMY, freq=0) instead.""" evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': """ To test the PERF_RECORD_SWITCH record, pick a pid and replace in the following line. Example output: cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 } cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 } cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 } cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 } It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT to figure out if this is a context switch in or out of the monitored threads. If bored, please add command line option parsing support for these options :-) """ # main(context_switch = 1, thread = 31463) main()
gpl-2.0
dannyman/ganeti_webmgr
ganeti_webmgr/muddle/shots/tests/templatetags.py
3
1527
from django.template import Context, Template from ganeti_webmgr.muddle.shots import register, TemplateMixer from ganeti_webmgr.muddle.shots.tests.registration import ShotsTestsBase __all__ = ['ShotTagTests'] TEMPLATE = "<b>{% load shots %}{% shot foo %}{% endshot %}</b>" TEMPLATE_INNER = ( "<ul>{% load shots %}{% shot foo %}<li>{{mixer}}</li>{% endshot %}</ul>") class ShotTagTests(ShotsTestsBase): def test_no_shot(self): text = Template(TEMPLATE).render(Context()) self.assertEqual("<b></b>", text) def test_no_mixers(self): register('foo') text = Template(TEMPLATE).render(Context()) self.assertEqual("<b></b>", text) def test_single_mixer(self): register('foo', TemplateMixer('shots/tests/foo1.html')) text = Template(TEMPLATE).render(Context()) self.assertEqual("<b>foo1</b>", text) def test_multiple_mixers(self): register('foo', TemplateMixer('shots/tests/foo1.html')) register('foo', TemplateMixer('shots/tests/foo2.html')) text = Template(TEMPLATE).render(Context()) self.assertEqual("<b>foo1foo2</b>", text) def test_inner_content(self): """ tests {% shot %} with inner content to wrap shots around """ register('foo', TemplateMixer('shots/tests/foo1.html')) register('foo', TemplateMixer('shots/tests/foo2.html')) text = Template(TEMPLATE_INNER).render(Context()) self.assertEqual("<ul><li>foo1</li><li>foo2</li></ul>", text)
gpl-2.0
seaotterman/tensorflow
tensorflow/python/ops/sparse_grad.py
19
10174
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in sparse_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable("SparseAddGrad") ops.NotDifferentiable("SparseConcat") ops.NotDifferentiable("SparseToDense") @ops.RegisterGradient("SparseReorder") def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad): """Gradients for the SparseReorder op. Args: op: the SparseReorder op unused_output_indices_grad: the incoming gradients of the output indices output_values_grad: the incoming gradients of the output values Returns: Gradient for each of the 3 input tensors: (input_indices, input_values, input_shape) The gradients for input_indices and input_shape is None. """ input_indices = op.inputs[0] input_shape = op.inputs[2] num_entries = array_ops.shape(input_indices)[0] entry_indices = math_ops.range(num_entries) sp_unordered = sparse_tensor.SparseTensor( input_indices, entry_indices, input_shape) sp_ordered = sparse_ops.sparse_reorder(sp_unordered) inverted_permutation = array_ops.invert_permutation(sp_ordered.values) return (None, array_ops.gather(output_values_grad, inverted_permutation), None) @ops.RegisterGradient("SparseAdd") def _SparseAddGrad(op, *grads): """The backward operator for the SparseAdd op. The SparseAdd op calculates A + B, where A, B, and the sum are all represented as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B. Args: op: the SparseAdd op *grads: the incoming gradients, one element per output of `op` Returns: Gradient for each of the 6 input tensors of SparseAdd: (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh) The gradients for the indices, shapes, and the threshold are None. """ val_grad = grads[1] a_indices = op.inputs[0] b_indices = op.inputs[3] sum_indices = op.outputs[0] # NOTE: we do not need to take `thresh` into account, since it simply affects # the non-zero elements of the sum, and we will peek into `sum_indices` in the # gradient op. # pylint: disable=protected-access a_val_grad, b_val_grad = gen_sparse_ops._sparse_add_grad(val_grad, a_indices, b_indices, sum_indices) a_val_grad.set_shape(op.inputs[1].get_shape()) b_val_grad.set_shape(op.inputs[4].get_shape()) # (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh) return (None, a_val_grad, None, None, b_val_grad, None, None) @ops.RegisterGradient("SparseTensorDenseAdd") def _SparseTensorDenseAddGrad(op, out_grad): sp_indices = op.inputs[0] # (sparse_indices, sparse_values, sparse_shape, dense) return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad) @ops.RegisterGradient("SparseReduceSum") def _SparseReduceSumGrad(op, out_grad): """Similar to gradient for the Sum Op (i.e. tf.reduce_sum()).""" sp_indices = op.inputs[0] sp_shape = op.inputs[2] output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3]) out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims) scale = sp_shape // math_ops.to_int64(output_shape_kept_dims) # (sparse_indices, sparse_values, sparse_shape, reduction_axes) return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale), None, None) @ops.RegisterGradient("SparseTensorDenseMatMul") def _SparseTensorDenseMatMulGrad(op, grad): """Gradients for the dense tensor in the SparseTensorDenseMatMul op. If either input is complex, no gradient is provided. Args: op: the SparseTensorDenseMatMul op grad: the incoming gradient Returns: Gradient for each of the 4 input tensors: (sparse_indices, sparse_values, sparse_shape, dense_tensor) The gradients for indices and shape are None. Raises: TypeError: When the two operands don't have the same type. """ a_indices, a_values, a_shape = op.inputs[:3] b = op.inputs[3] adj_a = op.get_attr("adjoint_a") adj_b = op.get_attr("adjoint_b") a_type = a_values.dtype.base_dtype b_type = b.dtype.base_dtype if a_type != b_type: raise TypeError("SparseTensorDenseMatMul op received operands with " "different types: ", a_type, " and ", b_type) if a_type in (ops.dtypes.complex64, ops.dtypes.complex128): raise NotImplementedError("SparseTensorDenseMatMul op does not support " "complex gradients.") # gradient w.r.t. dense b_grad = gen_sparse_ops._sparse_tensor_dense_mat_mul( # pylint: disable=protected-access a_indices, a_values, a_shape, grad, adjoint_a=not adj_a) if adj_b: b_grad = array_ops.transpose(b_grad) # gradient w.r.t. sparse values rows = a_indices[:, 0] cols = a_indices[:, 1] # TODO(zongheng, ebrevdo): add conjugates in the right places when complex # values are allowed. # TODO(zongheng): these gather calls could potentially duplicate rows/cols in # memory. If there is a need, we should look into implementing this more # intelligently to avoid duplicating data. parts_a = array_ops.gather(grad, rows if not adj_a else cols) parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b), cols if not adj_a else rows) a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1) # gradients w.r.t. (a_indices, a_values, a_shape, b) return (None, a_values_grad, None, b_grad) @ops.RegisterGradient("SparseDenseCwiseAdd") def _SparseDenseCwiseAddGrad(unused_op, unused_grad): raise NotImplementedError("Gradient for SparseDenseCwiseAdd is currently not" " implemented yet.") def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul): """Common code for SparseDenseCwise{Mul,Div} gradients.""" x_indices = op.inputs[0] x_shape = op.inputs[2] y = op.inputs[3] y_shape = math_ops.to_int64(array_ops.shape(y)) num_added_dims = array_ops.expand_dims( array_ops.size(x_shape) - array_ops.size(y_shape), 0) augmented_y_shape = array_ops.concat( [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0) scaling = x_shape // augmented_y_shape scaled_indices = x_indices // scaling scaled_indices = array_ops.slice(scaled_indices, array_ops.concat([[0], num_added_dims], 0), [-1, -1]) dense_vals = array_ops.gather_nd(y, scaled_indices) if is_mul: dx = grad * dense_vals dy_val = grad * op.inputs[1] else: dx = grad / dense_vals dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals)) # indices can repeat after scaling, so we can't use sparse_to_dense(). dy = sparse_ops.sparse_add( array_ops.zeros_like(y), sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape)) # (sp_indices, sp_vals, sp_shape, dense) return (None, dx, None, dy) @ops.RegisterGradient("SparseDenseCwiseMul") def _SparseDenseCwiseMulGrad(op, grad): """Gradients for SparseDenseCwiseMul.""" return _SparseDenseCwiseMulOrDivGrad(op, grad, True) @ops.RegisterGradient("SparseDenseCwiseDiv") def _SparseDenseCwiseDivGrad(op, grad): """Gradients for SparseDenseCwiseDiv.""" return _SparseDenseCwiseMulOrDivGrad(op, grad, False) @ops.RegisterGradient("SparseSoftmax") def _SparseSoftmaxGrad(op, grad): """Gradients for SparseSoftmax. The calculation is the same as SoftmaxGrad: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax where we now only operate on the non-zero values present in the SparseTensors. Args: op: the SparseSoftmax op. grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values. Returns: Gradients w.r.t. the input (sp_indices, sp_values, sp_shape). """ indices, shape = op.inputs[0], op.inputs[2] out_vals = op.outputs[0] sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape) sp_grad = sparse_tensor.SparseTensor(indices, grad, shape) sp_product = sparse_tensor.SparseTensor( indices, sp_output.values * sp_grad.values, shape) # [..., B, 1], dense. sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True) # sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse. sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced) grad_x = sp_sum.values * sp_output.values return [None, grad_x, None] @ops.RegisterGradient("SparseSparseMaximum") def _SparseSparseMaximumGrad(unused_op, unused_grad): raise NotImplementedError("Gradient for SparseSparseMaximum is currently not" " implemented yet.") @ops.RegisterGradient("SparseSparseMinimum") def _SparseSparseMinimumGrad(unused_op, unused_grad): raise NotImplementedError("Gradient for SparseSparseMinimum is currently not" " implemented yet.")
apache-2.0
Dineshs91/youtube-dl
youtube_dl/extractor/historicfilms.py
142
1580
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import parse_duration class HistoricFilmsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?historicfilms\.com/(?:tapes/|play)(?P<id>\d+)' _TEST = { 'url': 'http://www.historicfilms.com/tapes/4728', 'md5': 'd4a437aec45d8d796a38a215db064e9a', 'info_dict': { 'id': '4728', 'ext': 'mov', 'title': 'Historic Films: GP-7', 'description': 'md5:1a86a0f3ac54024e419aba97210d959a', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 2096, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) tape_id = self._search_regex( [r'class="tapeId"[^>]*>([^<]+)<', r'tapeId\s*:\s*"([^"]+)"'], webpage, 'tape id') title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = self._html_search_meta( 'thumbnailUrl', webpage, 'thumbnails') or self._og_search_thumbnail(webpage) duration = parse_duration(self._html_search_meta( 'duration', webpage, 'duration')) video_url = 'http://www.historicfilms.com/video/%s_%s_web.mov' % (tape_id, video_id) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, }
unlicense
segwit/atbcoin-insight
qa/rpc-tests/bipdersig.py
107
3245
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test the BIP66 changeover logic # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class BIP66Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, [])) self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"])) self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"])) connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 0) self.is_network_split = False self.sync_all() def run_test(self): cnt = self.nodes[0].getblockcount() # Mine some old-version blocks self.nodes[1].generate(100) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 100): raise AssertionError("Failed to mine 100 version=2 blocks") # Mine 750 new-version blocks for i in range(15): self.nodes[2].generate(50) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 850): raise AssertionError("Failed to mine 750 version=3 blocks") # TODO: check that new DERSIG rules are not enforced # Mine 1 new-version block self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 851): raise AssertionError("Failed to mine a version=3 blocks") # TODO: check that new DERSIG rules are enforced # Mine 198 new-version blocks for i in range(2): self.nodes[2].generate(99) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1049): raise AssertionError("Failed to mine 198 version=3 blocks") # Mine 1 old-version block self.nodes[1].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1050): raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks") # Mine 1 new-version blocks self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1051): raise AssertionError("Failed to mine a version=3 block") # Mine 1 old-version blocks try: self.nodes[1].generate(1) raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks") except JSONRPCException: pass self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1051): raise AssertionError("Accepted a version=2 block after 950 version=3 blocks") # Mine 1 new-version blocks self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1052): raise AssertionError("Failed to mine a version=3 block") if __name__ == '__main__': BIP66Test().main()
mit
Jgarcia-IAS/localizacion
openerp/addons/mrp/wizard/stock_move.py
110
3398
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ import openerp.addons.decimal_precision as dp class stock_move_consume(osv.osv_memory): _name = "stock.move.consume" _description = "Consume Products" _columns = { 'product_id': fields.many2one('product.product', 'Product', required=True, select=True), 'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True), 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True), 'location_id': fields.many2one('stock.location', 'Location', required=True), 'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'), } #TOFIX: product_uom should not have different category of default UOM of product. Qty should be convert into UOM of original move line before going in consume and scrap def default_get(self, cr, uid, fields, context=None): if context is None: context = {} res = super(stock_move_consume, self).default_get(cr, uid, fields, context=context) move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context) if 'product_id' in fields: res.update({'product_id': move.product_id.id}) if 'product_uom' in fields: res.update({'product_uom': move.product_uom.id}) if 'product_qty' in fields: res.update({'product_qty': move.product_uom_qty}) if 'location_id' in fields: res.update({'location_id': move.location_id.id}) return res def do_move_consume(self, cr, uid, ids, context=None): if context is None: context = {} move_obj = self.pool.get('stock.move') uom_obj = self.pool.get('product.uom') move_ids = context['active_ids'] for data in self.browse(cr, uid, ids, context=context): if move_ids and move_ids[0]: move = move_obj.browse(cr, uid, move_ids[0], context=context) qty = uom_obj._compute_qty(cr, uid, data['product_uom'].id, data.product_qty, data.product_id.uom_id.id) move_obj.action_consume(cr, uid, move_ids, qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context) return {'type': 'ir.actions.act_window_close'}
agpl-3.0
shubhdev/edx-platform
cms/djangoapps/contentstore/views/tests/test_checklists.py
104
7657
""" Unit tests for checklist methods in views.py. """ from contentstore.utils import reverse_course_url from contentstore.views.checklist import expand_checklist_action_url from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.django import modulestore import json from contentstore.tests.utils import CourseTestCase class ChecklistTestCase(CourseTestCase): """ Test for checklist get and put methods. """ def setUp(self): """ Creates the test course. """ super(ChecklistTestCase, self).setUp() self.course = CourseFactory.create(org='mitX', number='333', display_name='Checklists Course') self.checklists_url = self.get_url() def get_url(self, checklist_index=None): url_args = {'checklist_index': checklist_index} if checklist_index else None return reverse_course_url('checklists_handler', self.course.id, kwargs=url_args) def get_persisted_checklists(self): """ Returns the checklists as persisted in the modulestore. """ return modulestore().get_item(self.course.location).checklists def compare_checklists(self, persisted, request): """ Handles url expansion as possible difference and descends into guts """ self.assertEqual(persisted['short_description'], request['short_description']) expanded_checklist = expand_checklist_action_url(self.course, persisted) for pers, req in zip(expanded_checklist['items'], request['items']): self.assertEqual(pers['short_description'], req['short_description']) self.assertEqual(pers['long_description'], req['long_description']) self.assertEqual(pers['is_checked'], req['is_checked']) self.assertEqual(pers['action_url'], req['action_url']) self.assertEqual(pers['action_text'], req['action_text']) self.assertEqual(pers['action_external'], req['action_external']) def test_get_checklists(self): """ Tests the get checklists method and URL expansion. """ response = self.client.get(self.checklists_url) self.assertContains(response, "Getting Started With Studio") # Verify expansion of action URL happened. self.assertContains(response, 'course_team/mitX/333/Checklists_Course') # Verify persisted checklist does NOT have expanded URL. checklist_0 = self.get_persisted_checklists()[0] self.assertEqual('ManageUsers', get_action_url(checklist_0, 0)) payload = response.content # Now delete the checklists from the course and verify they get repopulated (for courses # created before checklists were introduced). self.course.checklists = None # Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore self.course.save() modulestore().update_item(self.course, self.user.id) self.assertEqual(self.get_persisted_checklists(), None) response = self.client.get(self.checklists_url) self.assertEqual(payload, response.content) def test_get_checklists_html(self): """ Tests getting the HTML template for the checklists page). """ response = self.client.get(self.checklists_url, HTTP_ACCEPT='text/html') self.assertContains(response, "Getting Started With Studio") # The HTML generated will define the handler URL (for use by the Backbone model). self.assertContains(response, self.checklists_url) def test_update_checklists_no_index(self): """ No checklist index, should return all of them. """ returned_checklists = json.loads(self.client.get(self.checklists_url).content) # Verify that persisted checklists do not have expanded action URLs. # compare_checklists will verify that returned_checklists DO have expanded action URLs. pers = self.get_persisted_checklists() self.assertEqual('CourseOutline', get_first_item(pers[1]).get('action_url')) for pay, resp in zip(pers, returned_checklists): self.compare_checklists(pay, resp) def test_update_checklists_index_ignored_on_get(self): """ Checklist index ignored on get. """ update_url = self.get_url(1) returned_checklists = json.loads(self.client.get(update_url).content) for pay, resp in zip(self.get_persisted_checklists(), returned_checklists): self.compare_checklists(pay, resp) def test_update_checklists_post_no_index(self): """ No checklist index, will error on post. """ response = self.client.post(self.checklists_url) self.assertContains(response, 'Could not save checklist', status_code=400) def test_update_checklists_index_out_of_range(self): """ Checklist index out of range, will error on post. """ update_url = self.get_url(100) response = self.client.post(update_url) self.assertContains(response, 'Could not save checklist', status_code=400) def test_update_checklists_index(self): """ Check that an update of a particular checklist works. """ update_url = self.get_url(1) payload = self.course.checklists[1] self.assertFalse(get_first_item(payload).get('is_checked')) self.assertEqual('CourseOutline', get_first_item(payload).get('action_url')) get_first_item(payload)['is_checked'] = True returned_checklist = json.loads(self.client.ajax_post(update_url, payload).content) self.assertTrue(get_first_item(returned_checklist).get('is_checked')) persisted_checklist = self.get_persisted_checklists()[1] # Verify that persisted checklist does not have expanded action URLs. # compare_checklists will verify that returned_checklist DOES have expanded action URLs. self.assertEqual('CourseOutline', get_first_item(persisted_checklist).get('action_url')) self.compare_checklists(persisted_checklist, returned_checklist) def test_update_checklists_delete_unsupported(self): """ Delete operation is not supported. """ update_url = self.get_url(100) response = self.client.delete(update_url) self.assertEqual(response.status_code, 405) def test_expand_checklist_action_url(self): """ Tests the method to expand checklist action url. """ def test_expansion(checklist, index, stored, expanded): """ Tests that the expected expanded value is returned for the item at the given index. Also verifies that the original checklist is not modified. """ self.assertEqual(get_action_url(checklist, index), stored) expanded_checklist = expand_checklist_action_url(self.course, checklist) self.assertEqual(get_action_url(expanded_checklist, index), expanded) # Verify no side effect in the original list. self.assertEqual(get_action_url(checklist, index), stored) test_expansion(self.course.checklists[0], 0, 'ManageUsers', '/course_team/mitX/333/Checklists_Course') test_expansion(self.course.checklists[1], 1, 'CourseOutline', '/course/mitX/333/Checklists_Course') test_expansion(self.course.checklists[2], 0, 'http://help.edge.edx.org/', 'http://help.edge.edx.org/') def get_first_item(checklist): """ Returns the first item from the checklist. """ return checklist['items'][0] def get_action_url(checklist, index): """ Returns the action_url for the item at the specified index in the given checklist. """ return checklist['items'][index]['action_url']
agpl-3.0
zeehio/META-SHARE
metashare/accounts/tests.py
4
30182
import logging import django.test from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME from django.contrib.admin.sites import LOGIN_FORM_KEY from django.contrib.auth import REDIRECT_FIELD_NAME from django.contrib.auth.models import User, Permission, Group from django.contrib.contenttypes.models import ContentType from django.core.urlresolvers import reverse from django.test.client import Client from metashare import test_utils from metashare.accounts import views from metashare.accounts.models import RegistrationRequest, ResetRequest, \ EditorGroup, UserProfile, Organization, EditorGroupApplication, \ EditorGroupManagers from metashare.repository.management import GROUP_GLOBAL_EDITORS from metashare.settings import DJANGO_BASE, LOG_HANDLER # Setup logging support. LOGGER = logging.getLogger(__name__) LOGGER.addHandler(LOG_HANDLER) class ContactFormTest(django.test.TestCase): """ A test case for tests around the node maintainer contact form page. """ test_login = None @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) UserProfileTest.test_login = { REDIRECT_FIELD_NAME: '/{}'.format(DJANGO_BASE), LOGIN_FORM_KEY: 1, 'username': 'editoruser', 'password': 'secret', } User.objects.create_user(UserProfileTest.test_login['username'], 'editor@example.com', UserProfileTest.test_login['password']) @classmethod def tearDownClass(cls): test_utils.clean_user_db() LOGGER.info("finished '{}' tests".format(cls.__name__)) def test_contact_form_access(self): """ Verifies that the contact form page is only accessible by registered users. """ # verify that anonymous access is forbidden and we are redirected to the # login page client = Client() response = client.get(reverse(views.contact), follow=True) self.assertNotContains(response, 'Contact Node Maintainers') self.assertTemplateUsed(response, 'login.html') # verify that access with a normal/registered user is possible client = test_utils.get_client_with_user_logged_in( UserProfileTest.test_login) response = client.get(reverse(views.contact)) self.assertContains(response, 'Contact Node Maintainers') self.assertTemplateUsed(response, 'accounts/contact_maintainers.html') def test_contact_form_sending(self): """ Verifies that the submitting the contact form is possible. """ # verify that submitting as an anonymous user is forbidden and we are # redirected to the login page client = Client() response = client.post(reverse(views.contact), data={'message': 'This is a sufficiently long test message for the node ' 'maintainer contact form.', 'subject': 'Test Request'}, follow=True) self.assertNotContains(response, 'We have received your message and successfully sent it') self.assertTemplateUsed(response, 'login.html') # verify that submitting with a normal/registered user is possible client = test_utils.get_client_with_user_logged_in( UserProfileTest.test_login) response = client.post(reverse(views.contact), data={'message': 'This is a sufficiently long test message for the node ' 'maintainer contact form.', 'subject': 'Test Request'}, follow=True) self.assertContains(response, 'We have received your message and successfully sent it') class UserProfileTest(django.test.TestCase): """ A test case for tests around the user profile page. """ test_login = None test_organization_1 = None test_organization_2 = None test_editor_group_1 = None test_editor_group_2 = None ms_full_member_perm = None ms_assoc_member_perm = None @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) UserProfileTest.test_login = { REDIRECT_FIELD_NAME: '/{}'.format(DJANGO_BASE), LOGIN_FORM_KEY: 1, 'username': 'editoruser', 'password': 'secret', } # create some test organizations UserProfileTest.test_organization_1 = \ Organization.objects.create(name='test_organization_1') UserProfileTest.test_organization_2 = \ Organization.objects.create(name='test_organization_2') # create some test editor groups UserProfileTest.test_editor_group_1 = \ EditorGroup.objects.create(name='test_editor_group_1') UserProfileTest.test_editor_group_2 = \ EditorGroup.objects.create(name='test_editor_group_2') # get the two META-SHARE membership permissions _profile_ct = ContentType.objects.get_for_model(UserProfile) UserProfileTest.ms_full_member_perm = Permission.objects.get( content_type=_profile_ct, codename='ms_full_member') UserProfileTest.ms_assoc_member_perm = Permission.objects.get( content_type=_profile_ct, codename='ms_associate_member') @classmethod def tearDownClass(cls): test_utils.clean_user_db() LOGGER.info("finished '{}' tests".format(cls.__name__)) def setUp(self): self.test_user = test_utils.create_editor_user( UserProfileTest.test_login['username'], 'editor@example.com', UserProfileTest.test_login['password'], (UserProfileTest.test_editor_group_1,)) self.client = test_utils.get_client_with_user_logged_in( UserProfileTest.test_login) def test_verify_profile_is_editable(self): """ Verifies that the user profile page can be edited. """ _profile = self.test_user.get_profile() _profile.position = 'whatever' _profile.homepage = 'http://example.org' _profile.save() self.client.post(reverse(views.edit_profile), data={'affiliation': 'test organization', 'homepage': 'http://www.example.com/new', 'birthdate': '', 'position': 'whatever'}, follow=True) # reload the profile from the database to work around Django bug #17750 _profile = UserProfile.objects.get(user=self.test_user) self.assertEquals(_profile.affiliation, 'test organization') self.assertEquals(_profile.homepage, 'http://www.example.com/new') self.assertEquals(_profile.birthdate, None) self.assertEquals(_profile.position, 'whatever') def test_verify_ms_membership_shown(self): """ Verifies that the current META-SHARE membership status is shown on the user profile page. """ # verify that the user doesn't have any META-SHARE membership by default response = self.client.get(reverse(views.edit_profile)) self.assertNotContains(response, 'META-SHARE Membership') # verify that an added META-SHARE associate membership is shown self.test_user.user_permissions.add( UserProfileTest.ms_assoc_member_perm) response = self.client.get(reverse(views.edit_profile)) self.assertContains(response, 'META-SHARE Membership') self.assertContains(response, 'associate member') self.test_user.user_permissions.remove( UserProfileTest.ms_assoc_member_perm) response = self.client.get(reverse(views.edit_profile)) self.assertNotContains(response, 'META-SHARE Membership') # verify that an added META-SHARE full membership is shown self.test_user.user_permissions.add(UserProfileTest.ms_full_member_perm) response = self.client.get(reverse(views.edit_profile)) self.assertContains(response, 'META-SHARE Membership') self.assertContains(response, 'full member') self.test_user.user_permissions.remove( UserProfileTest.ms_full_member_perm) response = self.client.get(reverse(views.edit_profile)) self.assertNotContains(response, 'META-SHARE Membership') def test_verify_editor_groups_shown(self): """ Verifies that the current editor group memberships are shown on the user profile page. """ # verify that the user is shown as belonging to editor group 1 response = self.client.get(reverse(views.edit_profile)) self.assertContains(response, 'Editor Group') self.assertContains(response, 'test_editor_group_1') # verify that the user is shown as belonging to editor groups 1 and 2 # after adding editor group 2 self.test_user.groups.add(UserProfileTest.test_editor_group_2) response = self.client.get(reverse(views.edit_profile)) self.assertContains(response, 'Editor Group') self.assertContains(response, 'test_editor_group_1') self.assertContains(response, 'test_editor_group_2') # verify that the user is not shown as belonging to any editor groups # after removing group membership self.test_user.groups.clear() response = self.client.get(reverse(views.edit_profile)) self.assertNotContains(response, 'Editor Group') def test_verify_organizations_shown(self): """ Verifies that the current organization memberships are shown on the user profile page. """ # verify that the user is not shown as belonging to any organizations by # default response = self.client.get(reverse(views.edit_profile)) self.assertNotContains(response, 'Organization') # verify that the user is shown as belonging to organization 1 after # adding organization 1 self.test_user.groups.add(UserProfileTest.test_organization_1) response = self.client.get(reverse(views.edit_profile)) self.assertContains(response, 'Organization') self.assertContains(response, 'test_organization_1') # verify that the user is shown as belonging to organizations 1 and 2 # after adding organization 2 self.test_user.groups.add(UserProfileTest.test_organization_2) response = self.client.get(reverse(views.edit_profile)) self.assertContains(response, 'Organization') self.assertContains(response, 'test_organization_1') self.assertContains(response, 'test_organization_2') class CreateViewTest(django.test.TestCase): @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) @classmethod def tearDownClass(cls): LOGGER.info("finished '{}' tests".format(cls.__name__)) def testCreateInitial(self): client = Client() response = client.get('/{0}accounts/create/'.format(DJANGO_BASE)) self.assertEqual('accounts/create_account.html', response.templates[0].name) self.assertNotContains(response, "Please fill in all fields", status_code=200) def testCreatePost(self): client = Client() post_data = {'shortname':'test', 'first_name':'Test', 'last_name':'Testson', 'email':'a@b.com', 'password':'test', 'confirm_password': 'test', 'accepted_tos': 'yes'} response = client.post('/{0}accounts/create/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('frontpage.html', response.templates[0].name) self.assertNotContains(response, "Please fill in all fields", status_code=200) def testBrokenPost(self): client = Client() post_data = {'shortname':'test', 'first_name':'Test', 'last_name':'Testson', 'password':'test', 'confirm_password': 'test'} response = client.post('/{0}accounts/create/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('accounts/create_account.html', response.templates[0].name) self.assertFormError(response, 'form', 'email', 'This field is required.') self.assertContains(response, "Please fill in all fields", status_code=200) def tearDown(self): test_utils.clean_user_db() class RegistrationRequestTest(django.test.TestCase): @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) @classmethod def tearDownClass(cls): LOGGER.info("finished '{}' tests".format(cls.__name__)) def setUp(self): _user = User.objects.create_user('test', 'test@test.com', 'test') _user.first_name = 'Test' _user.last_name = 'Testson' _user.save() self.reg_request = RegistrationRequest.objects.create(user=_user) self.client = Client() def testRegistrationRequestionCorrect(self): self.assertEqual('<RegistrationRequest "test">', self.reg_request.__unicode__()) self.assertEqual('test', self.reg_request.user.username) self.assertEqual('Test', self.reg_request.user.first_name) self.assertEqual('Testson', self.reg_request.user.last_name) self.assertEqual('test@test.com', self.reg_request.user.email) def testCanRetrieveFromDB(self): test_entry = RegistrationRequest.objects.get(pk=self.reg_request.pk) self.assertIsNotNone(test_entry) self.assertEqual(test_entry, self.reg_request) def testCanValidate(self): self.reg_request.full_clean() def testCanRegister(self): _prev_count = RegistrationRequest.objects.count() response = self.client.post(reverse(views.create), {'first_name': 'Test', 'last_name': 'Testson2', 'shortname': 'good', 'email': 'ok@example.com', 'password': 'secret', 'confirm_password': 'secret', 'accepted_tos': 'yes'}, follow=True) self.assertContains(response, 'received your registration data', msg_prefix="should have successfully created a registration") self.assertEquals(_prev_count + 1, RegistrationRequest.objects.count(), "should have successfully created an additional registration") def testValidateCatchesBrokenRequest1(self): response = self.client.post(reverse(views.create), {'first_name': 'Test', 'last_name': 'Testson', 'email': 'broken1@test.com', 'password': 'test1', 'confirm_password': 'test1'}) self.assertContains(response, 'This field is required.', msg_prefix="should have shown an error due to missing user name") def testValidateCatchesBrokenRequest2(self): response = self.client.post(reverse(views.create), {'shortname': 'broken2', 'last_name': 'Testson', 'email': 'broken2@test.com', 'password': 'test2', 'confirm_password': 'test2'}) self.assertContains(response, 'This field is required.', msg_prefix="should have shown an error due to missing first name") def testValidateCatchesBrokenRequest3(self): response = self.client.post(reverse(views.create), {'shortname': 'broken3', 'first_name': 'Test', 'email': 'broken3@test.com', 'password': 'test3', 'confirm_password': 'test3'}) self.assertContains(response, 'This field is required.', msg_prefix="should have shown an error due to missing last name") def testValidateCatchesBrokenRequest4(self): response = self.client.post(reverse(views.create), {'shortname': 'broken4', 'first_name': 'Test', 'last_name': 'Testson', 'email': 'not an email', 'password': 'test4', 'confirm_password': 'test4'}) self.assertContains(response, 'Enter a valid e-mail address.', msg_prefix="should have shown an error due to bad e-mail") def testValidateCatchesBrokenRequest5(self): response = self.client.post(reverse(views.create), {'shortname': 'broken5', 'first_name': 'Test', 'last_name': 'Testson', 'password': 'test5', 'confirm_password': 'test5'}) self.assertContains(response, 'This field is required.', msg_prefix="should have shown an error due to missing e-mail") def testValidateCatchesBrokenRequest6(self): response = self.client.post(reverse(views.create), {'shortname': 'broken6', 'first_name': 'Test', 'email': 'x@bla.com', 'last_name': 'Testson', 'confirm_password': 'test6'}) self.assertContains(response, 'This field is required.', msg_prefix="should have shown an error due to missing password") def testValidateCatchesBrokenRequest7(self): response = self.client.post(reverse(views.create), {'shortname': 'broken7', 'first_name': 'Test', 'email': 'x@bla.com', 'last_name': 'Testson', 'password': 'test7'}) self.assertContains(response, 'This field is required.', msg_prefix="should have shown an error due to missing confirmation") def testValidateCatchesBrokenRequest8(self): response = self.client.post(reverse(views.create), {'shortname': 'broken8', 'first_name': 'Test', 'email': 'x@bla.com', 'last_name': 'Testson', 'password': 'test8', 'confirm_password': 'bad'}) self.assertContains(response, "The two password fields did not match.", msg_prefix="should have shown an error due to bad confirmation") def testUniqueUserNameRegistration(self): response = self.client.post(reverse(views.create), {'shortname': self.reg_request.user.username, 'first_name': 'Test', 'email': 'x@bla.com', 'last_name': 'Testson', 'password': 'test', 'confirm_password': 'test'}) self.assertContains(response, "account name already exists", msg_prefix="should have shown an error due to duplicate account") def testUniqueEmailRegistration(self): response = self.client.post(reverse(views.create), {'shortname': 'bla', 'first_name': 'Test', 'email': self.reg_request.user.email, 'last_name': 'Testson', 'password': 'test', 'confirm_password': 'test'}) self.assertContains(response, "already an account registered with this e-mail address", msg_prefix="should have shown an error due to duplicate e-mail") def tearDown(self): test_utils.clean_user_db() class ResetPasswordTest(django.test.TestCase): user = None @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) @classmethod def tearDownClass(cls): LOGGER.info("finished '{}' tests".format(cls.__name__)) def setUp(self): """ Sets up some resources with which to test. """ self.user = User.objects.create_user('normaluser', 'normal@example.com', 'secret') def tearDown(self): """ Clean up the test """ test_utils.clean_user_db() def test_reset_requires_user_name_and_email(self): client = Client() # invalid user name post_data = {'username':'normaluserABC', 'email':'normal@example.com'} response = client.post('/{0}accounts/reset/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('accounts/reset_account.html', response.templates[0].name) self.assertContains(response, "Not a valid username-email combination", status_code=200) # invalid email post_data = {'username':'normaluser', 'email':'normal@example.comABC'} response = client.post('/{0}accounts/reset/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('accounts/reset_account.html', response.templates[0].name) self.assertContains(response, "Not a valid username-email combination", status_code=200) # invalid user name and email post_data = {'username':'normaluserABC', 'email':'normal@example.comABC'} response = client.post('/{0}accounts/reset/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('accounts/reset_account.html', response.templates[0].name) self.assertContains(response, "Not a valid username-email combination", status_code=200) # valid user name and email post_data = {'username':'normaluser', 'email':'normal@example.com'} response = client.post('/{0}accounts/reset/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('frontpage.html', response.templates[0].name) self.assertContains( response, "We have received your reset request and sent you an email with further reset instructions", status_code=200) def test_reset_request_validation(self): client = Client() old_passwd = self.user.password post_data = {'username':'normaluser', 'email':'normal@example.com'} # create reset request client.post('/{0}accounts/reset/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertNotEqual(None, ResetRequest.objects.get(user=self.user)) request = ResetRequest.objects.get(user=self.user) # confirm reset request response = client.get( '/{0}accounts/reset/{1}/'.format(DJANGO_BASE, request.uuid), follow=True) self.assertEqual('frontpage.html', response.templates[0].name) self.assertContains( response, "We have re-activated your user account and sent you an email with your personal password which allows you to login to the website.", status_code=200) # check that password has changed for user self.user = User.objects.get(username=self.user.username) self.assertNotEquals(old_passwd, self.user.password) # check that reset request is deleted self.assertEquals(0, len(ResetRequest.objects.all())) class ChangePasswordTest(django.test.TestCase): user = None @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) @classmethod def tearDownClass(cls): LOGGER.info("finished '{}' tests".format(cls.__name__)) def setUp(self): """ Sets up some resources with which to test. """ self.user = User.objects.create_user('normaluser', 'normal@example.com', 'secret') def tearDown(self): """ Clean up the test """ test_utils.clean_user_db() def test_password_change(self): client = Client() client.login(username='normaluser', password='secret') old_passwd = self.user.password post_data = {'old_password':'secret', 'new_password1':'new_secret', 'new_password2':'new_secret'} response = client.post('/{0}accounts/profile/change_password/'.format(DJANGO_BASE), follow=True, data=post_data) self.assertEqual('accounts/change_password_done.html', response.templates[0].name) self.assertContains( response, "Password change successful", status_code=200) # check that password has changed for user self.user = User.objects.get(username=self.user.username) self.assertNotEquals(old_passwd, self.user.password) class EditorGroupApplicationTest(django.test.TestCase): """ A test case for `EditorGroupApplication`-related functionality. """ app_url = "/{}accounts/editor_group_application/".format(DJANGO_BASE) manage_url = "/{}admin/accounts/editorgroupapplication/".format(DJANGO_BASE) editor_group_1 = None editor_group_2 = None manager_group_1 = None manager_group_2 = None manager_user_1 = None manager_user_2 = None superuser = None @classmethod def setUpClass(cls): LOGGER.info("running '{}' tests...".format(cls.__name__)) # create two test editor groups EditorGroupApplicationTest.editor_group_1 = \ EditorGroup.objects.create(name='test_editor_group_1') EditorGroupApplicationTest.editor_group_2 = \ EditorGroup.objects.create(name='test_editor_group_2') # create two test managing editors groups EditorGroupApplicationTest.manager_group_1 = \ EditorGroupManagers.objects.create(name='test_manager_group_1', managed_group=EditorGroupApplicationTest.editor_group_1) EditorGroupApplicationTest.manager_group_2 = \ EditorGroupManagers.objects.create(name='test_manager_group_2', managed_group=EditorGroupApplicationTest.editor_group_2) # create two test managing editor users for the two created groups EditorGroupApplicationTest.manager_user_1 = \ test_utils.create_manager_user('manageruser1', 'mu@example.com', 'secret', (EditorGroupApplicationTest.editor_group_1, EditorGroupApplicationTest.manager_group_1)) EditorGroupApplicationTest.manager_user_2 = \ test_utils.create_manager_user('manageruser2', 'mu@example.com', 'secret', (EditorGroupApplicationTest.editor_group_2, EditorGroupApplicationTest.manager_group_2)) # create a test superuser EditorGroupApplicationTest.superuser = User.objects.create_superuser( 'superuser', 'su@example.com', 'secret') @classmethod def tearDownClass(cls): test_utils.clean_user_db() LOGGER.info("finished '{}' tests".format(cls.__name__)) def setUp(self): """ Sets up a test user and a `Client` with which to test. """ self.normal_user = User.objects.create_user( 'normaluser', 'normal@example.com', 'secret') self.client = Client() def tearDown(self): """ Clean up the test """ self.normal_user.delete() EditorGroupApplication.objects.all().delete() def test_editor_permissions_are_assigned(self): """ Verifies that a registered user who has not been an editor, yet, gets all sufficient permissions for editing resources with a successul editor group application. This test also verifies that an editor group manager can accept an editor group application. """ # create an editor group application as a normal user: self.client.login(username=self.normal_user.username, password='secret') response = self.client.post(EditorGroupApplicationTest.app_url, {'editor_group': EditorGroupApplicationTest.editor_group_1.pk}, follow=True) self.assertContains(response, 'You have successfully applied for editor group') self.assertContains(response, EditorGroupApplicationTest.editor_group_1.name) eg_applications = EditorGroupApplication.objects.all() self.assertEqual(eg_applications.count(), 1) # accept the application from a manager account: self.client.logout() self.client.login( username=EditorGroupApplicationTest.superuser.username, password='secret') response = self.client.post(EditorGroupApplicationTest.manage_url, {"action": "accept_selected", ACTION_CHECKBOX_NAME: eg_applications[0].pk}, follow=True) self.assertContains(response, 'You have successfully accepted') self.assertContains(response, '0 editor group applications') # verify that the normal user has all required permissions for editing # language resources: self.normal_user.has_perms(['{}.{}'.format(app, name) for app, name in Group.objects.get(name=GROUP_GLOBAL_EDITORS).permissions \ .values_list('content_type__app_label', 'codename')]) def test_only_superuser_and_manager_can_see_group_applications(self): """ Verifies that only superusers and managing editor users can see editor group applications. """ # create a test editor group application: EditorGroupApplication.objects.create(user=self.normal_user, editor_group=EditorGroupApplicationTest.editor_group_1) # make sure that a normal user cannot see the editor group applications # list: self.client.login( username=self.normal_user.username, password='secret') response = self.client.get( EditorGroupApplicationTest.manage_url, follow=True) if response.status_code == 200: self.assertNotContains(response, "editor group application") else: self.assertEqual(response.status_code, 403) # make sure that non-authorized manager user cannot see "foreign" editor # group applications; they must still be able to see the list: self.client.login( username=self.manager_user_2.username, password='secret') response = self.client.get( EditorGroupApplicationTest.manage_url, follow=True) self.assertContains(response, '0 editor group applications') # make sure that an authorized manager user can see his editor group # applications: self.client.login( username=self.manager_user_1.username, password='secret') response = self.client.get( EditorGroupApplicationTest.manage_url, follow=True) self.assertContains(response, '1 editor group application') # make sure that a superuser can see all editor group applications: self.client.login( username=self.superuser.username, password='secret') response = self.client.get( EditorGroupApplicationTest.manage_url, follow=True) self.assertContains(response, '1 editor group application')
bsd-3-clause
iakovos-panourgias/fluidity
python/fluidity/diagnostics/structured_fields.py
5
10761
#!/usr/bin/env python # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ Structured field classes """ import copy import math import unittest import fluidity.diagnostics.debug as debug try: import numpy except: debug.deprint("Warning: Failed to import numpy module") try: import vtk except: debug.deprint("Warning: Failed to import vtk module") import fluidity.diagnostics.annulus_mesh as annulus_mesh import fluidity.diagnostics.calc as calc import fluidity.diagnostics.elements as elements import fluidity.diagnostics.meshes as meshes import fluidity.diagnostics.optimise as optimise import fluidity.diagnostics.utils as utils class StructuredField2D: def __init__(self, xCoords, yCoords, type = None, shape = None, data = None, name = None): if type is None: assert(shape is None) self.SetName(name) self._xCoords = copy.deepcopy(xCoords) self._xCoords.sort() self._yCoords = copy.deepcopy(yCoords) self._yCoords.sort() self._type = type self._shape = shape self._NewData() if not data is None: self.SetData(data) return def _DataLen(self): assert(not self._shape is None) if len(self._shape) == 0: return 0 if not hasattr(self, "_dataLen"): self._dataLen = self._shape[0] for length in self._shape[1:]: self._dataLen *= length return self._dataLen def _NewData(self): if self._shape is None: self._data = [[None for i in range(self.YCoordsCount())] for j in range(self.XCoordsCount())] else: self._data = [] for i in range(self.XCoordsCount()): self._data.append([]) for j in range(self.YCoordsCount()): self._data[-1].append(numpy.array([self._type() for i in range(self._DataLen())])) self._data[-1][-1].shape = self._shape return def GetName(self): return self._name def SetName(self, name): self._name = name return def XCoordsCount(self): return len(self._xCoords) def YCoordsCount(self): return len(self._yCoords) def GetType(self): return self._type def GetShape(self): return self._shape def XCoords(self): return self._xCoords def XCoord(self, index): return self._xCoords[index] def YCoords(self): return self._yCoords def YCoord(self, index): return self._yCoords[index] def GetVal(self, xIndex, yIndex): return self._data[xIndex][yIndex] def SetVal(self, xIndex, yIndex, val): if self._shape is None: self._data[xIndex][yIndex] = self._type(val) else: self._data[xIndex][yIndex] = numpy.array(val) self._data[xIndex][yIndex].shape = self._shape return def GetData(self): return utils.ExpandList(self._data) def SetData(self, data): assert(len(data) <= self.XCoordsCount() * self.YCoordsCount()) self._NewData() for i, datum in enumerate(data): xIndex = i % self.XCoordsCount() yIndex = i / self.XCoordsCount() self.SetVal(xIndex, yIndex, datum) return def LinearlyInterpolate(self, x, y): """ Probe the slice data at the supplied coordinate, by linearly interpolating from the surrounding data points. """ assert(self.XCoordsCount() > 0 and self.YCoordsCount() > 0) assert(x >= self.XCoord(0) and x <= self.XCoord(-1)) assert(y >= self.YCoord(0) and y < self.YCoord(-1)) # Peform a binary search for the left index left = calc.IndexBinaryLboundSearch(x, self.XCoords()) # Perform a binary search for the lower index lower = calc.IndexBinaryLboundSearch(y, self.YCoords()) if self.XCoordsCount() > 1: right = left + 1 else: # This is slightly inefficient (could avoid a linear interpolation if we # wanted) right = left if self.YCoordsCount() > 1: upper = lower + 1 else: # This is slightly inefficient (could avoid a linear interpolation if we # wanted) upper = lower debug.dprint("left = " + str(left), 3) debug.dprint("lower = " + str(lower), 3) return calc.BilinearlyInterpolate(self.GetVal(left, upper), self.GetVal(right, upper), self.GetVal(left, lower), self.GetVal(right, lower), \ (x - self.XCoord(left)) / (self.XCoord(right) - self.XCoord(left)), (y - self.YCoord(lower)) / (self.YCoord(upper) - self.YCoord(lower))) def Mesh(self, quadMesh = False): mesh = meshes.Mesh(2) yxToNode = [] index = 0 for yCoord in self.YCoords(): yxToNode.append([]) for xCoord in self.XCoords(): mesh.AddNodeCoord([xCoord, yCoord]) yxToNode[-1].append(index) index +=1 for i in range(self.XCoordsCount())[:-1]: for j in range(self.YCoordsCount())[:-1]: if quadMesh: mesh.AddVolumeElement(elements.Element([yxToNode[j + 1][i], yxToNode[j + 1][i + 1], yxToNode[j][i], yxToNode[j][i + 1]])) else: # Default to triangle mesh, as quad quadrature is currently broken in # Fluidity mesh.AddVolumeElement(elements.Element([yxToNode[j][i], yxToNode[j + 1][i], yxToNode[j][i + 1]])) mesh.AddVolumeElement(elements.Element([yxToNode[j + 1][i], yxToNode[j + 1][i + 1], yxToNode[j][i + 1]])) return mesh def ToVtu(self, axis = (0.0, 1.0, 0.0), quadMesh = False): assert(not self._shape is None) vtu = self.Mesh(quadMesh = quadMesh).ToVtu() name = self.GetName() if name is None: name = "UnknownField" data = [] for i in range(self.YCoordsCount()): for j in range(self.XCoordsCount()): data.append(self.GetVal(j, i)) data = numpy.array(data) data.shape = (self.XCoordsCount() * self.YCoordsCount(), self._DataLen()) vtu.AddField(name, data) if not calc.AlmostEquals(axis[0], 0.0) or not calc.AlmostEquals(axis[1], 1.0) or not calc.AlmostEquals(axis[2], 0.0): transform = vtk.vtkTransform() transform.Identity() # Find the rotation axis # (0, 1, 0) x axis rotationAxis = [-axis[2], 0.0, -axis[0]] # Normalise rotationAxisMagnitude = calc.L2Norm(rotationAxis) rotationAxis = [val / rotationAxisMagnitude for val in rotationAxis] # Find the rotation angle angle = calc.Rad2Deg(math.acos(axis[1] / calc.L2Norm(axis))) # Rotation transform.RotateWXYZ(angle, rotationAxis[0], rotationAxis[1], rotationAxis[2]) transform.Update() newPoints = vtk.vtkPoints() transform.TransformPoints(vtu.ugrid.GetPoints(), newPoints) vtu.ugrid.SetPoints(newPoints) return vtu class structured_fieldsUnittests(unittest.TestCase): def testStructuredField2D(self): field = StructuredField2D(annulus_mesh.SliceCoordsConstant(0.0, 1.0, 3), \ annulus_mesh.SliceCoordsConstant(2.0, 3.0, 4), \ type = float, shape = (1,)) field = StructuredField2D(annulus_mesh.SliceCoordsConstant(0.0, 1.0, 1), \ annulus_mesh.SliceCoordsConstant(0.0, 1.0, 1), \ type =float, shape = (1,)) field.SetVal(0, 0, 0.0) field.SetVal(1, 0, 1.0) field.SetVal(0, 1, 2.0) field.SetVal(1, 1, 3.0) self.assertAlmostEquals(field.LinearlyInterpolate(0.5, 0.0), 0.5) self.assertAlmostEquals(field.LinearlyInterpolate(0.0, 0.5), 1.0) self.assertAlmostEquals(field.LinearlyInterpolate(0.5, 0.5), 1.5) self.assertRaises(AssertionError, field.LinearlyInterpolate, -0.1, 0.5) self.assertRaises(AssertionError, field.LinearlyInterpolate, 1.1, 0.5) self.assertRaises(AssertionError, field.LinearlyInterpolate, 0.5, -0.1) self.assertRaises(AssertionError, field.LinearlyInterpolate, 0.5, 1.1) return def testVtuInteroperability(self): field = StructuredField2D(annulus_mesh.SliceCoordsConstant(0.0, 1.0, 1), \ annulus_mesh.SliceCoordsConstant(0.0, 1.0, 1), \ type = float, shape = (1,), data = [0.0, 1.0, 2.0, 3.0], name = "Test") # Test triangle mesh vtu = field.ToVtu() locations = vtu.GetLocations() data = vtu.GetScalarField("Test") self.assertEquals(len(locations), 4) self.assertEquals(len(data), 4) self.assertAlmostEquals(locations[0][0], 0.0) self.assertAlmostEquals(locations[0][1], 0.0) self.assertAlmostEquals(locations[0][2], 0.0) self.assertAlmostEquals(locations[1][0], 1.0) self.assertAlmostEquals(locations[1][1], 0.0) self.assertAlmostEquals(locations[1][2], 0.0) self.assertAlmostEquals(locations[2][0], 0.0) self.assertAlmostEquals(locations[2][1], 1.0) self.assertAlmostEquals(locations[2][2], 0.0) self.assertAlmostEquals(locations[3][0], 1.0) self.assertAlmostEquals(locations[3][1], 1.0) self.assertAlmostEquals(locations[3][2], 0.0) self.assertAlmostEquals(data[0], 0.0) self.assertAlmostEquals(data[1], 1.0) self.assertAlmostEquals(data[2], 2.0) self.assertAlmostEquals(data[3], 3.0) # Test quad mesh vtu = field.ToVtu(quadMesh = True) locations = vtu.GetLocations() data = vtu.GetScalarField("Test") self.assertEquals(len(locations), 4) self.assertEquals(len(data), 4) self.assertAlmostEquals(locations[0][0], 0.0) self.assertAlmostEquals(locations[0][1], 0.0) self.assertAlmostEquals(locations[0][2], 0.0) self.assertAlmostEquals(locations[1][0], 1.0) self.assertAlmostEquals(locations[1][1], 0.0) self.assertAlmostEquals(locations[1][2], 0.0) self.assertAlmostEquals(locations[2][0], 0.0) self.assertAlmostEquals(locations[2][1], 1.0) self.assertAlmostEquals(locations[2][2], 0.0) self.assertAlmostEquals(locations[3][0], 1.0) self.assertAlmostEquals(locations[3][1], 1.0) self.assertAlmostEquals(locations[3][2], 0.0) self.assertAlmostEquals(data[0], 0.0) self.assertAlmostEquals(data[1], 1.0) self.assertAlmostEquals(data[2], 2.0) self.assertAlmostEquals(data[3], 3.0) return
lgpl-2.1
sankhesh/VTK
Rendering/Annotation/Testing/Python/xyPlot.py
20
5899
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # create pipeline # pl3d = vtk.vtkMultiBlockPLOT3DReader() pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin") pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin") pl3d.SetScalarFunctionNumber(100) pl3d.SetVectorFunctionNumber(202) pl3d.Update() output = pl3d.GetOutput().GetBlock(0) # create three line probes line = vtk.vtkLineSource() line.SetResolution(30) transL1 = vtk.vtkTransform() transL1.Translate(3.7,0.0,28.37) transL1.Scale(5,5,5) transL1.RotateY(90) tf = vtk.vtkTransformPolyDataFilter() tf.SetInputConnection(line.GetOutputPort()) tf.SetTransform(transL1) probe = vtk.vtkProbeFilter() probe.SetInputConnection(tf.GetOutputPort()) probe.SetSourceData(output) probe.Update() transL2 = vtk.vtkTransform() transL2.Translate(9.2,0.0,31.20) transL2.Scale(5,5,5) transL2.RotateY(90) tf2 = vtk.vtkTransformPolyDataFilter() tf2.SetInputConnection(line.GetOutputPort()) tf2.SetTransform(transL2) probe2 = vtk.vtkProbeFilter() probe2.SetInputConnection(tf2.GetOutputPort()) probe2.SetSourceData(output) probe2.Update() transL3 = vtk.vtkTransform() transL3.Translate(13.27,0.0,33.40) transL3.Scale(4.5,4.5,4.5) transL3.RotateY(90) tf3 = vtk.vtkTransformPolyDataFilter() tf3.SetInputConnection(line.GetOutputPort()) tf3.SetTransform(transL3) probe3 = vtk.vtkProbeFilter() probe3.SetInputConnection(tf3.GetOutputPort()) probe3.SetSourceData(output) probe3.Update() appendF = vtk.vtkAppendPolyData() appendF.AddInputData(probe.GetPolyDataOutput()) appendF.AddInputData(probe2.GetPolyDataOutput()) appendF.AddInputData(probe3.GetPolyDataOutput()) tuber = vtk.vtkTubeFilter() tuber.SetInputConnection(appendF.GetOutputPort()) tuber.SetRadius(0.1) lineMapper = vtk.vtkPolyDataMapper() lineMapper.SetInputConnection(tuber.GetOutputPort()) lineActor = vtk.vtkActor() lineActor.SetMapper(lineMapper) probe.Update() probe3.Update() # probe the line and plot it xyplot = vtk.vtkXYPlotActor() xyplot.AddDataSetInput(probe.GetOutput()) xyplot.AddDataSetInputConnection(probe2.GetOutputPort()) xyplot.AddDataSetInput(probe3.GetOutput()) xyplot.GetPositionCoordinate().SetValue(0.0,0.67,0) xyplot.GetPosition2Coordinate().SetValue(1.0,0.33,0) #relative to Position xyplot.SetXValuesToArcLength() xyplot.SetNumberOfXLabels(6) xyplot.SetTitle("Pressure vs. Arc Length (Zoomed View)") xyplot.SetXTitle("") xyplot.SetYTitle("P") xyplot.SetXRange(.1,.35) xyplot.SetYRange(.2,.4) xyplot.GetProperty().SetColor(0,0,0) xyplot.GetProperty().SetLineWidth(2) # Set text prop color (same color for backward compat with test) # Assign same object to all text props tprop = xyplot.GetTitleTextProperty() tprop.SetColor(xyplot.GetProperty().GetColor()) xyplot.SetAxisTitleTextProperty(tprop) xyplot.SetAxisLabelTextProperty(tprop) xyplot.SetLabelFormat("%-#6.2f") xyplot2 = vtk.vtkXYPlotActor() xyplot2.AddDataSetInput(probe.GetOutput()) xyplot2.AddDataSetInputConnection(probe2.GetOutputPort()) xyplot2.AddDataSetInputConnection(probe3.GetOutputPort()) xyplot2.GetPositionCoordinate().SetValue(0.00,0.33,0) xyplot2.GetPosition2Coordinate().SetValue(1.0,0.33,0) #relative to Position xyplot2.SetXValuesToNormalizedArcLength() xyplot2.SetNumberOfXLabels(6) xyplot2.SetTitle("Pressure vs. Normalized Arc Length") xyplot2.SetXTitle("") xyplot2.SetYTitle("P") xyplot2.PlotPointsOn() xyplot2.PlotLinesOff() xyplot2.GetProperty().SetColor(1,0,0) xyplot2.GetProperty().SetPointSize(2) # Set text prop color (same color for backward compat with test) # Assign same object to all text props tprop = xyplot2.GetTitleTextProperty() tprop.SetColor(xyplot2.GetProperty().GetColor()) xyplot2.SetAxisTitleTextProperty(tprop) xyplot2.SetAxisLabelTextProperty(tprop) xyplot2.SetLabelFormat(xyplot.GetLabelFormat()) xyplot3 = vtk.vtkXYPlotActor() xyplot3.AddDataSetInputConnection(probe.GetOutputPort()) xyplot3.AddDataSetInputConnection(probe2.GetOutputPort()) xyplot3.AddDataSetInputConnection(probe3.GetOutputPort()) xyplot3.GetPositionCoordinate().SetValue(0.0,0.0,0) xyplot3.GetPosition2Coordinate().SetValue(1.0,0.33,0) #relative to Position xyplot3.SetXValuesToIndex() xyplot3.SetNumberOfXLabels(6) xyplot3.SetTitle("Pressure vs. Point Id") xyplot3.SetXTitle("Probe Length") xyplot3.SetYTitle("P") xyplot3.PlotPointsOn() xyplot3.GetProperty().SetColor(0,0,1) xyplot3.GetProperty().SetPointSize(3) # Set text prop color (same color for backward compat with test) # Assign same object to all text props tprop = xyplot3.GetTitleTextProperty() tprop.SetColor(xyplot3.GetProperty().GetColor()) xyplot3.SetAxisTitleTextProperty(tprop) xyplot3.SetAxisLabelTextProperty(tprop) xyplot3.SetLabelFormat(xyplot.GetLabelFormat()) # draw an outline outline = vtk.vtkStructuredGridOutlineFilter() outline.SetInputData(output) outlineMapper = vtk.vtkPolyDataMapper() outlineMapper.SetInputConnection(outline.GetOutputPort()) outlineActor = vtk.vtkActor() outlineActor.SetMapper(outlineMapper) outlineActor.GetProperty().SetColor(0,0,0) # Create graphics stuff # ren1 = vtk.vtkRenderer() ren2 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.SetMultiSamples(0) renWin.AddRenderer(ren1) renWin.AddRenderer(ren2) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) ren1.SetBackground(0.6784,0.8471,0.9020) ren1.SetViewport(0,0,.5,1) ren1.AddActor(outlineActor) ren1.AddActor(lineActor) ren2.SetBackground(1,1,1) ren2.SetViewport(0.5,0.0,1.0,1.0) ren2.AddActor2D(xyplot) ren2.AddActor2D(xyplot2) ren2.AddActor2D(xyplot3) renWin.SetSize(790,400) cam1 = ren1.GetActiveCamera() cam1.SetClippingRange(3.95297,100) cam1.SetFocalPoint(8.88908,0.595038,29.3342) cam1.SetPosition(-12.3332,31.7479,41.2387) cam1.SetViewUp(0.060772,-0.319905,0.945498) iren.Initialize() # render the image # # prevent the tk window from showing up then start the event loop # --- end of script --
bsd-3-clause
vpelletier/neoppod
neo/lib/patch.py
1
2705
# # Copyright (C) 2015-2016 Nexedi SA # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # def speedupFileStorageTxnLookup(): """Speed up lookup of start position when instanciating an iterator FileStorage does not index the file positions of transactions. With this patch, we use the existing {oid->file_pos} index to bisect the the closest file position to start iterating. """ from array import array from bisect import bisect from collections import defaultdict from ZODB.FileStorage.FileStorage import FileStorage, FileIterator typecode = 'L' if array('I').itemsize < 4 else 'I' class Start(object): def __init__(self, read_data_header, h, tid): self.read_data_header = read_data_header self.h = h << 32 self.tid = tid def __lt__(self, l): return self.tid < self.read_data_header(self.h | l).tid def iterator(self, start=None, stop=None): if start: try: index = self._tidindex except AttributeError: # Cache a sorted list of all the file pos from oid index. # To reduce memory usage, the list is splitted in arrays of # low order 32-bit words. tindex = defaultdict(lambda: array(typecode)) for x in self._index.itervalues(): tindex[x >> 32].append(x & 0xffffffff) index = self._tidindex = [] for h, l in sorted(tindex.iteritems()): x = array('I') x.fromlist(sorted(l)) l = self._read_data_header(h << 32 | x[0]) index.append((l.tid, h, x)) x = bisect(index, (start,)) - 1 if x >= 0: x, h, index = index[x] x = self._read_data_header h = x(h << 32 | index[bisect(index, Start(x, h, start)) - 1]) return FileIterator(self._file_name, start, stop, h.tloc) return FileIterator(self._file_name, start, stop) FileStorage.iterator = iterator
gpl-2.0
tianyi33/simple_blog
django/core/management/sql.py
104
7942
from __future__ import unicode_literals import codecs import os import re from django.conf import settings from django.core.management.base import CommandError from django.db import models from django.db.models import get_models from django.utils._os import upath def sql_create(app, style, connection): "Returns a list of the CREATE TABLE SQL statements for the given app." if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy': # This must be the "dummy" database backend, which means the user # hasn't set ENGINE for the database. raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" + "because you haven't properly specified the ENGINE setting for the database.\n" + "see: https://docs.djangoproject.com/en/dev/ref/settings/#databases") # Get installed models, so we generate REFERENCES right. # We trim models from the current app so that the sqlreset command does not # generate invalid SQL (leaving models out of known_models is harmless, so # we can be conservative). app_models = models.get_models(app, include_auto_created=True) final_output = [] tables = connection.introspection.table_names() known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models]) pending_references = {} for model in app_models: output, references = connection.creation.sql_create_model(model, style, known_models) final_output.extend(output) for refto, refs in references.items(): pending_references.setdefault(refto, []).extend(refs) if refto in known_models: final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references)) final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references)) # Keep track of the fact that we've created the table for this model. known_models.add(model) # Handle references to tables that are from other apps # but don't exist physically. not_installed_models = set(pending_references.keys()) if not_installed_models: alter_sql = [] for model in not_installed_models: alter_sql.extend(['-- ' + sql for sql in connection.creation.sql_for_pending_references(model, style, pending_references)]) if alter_sql: final_output.append('-- The following references should be added but depend on non-existent tables:') final_output.extend(alter_sql) return final_output def sql_delete(app, style, connection): "Returns a list of the DROP TABLE SQL statements for the given app." # This should work even if a connection isn't available try: cursor = connection.cursor() except: cursor = None # Figure out which tables already exist if cursor: table_names = connection.introspection.table_names(cursor) else: table_names = [] output = [] # Output DROP TABLE statements for standard application tables. to_delete = set() references_to_delete = {} app_models = models.get_models(app, include_auto_created=True) for model in app_models: if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names: # The table exists, so it needs to be dropped opts = model._meta for f in opts.local_fields: if f.rel and f.rel.to not in to_delete: references_to_delete.setdefault(f.rel.to, []).append((model, f)) to_delete.add(model) for model in app_models: if connection.introspection.table_name_converter(model._meta.db_table) in table_names: output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style)) # Close database connection explicitly, in case this output is being piped # directly into a database client, to avoid locking issues. if cursor: cursor.close() connection.close() return output[::-1] # Reverse it, to deal with table dependencies. def sql_flush(style, connection, only_django=False, reset_sequences=True): """ Returns a list of the SQL statements used to flush the database. If only_django is True, then only table names that have associated Django models and are in INSTALLED_APPS will be included. """ if only_django: tables = connection.introspection.django_table_names(only_existing=True) else: tables = connection.introspection.table_names() seqs = connection.introspection.sequence_list() if reset_sequences else () statements = connection.ops.sql_flush(style, tables, seqs) return statements def sql_custom(app, style, connection): "Returns a list of the custom table modifying SQL statements for the given app." output = [] app_models = get_models(app) for model in app_models: output.extend(custom_sql_for_model(model, style, connection)) return output def sql_indexes(app, style, connection): "Returns a list of the CREATE INDEX SQL statements for all models in the given app." output = [] for model in models.get_models(app): output.extend(connection.creation.sql_indexes_for_model(model, style)) return output def sql_all(app, style, connection): "Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module." return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection) def _split_statements(content): comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$") statements = [] statement = [] for line in content.split("\n"): cleaned_line = comment_re.sub(r"\1", line).strip() if not cleaned_line: continue statement.append(cleaned_line) if cleaned_line.endswith(";"): statements.append(" ".join(statement)) statement = [] return statements def custom_sql_for_model(model, style, connection): opts = model._meta app_dir = os.path.normpath(os.path.join(os.path.dirname(upath(models.get_app(model._meta.app_label).__file__)), 'sql')) output = [] # Post-creation SQL should come before any initial SQL data is loaded. # However, this should not be done for models that are unmanaged or # for fields that are part of a parent model (via model inheritance). if opts.managed: post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')] for f in post_sql_fields: output.extend(f.post_create_sql(style, model._meta.db_table)) # Find custom SQL, if it's available. backend_name = connection.settings_dict['ENGINE'].split('.')[-1] sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)), os.path.join(app_dir, "%s.sql" % opts.object_name.lower())] for sql_file in sql_files: if os.path.exists(sql_file): with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp: # Some backends can't execute more than one SQL statement at a time, # so split into separate statements. output.extend(_split_statements(fp.read())) return output def emit_post_sync_signal(created_models, verbosity, interactive, db): # Emit the post_sync signal for every application. for app in models.get_apps(): app_name = app.__name__.split('.')[-2] if verbosity >= 2: print("Running post-sync handlers for application %s" % app_name) models.signals.post_syncdb.send(sender=app, app=app, created_models=created_models, verbosity=verbosity, interactive=interactive, db=db)
mit
barachka/odoo
addons/crm/wizard/crm_lead_to_opportunity.py
24
13972
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.tools import email_split import re class crm_lead2opportunity_partner(osv.osv_memory): _name = 'crm.lead2opportunity.partner' _description = 'Lead To Opportunity Partner' _inherit = 'crm.partner.binding' _columns = { 'name': fields.selection([ ('convert', 'Convert to opportunity'), ('merge', 'Merge with existing opportunities') ], 'Conversion Action', required=True), 'opportunity_ids': fields.many2many('crm.lead', string='Opportunities'), 'user_id': fields.many2one('res.users', 'Salesperson', select=True), 'section_id': fields.many2one('crm.case.section', 'Sales Team', select=True), } def onchange_action(self, cr, uid, ids, action, context=None): return {'value': {'partner_id': False if action != 'exist' else self._find_matching_partner(cr, uid, context=context)}} def _get_duplicated_leads(self, cr, uid, partner_id, email, include_lost=False, context=None): """ Search for opportunities that have the same partner and that arent done or cancelled """ lead_obj = self.pool.get('crm.lead') emails = set(email_split(email) + [email]) final_stage_domain = [('stage_id.probability', '<', 100), '|', ('stage_id.probability', '>', 0), ('stage_id.sequence', '<=', 1)] partner_match_domain = [] for email in emails: partner_match_domain.append(('email_from', '=ilike', email)) if partner_id: partner_match_domain.append(('partner_id', '=', partner_id)) partner_match_domain = ['|'] * (len(partner_match_domain) - 1) + partner_match_domain if not partner_match_domain: return [] domain = partner_match_domain if not include_lost: domain += final_stage_domain return lead_obj.search(cr, uid, domain) def default_get(self, cr, uid, fields, context=None): """ Default get for name, opportunity_ids. If there is an exisitng partner link to the lead, find all existing opportunities links with this partner to merge all information together """ lead_obj = self.pool.get('crm.lead') res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context) if context.get('active_id'): tomerge = [int(context['active_id'])] partner_id = res.get('partner_id') lead = lead_obj.browse(cr, uid, int(context['active_id']), context=context) email = lead.partner_id and lead.partner_id.email or lead.email_from tomerge.extend(self._get_duplicated_leads(cr, uid, partner_id, email, include_lost=True, context=context)) tomerge = list(set(tomerge)) if 'action' in fields: res.update({'action' : partner_id and 'exist' or 'create'}) if 'partner_id' in fields: res.update({'partner_id' : partner_id}) if 'name' in fields: res.update({'name' : len(tomerge) >= 2 and 'merge' or 'convert'}) if 'opportunity_ids' in fields and len(tomerge) >= 2: res.update({'opportunity_ids': tomerge}) if lead.user_id: res.update({'user_id': lead.user_id.id}) if lead.section_id: res.update({'section_id': lead.section_id.id}) return res def on_change_user(self, cr, uid, ids, user_id, section_id, context=None): """ When changing the user, also set a section_id or restrict section id to the ones user_id is member of. """ if user_id: if section_id: user_in_section = self.pool.get('crm.case.section').search(cr, uid, [('id', '=', section_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True) else: user_in_section = False if not user_in_section: result = self.pool['crm.lead'].on_change_user(cr, uid, ids, user_id, context=context) section_id = result.get('value') and result['value'].get('section_id') and result['value']['section_id'] or False return {'value': {'section_id': section_id}} def view_init(self, cr, uid, fields, context=None): """ Check some preconditions before the wizard executes. """ if context is None: context = {} lead_obj = self.pool.get('crm.lead') for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context): if lead.probability == 100: raise osv.except_osv(_("Warning!"), _("Closed/Dead leads cannot be converted into opportunities.")) return False def _convert_opportunity(self, cr, uid, ids, vals, context=None): if context is None: context = {} lead = self.pool.get('crm.lead') res = False lead_ids = vals.get('lead_ids', []) team_id = vals.get('section_id', False) data = self.browse(cr, uid, ids, context=context)[0] leads = lead.browse(cr, uid, lead_ids, context=context) for lead_id in leads: partner_id = self._create_partner(cr, uid, lead_id.id, data.action, lead_id.partner_id.id, context=context) res = lead.convert_opportunity(cr, uid, [lead_id.id], partner_id, [], False, context=context) user_ids = vals.get('user_ids', False) if context.get('no_force_assignation'): leads_to_allocate = [lead_id.id for lead_id in leads if not lead_id.user_id] else: leads_to_allocate = lead_ids if user_ids: lead.allocate_salesman(cr, uid, leads_to_allocate, user_ids, team_id=team_id, context=context) return res def action_apply(self, cr, uid, ids, context=None): """ Convert lead to opportunity or merge lead and opportunity and open the freshly created opportunity view. """ if context is None: context = {} lead_obj = self.pool['crm.lead'] w = self.browse(cr, uid, ids, context=context)[0] opp_ids = [o.id for o in w.opportunity_ids] if w.name == 'merge': lead_id = lead_obj.merge_opportunity(cr, uid, opp_ids, context=context) lead_ids = [lead_id] lead = lead_obj.read(cr, uid, lead_id, ['type', 'user_id'], context=context) if lead['type'] == "lead": context = dict(context, active_ids=lead_ids) self._convert_opportunity(cr, uid, ids, {'lead_ids': lead_ids, 'user_ids': [w.user_id.id], 'section_id': w.section_id.id}, context=context) elif not context.get('no_force_assignation') or not lead['user_id']: lead_obj.write(cr, uid, lead_id, {'user_id': w.user_id.id, 'section_id': w.section_id.id}, context=context) else: lead_ids = context.get('active_ids', []) self._convert_opportunity(cr, uid, ids, {'lead_ids': lead_ids, 'user_ids': [w.user_id.id], 'section_id': w.section_id.id}, context=context) return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, lead_ids[0], context=context) def _create_partner(self, cr, uid, lead_id, action, partner_id, context=None): """ Create partner based on action. :return dict: dictionary organized as followed: {lead_id: partner_assigned_id} """ #TODO this method in only called by crm_lead2opportunity_partner #wizard and would probably diserve to be refactored or at least #moved to a better place if context is None: context = {} lead = self.pool.get('crm.lead') if action == 'each_exist_or_create': ctx = dict(context) ctx['active_id'] = lead_id partner_id = self._find_matching_partner(cr, uid, context=ctx) action = 'create' res = lead.handle_partner_assignation(cr, uid, [lead_id], action, partner_id, context=context) return res.get(lead_id) class crm_lead2opportunity_mass_convert(osv.osv_memory): _name = 'crm.lead2opportunity.partner.mass' _description = 'Mass Lead To Opportunity Partner' _inherit = 'crm.lead2opportunity.partner' _columns = { 'user_ids': fields.many2many('res.users', string='Salesmen'), 'section_id': fields.many2one('crm.case.section', 'Sales Team'), 'deduplicate': fields.boolean('Apply deduplication', help='Merge with existing leads/opportunities of each partner'), 'action': fields.selection([ ('each_exist_or_create', 'Use existing partner or create'), ('nothing', 'Do not link to a customer') ], 'Related Customer', required=True), 'force_assignation': fields.boolean('Force assignation', help='If unchecked, this will leave the salesman of duplicated opportunities'), } _defaults = { 'deduplicate': True, } def default_get(self, cr, uid, fields, context=None): res = super(crm_lead2opportunity_mass_convert, self).default_get(cr, uid, fields, context) if 'partner_id' in fields: # avoid forcing the partner of the first lead as default res['partner_id'] = False if 'action' in fields: res['action'] = 'each_exist_or_create' if 'name' in fields: res['name'] = 'convert' if 'opportunity_ids' in fields: res['opportunity_ids'] = False return res def on_change_action(self, cr, uid, ids, action, context=None): vals = {} if action != 'exist': vals = {'value': {'partner_id': False}} return vals def on_change_deduplicate(self, cr, uid, ids, deduplicate, context=None): if context is None: context = {} active_leads = self.pool['crm.lead'].browse(cr, uid, context['active_ids'], context=context) partner_ids = [(lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) for lead in active_leads] partners_duplicated_leads = {} for partner_id, email in partner_ids: duplicated_leads = self._get_duplicated_leads(cr, uid, partner_id, email) if len(duplicated_leads) > 1: partners_duplicated_leads.setdefault((partner_id, email), []).extend(duplicated_leads) leads_with_duplicates = [] for lead in active_leads: lead_tuple = (lead.partner_id.id, lead.partner_id.email if lead.partner_id else lead.email_from) if len(partners_duplicated_leads.get(lead_tuple, [])) > 1: leads_with_duplicates.append(lead.id) return {'value': {'opportunity_ids': leads_with_duplicates}} def _convert_opportunity(self, cr, uid, ids, vals, context=None): """ When "massively" (more than one at a time) converting leads to opportunities, check the salesteam_id and salesmen_ids and update the values before calling super. """ if context is None: context = {} data = self.browse(cr, uid, ids, context=context)[0] salesteam_id = data.section_id and data.section_id.id or False salesmen_ids = [] if data.user_ids: salesmen_ids = [x.id for x in data.user_ids] vals.update({'user_ids': salesmen_ids, 'section_id': salesteam_id}) return super(crm_lead2opportunity_mass_convert, self)._convert_opportunity(cr, uid, ids, vals, context=context) def mass_convert(self, cr, uid, ids, context=None): data = self.browse(cr, uid, ids, context=context)[0] ctx = dict(context) if data.name == 'convert' and data.deduplicate: merged_lead_ids = [] remaining_lead_ids = [] for lead in self.pool['crm.lead'].browse(cr, uid, context.get('active_ids', []), context=context): duplicated_lead_ids = self._get_duplicated_leads(cr, uid, lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) if len(duplicated_lead_ids) > 1: lead_id = self.pool.get('crm.lead').merge_opportunity(cr, uid, duplicated_lead_ids, False, False, context=context) merged_lead_ids.extend(duplicated_lead_ids) remaining_lead_ids.append(lead_id) active_ids = set(context.get('active_ids', [])) active_ids = active_ids.difference(merged_lead_ids) active_ids = active_ids.union(remaining_lead_ids) ctx['active_ids'] = list(active_ids) ctx['no_force_assignation'] = context.get('no_force_assignation', not data.force_assignation) return self.action_apply(cr, uid, ids, context=ctx) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
diminishedprime/dotfiles
i3/i3-pretty-mode/i3-pretty-mode.py
1
2125
#!/usr/bin/env python3 import i3ipc import Tkinter import sys import re from functools import reduce def parse_entry(acc, entry): key, value = entry.split("=") acc[key] = value return acc def parse_args(args): rest = args[1:] arg_map = reduce(parse_entry, rest, {}) return arg_map args = parse_args(sys.argv) i3 = i3ipc.Connection() def center(toplevel): toplevel.update_idletasks() w = toplevel.winfo_screenwidth() h = toplevel.winfo_screenheight() size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x')) x = w/2 - size[0]/2 y = h/2 - size[1]/2 toplevel.geometry("%dx%d+%d+%d" % (size + (x, y))) def show_window(label_list): fontName = args.get("--font", "Arial") fontSize = int(float(args.get("--font_size", "12"))) regexFilter = args.get("--ignore_titles", "$^") regex = re.compile(regexFilter) if (regex.match(label_list[0]) != None): return root = Tkinter.Tk() root.attributes("-type", "dock") width = int(float(args.get("--min_width", "500"))) root.minsize(width=width, height=1) labelText = reduce(lambda acc, s: acc + '\n' + s, label_list[1:]) label = Tkinter.Label(root, text=labelText, justify=Tkinter.LEFT, anchor='w') foreground=args.get("--foreground", "#000000") background=args.get("--background", "#ffffff") label.config( font=(fontName, fontSize) , background=background , foreground=foreground) label.pack(fill='both') root.title("i3-pretty-mode-title") center(root) # TODO(me)figure out scaling #root.tk.call('tk', 'scaling', 4.0) return root def destroy_root(): global lastRoot if (lastRoot != None): lastRoot.destroy() lastRoot = None lastRoot = None def on_mode(i3, e): global lastRoot mode_string = e.change destroy_root() if (mode_string != "default"): label_list = mode_string.split(" :: ") lastRoot = show_window(label_list) # Subscribe to "mode" events i3.on("mode", on_mode) # Start the main loop and wait for events to come in. i3.main()
mit
donkirkby/django
django/conf/locale/en_AU/formats.py
504
2117
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j M Y' # '25 Oct 2006' TIME_FORMAT = 'P' # '2:30 p.m.' DATETIME_FORMAT = 'j M Y, P' # '25 Oct 2006, 2:30 p.m.' YEAR_MONTH_FORMAT = 'F Y' # 'October 2006' MONTH_DAY_FORMAT = 'j F' # '25 October' SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006' SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 p.m.' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06' # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ] DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ] DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' NUMBER_GROUPING = 3
bsd-3-clause
msaffarm/DeepRetina
tensorflow/tf_unet/image_util.py
2
5554
# tf_unet is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # tf_unet is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with tf_unet. If not, see <http://www.gnu.org/licenses/>. ''' author: jakeret ''' from __future__ import print_function, division, absolute_import, unicode_literals #import cv2 import glob import numpy as np from PIL import Image class BaseDataProvider(object): """ Abstract base class for DataProvider implementation. Subclasses have to overwrite the `_next_data` method that load the next data and label array. This implementation automatically clips the data with the given min/max and normalizes the values to (0,1]. To change this behavoir the `_process_data` method can be overwritten. To enable some post processing such as data augmentation the `_post_process` method can be overwritten. :param a_min: (optional) min value used for clipping :param a_max: (optional) max value used for clipping """ channels = 1 n_class = 2 def __init__(self, a_min=None, a_max=None): self.a_min = a_min if a_min is not None else -np.inf self.a_max = a_max if a_min is not None else np.inf def _load_data_and_label(self): data, label = self._next_data() train_data = self._process_data(data) labels = self._process_labels(label) train_data, labels = self._post_process(train_data, labels) nx = data.shape[1] ny = data.shape[0] return train_data.reshape(1, ny, nx, self.channels), labels.reshape(1, ny, nx, self.n_class), def _process_labels(self, label): if self.n_class == 2: nx = label.shape[1] ny = label.shape[0] labels = np.zeros((ny, nx, self.n_class), dtype=np.float32) labels[..., 1] = label labels[..., 0] = ~label return labels return label def _process_data(self, data): # normalization data = np.clip(np.fabs(data), self.a_min, self.a_max) data -= np.amin(data) data /= np.amax(data) return data def _post_process(self, data, labels): """ Post processing hook that can be used for data augmentation :param data: the data array :param labels: the label array """ return data, labels def __call__(self, n): train_data, labels = self._load_data_and_label() nx = train_data.shape[1] ny = train_data.shape[2] X = np.zeros((n, nx, ny, self.channels)) Y = np.zeros((n, nx, ny, self.n_class)) X[0] = train_data Y[0] = labels for i in range(1, n): train_data, labels = self._load_data_and_label() X[i] = train_data Y[i] = labels return X, Y class ImageDataProvider(BaseDataProvider): """ Generic data provider for images, supports gray scale and colored images. Assumes that the data images and label images are stored in the same folder and that the labels have a different file suffix e.g. 'train/fish_1.tif' and 'train/fish_1_mask.tif' Usage: data_provider = ImageDataProvider("..fishes/train/*.tif") :param search_path: a glob search pattern to find all data and label images :param a_min: (optional) min value used for clipping :param a_max: (optional) max value used for clipping :param data_suffix: suffix pattern for the data images. Default '.tif' :param mask_suffix: suffix pattern for the label images. Default '_mask.tif' """ n_class = 2 def __init__(self, search_path, a_min=None, a_max=None, data_suffix=".tif", mask_suffix='_mask.tif'): super(ImageDataProvider, self).__init__(a_min, a_max) self.data_suffix = data_suffix self.mask_suffix = mask_suffix self.file_idx = -1 self.data_files = self._find_data_files(search_path) assert len(self.data_files) > 0, "No training files" print("Number of files used: %s" % len(self.data_files)) img = self._load_file(self.data_files[0]) self.channels = 1 if len(img.shape) == 2 else img.shape[-1] def _find_data_files(self, search_path): all_files = glob.glob(search_path) return [name for name in all_files if not self.mask_suffix in name] def _load_file(self, path, dtype=np.float32): return np.array(Image.open(path), dtype) # return np.squeeze(cv2.imread(image_name, cv2.IMREAD_GRAYSCALE)) def _cylce_file(self): self.file_idx += 1 if self.file_idx >= len(self.data_files): self.file_idx = 0 def _next_data(self): self._cylce_file() image_name = self.data_files[self.file_idx] label_name = image_name.replace(self.data_suffix, self.mask_suffix) img = self._load_file(image_name, np.float32) label = self._load_file(label_name, np.bool) return img,label
gpl-3.0
rahlk/Experimental-Algorithms
multiProc/src/Models/_XOMO.py
1
1220
from _model import * import sys import pdb import os _HOME = os.path.expanduser('~') sys.path.insert(0, _HOME + "/git/ai-se/Rahul/DEADANT") # from deadant import * import math import numpy as np class Model: def __init__(self, name): self.name = name if name == '_POM3': self.model = Pom() elif name == 'xomo': self.model = Xomo(model='flight') elif name == 'xomoflight': self.model = Xomo(model='flight') elif name == 'xomoground': self.model = Xomo(model='ground') elif name == 'xomoosp': self.model = Xomo(model='osp') elif name == 'xomoosp2': self.model = Xomo(model='osp2') elif name == 'xomoall': self.model = Xomo(model='all') else: sys.stderr.write( "Enter valid model name _POM3 or xomoflight --> xomo[flight/ground/osp/osp2/all]\n") sys.exit() def trials(self, N, verbose=False): # returns headers and rows return self.model.trials(N, verbose) def oo(self, verbose=False): return self.model.c def update(self, fea, cond, thresh): # cond is true when <= self.model.update(fea, cond, thresh) def __repr__(self): return self.name if __name__ == '__main__': getModels()
mit
bak1an/django
django/contrib/gis/gdal/raster/source.py
23
13935
import json import os from ctypes import addressof, byref, c_double, c_void_p from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.driver import Driver from django.contrib.gis.gdal.error import GDALException from django.contrib.gis.gdal.prototypes import raster as capi from django.contrib.gis.gdal.raster.band import BandList from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS from django.contrib.gis.gdal.srs import SpatialReference, SRSException from django.contrib.gis.geometry.regex import json_regex from django.utils.encoding import force_bytes, force_text from django.utils.functional import cached_property class TransformPoint(list): indices = { 'origin': (0, 3), 'scale': (1, 5), 'skew': (2, 4), } def __init__(self, raster, prop): x = raster.geotransform[self.indices[prop][0]] y = raster.geotransform[self.indices[prop][1]] list.__init__(self, [x, y]) self._raster = raster self._prop = prop @property def x(self): return self[0] @x.setter def x(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][0]] = value self._raster.geotransform = gtf @property def y(self): return self[1] @y.setter def y(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][1]] = value self._raster.geotransform = gtf class GDALRaster(GDALBase): """ Wrap a raster GDAL Data Source object. """ destructor = capi.close_ds def __init__(self, ds_input, write=False): self._write = 1 if write else 0 Driver.ensure_registered() # Preprocess json inputs. This converts json strings to dictionaries, # which are parsed below the same way as direct dictionary inputs. if isinstance(ds_input, str) and json_regex.match(ds_input): ds_input = json.loads(ds_input) # If input is a valid file path, try setting file as source. if isinstance(ds_input, str): if not os.path.exists(ds_input): raise GDALException('Unable to read raster source input "{}"'.format(ds_input)) try: # GDALOpen will auto-detect the data source type. self._ptr = capi.open_ds(force_bytes(ds_input), self._write) except GDALException as err: raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err)) elif isinstance(ds_input, dict): # A new raster needs to be created in write mode self._write = 1 # Create driver (in memory by default) driver = Driver(ds_input.get('driver', 'MEM')) # For out of memory drivers, check filename argument if driver.name != 'MEM' and 'name' not in ds_input: raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name)) # Check if width and height where specified if 'width' not in ds_input or 'height' not in ds_input: raise GDALException('Specify width and height attributes for JSON or dict input.') # Check if srid was specified if 'srid' not in ds_input: raise GDALException('Specify srid for JSON or dict input.') # Create GDAL Raster self._ptr = capi.create_ds( driver._ptr, force_bytes(ds_input.get('name', '')), ds_input['width'], ds_input['height'], ds_input.get('nr_of_bands', len(ds_input.get('bands', []))), ds_input.get('datatype', 6), None ) # Set band data if provided for i, band_input in enumerate(ds_input.get('bands', [])): band = self.bands[i] if 'nodata_value' in band_input: band.nodata_value = band_input['nodata_value'] # Instantiate band filled with nodata values if only # partial input data has been provided. if band.nodata_value is not None and ( 'data' not in band_input or 'size' in band_input or 'shape' in band_input): band.data(data=(band.nodata_value,), shape=(1, 1)) # Set band data values from input. band.data( data=band_input.get('data'), size=band_input.get('size'), shape=band_input.get('shape'), offset=band_input.get('offset'), ) # Set SRID self.srs = ds_input.get('srid') # Set additional properties if provided if 'origin' in ds_input: self.origin.x, self.origin.y = ds_input['origin'] if 'scale' in ds_input: self.scale.x, self.scale.y = ds_input['scale'] if 'skew' in ds_input: self.skew.x, self.skew.y = ds_input['skew'] elif isinstance(ds_input, c_void_p): # Instantiate the object using an existing pointer to a gdal raster. self._ptr = ds_input else: raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input))) def __str__(self): return self.name def __repr__(self): """ Short-hand representation because WKB may be very large. """ return '<Raster object at %s>' % hex(addressof(self._ptr)) def _flush(self): """ Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, nodata_values and pixel values. This function will be called automatically wherever it is needed. """ # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException('Raster needs to be opened in write mode to change values.') capi.flush_ds(self._ptr) @property def name(self): """ Return the name of this raster. Corresponds to filename for file-based rasters. """ return force_text(capi.get_ds_description(self._ptr)) @cached_property def driver(self): """ Return the GDAL Driver used for this raster. """ ds_driver = capi.get_ds_driver(self._ptr) return Driver(ds_driver) @property def width(self): """ Width (X axis) in pixels. """ return capi.get_ds_xsize(self._ptr) @property def height(self): """ Height (Y axis) in pixels. """ return capi.get_ds_ysize(self._ptr) @property def srs(self): """ Return the SpatialReference used in this GDALRaster. """ try: wkt = capi.get_ds_projection_ref(self._ptr) if not wkt: return None return SpatialReference(wkt, srs_type='wkt') except SRSException: return None @srs.setter def srs(self, value): """ Set the spatial reference used in this GDALRaster. The input can be a SpatialReference or any parameter accepted by the SpatialReference constructor. """ if isinstance(value, SpatialReference): srs = value elif isinstance(value, (int, str)): srs = SpatialReference(value) else: raise ValueError('Could not create a SpatialReference from input.') capi.set_ds_projection_ref(self._ptr, srs.wkt.encode()) self._flush() @property def srid(self): """ Shortcut to access the srid of this GDALRaster. """ return self.srs.srid @srid.setter def srid(self, value): """ Shortcut to set this GDALRaster's srs from an srid. """ self.srs = value @property def geotransform(self): """ Return the geotransform of the data source. Return the default geotransform if it does not exist or has not been set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0]. """ # Create empty ctypes double array for data gtf = (c_double * 6)() capi.get_ds_geotransform(self._ptr, byref(gtf)) return list(gtf) @geotransform.setter def geotransform(self, values): "Set the geotransform for the data source." if sum([isinstance(x, (int, float)) for x in values]) != 6: raise ValueError('Geotransform must consist of 6 numeric values.') # Create ctypes double array with input and write data values = (c_double * 6)(*values) capi.set_ds_geotransform(self._ptr, byref(values)) self._flush() @property def origin(self): """ Coordinates of the raster origin. """ return TransformPoint(self, 'origin') @property def scale(self): """ Pixel scale in units of the raster projection. """ return TransformPoint(self, 'scale') @property def skew(self): """ Skew of pixels (rotation parameters). """ return TransformPoint(self, 'skew') @property def extent(self): """ Return the extent as a 4-tuple (xmin, ymin, xmax, ymax). """ # Calculate boundary values based on scale and size xval = self.origin.x + self.scale.x * self.width yval = self.origin.y + self.scale.y * self.height # Calculate min and max values xmin = min(xval, self.origin.x) xmax = max(xval, self.origin.x) ymin = min(yval, self.origin.y) ymax = max(yval, self.origin.y) return xmin, ymin, xmax, ymax @property def bands(self): return BandList(self) def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0): """ Return a warped GDALRaster with the given input characteristics. The input is expected to be a dictionary containing the parameters of the target raster. Allowed values are width, height, SRID, origin, scale, skew, datatype, driver, and name (filename). By default, the warp functions keeps all parameters equal to the values of the original source raster. For the name of the target raster, the name of the source raster will be used and appended with _copy. + source_driver_name. In addition, the resampling algorithm can be specified with the "resampling" input parameter. The default is NearestNeighbor. For a list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant. """ # Get the parameters defining the geotransform, srid, and size of the raster if 'width' not in ds_input: ds_input['width'] = self.width if 'height' not in ds_input: ds_input['height'] = self.height if 'srid' not in ds_input: ds_input['srid'] = self.srs.srid if 'origin' not in ds_input: ds_input['origin'] = self.origin if 'scale' not in ds_input: ds_input['scale'] = self.scale if 'skew' not in ds_input: ds_input['skew'] = self.skew # Get the driver, name, and datatype of the target raster if 'driver' not in ds_input: ds_input['driver'] = self.driver.name if 'name' not in ds_input: ds_input['name'] = self.name + '_copy.' + self.driver.name if 'datatype' not in ds_input: ds_input['datatype'] = self.bands[0].datatype() # Instantiate raster bands filled with nodata values. ds_input['bands'] = [{'nodata_value': bnd.nodata_value} for bnd in self.bands] # Create target raster target = GDALRaster(ds_input, write=True) # Select resampling algorithm algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling] # Reproject image capi.reproject_image( self._ptr, self.srs.wkt.encode(), target._ptr, target.srs.wkt.encode(), algorithm, 0.0, max_error, c_void_p(), c_void_p(), c_void_p() ) # Make sure all data is written to file target._flush() return target def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour', max_error=0.0): """ Return a copy of this raster reprojected into the given SRID. """ # Convert the resampling algorithm name into an algorithm id algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling] # Instantiate target spatial reference system target_srs = SpatialReference(srid) # Create warped virtual dataset in the target reference system target = capi.auto_create_warped_vrt( self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(), algorithm, max_error, c_void_p() ) target = GDALRaster(target) # Construct the target warp dictionary from the virtual raster data = { 'srid': srid, 'width': target.width, 'height': target.height, 'origin': [target.origin.x, target.origin.y], 'scale': [target.scale.x, target.scale.y], 'skew': [target.skew.x, target.skew.y], } # Set the driver and filepath if provided if driver: data['driver'] = driver if name: data['name'] = name # Warp the raster into new srid return self.warp(data, resampling=resampling, max_error=max_error)
bsd-3-clause
seem-sky/kbengine
kbe/res/scripts/common/Lib/site-packages/setuptools/tests/test_easy_install.py
73
13214
"""Easy install Tests """ import sys import os import shutil import tempfile import unittest import site import contextlib import textwrap import tarfile import logging import distutils.core from setuptools.compat import StringIO, BytesIO, next, urlparse from setuptools.sandbox import run_setup, SandboxViolation from setuptools.command.easy_install import ( easy_install, fix_jython_executable, get_script_args, nt_quote_arg) from setuptools.command.easy_install import PthDistributions from setuptools.command import easy_install as easy_install_pkg from setuptools.dist import Distribution from pkg_resources import Distribution as PRDistribution import setuptools.tests.server class FakeDist(object): def get_entry_map(self, group): if group != 'console_scripts': return {} return {'name': 'ep'} def as_requirement(self): return 'spec' WANTED = """\ #!%s # EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name' __requires__ = 'spec' import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('spec', 'console_scripts', 'name')() ) """ % nt_quote_arg(fix_jython_executable(sys.executable, "")) SETUP_PY = """\ from setuptools import setup setup(name='foo') """ class TestEasyInstallTest(unittest.TestCase): def test_install_site_py(self): dist = Distribution() cmd = easy_install(dist) cmd.sitepy_installed = False cmd.install_dir = tempfile.mkdtemp() try: cmd.install_site_py() sitepy = os.path.join(cmd.install_dir, 'site.py') self.assertTrue(os.path.exists(sitepy)) finally: shutil.rmtree(cmd.install_dir) def test_get_script_args(self): dist = FakeDist() old_platform = sys.platform try: name, script = [i for i in next(get_script_args(dist))][0:2] finally: sys.platform = old_platform self.assertEqual(script, WANTED) def test_no_find_links(self): # new option '--no-find-links', that blocks find-links added at # the project level dist = Distribution() cmd = easy_install(dist) cmd.check_pth_processing = lambda: True cmd.no_find_links = True cmd.find_links = ['link1', 'link2'] cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') cmd.args = ['ok'] cmd.ensure_finalized() self.assertEqual(cmd.package_index.scanned_urls, {}) # let's try without it (default behavior) cmd = easy_install(dist) cmd.check_pth_processing = lambda: True cmd.find_links = ['link1', 'link2'] cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') cmd.args = ['ok'] cmd.ensure_finalized() keys = sorted(cmd.package_index.scanned_urls.keys()) self.assertEqual(keys, ['link1', 'link2']) class TestPTHFileWriter(unittest.TestCase): def test_add_from_cwd_site_sets_dirty(self): '''a pth file manager should set dirty if a distribution is in site but also the cwd ''' pth = PthDistributions('does-not_exist', [os.getcwd()]) self.assertTrue(not pth.dirty) pth.add(PRDistribution(os.getcwd())) self.assertTrue(pth.dirty) def test_add_from_site_is_ignored(self): if os.name != 'nt': location = '/test/location/does-not-have-to-exist' else: location = 'c:\\does_not_exist' pth = PthDistributions('does-not_exist', [location, ]) self.assertTrue(not pth.dirty) pth.add(PRDistribution(location)) self.assertTrue(not pth.dirty) class TestUserInstallTest(unittest.TestCase): def setUp(self): self.dir = tempfile.mkdtemp() setup = os.path.join(self.dir, 'setup.py') f = open(setup, 'w') f.write(SETUP_PY) f.close() self.old_cwd = os.getcwd() os.chdir(self.dir) self.old_enable_site = site.ENABLE_USER_SITE self.old_file = easy_install_pkg.__file__ self.old_base = site.USER_BASE site.USER_BASE = tempfile.mkdtemp() self.old_site = site.USER_SITE site.USER_SITE = tempfile.mkdtemp() easy_install_pkg.__file__ = site.USER_SITE def tearDown(self): os.chdir(self.old_cwd) shutil.rmtree(self.dir) shutil.rmtree(site.USER_BASE) shutil.rmtree(site.USER_SITE) site.USER_BASE = self.old_base site.USER_SITE = self.old_site site.ENABLE_USER_SITE = self.old_enable_site easy_install_pkg.__file__ = self.old_file def test_user_install_implied(self): site.ENABLE_USER_SITE = True # disabled sometimes #XXX: replace with something meaningfull dist = Distribution() dist.script_name = 'setup.py' cmd = easy_install(dist) cmd.args = ['py'] cmd.ensure_finalized() self.assertTrue(cmd.user, 'user should be implied') def test_multiproc_atexit(self): try: __import__('multiprocessing') except ImportError: # skip the test if multiprocessing is not available return log = logging.getLogger('test_easy_install') logging.basicConfig(level=logging.INFO, stream=sys.stderr) log.info('this should not break') def test_user_install_not_implied_without_usersite_enabled(self): site.ENABLE_USER_SITE = False # usually enabled #XXX: replace with something meaningfull dist = Distribution() dist.script_name = 'setup.py' cmd = easy_install(dist) cmd.args = ['py'] cmd.initialize_options() self.assertFalse(cmd.user, 'NOT user should be implied') def test_local_index(self): # make sure the local index is used # when easy_install looks for installed # packages new_location = tempfile.mkdtemp() target = tempfile.mkdtemp() egg_file = os.path.join(new_location, 'foo-1.0.egg-info') f = open(egg_file, 'w') try: f.write('Name: foo\n') finally: f.close() sys.path.append(target) old_ppath = os.environ.get('PYTHONPATH') os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path) try: dist = Distribution() dist.script_name = 'setup.py' cmd = easy_install(dist) cmd.install_dir = target cmd.args = ['foo'] cmd.ensure_finalized() cmd.local_index.scan([new_location]) res = cmd.easy_install('foo') self.assertEqual(os.path.realpath(res.location), os.path.realpath(new_location)) finally: sys.path.remove(target) for basedir in [new_location, target, ]: if not os.path.exists(basedir) or not os.path.isdir(basedir): continue try: shutil.rmtree(basedir) except: pass if old_ppath is not None: os.environ['PYTHONPATH'] = old_ppath else: del os.environ['PYTHONPATH'] def test_setup_requires(self): """Regression test for Distribute issue #318 Ensure that a package with setup_requires can be installed when setuptools is installed in the user site-packages without causing a SandboxViolation. """ test_setup_attrs = { 'name': 'test_pkg', 'version': '0.0', 'setup_requires': ['foobar'], 'dependency_links': [os.path.abspath(self.dir)] } test_pkg = os.path.join(self.dir, 'test_pkg') test_setup_py = os.path.join(test_pkg, 'setup.py') os.mkdir(test_pkg) f = open(test_setup_py, 'w') f.write(textwrap.dedent("""\ import setuptools setuptools.setup(**%r) """ % test_setup_attrs)) f.close() foobar_path = os.path.join(self.dir, 'foobar-0.1.tar.gz') make_trivial_sdist( foobar_path, textwrap.dedent("""\ import setuptools setuptools.setup( name='foobar', version='0.1' ) """)) old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = StringIO() sys.stderr = StringIO() try: try: with reset_setup_stop_context(): run_setup(test_setup_py, ['install']) except SandboxViolation: self.fail('Installation caused SandboxViolation') finally: sys.stdout = old_stdout sys.stderr = old_stderr class TestSetupRequires(unittest.TestCase): def test_setup_requires_honors_fetch_params(self): """ When easy_install installs a source distribution which specifies setup_requires, it should honor the fetch parameters (such as allow-hosts, index-url, and find-links). """ # set up a server which will simulate an alternate package index. p_index = setuptools.tests.server.MockServer() p_index.start() netloc = 1 p_index_loc = urlparse(p_index.url)[netloc] if p_index_loc.endswith(':0'): # Some platforms (Jython) don't find a port to which to bind, # so skip this test for them. return # create an sdist that has a build-time dependency. with TestSetupRequires.create_sdist() as dist_file: with tempdir_context() as temp_install_dir: with environment_context(PYTHONPATH=temp_install_dir): ei_params = ['--index-url', p_index.url, '--allow-hosts', p_index_loc, '--exclude-scripts', '--install-dir', temp_install_dir, dist_file] with reset_setup_stop_context(): with argv_context(['easy_install']): # attempt to install the dist. It should fail because # it doesn't exist. self.assertRaises(SystemExit, easy_install_pkg.main, ei_params) # there should have been two or three requests to the server # (three happens on Python 3.3a) self.assertTrue(2 <= len(p_index.requests) <= 3) self.assertEqual(p_index.requests[0].path, '/does-not-exist/') @staticmethod @contextlib.contextmanager def create_sdist(): """ Return an sdist with a setup_requires dependency (of something that doesn't exist) """ with tempdir_context() as dir: dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz') make_trivial_sdist( dist_path, textwrap.dedent(""" import setuptools setuptools.setup( name="setuptools-test-fetcher", version="1.0", setup_requires = ['does-not-exist'], ) """).lstrip()) yield dist_path def make_trivial_sdist(dist_path, setup_py): """Create a simple sdist tarball at dist_path, containing just a setup.py, the contents of which are provided by the setup_py string. """ setup_py_file = tarfile.TarInfo(name='setup.py') try: # Python 3 (StringIO gets converted to io module) MemFile = BytesIO except AttributeError: MemFile = StringIO setup_py_bytes = MemFile(setup_py.encode('utf-8')) setup_py_file.size = len(setup_py_bytes.getvalue()) dist = tarfile.open(dist_path, 'w:gz') try: dist.addfile(setup_py_file, fileobj=setup_py_bytes) finally: dist.close() @contextlib.contextmanager def tempdir_context(cd=lambda dir:None): temp_dir = tempfile.mkdtemp() orig_dir = os.getcwd() try: cd(temp_dir) yield temp_dir finally: cd(orig_dir) shutil.rmtree(temp_dir) @contextlib.contextmanager def environment_context(**updates): old_env = os.environ.copy() os.environ.update(updates) try: yield finally: for key in updates: del os.environ[key] os.environ.update(old_env) @contextlib.contextmanager def argv_context(repl): old_argv = sys.argv[:] sys.argv[:] = repl yield sys.argv[:] = old_argv @contextlib.contextmanager def reset_setup_stop_context(): """ When the setuptools tests are run using setup.py test, and then one wants to invoke another setup() command (such as easy_install) within those tests, it's necessary to reset the global variable in distutils.core so that the setup() command will run naturally. """ setup_stop_after = distutils.core._setup_stop_after distutils.core._setup_stop_after = None yield distutils.core._setup_stop_after = setup_stop_after
lgpl-3.0
stevenewey/django
django/db/__init__.py
17
2316
from django.core import signals from django.db.utils import (DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError, DatabaseError, InterfaceError, Error, ConnectionHandler, ConnectionRouter) __all__ = [ 'backend', 'connection', 'connections', 'router', 'DatabaseError', 'IntegrityError', 'InternalError', 'ProgrammingError', 'DataError', 'NotSupportedError', 'Error', 'InterfaceError', 'OperationalError', 'DEFAULT_DB_ALIAS', 'DJANGO_VERSION_PICKLE_KEY' ] connections = ConnectionHandler() router = ConnectionRouter() # `connection`, `DatabaseError` and `IntegrityError` are convenient aliases # for backend bits. # DatabaseWrapper.__init__() takes a dictionary, not a settings module, so we # manually create the dictionary from the settings, passing only the settings # that the database backends care about. # We load all these up for backwards compatibility, you should use # connections['default'] instead. class DefaultConnectionProxy(object): """ Proxy for accessing the default DatabaseWrapper object's attributes. If you need to access the DatabaseWrapper object itself, use connections[DEFAULT_DB_ALIAS] instead. """ def __getattr__(self, item): return getattr(connections[DEFAULT_DB_ALIAS], item) def __setattr__(self, name, value): return setattr(connections[DEFAULT_DB_ALIAS], name, value) def __delattr__(self, name): return delattr(connections[DEFAULT_DB_ALIAS], name) def __eq__(self, other): return connections[DEFAULT_DB_ALIAS] == other def __ne__(self, other): return connections[DEFAULT_DB_ALIAS] != other connection = DefaultConnectionProxy() # Register an event to reset saved queries when a Django request is started. def reset_queries(**kwargs): for conn in connections.all(): conn.queries_log.clear() signals.request_started.connect(reset_queries) # Register an event to reset transaction state and close connections past # their lifetime. def close_old_connections(**kwargs): for conn in connections.all(): conn.close_if_unusable_or_obsolete() signals.request_started.connect(close_old_connections) signals.request_finished.connect(close_old_connections)
bsd-3-clause
loretoparisi/pattern
pattern/text/es/__init__.py
21
8726
#### PATTERN | ES ################################################################################## # -*- coding: utf-8 -*- # Copyright (c) 2012 University of Antwerp, Belgium # Author: Tom De Smedt <tom@organisms.be> # License: BSD (see LICENSE.txt for details). # http://www.clips.ua.ac.be/pages/pattern #################################################################################################### # Spanish linguistical tools using fast regular expressions. import os import sys try: MODULE = os.path.dirname(os.path.realpath(__file__)) except: MODULE = "" sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", "..")) # Import parser base classes. from pattern.text import ( Lexicon, Model, Morphology, Context, Parser as _Parser, ngrams, pprint, commandline, PUNCTUATION ) # Import parser universal tagset. from pattern.text import ( penntreebank2universal, PTB, PENN, UNIVERSAL, NOUN, VERB, ADJ, ADV, PRON, DET, PREP, ADP, NUM, CONJ, INTJ, PRT, PUNC, X ) # Import parse tree base classes. from pattern.text.tree import ( Tree, Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table, SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR ) # Import spelling base class. from pattern.text import ( Spelling ) # Import verb tenses. from pattern.text import ( INFINITIVE, PRESENT, PAST, FUTURE, CONDITIONAL, FIRST, SECOND, THIRD, SINGULAR, PLURAL, SG, PL, INDICATIVE, IMPERATIVE, SUBJUNCTIVE, IMPERFECTIVE, PERFECTIVE, PROGRESSIVE, IMPERFECT, PRETERITE, PARTICIPLE, GERUND ) # Import inflection functions. from pattern.text.es.inflect import ( article, referenced, DEFINITE, INDEFINITE, MASCULINE, MALE, FEMININE, FEMALE, NEUTER, NEUTRAL, PLURAL, M, F, N, PL, pluralize, singularize, NOUN, VERB, ADJECTIVE, verbs, conjugate, lemma, lexeme, tenses, predicative, attributive ) # Import all submodules. from pattern.text.es import inflect sys.path.pop(0) #--- SPANISH PARSER -------------------------------------------------------------------------------- # The Spanish parser (accuracy 92%) is based on the Spanish portion Wikicorpus v.1.0 (FDL license), # using 1.5M words from the tagged sections 10000-15000. # Samuel Reese, Gemma Boleda, Montse Cuadros, Lluís Padró, German Rigau. # Wikicorpus: A Word-Sense Disambiguated Multilingual Wikipedia Corpus. # Proceedings of 7th Language Resources and Evaluation Conference (LREC'10), # La Valleta, Malta. May, 2010. # http://www.lsi.upc.edu/~nlp/wikicorpus/ # The lexicon uses the Parole tagset: # http://www.lsi.upc.edu/~nlp/SVMTool/parole.html # http://nlp.lsi.upc.edu/freeling/doc/tagsets/tagset-es.html PAROLE = "parole" parole = { "AO": "JJ", # primera "AQ": "JJ", # absurdo "CC": "CC", # e "CS": "IN", # porque "DA": "DT", # el "DD": "DT", # ese "DI": "DT", # mucha "DP": "PRP$", # mi, nuestra "DT": "DT", # cuántos "Fa": ".", # ! "Fc": ",", # , "Fd": ":", # : "Fe": "\"", # " "Fg": ".", # - "Fh": ".", # / "Fi": ".", # ? "Fp": ".", # . "Fr": ".", # >> "Fs": ".", # ... "Fpa": "(", # ( "Fpt": ")", # ) "Fx": ".", # ; "Fz": ".", # "I": "UH", # ehm "NC": "NN", # islam "NCS": "NN", # guitarra "NCP": "NNS", # guitarras "NP": "NNP", # Óscar "P0": "PRP", # se "PD": "DT", # ése "PI": "DT", # uno "PP": "PRP", # vos "PR": "WP$", # qué "PT": "WP$", # qué "PX": "PRP$", # mío "RG": "RB", # tecnológicamente "RN": "RB", # no "SP": "IN", # por "VAG": "VBG", # habiendo "VAI": "MD", # había "VAN": "MD", # haber "VAS": "MD", # haya "VMG": "VBG", # habiendo "VMI": "VB", # habemos "VMM": "VB", # compare "VMN": "VB", # comparecer "VMP": "VBN", # comparando "VMS": "VB", # compararan "VSG": "VBG", # comparando "VSI": "VB", # será "VSN": "VB", # ser "VSP": "VBN", # sido "VSS": "VB", # sea "W": "NN", # septiembre "Z": "CD", # 1,7 "Zd": "CD", # 1,7 "Zm": "CD", # £1,7 "Zp": "CD", # 1,7% } def parole2penntreebank(token, tag): """ Converts a Parole tag to a Penn Treebank II tag. For example: importantísimo/AQ => importantísimo/ADJ """ return (token, parole.get(tag, tag)) def parole2universal(token, tag): """ Converts a Parole tag to a universal tag. For example: importantísimo/AQ => importantísimo/ADJ """ if tag == "CS": return (token, CONJ) if tag == "DP": return (token, DET) if tag in ("P0", "PD", "PI", "PP", "PR", "PT", "PX"): return (token, PRON) return penntreebank2universal(*parole2penntreebank(token, tag)) ABBREVIATIONS = set(( u"a.C.", u"a.m.", u"apdo.", u"aprox.", u"Av.", u"Avda.", u"c.c.", u"D.", u"Da.", u"d.C.", u"d.j.C.", u"dna.", u"Dr.", u"Dra.", u"esq.", u"etc.", u"Gob.", u"h.", u"m.n.", u"no.", u"núm.", u"pág.", u"P.D.", u"P.S.", u"p.ej.", u"p.m.", u"Profa.", u"q.e.p.d.", u"S.A.", u"S.L.", u"Sr.", u"Sra.", u"Srta.", u"s.s.s.", u"tel.", u"Ud.", u"Vd.", u"Uds.", u"Vds.", u"v.", u"vol.", u"W.C." )) def find_lemmata(tokens): """ Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list. """ for token in tokens: word, pos, lemma = token[0], token[1], token[0] if pos.startswith(("DT",)): lemma = singularize(word, pos="DT") if pos.startswith(("JJ",)): lemma = predicative(word) if pos == "NNS": lemma = singularize(word) if pos.startswith(("VB", "MD")): lemma = conjugate(word, INFINITIVE) or word token.append(lemma.lower()) return tokens class Parser(_Parser): def find_tokens(self, tokens, **kwargs): kwargs.setdefault("abbreviations", ABBREVIATIONS) kwargs.setdefault("replace", {}) return _Parser.find_tokens(self, tokens, **kwargs) def find_lemmata(self, tokens, **kwargs): return find_lemmata(tokens) def find_tags(self, tokens, **kwargs): if kwargs.get("tagset") in (PENN, None): kwargs.setdefault("map", lambda token, tag: parole2penntreebank(token, tag)) if kwargs.get("tagset") == UNIVERSAL: kwargs.setdefault("map", lambda token, tag: parole2universal(token, tag)) if kwargs.get("tagset") is PAROLE: kwargs.setdefault("map", lambda token, tag: (token, tag)) return _Parser.find_tags(self, tokens, **kwargs) parser = Parser( lexicon = os.path.join(MODULE, "es-lexicon.txt"), frequency = os.path.join(MODULE, "es-frequency.txt"), morphology = os.path.join(MODULE, "es-morphology.txt"), context = os.path.join(MODULE, "es-context.txt"), default = ("NCS", "NP", "Z"), language = "es" ) lexicon = parser.lexicon # Expose lexicon. spelling = Spelling( path = os.path.join(MODULE, "es-spelling.txt") ) def tokenize(s, *args, **kwargs): """ Returns a list of sentences, where punctuation marks have been split from words. """ return parser.find_tokens(s, *args, **kwargs) def parse(s, *args, **kwargs): """ Returns a tagged Unicode string. """ return parser.parse(s, *args, **kwargs) def parsetree(s, *args, **kwargs): """ Returns a parsed Text from the given string. """ return Text(parse(s, *args, **kwargs)) def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """ return Text(s, token) def tag(s, tokenize=True, encoding="utf-8", **kwargs): """ Returns a list of (token, tag)-tuples from the given string. """ tags = [] for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split(): for token in sentence: tags.append((token[0], token[1])) return tags def keywords(s, top=10, **kwargs): """ Returns a sorted list of keywords in the given string. """ return parser.find_keywords(s, **dict({ "frequency": parser.frequency, "top": top, "pos": ("NN",), "ignore": ("rt",)}, **kwargs)) def suggest(w): """ Returns a list of (word, confidence)-tuples of spelling corrections. """ return spelling.suggest(w) split = tree # Backwards compatibility. #--------------------------------------------------------------------------------------------------- # python -m pattern.es xml -s "A quien se hace de miel las moscas le comen." -OTCL if __name__ == "__main__": commandline(parse)
bsd-3-clause
brad-h/expy
ExPy/ExPy/module23.py
1
1965
""" Troubleshooting Car Issues """ QUESTIONS = [ { 'question': 'Is the car silent when you turn the key?', 'answers': { 'Yes': 1, 'No': 2 } }, { 'question': 'Are the battery terminals corroded?', 'answers': { 'Yes': 'Clean terminals and try starting again.', 'No': 'Replace cables and try again.' } }, { 'question': 'Does the car making a clicking noise?', 'answers': { 'Yes': 'Replace the battery.', 'No': 3 } }, { 'question': 'Does the car crank up but fail to start?', 'answers': { 'Yes': 'Check spark plug connections', 'No': 4 } }, { 'question': 'Does the engine start and then die?', 'answers': { 'Yes': 5, 'No': 'Unknown' } }, { 'question': 'Does your car have fuel injection?', 'answers': { 'Yes': 'Get it in for service.', 'No': 'Check to ensure the chcoke is opening and closing.' } } ] def ex23(): """ Use QUESTIONS to help a user diagnose car issues """ # QUESTIONS[0] is the "root" question # start from the root and follow the answers through the QUESTIONS list question = QUESTIONS[0] while True: print(question['question']) answer = input(', '.join(question['answers'].keys()) + ': ') answers = question['answers'] if answer in answers: if isinstance(answers[answer], int): # reached a follow-up question; set as the next question to ask question = QUESTIONS[answers[answer]] else: # reached a diagnosis; print and exit print(answers[answer]) break else: print('Please enter a valid selection') if __name__ == '__main__': ex23()
mit
adamwwt/chvac
venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py
451
4833
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. # from winbase.h STDOUT = -11 STDERR = -12 try: from ctypes import windll from ctypes import wintypes except ImportError: windll = None SetConsoleTextAttribute = lambda *_: None else: from ctypes import ( byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER ) class CONSOLE_SCREEN_BUFFER_INFO(Structure): """struct in wincon.h.""" _fields_ = [ ("dwSize", wintypes._COORD), ("dwCursorPosition", wintypes._COORD), ("wAttributes", wintypes.WORD), ("srWindow", wintypes.SMALL_RECT), ("dwMaximumWindowSize", wintypes._COORD), ] def __str__(self): return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( self.dwSize.Y, self.dwSize.X , self.dwCursorPosition.Y, self.dwCursorPosition.X , self.wAttributes , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X ) _GetStdHandle = windll.kernel32.GetStdHandle _GetStdHandle.argtypes = [ wintypes.DWORD, ] _GetStdHandle.restype = wintypes.HANDLE _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo _GetConsoleScreenBufferInfo.argtypes = [ wintypes.HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO), ] _GetConsoleScreenBufferInfo.restype = wintypes.BOOL _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute _SetConsoleTextAttribute.argtypes = [ wintypes.HANDLE, wintypes.WORD, ] _SetConsoleTextAttribute.restype = wintypes.BOOL _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition _SetConsoleCursorPosition.argtypes = [ wintypes.HANDLE, wintypes._COORD, ] _SetConsoleCursorPosition.restype = wintypes.BOOL _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA _FillConsoleOutputCharacterA.argtypes = [ wintypes.HANDLE, c_char, wintypes.DWORD, wintypes._COORD, POINTER(wintypes.DWORD), ] _FillConsoleOutputCharacterA.restype = wintypes.BOOL _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute _FillConsoleOutputAttribute.argtypes = [ wintypes.HANDLE, wintypes.WORD, wintypes.DWORD, wintypes._COORD, POINTER(wintypes.DWORD), ] _FillConsoleOutputAttribute.restype = wintypes.BOOL handles = { STDOUT: _GetStdHandle(STDOUT), STDERR: _GetStdHandle(STDERR), } def GetConsoleScreenBufferInfo(stream_id=STDOUT): handle = handles[stream_id] csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( handle, byref(csbi)) return csbi def SetConsoleTextAttribute(stream_id, attrs): handle = handles[stream_id] return _SetConsoleTextAttribute(handle, attrs) def SetConsoleCursorPosition(stream_id, position): position = wintypes._COORD(*position) # If the position is out of range, do nothing. if position.Y <= 0 or position.X <= 0: return # Adjust for Windows' SetConsoleCursorPosition: # 1. being 0-based, while ANSI is 1-based. # 2. expecting (x,y), while ANSI uses (y,x). adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1) # Adjust for viewport's scroll position sr = GetConsoleScreenBufferInfo(STDOUT).srWindow adjusted_position.Y += sr.Top adjusted_position.X += sr.Left # Resume normal processing handle = handles[stream_id] return _SetConsoleCursorPosition(handle, adjusted_position) def FillConsoleOutputCharacter(stream_id, char, length, start): handle = handles[stream_id] char = c_char(char) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) # Note that this is hard-coded for ANSI (vs wide) bytes. success = _FillConsoleOutputCharacterA( handle, char, length, start, byref(num_written)) return num_written.value def FillConsoleOutputAttribute(stream_id, attr, length, start): ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' handle = handles[stream_id] attribute = wintypes.WORD(attr) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) # Note that this is hard-coded for ANSI (vs wide) bytes. return _FillConsoleOutputAttribute( handle, attribute, length, start, byref(num_written))
mit
mbiciunas/nix
test/config/tag/test_createTag.py
1
1342
# Nix # Copyright (c) 2017 Mark Biciunas. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import pytest from config.config import Config from config.tag.create_tag import CreateTag from utility.nix_error import NixError class TestCreateTag: def test_create(self, config_valid): _create_tag = CreateTag() _create_tag.create(config_valid.TAG_INVALID_1, "invalid tag") _tags = Config().get_tags() assert _tags.exist(config_valid.TAG_INVALID_1), \ "Tag was not created: {}".format(config_valid.TAG_INVALID_1) def test_create_exists(self, config_valid): _create_tag = CreateTag() with pytest.raises(NixError): _create_tag.create(config_valid.TAG_VALID_1, "valid tag")
gpl-3.0
ESA-VirES/eoxserver-magnetism
instance/instance/urls.py
1
2679
#------------------------------------------------------------------------------- # # Project: EOxServer <http://eoxserver.org> # Authors: Stephan Krause <stephan.krause@eox.at> # Stephan Meissl <stephan.meissl@eox.at> # #------------------------------------------------------------------------------- # Copyright (C) 2012 EOX IT Services GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies of this Software or works derived from this Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- """ URLs config for EOxServer's instance instance. """ from django.conf.urls import patterns, include, url # Enable the admin: from django.contrib import admin admin.autodiscover() # Enable the databrowse: #from django.contrib import databrowse # Enable the ATP auxiliary views: from eoxserver.resources.processes import views as procViews urlpatterns = patterns('', (r'^$', 'eoxserver.views.index'), (r'^ows', 'eoxserver.services.views.ows'), (r'^client/$', 'eoxserver.webclient.views.index'), (r'^client/(.*)', 'eoxserver.webclient.views.webclient'), # Enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Enable the admin: url(r'^admin/', include(admin.site.urls)), # Enable the databrowse: #(r'^databrowse/(.*)', databrowse.site.root), # Uncomment following lines to enable the ATP views: #(r'^process/status$', procViews.status ), #(r'^process/status/(?P<requestType>[^/]{,64})/(?P<requestID>[^/]{,64})$', procViews.status ), #(r'^process/task$', procViews.task ), (r'^process/response/(?P<requestType>[^/]{,64})/(?P<requestID>[^/]{,64})', procViews.response ), )
mit
Endika/django-debug-toolbar
tests/panels/test_profiling.py
4
2143
from __future__ import absolute_import, unicode_literals from django.contrib.auth.models import User from django.db import IntegrityError, transaction from django.test import TestCase from django.test.utils import override_settings from ..base import BaseTestCase from ..views import regular_view @override_settings(DEBUG_TOOLBAR_PANELS=['debug_toolbar.panels.profiling.ProfilingPanel']) class ProfilingPanelTestCase(BaseTestCase): def setUp(self): super(ProfilingPanelTestCase, self).setUp() self.panel = self.toolbar.get_panel_by_id('ProfilingPanel') def test_regular_view(self): self.panel.process_view(self.request, regular_view, ('profiling',), {}) self.panel.process_response(self.request, self.response) self.panel.generate_stats(self.request, self.response) self.assertIn('func_list', self.panel.get_stats()) self.assertIn('regular_view', self.panel.content) def test_insert_content(self): """ Test that the panel only inserts content after generate_stats and not the process_response. """ self.panel.process_view(self.request, regular_view, ('profiling',), {}) self.panel.process_response(self.request, self.response) # ensure the panel does not have content yet. self.assertNotIn('regular_view', self.panel.content) self.panel.generate_stats(self.request, self.response) # ensure the panel renders correctly. self.assertIn('regular_view', self.panel.content) @override_settings(DEBUG=True, DEBUG_TOOLBAR_PANELS=['debug_toolbar.panels.profiling.ProfilingPanel']) class ProfilingPanelIntegrationTestCase(TestCase): def test_view_executed_once(self): self.assertEqual(User.objects.count(), 0) response = self.client.get('/new_user/') self.assertContains(response, 'Profiling') self.assertEqual(User.objects.count(), 1) with self.assertRaises(IntegrityError): with transaction.atomic(): response = self.client.get('/new_user/') self.assertEqual(User.objects.count(), 1)
bsd-3-clause
wackymaster/QTClock
Libraries/numpy/lib/tests/test_io.py
19
72111
from __future__ import division, absolute_import, print_function import sys import gzip import os import threading from tempfile import NamedTemporaryFile import time import warnings import gc from io import BytesIO from datetime import datetime import numpy as np import numpy.ma as ma from numpy.lib._iotools import ConverterError, ConversionWarning from numpy.compat import asbytes, bytes, unicode from numpy.ma.testutils import assert_equal from numpy.testing import ( TestCase, run_module_suite, assert_warns, assert_, assert_raises_regex, assert_raises, assert_allclose, assert_array_equal,temppath ) from numpy.testing.utils import tempdir class TextIO(BytesIO): """Helper IO class. Writes encode strings to bytes if needed, reads return bytes. This makes it easier to emulate files opened in binary mode without needing to explicitly convert strings to bytes in setting up the test data. """ def __init__(self, s=""): BytesIO.__init__(self, asbytes(s)) def write(self, s): BytesIO.write(self, asbytes(s)) def writelines(self, lines): BytesIO.writelines(self, [asbytes(s) for s in lines]) MAJVER, MINVER = sys.version_info[:2] IS_64BIT = sys.maxsize > 2**32 def strptime(s, fmt=None): """ This function is available in the datetime module only from Python >= 2.5. """ if sys.version_info[0] >= 3: return datetime(*time.strptime(s.decode('latin1'), fmt)[:3]) else: return datetime(*time.strptime(s, fmt)[:3]) class RoundtripTest(object): def roundtrip(self, save_func, *args, **kwargs): """ save_func : callable Function used to save arrays to file. file_on_disk : bool If true, store the file on disk, instead of in a string buffer. save_kwds : dict Parameters passed to `save_func`. load_kwds : dict Parameters passed to `numpy.load`. args : tuple of arrays Arrays stored to file. """ save_kwds = kwargs.get('save_kwds', {}) load_kwds = kwargs.get('load_kwds', {}) file_on_disk = kwargs.get('file_on_disk', False) if file_on_disk: target_file = NamedTemporaryFile(delete=False) load_file = target_file.name else: target_file = BytesIO() load_file = target_file try: arr = args save_func(target_file, *arr, **save_kwds) target_file.flush() target_file.seek(0) if sys.platform == 'win32' and not isinstance(target_file, BytesIO): target_file.close() arr_reloaded = np.load(load_file, **load_kwds) self.arr = arr self.arr_reloaded = arr_reloaded finally: if not isinstance(target_file, BytesIO): target_file.close() # holds an open file descriptor so it can't be deleted on win if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): os.remove(target_file.name) def check_roundtrips(self, a): self.roundtrip(a) self.roundtrip(a, file_on_disk=True) self.roundtrip(np.asfortranarray(a)) self.roundtrip(np.asfortranarray(a), file_on_disk=True) if a.shape[0] > 1: # neither C nor Fortran contiguous for 2D arrays or more self.roundtrip(np.asfortranarray(a)[1:]) self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) def test_array(self): a = np.array([], float) self.check_roundtrips(a) a = np.array([[1, 2], [3, 4]], float) self.check_roundtrips(a) a = np.array([[1, 2], [3, 4]], int) self.check_roundtrips(a) a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) self.check_roundtrips(a) a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) self.check_roundtrips(a) def test_array_object(self): if sys.version_info[:2] >= (2, 7): a = np.array([], object) self.check_roundtrips(a) a = np.array([[1, 2], [3, 4]], object) self.check_roundtrips(a) # Fails with UnpicklingError: could not find MARK on Python 2.6 def test_1D(self): a = np.array([1, 2, 3, 4], int) self.roundtrip(a) @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") def test_mmap(self): a = np.array([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) a = np.asfortranarray([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) self.check_roundtrips(a) def test_format_2_0(self): dt = [(("%d" % i) * 100, float) for i in range(500)] a = np.ones(1000, dtype=dt) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', UserWarning) self.check_roundtrips(a) class TestSaveLoad(RoundtripTest, TestCase): def roundtrip(self, *args, **kwargs): RoundtripTest.roundtrip(self, np.save, *args, **kwargs) assert_equal(self.arr[0], self.arr_reloaded) assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) class TestSavezLoad(RoundtripTest, TestCase): def roundtrip(self, *args, **kwargs): RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: for n, arr in enumerate(self.arr): reloaded = self.arr_reloaded['arr_%d' % n] assert_equal(arr, reloaded) assert_equal(arr.dtype, reloaded.dtype) assert_equal(arr.flags.fnc, reloaded.flags.fnc) finally: # delete tempfile, must be done here on windows if self.arr_reloaded.fid: self.arr_reloaded.fid.close() os.remove(self.arr_reloaded.fid.name) @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems") @np.testing.dec.slow def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: np.savez(tmp, a=a) del a npfile = np.load(tmp) a = npfile['a'] # Should succeed npfile.close() del a # Avoid pyflakes unused variable warning. def test_multiple_arrays(self): a = np.array([[1, 2], [3, 4]], float) b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) self.roundtrip(a, b) def test_named_arrays(self): a = np.array([[1, 2], [3, 4]], float) b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) c = BytesIO() np.savez(c, file_a=a, file_b=b) c.seek(0) l = np.load(c) assert_equal(a, l['file_a']) assert_equal(b, l['file_b']) def test_BagObj(self): a = np.array([[1, 2], [3, 4]], float) b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) c = BytesIO() np.savez(c, file_a=a, file_b=b) c.seek(0) l = np.load(c) assert_equal(sorted(dir(l.f)), ['file_a','file_b']) assert_equal(a, l.f.file_a) assert_equal(b, l.f.file_b) def test_savez_filename_clashes(self): # Test that issue #852 is fixed # and savez functions in multithreaded environment def writer(error_list): with temppath(suffix='.npz') as tmp: arr = np.random.randn(500, 500) try: np.savez(tmp, arr=arr) except OSError as err: error_list.append(err) errors = [] threads = [threading.Thread(target=writer, args=(errors,)) for j in range(3)] for t in threads: t.start() for t in threads: t.join() if errors: raise AssertionError(errors) def test_not_closing_opened_fid(self): # Test that issue #2178 is fixed: # verify could seek on 'loaded' file with temppath(suffix='.npz') as tmp: with open(tmp, 'wb') as fp: np.savez(fp, data='LOVELY LOAD') with open(tmp, 'rb', 10000) as fp: fp.seek(0) assert_(not fp.closed) np.load(fp)['data'] # fp must not get closed by .load assert_(not fp.closed) fp.seek(0) assert_(not fp.closed) def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed # It might be a "weak" test since failed to get triggered on # e.g. Debian sid of 2012 Jul 05 but was reported to # trigger the failure on Ubuntu 10.04: # http://projects.scipy.org/numpy/ticket/1517#comment:2 with temppath(suffix='.npz') as tmp: np.savez(tmp, data='LOVELY LOAD') # We need to check if the garbage collector can properly close # numpy npz file returned by np.load when their reference count # goes to zero. Python 3 running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. Because ResourceWarning # is unknown in Python < 3.x, we take the easy way out and # catch all warnings. with warnings.catch_warnings(): warnings.simplefilter("ignore") for i in range(1, 1025): try: np.load(tmp)["data"] except Exception as e: msg = "Failed to load data from a file: %s" % e raise AssertionError(msg) def test_closing_zipfile_after_load(self): # Check that zipfile owns file and can close it. This needs to # pass a file name to load for the test. On windows failure will # cause a second error will be raised when the attempt to remove # the open file is made. prefix = 'numpy_test_closing_zipfile_after_load_' with temppath(suffix='.npz', prefix=prefix) as tmp: np.savez(tmp, lab='place holder') data = np.load(tmp) fp = data.zip.fp data.close() assert_(fp.closed) class TestSaveTxt(TestCase): def test_array(self): a = np.array([[1, 2], [3, 4]], float) fmt = "%.18e" c = BytesIO() np.savetxt(c, a, fmt=fmt) c.seek(0) assert_equal(c.readlines(), [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) a = np.array([[1, 2], [3, 4]], int) c = BytesIO() np.savetxt(c, a, fmt='%d') c.seek(0) assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) def test_1D(self): a = np.array([1, 2, 3, 4], int) c = BytesIO() np.savetxt(c, a, fmt='%d') c.seek(0) lines = c.readlines() assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) c = BytesIO() np.savetxt(c, a, fmt='%d') c.seek(0) assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) def test_delimiter(self): a = np.array([[1., 2.], [3., 4.]]) c = BytesIO() np.savetxt(c, a, delimiter=',', fmt='%d') c.seek(0) assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) def test_format(self): a = np.array([(1, 2), (3, 4)]) c = BytesIO() # Sequence of formats np.savetxt(c, a, fmt=['%02d', '%3.1f']) c.seek(0) assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) # A single multiformat string c = BytesIO() np.savetxt(c, a, fmt='%02d : %3.1f') c.seek(0) lines = c.readlines() assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) # Specify delimiter, should be overiden c = BytesIO() np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') c.seek(0) lines = c.readlines() assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) # Bad fmt, should raise a ValueError c = BytesIO() assert_raises(ValueError, np.savetxt, c, a, fmt=99) def test_header_footer(self): # Test the functionality of the header and footer keyword argument. c = BytesIO() a = np.array([(1, 2), (3, 4)], dtype=np.int) test_header_footer = 'Test header / footer' # Test the header keyword argument np.savetxt(c, a, fmt='%1d', header=test_header_footer) c.seek(0) assert_equal(c.read(), asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) # Test the footer keyword argument c = BytesIO() np.savetxt(c, a, fmt='%1d', footer=test_header_footer) c.seek(0) assert_equal(c.read(), asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) # Test the commentstr keyword argument used on the header c = BytesIO() commentstr = '% ' np.savetxt(c, a, fmt='%1d', header=test_header_footer, comments=commentstr) c.seek(0) assert_equal(c.read(), asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) # Test the commentstr keyword argument used on the footer c = BytesIO() commentstr = '% ' np.savetxt(c, a, fmt='%1d', footer=test_header_footer, comments=commentstr) c.seek(0) assert_equal(c.read(), asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) def test_file_roundtrip(self): with temppath() as name: a = np.array([(1, 2), (3, 4)]) np.savetxt(name, a) b = np.loadtxt(name) assert_array_equal(a, b) def test_complex_arrays(self): ncols = 2 nrows = 2 a = np.zeros((ncols, nrows), dtype=np.complex128) re = np.pi im = np.e a[:] = re + 1.0j * im # One format only c = BytesIO() np.savetxt(c, a, fmt=' %+.3e') c.seek(0) lines = c.readlines() assert_equal( lines, [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) # One format for each real and imaginary part c = BytesIO() np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) c.seek(0) lines = c.readlines() assert_equal( lines, [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) # One format for each complex number c = BytesIO() np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) c.seek(0) lines = c.readlines() assert_equal( lines, [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) def test_custom_writer(self): class CustomWriter(list): def write(self, text): self.extend(text.split(b'\n')) w = CustomWriter() a = np.array([(1, 2), (3, 4)]) np.savetxt(w, a) b = np.loadtxt(w) assert_array_equal(a, b) class TestLoadTxt(TestCase): def test_record(self): c = TextIO() c.write('1 2\n3 4') c.seek(0) x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) assert_array_equal(x, a) d = TextIO() d.write('M 64.0 75.0\nF 25.0 60.0') d.seek(0) mydescriptor = {'names': ('gender', 'age', 'weight'), 'formats': ('S1', 'i4', 'f4')} b = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], dtype=mydescriptor) y = np.loadtxt(d, dtype=mydescriptor) assert_array_equal(y, b) def test_array(self): c = TextIO() c.write('1 2\n3 4') c.seek(0) x = np.loadtxt(c, dtype=np.int) a = np.array([[1, 2], [3, 4]], int) assert_array_equal(x, a) c.seek(0) x = np.loadtxt(c, dtype=float) a = np.array([[1, 2], [3, 4]], float) assert_array_equal(x, a) def test_1D(self): c = TextIO() c.write('1\n2\n3\n4\n') c.seek(0) x = np.loadtxt(c, dtype=int) a = np.array([1, 2, 3, 4], int) assert_array_equal(x, a) c = TextIO() c.write('1,2,3,4\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',') a = np.array([1, 2, 3, 4], int) assert_array_equal(x, a) def test_missing(self): c = TextIO() c.write('1,2,3,,5\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', converters={3: lambda s: int(s or - 999)}) a = np.array([1, 2, 3, -999, 5], int) assert_array_equal(x, a) def test_converters_with_usecols(self): c = TextIO() c.write('1,2,3,,5\n6,7,8,9,10\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', converters={3: lambda s: int(s or - 999)}, usecols=(1, 3,)) a = np.array([[2, -999], [7, 9]], int) assert_array_equal(x, a) def test_comments_unicode(self): c = TextIO() c.write('# comment\n1,2,3,5\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', comments=unicode('#')) a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) def test_comments_byte(self): c = TextIO() c.write('# comment\n1,2,3,5\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', comments=b'#') a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) def test_comments_multiple(self): c = TextIO() c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', comments=['#', '@', '//']) a = np.array([[1, 2, 3], [4, 5, 6]], int) assert_array_equal(x, a) def test_comments_multi_chars(self): c = TextIO() c.write('/* comment\n1,2,3,5\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', comments='/*') a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) # Check that '/*' is not transformed to ['/', '*'] c = TextIO() c.write('*/ comment\n1,2,3,5\n') c.seek(0) assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', comments='/*') def test_skiprows(self): c = TextIO() c.write('comment\n1,2,3,5\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', skiprows=1) a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) c = TextIO() c.write('# comment\n1,2,3,5\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', skiprows=1) a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) def test_usecols(self): a = np.array([[1, 2], [3, 4]], float) c = BytesIO() np.savetxt(c, a) c.seek(0) x = np.loadtxt(c, dtype=float, usecols=(1,)) assert_array_equal(x, a[:, 1]) a = np.array([[1, 2, 3], [3, 4, 5]], float) c = BytesIO() np.savetxt(c, a) c.seek(0) x = np.loadtxt(c, dtype=float, usecols=(1, 2)) assert_array_equal(x, a[:, 1:]) # Testing with arrays instead of tuples. c.seek(0) x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) assert_array_equal(x, a[:, 1:]) # Checking with dtypes defined converters. data = '''JOE 70.1 25.3 BOB 60.5 27.9 ''' c = TextIO(data) names = ['stid', 'temp'] dtypes = ['S4', 'f8'] arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) assert_equal(arr['stid'], [b"JOE", b"BOB"]) assert_equal(arr['temp'], [25.3, 27.9]) def test_fancy_dtype(self): c = TextIO() c.write('1,2,3.0\n4,5,6.0\n') c.seek(0) dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) x = np.loadtxt(c, dtype=dt, delimiter=',') a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) assert_array_equal(x, a) def test_shaped_dtype(self): c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ('block', int, (2, 3))]) x = np.loadtxt(c, dtype=dt) a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], dtype=dt) assert_array_equal(x, a) def test_3d_shaped_dtype(self): c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ('block', int, (2, 2, 3))]) x = np.loadtxt(c, dtype=dt) a = np.array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], dtype=dt) assert_array_equal(x, a) def test_empty_file(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="loadtxt: Empty input file:") c = TextIO() x = np.loadtxt(c) assert_equal(x.shape, (0,)) x = np.loadtxt(c, dtype=np.int64) assert_equal(x.shape, (0,)) assert_(x.dtype == np.int64) def test_unused_converter(self): c = TextIO() c.writelines(['1 21\n', '3 42\n']) c.seek(0) data = np.loadtxt(c, usecols=(1,), converters={0: lambda s: int(s, 16)}) assert_array_equal(data, [21, 42]) c.seek(0) data = np.loadtxt(c, usecols=(1,), converters={1: lambda s: int(s, 16)}) assert_array_equal(data, [33, 66]) def test_dtype_with_object(self): # Test using an explicit dtype with an object data = """ 1; 2001-01-01 2; 2002-01-31 """ ndtype = [('idx', int), ('code', np.object)] func = lambda s: strptime(s.strip(), "%Y-%m-%d") converters = {1: func} test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, converters=converters) control = np.array( [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], dtype=ndtype) assert_equal(test, control) def test_uint64_type(self): tgt = (9223372043271415339, 9223372043271415853) c = TextIO() c.write("%s %s" % tgt) c.seek(0) res = np.loadtxt(c, dtype=np.uint64) assert_equal(res, tgt) def test_int64_type(self): tgt = (-9223372036854775807, 9223372036854775807) c = TextIO() c.write("%s %s" % tgt) c.seek(0) res = np.loadtxt(c, dtype=np.int64) assert_equal(res, tgt) def test_from_float_hex(self): # IEEE doubles and floats only, otherwise the float32 # conversion may fail. tgt = np.logspace(-10, 10, 5).astype(np.float32) tgt = np.hstack((tgt, -tgt)).astype(np.float) inp = '\n'.join(map(float.hex, tgt)) c = TextIO() c.write(inp) for dt in [np.float, np.float32]: c.seek(0) res = np.loadtxt(c, dtype=dt) assert_equal(res, tgt, err_msg="%s" % dt) def test_from_complex(self): tgt = (complex(1, 1), complex(1, -1)) c = TextIO() c.write("%s %s" % tgt) c.seek(0) res = np.loadtxt(c, dtype=np.complex) assert_equal(res, tgt) def test_universal_newline(self): with temppath() as name: with open(name, 'w') as f: f.write('1 21\r3 42\r') data = np.loadtxt(name) assert_array_equal(data, [[1, 21], [3, 42]]) def test_empty_field_after_tab(self): c = TextIO() c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') c.seek(0) dt = {'names': ('x', 'y', 'z', 'comment'), 'formats': ('<i4', '<i4', '<f4', '|S8')} x = np.loadtxt(c, dtype=dt, delimiter='\t') a = np.array([b'start ', b' ', b'']) assert_array_equal(x['comment'], a) def test_structure_unpack(self): txt = TextIO("M 21 72\nF 35 58") dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')} a, b, c = np.loadtxt(txt, dtype=dt, unpack=True) assert_(a.dtype.str == '|S1') assert_(b.dtype.str == '<i4') assert_(c.dtype.str == '<f4') assert_array_equal(a, np.array([b'M', b'F'])) assert_array_equal(b, np.array([21, 35])) assert_array_equal(c, np.array([72., 58.])) def test_ndmin_keyword(self): c = TextIO() c.write('1,2,3\n4,5,6') c.seek(0) assert_raises(ValueError, np.loadtxt, c, ndmin=3) c.seek(0) assert_raises(ValueError, np.loadtxt, c, ndmin=1.5) c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1) a = np.array([[1, 2, 3], [4, 5, 6]]) assert_array_equal(x, a) d = TextIO() d.write('0,1,2') d.seek(0) x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2) assert_(x.shape == (1, 3)) d.seek(0) x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1) assert_(x.shape == (3,)) d.seek(0) x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0) assert_(x.shape == (3,)) e = TextIO() e.write('0\n1\n2') e.seek(0) x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2) assert_(x.shape == (3, 1)) e.seek(0) x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1) assert_(x.shape == (3,)) e.seek(0) x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0) assert_(x.shape == (3,)) # Test ndmin kw with empty file. with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="loadtxt: Empty input file:") f = TextIO() assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,)) assert_(np.loadtxt(f, ndmin=1).shape == (0,)) def test_generator_source(self): def count(): for i in range(10): yield "%d" % i res = np.loadtxt(count()) assert_array_equal(res, np.arange(10)) def test_bad_line(self): c = TextIO() c.write('1 2 3\n4 5 6\n2 3') c.seek(0) # Check for exception and that exception contains line number assert_raises_regex(ValueError, "3", np.loadtxt, c) def test_none_as_string(self): # gh-5155, None should work as string when format demands it c = TextIO() c.write('100,foo,200\n300,None,400') c.seek(0) dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)]) np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed class Testfromregex(TestCase): # np.fromregex expects files opened in binary mode. def test_record(self): c = TextIO() c.write('1.312 foo\n1.534 bar\n4.444 qux') c.seek(0) dt = [('num', np.float64), ('val', 'S3')] x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], dtype=dt) assert_array_equal(x, a) def test_record_2(self): c = TextIO() c.write('1312 foo\n1534 bar\n4444 qux') c.seek(0) dt = [('num', np.int32), ('val', 'S3')] x = np.fromregex(c, r"(\d+)\s+(...)", dt) a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], dtype=dt) assert_array_equal(x, a) def test_record_3(self): c = TextIO() c.write('1312 foo\n1534 bar\n4444 qux') c.seek(0) dt = [('num', np.float64)] x = np.fromregex(c, r"(\d+)\s+...", dt) a = np.array([(1312,), (1534,), (4444,)], dtype=dt) assert_array_equal(x, a) #####-------------------------------------------------------------------------- class TestFromTxt(TestCase): # def test_record(self): # Test w/ explicit dtype data = TextIO('1 2\n3 4') test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) assert_equal(test, control) # data = TextIO('M 64.0 75.0\nF 25.0 60.0') descriptor = {'names': ('gender', 'age', 'weight'), 'formats': ('S1', 'i4', 'f4')} control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], dtype=descriptor) test = np.ndfromtxt(data, dtype=descriptor) assert_equal(test, control) def test_array(self): # Test outputing a standard ndarray data = TextIO('1 2\n3 4') control = np.array([[1, 2], [3, 4]], dtype=int) test = np.ndfromtxt(data, dtype=int) assert_array_equal(test, control) # data.seek(0) control = np.array([[1, 2], [3, 4]], dtype=float) test = np.loadtxt(data, dtype=float) assert_array_equal(test, control) def test_1D(self): # Test squeezing to 1D control = np.array([1, 2, 3, 4], int) # data = TextIO('1\n2\n3\n4\n') test = np.ndfromtxt(data, dtype=int) assert_array_equal(test, control) # data = TextIO('1,2,3,4\n') test = np.ndfromtxt(data, dtype=int, delimiter=',') assert_array_equal(test, control) def test_comments(self): # Test the stripping of comments control = np.array([1, 2, 3, 5], int) # Comment on its own line data = TextIO('# comment\n1,2,3,5\n') test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') assert_equal(test, control) # Comment at the end of a line data = TextIO('1,2,3,5# comment\n') test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') assert_equal(test, control) def test_skiprows(self): # Test row skipping control = np.array([1, 2, 3, 5], int) kwargs = dict(dtype=int, delimiter=',') # data = TextIO('comment\n1,2,3,5\n') test = np.ndfromtxt(data, skip_header=1, **kwargs) assert_equal(test, control) # data = TextIO('# comment\n1,2,3,5\n') test = np.loadtxt(data, skiprows=1, **kwargs) assert_equal(test, control) def test_skip_footer(self): data = ["# %i" % i for i in range(1, 6)] data.append("A, B, C") data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) data[-1] = "99,99" kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], dtype=[(_, float) for _ in "ABC"]) assert_equal(test, ctrl) def test_skip_footer_with_invalid(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' # Footer too small to get rid of all invalid values assert_raises(ValueError, np.genfromtxt, TextIO(basestr), skip_footer=1) # except ValueError: # pass a = np.genfromtxt( TextIO(basestr), skip_footer=1, invalid_raise=False) assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) # a = np.genfromtxt(TextIO(basestr), skip_footer=3) assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) # basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' a = np.genfromtxt( TextIO(basestr), skip_footer=1, invalid_raise=False) assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) a = np.genfromtxt( TextIO(basestr), skip_footer=3, invalid_raise=False) assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) def test_header(self): # Test retrieving a header data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') test = np.ndfromtxt(data, dtype=None, names=True) control = {'gender': np.array([b'M', b'F']), 'age': np.array([64.0, 25.0]), 'weight': np.array([75.0, 60.0])} assert_equal(test['gender'], control['gender']) assert_equal(test['age'], control['age']) assert_equal(test['weight'], control['weight']) def test_auto_dtype(self): # Test the automatic definition of the output dtype data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') test = np.ndfromtxt(data, dtype=None) control = [np.array([b'A', b'BCD']), np.array([64, 25]), np.array([75.0, 60.0]), np.array([3 + 4j, 5 + 6j]), np.array([True, False]), ] assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) for (i, ctrl) in enumerate(control): assert_equal(test['f%i' % i], ctrl) def test_auto_dtype_uniform(self): # Tests whether the output dtype can be uniformized data = TextIO('1 2 3 4\n5 6 7 8\n') test = np.ndfromtxt(data, dtype=None) control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) assert_equal(test, control) def test_fancy_dtype(self): # Check that a nested dtype isn't MIA data = TextIO('1,2,3.0\n4,5,6.0\n') fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',') control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) assert_equal(test, control) def test_names_overwrite(self): # Test overwriting the names of the dtype descriptor = {'names': ('g', 'a', 'w'), 'formats': ('S1', 'i4', 'f4')} data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') names = ('gender', 'age', 'weight') test = np.ndfromtxt(data, dtype=descriptor, names=names) descriptor['names'] = names control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], dtype=descriptor) assert_equal(test, control) def test_commented_header(self): # Check that names can be retrieved even if the line is commented out. data = TextIO(""" #gender age weight M 21 72.100000 F 35 58.330000 M 33 21.99 """) # The # is part of the first name and should be deleted automatically. test = np.genfromtxt(data, names=True, dtype=None) ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) assert_equal(test, ctrl) # Ditto, but we should get rid of the first element data = TextIO(b""" # gender age weight M 21 72.100000 F 35 58.330000 M 33 21.99 """) test = np.genfromtxt(data, names=True, dtype=None) assert_equal(test, ctrl) def test_autonames_and_usecols(self): # Tests names and usecols data = TextIO('A B C D\n aaaa 121 45 9.1') test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, dtype=None) control = np.array(('aaaa', 45, 9.1), dtype=[('A', '|S4'), ('C', int), ('D', float)]) assert_equal(test, control) def test_converters_with_usecols(self): # Test the combination user-defined converters and usecol data = TextIO('1,2,3,,5\n6,7,8,9,10\n') test = np.ndfromtxt(data, dtype=int, delimiter=',', converters={3: lambda s: int(s or - 999)}, usecols=(1, 3,)) control = np.array([[2, -999], [7, 9]], int) assert_equal(test, control) def test_converters_with_usecols_and_names(self): # Tests names and usecols data = TextIO('A B C D\n aaaa 121 45 9.1') test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, dtype=None, converters={'C': lambda s: 2 * int(s)}) control = np.array(('aaaa', 90, 9.1), dtype=[('A', '|S4'), ('C', int), ('D', float)]) assert_equal(test, control) def test_converters_cornercases(self): # Test the conversion to datetime. converter = { 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} data = TextIO('2009-02-03 12:00:00Z, 72214.0') test = np.ndfromtxt(data, delimiter=',', dtype=None, names=['date', 'stid'], converters=converter) control = np.array((datetime(2009, 2, 3), 72214.), dtype=[('date', np.object_), ('stid', float)]) assert_equal(test, control) def test_converters_cornercases2(self): # Test the conversion to datetime64. converter = { 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} data = TextIO('2009-02-03 12:00:00Z, 72214.0') test = np.ndfromtxt(data, delimiter=',', dtype=None, names=['date', 'stid'], converters=converter) control = np.array((datetime(2009, 2, 3), 72214.), dtype=[('date', 'datetime64[us]'), ('stid', float)]) assert_equal(test, control) def test_unused_converter(self): # Test whether unused converters are forgotten data = TextIO("1 21\n 3 42\n") test = np.ndfromtxt(data, usecols=(1,), converters={0: lambda s: int(s, 16)}) assert_equal(test, [21, 42]) # data.seek(0) test = np.ndfromtxt(data, usecols=(1,), converters={1: lambda s: int(s, 16)}) assert_equal(test, [33, 66]) def test_invalid_converter(self): strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or (b'r' not in x.lower() and x.strip() or 0.0)) strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or (b'%' not in x.lower() and x.strip() or 0.0)) s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" "D02N03,10/10/2004,R 1,,7,145.55") kwargs = dict( converters={2: strip_per, 3: strip_rand}, delimiter=",", dtype=None) assert_raises(ConverterError, np.genfromtxt, s, **kwargs) def test_tricky_converter_bug1666(self): # Test some corner cases s = TextIO('q1,2\nq3,4') cnv = lambda s: float(s[1:]) test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) control = np.array([[1., 2.], [3., 4.]]) assert_equal(test, control) def test_dtype_with_converters(self): dstr = "2009; 23; 46" test = np.ndfromtxt(TextIO(dstr,), delimiter=";", dtype=float, converters={0: bytes}) control = np.array([('2009', 23., 46)], dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) assert_equal(test, control) test = np.ndfromtxt(TextIO(dstr,), delimiter=";", dtype=float, converters={0: float}) control = np.array([2009., 23., 46],) assert_equal(test, control) def test_dtype_with_converters_and_usecols(self): dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', names=None, converters=conv) control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp) assert_equal(test, control) dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')] test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', usecols=(0,1,3), names=None, converters=conv) control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp) assert_equal(test, control) def test_dtype_with_object(self): # Test using an explicit dtype with an object data = """ 1; 2001-01-01 2; 2002-01-31 """ ndtype = [('idx', int), ('code', np.object)] func = lambda s: strptime(s.strip(), "%Y-%m-%d") converters = {1: func} test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, converters=converters) control = np.array( [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], dtype=ndtype) assert_equal(test, control) ndtype = [('nest', [('idx', int), ('code', np.object)])] try: test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, converters=converters) except NotImplementedError: pass else: errmsg = "Nested dtype involving objects should be supported." raise AssertionError(errmsg) def test_userconverters_with_explicit_dtype(self): # Test user_converters w/ explicit (standard) dtype data = TextIO('skip,skip,2001-01-01,1.0,skip') test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, usecols=(2, 3), converters={2: bytes}) control = np.array([('2001-01-01', 1.)], dtype=[('', '|S10'), ('', float)]) assert_equal(test, control) def test_spacedelimiter(self): # Test space delimiter data = TextIO("1 2 3 4 5\n6 7 8 9 10") test = np.ndfromtxt(data) control = np.array([[1., 2., 3., 4., 5.], [6., 7., 8., 9., 10.]]) assert_equal(test, control) def test_integer_delimiter(self): # Test using an integer for delimiter data = " 1 2 3\n 4 5 67\n890123 4" test = np.genfromtxt(TextIO(data), delimiter=3) control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) assert_equal(test, control) def test_missing(self): data = TextIO('1,2,3,,5\n') test = np.ndfromtxt(data, dtype=int, delimiter=',', converters={3: lambda s: int(s or - 999)}) control = np.array([1, 2, 3, -999, 5], int) assert_equal(test, control) def test_missing_with_tabs(self): # Test w/ a delimiter tab txt = "1\t2\t3\n\t2\t\n1\t\t3" test = np.genfromtxt(TextIO(txt), delimiter="\t", usemask=True,) ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) assert_equal(test.data, ctrl_d) assert_equal(test.mask, ctrl_m) def test_usecols(self): # Test the selection of columns # Select 1 column control = np.array([[1, 2], [3, 4]], float) data = TextIO() np.savetxt(data, control) data.seek(0) test = np.ndfromtxt(data, dtype=float, usecols=(1,)) assert_equal(test, control[:, 1]) # control = np.array([[1, 2, 3], [3, 4, 5]], float) data = TextIO() np.savetxt(data, control) data.seek(0) test = np.ndfromtxt(data, dtype=float, usecols=(1, 2)) assert_equal(test, control[:, 1:]) # Testing with arrays instead of tuples. data.seek(0) test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2])) assert_equal(test, control[:, 1:]) def test_usecols_as_css(self): # Test giving usecols with a comma-separated string data = "1 2 3\n4 5 6" test = np.genfromtxt(TextIO(data), names="a, b, c", usecols="a, c") ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) assert_equal(test, ctrl) def test_usecols_with_structured_dtype(self): # Test usecols with an explicit structured dtype data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") names = ['stid', 'temp'] dtypes = ['S4', 'f8'] test = np.ndfromtxt( data, usecols=(0, 2), dtype=list(zip(names, dtypes))) assert_equal(test['stid'], [b"JOE", b"BOB"]) assert_equal(test['temp'], [25.3, 27.9]) def test_usecols_with_integer(self): # Test usecols with an integer test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) assert_equal(test, np.array([1., 4.])) def test_usecols_with_named_columns(self): # Test usecols with named columns ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) data = "1 2 3\n4 5 6" kwargs = dict(names="a, b, c") test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) assert_equal(test, ctrl) test = np.genfromtxt(TextIO(data), usecols=('a', 'c'), **kwargs) assert_equal(test, ctrl) def test_empty_file(self): # Test that an empty file raises the proper warning. with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="genfromtxt: Empty input file:") data = TextIO() test = np.genfromtxt(data) assert_equal(test, np.array([])) def test_fancy_dtype_alt(self): # Check that a nested dtype isn't MIA data = TextIO('1,2,3.0\n4,5,6.0\n') fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) test = np.mafromtxt(data, dtype=fancydtype, delimiter=',') control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) assert_equal(test, control) def test_shaped_dtype(self): c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ('block', int, (2, 3))]) x = np.ndfromtxt(c, dtype=dt) a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], dtype=dt) assert_array_equal(x, a) def test_withmissing(self): data = TextIO('A,B\n0,1\n2,N/A') kwargs = dict(delimiter=",", missing_values="N/A", names=True) test = np.mafromtxt(data, dtype=None, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], dtype=[('A', np.int), ('B', np.int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) # data.seek(0) test = np.mafromtxt(data, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], dtype=[('A', np.float), ('B', np.float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_user_missing_values(self): data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" basekwargs = dict(dtype=None, delimiter=",", names=True,) mdtype = [('A', int), ('B', float), ('C', complex)] # test = np.mafromtxt(TextIO(data), missing_values="N/A", **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], dtype=mdtype) assert_equal(test, control) # basekwargs['dtype'] = mdtype test = np.mafromtxt(TextIO(data), missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], dtype=mdtype) assert_equal(test, control) # test = np.mafromtxt(TextIO(data), missing_values={0: -9, 'B': -99, 'C': -999j}, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], dtype=mdtype) assert_equal(test, control) def test_user_filling_values(self): # Test with missing and filling values ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) data = "N/A, 2, 3\n4, ,???" kwargs = dict(delimiter=",", dtype=int, names="a,b,c", missing_values={0: "N/A", 'b': " ", 2: "???"}, filling_values={0: 0, 'b': 0, 2: -999}) test = np.genfromtxt(TextIO(data), **kwargs) ctrl = np.array([(0, 2, 3), (4, 0, -999)], dtype=[(_, int) for _ in "abc"]) assert_equal(test, ctrl) # test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) assert_equal(test, ctrl) data2 = "1,2,*,4\n5,*,7,8\n" test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, missing_values="*", filling_values=0) ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) assert_equal(test, ctrl) test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, missing_values="*", filling_values=-1) ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) assert_equal(test, ctrl) def test_withmissing_float(self): data = TextIO('A,B\n0,1.5\n2,-999.00') test = np.mafromtxt(data, dtype=None, delimiter=',', missing_values='-999.0', names=True,) control = ma.array([(0, 1.5), (2, -1.)], mask=[(False, False), (False, True)], dtype=[('A', np.int), ('B', np.float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_with_masked_column_uniform(self): # Test masked column data = TextIO('1 2 3\n4 5 6\n') test = np.genfromtxt(data, dtype=None, missing_values='2,5', usemask=True) control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) assert_equal(test, control) def test_with_masked_column_various(self): # Test masked column data = TextIO('True 2 3\nFalse 5 6\n') test = np.genfromtxt(data, dtype=None, missing_values='2,5', usemask=True) control = ma.array([(1, 2, 3), (0, 5, 6)], mask=[(0, 1, 0), (0, 1, 0)], dtype=[('f0', bool), ('f1', bool), ('f2', int)]) assert_equal(test, control) def test_invalid_raise(self): # Test invalid raise data = ["1, 1, 1, 1, 1"] * 50 for i in range(5): data[10 * i] = "2, 2, 2, 2 2" data.insert(0, "a, b, c, d, e") mdata = TextIO("\n".join(data)) # kwargs = dict(delimiter=",", dtype=None, names=True) # XXX: is there a better way to get the return value of the # callable in assert_warns ? ret = {} def f(_ret={}): _ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs) assert_warns(ConversionWarning, f, _ret=ret) mtest = ret['mtest'] assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # mdata.seek(0) assert_raises(ValueError, np.ndfromtxt, mdata, delimiter=",", names=True) def test_invalid_raise_with_usecols(self): # Test invalid_raise with usecols data = ["1, 1, 1, 1, 1"] * 50 for i in range(5): data[10 * i] = "2, 2, 2, 2 2" data.insert(0, "a, b, c, d, e") mdata = TextIO("\n".join(data)) kwargs = dict(delimiter=",", dtype=None, names=True, invalid_raise=False) # XXX: is there a better way to get the return value of the # callable in assert_warns ? ret = {} def f(_ret={}): _ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs) assert_warns(ConversionWarning, f, _ret=ret) mtest = ret['mtest'] assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # mdata.seek(0) mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs) assert_equal(len(mtest), 50) control = np.ones(50, dtype=[(_, int) for _ in 'ab']) control[[10 * _ for _ in range(5)]] = (2, 2) assert_equal(mtest, control) def test_inconsistent_dtype(self): # Test inconsistent dtype data = ["1, 1, 1, 1, -1.1"] * 50 mdata = TextIO("\n".join(data)) converters = {4: lambda x: "(%s)" % x} kwargs = dict(delimiter=",", converters=converters, dtype=[(_, int) for _ in 'abcde'],) assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) def test_default_field_format(self): # Test default format data = "0, 1, 2.3\n4, 5, 6.7" mtest = np.ndfromtxt(TextIO(data), delimiter=",", dtype=None, defaultfmt="f%02i") ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], dtype=[("f00", int), ("f01", int), ("f02", float)]) assert_equal(mtest, ctrl) def test_single_dtype_wo_names(self): # Test single dtype w/o names data = "0, 1, 2.3\n4, 5, 6.7" mtest = np.ndfromtxt(TextIO(data), delimiter=",", dtype=float, defaultfmt="f%02i") ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) assert_equal(mtest, ctrl) def test_single_dtype_w_explicit_names(self): # Test single dtype w explicit names data = "0, 1, 2.3\n4, 5, 6.7" mtest = np.ndfromtxt(TextIO(data), delimiter=",", dtype=float, names="a, b, c") ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], dtype=[(_, float) for _ in "abc"]) assert_equal(mtest, ctrl) def test_single_dtype_w_implicit_names(self): # Test single dtype w implicit names data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" mtest = np.ndfromtxt(TextIO(data), delimiter=",", dtype=float, names=True) ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], dtype=[(_, float) for _ in "abc"]) assert_equal(mtest, ctrl) def test_easy_structured_dtype(self): # Test easy structured dtype data = "0, 1, 2.3\n4, 5, 6.7" mtest = np.ndfromtxt(TextIO(data), delimiter=",", dtype=(int, float, float), defaultfmt="f_%02i") ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) assert_equal(mtest, ctrl) def test_autostrip(self): # Test autostrip data = "01/01/2003 , 1.3, abcde" kwargs = dict(delimiter=",", dtype=None) mtest = np.ndfromtxt(TextIO(data), **kwargs) ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) assert_equal(mtest, ctrl) mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs) ctrl = np.array([('01/01/2003', 1.3, 'abcde')], dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) assert_equal(mtest, ctrl) def test_replace_space(self): # Test the 'replace_space' option txt = "A.A, B (B), C:C\n1, 2, 3.14" # Test default: replace ' ' by '_' and delete non-alphanum chars test = np.genfromtxt(TextIO(txt), delimiter=",", names=True, dtype=None) ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) assert_equal(test, ctrl) # Test: no replace, no delete test = np.genfromtxt(TextIO(txt), delimiter=",", names=True, dtype=None, replace_space='', deletechars='') ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) assert_equal(test, ctrl) # Test: no delete (spaces are replaced by _) test = np.genfromtxt(TextIO(txt), delimiter=",", names=True, dtype=None, deletechars='') ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) assert_equal(test, ctrl) def test_replace_space_known_dtype(self): # Test the 'replace_space' (and related) options when dtype != None txt = "A.A, B (B), C:C\n1, 2, 3" # Test default: replace ' ' by '_' and delete non-alphanum chars test = np.genfromtxt(TextIO(txt), delimiter=",", names=True, dtype=int) ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) assert_equal(test, ctrl) # Test: no replace, no delete test = np.genfromtxt(TextIO(txt), delimiter=",", names=True, dtype=int, replace_space='', deletechars='') ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) assert_equal(test, ctrl) # Test: no delete (spaces are replaced by _) test = np.genfromtxt(TextIO(txt), delimiter=",", names=True, dtype=int, deletechars='') ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) assert_equal(test, ctrl) def test_incomplete_names(self): # Test w/ incomplete names data = "A,,C\n0,1,2\n3,4,5" kwargs = dict(delimiter=",", names=True) # w/ dtype=None ctrl = np.array([(0, 1, 2), (3, 4, 5)], dtype=[(_, int) for _ in ('A', 'f0', 'C')]) test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs) assert_equal(test, ctrl) # w/ default dtype ctrl = np.array([(0, 1, 2), (3, 4, 5)], dtype=[(_, float) for _ in ('A', 'f0', 'C')]) test = np.ndfromtxt(TextIO(data), **kwargs) def test_names_auto_completion(self): # Make sure that names are properly completed data = "1 2 3\n 4 5 6" test = np.genfromtxt(TextIO(data), dtype=(int, float, int), names="a") ctrl = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('f0', float), ('f1', int)]) assert_equal(test, ctrl) def test_names_with_usecols_bug1636(self): # Make sure we pick up the right names w/ usecols data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" ctrl_names = ("A", "C", "E") test = np.genfromtxt(TextIO(data), dtype=(int, int, int), delimiter=",", usecols=(0, 2, 4), names=True) assert_equal(test.dtype.names, ctrl_names) # test = np.genfromtxt(TextIO(data), dtype=(int, int, int), delimiter=",", usecols=("A", "C", "E"), names=True) assert_equal(test.dtype.names, ctrl_names) # test = np.genfromtxt(TextIO(data), dtype=int, delimiter=",", usecols=("A", "C", "E"), names=True) assert_equal(test.dtype.names, ctrl_names) def test_fixed_width_names(self): # Test fix-width w/ names data = " A B C\n 0 1 2.3\n 45 67 9." kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], dtype=[('A', int), ('B', int), ('C', float)]) test = np.ndfromtxt(TextIO(data), **kwargs) assert_equal(test, ctrl) # kwargs = dict(delimiter=5, names=True, dtype=None) ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], dtype=[('A', int), ('B', int), ('C', float)]) test = np.ndfromtxt(TextIO(data), **kwargs) assert_equal(test, ctrl) def test_filling_values(self): # Test missing values data = b"1, 2, 3\n1, , 5\n0, 6, \n" kwargs = dict(delimiter=",", dtype=None, filling_values=-999) ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) test = np.ndfromtxt(TextIO(data), **kwargs) assert_equal(test, ctrl) def test_comments_is_none(self): # Github issue 329 (None was previously being converted to 'None'). test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), dtype=None, comments=None, delimiter=',') assert_equal(test[1], b'testNonetherestofthedata') test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), dtype=None, comments=None, delimiter=',') assert_equal(test[1], b' testNonetherestofthedata') def test_recfromtxt(self): # data = TextIO('A,B\n0,1\n2,3') kwargs = dict(delimiter=",", missing_values="N/A", names=True) test = np.recfromtxt(data, **kwargs) control = np.array([(0, 1), (2, 3)], dtype=[('A', np.int), ('B', np.int)]) self.assertTrue(isinstance(test, np.recarray)) assert_equal(test, control) # data = TextIO('A,B\n0,1\n2,N/A') test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], dtype=[('A', np.int), ('B', np.int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.A, [0, 2]) def test_recfromcsv(self): # data = TextIO('A,B\n0,1\n2,3') kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) test = np.recfromcsv(data, dtype=None, **kwargs) control = np.array([(0, 1), (2, 3)], dtype=[('A', np.int), ('B', np.int)]) self.assertTrue(isinstance(test, np.recarray)) assert_equal(test, control) # data = TextIO('A,B\n0,1\n2,N/A') test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], dtype=[('A', np.int), ('B', np.int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.A, [0, 2]) # data = TextIO('A,B\n0,1\n2,3') test = np.recfromcsv(data, missing_values='N/A',) control = np.array([(0, 1), (2, 3)], dtype=[('a', np.int), ('b', np.int)]) self.assertTrue(isinstance(test, np.recarray)) assert_equal(test, control) # data = TextIO('A,B\n0,1\n2,3') dtype = [('a', np.int), ('b', np.float)] test = np.recfromcsv(data, missing_values='N/A', dtype=dtype) control = np.array([(0, 1), (2, 3)], dtype=dtype) self.assertTrue(isinstance(test, np.recarray)) assert_equal(test, control) def test_max_rows(self): # Test the `max_rows` keyword argument. data = '1 2\n3 4\n5 6\n7 8\n9 10\n' txt = TextIO(data) a1 = np.genfromtxt(txt, max_rows=3) a2 = np.genfromtxt(txt) assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) assert_equal(a2, [[7, 8], [9, 10]]) # max_rows must be at least 1. assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) # An input with several invalid rows. data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n' test = np.genfromtxt(TextIO(data), max_rows=2) control = np.array([[1., 1.], [2., 2.]]) assert_equal(test, control) # Test keywords conflict assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1, max_rows=4) # Test with invalid value assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) # Test with invalid not raise with warnings.catch_warnings(): warnings.filterwarnings("ignore") test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) assert_equal(test, control) test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) assert_equal(test, control) # Structured array with field names. data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n' # Test with header, names and comments txt = TextIO(data) test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True) control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)], dtype=[('c', '<f8'), ('d', '<f8')]) assert_equal(test, control) # To continue reading the same "file", don't use skip_header or # names, and use the previously determined dtype. test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype) control = np.array([(4.0, 4.0), (5.0, 5.0)], dtype=[('c', '<f8'), ('d', '<f8')]) assert_equal(test, control) def test_gft_using_filename(self): # Test that we can load data from a filename as well as a file # object tgt = np.arange(6).reshape((2, 3)) if sys.version_info[0] >= 3: # python 3k is known to fail for '\r' linesep = ('\n', '\r\n') else: linesep = ('\n', '\r\n', '\r') for sep in linesep: data = '0 1 2' + sep + '3 4 5' with temppath() as name: with open(name, 'w') as f: f.write(data) res = np.genfromtxt(name) assert_array_equal(res, tgt) def test_gft_using_generator(self): # gft doesn't work with unicode. def count(): for i in range(10): yield asbytes("%d" % i) res = np.genfromtxt(count()) assert_array_equal(res, np.arange(10)) def test_auto_dtype_largeint(self): # Regression test for numpy/numpy#5635 whereby large integers could # cause OverflowErrors. # Test the automatic definition of the output dtype # # 2**66 = 73786976294838206464 => should convert to float # 2**34 = 17179869184 => should convert to int64 # 2**10 = 1024 => should convert to int (int32 on 32-bit systems, # int64 on 64-bit systems) data = TextIO('73786976294838206464 17179869184 1024') test = np.ndfromtxt(data, dtype=None) assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) assert_(test.dtype['f0'] == np.float) assert_(test.dtype['f1'] == np.int64) assert_(test.dtype['f2'] == np.integer) assert_allclose(test['f0'], 73786976294838206464.) assert_equal(test['f1'], 17179869184) assert_equal(test['f2'], 1024) def test_gzip_load(): a = np.random.random((5, 5)) s = BytesIO() f = gzip.GzipFile(fileobj=s, mode="w") np.save(f, a) f.close() s.seek(0) f = gzip.GzipFile(fileobj=s, mode="r") assert_array_equal(np.load(f), a) def test_gzip_loadtxt(): # Thanks to another windows brokeness, we can't use # NamedTemporaryFile: a file created from this function cannot be # reopened by another open call. So we first put the gzipped string # of the test reference array, write it to a securely opened file, # which is then read from by the loadtxt function s = BytesIO() g = gzip.GzipFile(fileobj=s, mode='w') g.write(b'1 2 3\n') g.close() s.seek(0) with temppath(suffix='.gz') as name: with open(name, 'wb') as f: f.write(s.read()) res = np.loadtxt(name) s.close() assert_array_equal(res, [1, 2, 3]) def test_gzip_loadtxt_from_string(): s = BytesIO() f = gzip.GzipFile(fileobj=s, mode="w") f.write(b'1 2 3\n') f.close() s.seek(0) f = gzip.GzipFile(fileobj=s, mode="r") assert_array_equal(np.loadtxt(f), [1, 2, 3]) def test_npzfile_dict(): s = BytesIO() x = np.zeros((3, 3)) y = np.zeros((3, 3)) np.savez(s, x=x, y=y) s.seek(0) z = np.load(s) assert_('x' in z) assert_('y' in z) assert_('x' in z.keys()) assert_('y' in z.keys()) for f, a in z.items(): assert_(f in ['x', 'y']) assert_equal(a.shape, (3, 3)) assert_(len(z.items()) == 2) for f in z: assert_(f in ['x', 'y']) assert_('x' in z.keys()) def test_load_refcount(): # Check that objects returned by np.load are directly freed based on # their refcount, rather than needing the gc to collect them. f = BytesIO() np.savez(f, [1, 2, 3]) f.seek(0) assert_(gc.isenabled()) gc.disable() try: gc.collect() np.load(f) # gc.collect returns the number of unreachable objects in cycles that # were found -- we are checking that no cycles were created by np.load n_objects_in_cycles = gc.collect() finally: gc.enable() assert_equal(n_objects_in_cycles, 0) if __name__ == "__main__": run_module_suite()
mit
pebble/pebble-tool
pebble_tool/commands/sdk/project/debug.py
1
9277
from __future__ import absolute_import, print_function, division __author__ = 'katharine' from six import iteritems import collections import os import signal import subprocess from libpebble2.exceptions import TimeoutError from libpebble2.protocol.apps import AppRunState, AppRunStateRequest, AppRunStateStart from pebble_tool.commands.base import PebbleCommand from pebble_tool.commands.install import ToolAppInstaller from pebble_tool.sdk import sdk_manager, add_tools_to_path from pebble_tool.sdk.emulator import ManagedEmulatorTransport from pebble_tool.sdk.project import PebbleProject from pebble_tool.exceptions import ToolError class GdbCommand(PebbleCommand): """Connects a debugger to the current app. Only works in the emulator.""" command = 'gdb' valid_connections = {'emulator'} @staticmethod def _find_app_section_offsets(app_elf_path): SectionRow = collections.namedtuple( 'SectionRow', 'index name size vma lma file_offset align flags') info = subprocess.check_output( ['arm-none-eabi-objdump', '--headers', '--wide', app_elf_path]).decode('utf-8').split('\n')[5:] sections = [SectionRow._make(section_string.split(None, 7)) for section_string in info if section_string] offsets = {section.name: int(section.vma, 16) for section in sections if 'ALLOC' in section.flags} return offsets @staticmethod def _find_legacy_app_load_offset(fw_elf, kind): """Use readelf to find the app/worker load offset in a legacy 3.x firmware debugging symbols ELF where GDB is unable to read the symbols itself. """ elf_sections = subprocess.check_output(["arm-none-eabi-readelf", "-W", "-s", fw_elf]) # Figure out where we load the app into firmware memory for line in elf_sections.split(b'\n'): if b'__{}_flash_load_start__'.format(kind) in line: return int(line.split()[1], 16) else: raise ToolError("Couldn't find the {} address offset.".format(kind)) def _get_symbol_command(self, elf, base_addr_expr): offsets = self._find_app_section_offsets(elf) command = ['add-symbol-file', '"{}"'.format(elf), '{base_addr}+{text:#x}'.format( base_addr=base_addr_expr, text=offsets['.text'])] command += ['-s {section} {base_addr}+{offset:#x}' .format(section=section, offset=offset, base_addr=base_addr_expr) for section, offset in iteritems(offsets) if section != '.text'] return ' '.join(command) def _ensure_correct_app(self, try_install=True): project = PebbleProject() if project.project_type != 'native': raise ToolError("Only native apps can be debugged using gdb.") current_app_uuid = self.pebble.send_and_read(AppRunState(data=AppRunStateRequest()), AppRunState).data.uuid if current_app_uuid != project.uuid: print("Launching {}...".format(project.long_name)) # Try launching the app we want. This just does nothing if the app doesn't exist. # Edge case: the app exists in blobdb but isn't installed. This shouldn't come up with the pebble tool. queue = self.pebble.get_endpoint_queue(AppRunState) try: self.pebble.send_packet(AppRunState(data=AppRunStateStart(uuid=project.uuid))) while True: packet = queue.get(timeout=0.5) if isinstance(packet.data, AppRunStateStart) and packet.data.uuid == project.uuid: break except TimeoutError: if try_install: print("App did not launch. Trying to install it...") try: ToolAppInstaller(self.pebble).install() except IOError: raise ToolError("The app to debug must be built and installed on the watch.") self._ensure_correct_app(try_install=False) else: raise ToolError("The app to debug must be running on the watch to start gdb.") finally: queue.close() def __call__(self, args): super(GdbCommand, self).__call__(args) # We poke around in the ManagedEmulatorTransport, so it's important that we actually have one. # Just asserting is okay because this should already be enforced by valid_connections. assert isinstance(self.pebble.transport, ManagedEmulatorTransport) self._ensure_correct_app() add_tools_to_path() platform = self.pebble.transport.platform sdk_version = self.pebble.transport.version gdb_port = self.pebble.transport.qemu_gdb_port if gdb_port is None: raise ToolError("The emulator does not have gdb support. Try killing and re-running it.") sdk_root = sdk_manager.path_for_sdk(sdk_version) self._fw_elf = os.path.join(sdk_root, 'pebble', platform, 'qemu', '{}_sdk_debug.elf'.format(platform)) if not os.path.exists(self._fw_elf): raise ToolError("SDK {} does not support app debugging. You need at least SDK 3.10.".format(sdk_version)) app_elf_path = os.path.join(os.getcwd(), 'build', platform, 'pebble-app.elf') if not os.path.exists(app_elf_path): raise ToolError("No app debugging information available. " "You must be in a project directory and have built the app.") if self.pebble.firmware_version.major >= 4: # Type information for symbols is not currently written into the # debugging symbols generated by fw_elf_obfuscate.py. We must # explicitly tell GDB what type the symbols are so that their values # can be read. app_load_address = '*(void**)&g_app_load_address' worker_load_address = '*(void**)&g_worker_load_address' else: # The version of fw_elf_obfuscate.py which generated the debugging # symbol files for 3.x SDKs wrote out the symbol information for # variables in a way that caused them to be unavailable to GDB. # We have to use readelf to work around that and get the symbol # addresses. app_load_address = '(void*){:#x}'.format( self._find_legacy_app_load_offset(self._fw_elf, 'app')) worker_load_address = '(void*){:#x}'.format( self._find_legacy_app_load_offset(self._fw_elf, 'worker')) gdb_commands = [ "set charset US-ASCII", # Avoid a bug in the ancient version of libiconv apple ships. "target remote :{}".format(gdb_port), "set confirm off", self._get_symbol_command(app_elf_path, app_load_address) ] # Optionally add the worker symbols, if any exist. worker_elf_path = os.path.join(os.getcwd(), 'build', platform, 'pebble-worker.elf') if os.path.exists(worker_elf_path): gdb_commands.append(self._get_symbol_command(worker_elf_path, worker_load_address)) gdb_commands.extend([ "set confirm on", "break app_crashed", # app crashes (as of FW 3.10) go through this symbol for our convenience. 'echo \nPress ctrl-D or type \'quit\' to exit.\n', 'echo Try `pebble gdb --help` for a short cheat sheet.\n', ]) gdb_args = ['arm-none-eabi-gdb', self._fw_elf, '-q'] + ['--ex={}'.format(x) for x in gdb_commands] # Ignore SIGINT, or we'll die every time the user tries to pause execution. signal.signal(signal.SIGINT, signal.SIG_IGN) subprocess.call(gdb_args) epilog = """ gdb cheat sheet: ctrl-C Pause app execution. ctrl-D, quit Quit gdb. continue, c Continue app execution. break, b Set a breakpoint. This can be either a symbol or a position: - `b show_train_info` to break when entering a function. - `b stop_info.c:45` to break on line 45 of stop_info.c. step, s Step forward one line. next, n Step *over* the current line, avoiding stopping for any functions it calls into. finish Run forward until exiting the current stack frame. backtrace, bt Print out the current call stack. p [expression] Print the result of evaluating the given expression. set var x = foo Set the value of the variable x to foo. info args Show the values of arguments to the current function. info locals Show local variables in the current frame. bt full Show all local variables in all stack frames. info break List break points (#1 is <app_crashed>, and is inserted by the pebble tool). delete [n] Delete breakpoint #n. """
mit
linjoahow/w17test_1
static/Brython3.1.1-20150328-091302/Lib/getopt.py
845
7488
"""Parser for command line options. This module helps scripts to parse the command line arguments in sys.argv. It supports the same conventions as the Unix getopt() function (including the special meanings of arguments of the form `-' and `--'). Long options similar to those supported by GNU software may be used as well via an optional third argument. This module provides two functions and an exception: getopt() -- Parse command line options gnu_getopt() -- Like getopt(), but allow option and non-option arguments to be intermixed. GetoptError -- exception (class) raised with 'opt' attribute, which is the option involved with the exception. """ # Long option support added by Lars Wirzenius <liw@iki.fi>. # # Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions # to class-based exceptions. # # Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt(). # # TODO for gnu_getopt(): # # - GNU getopt_long_only mechanism # - allow the caller to specify ordering # - RETURN_IN_ORDER option # - GNU extension with '-' as first character of option string # - optional arguments, specified by double colons # - a option string with a W followed by semicolon should # treat "-W foo" as "--foo" __all__ = ["GetoptError","error","getopt","gnu_getopt"] import os try: from gettext import gettext as _ except ImportError: # Bootstrapping Python: gettext's dependencies not built yet def _(s): return s class GetoptError(Exception): opt = '' msg = '' def __init__(self, msg, opt=''): self.msg = msg self.opt = opt Exception.__init__(self, msg, opt) def __str__(self): return self.msg error = GetoptError # backward compatibility def getopt(args, shortopts, longopts = []): """getopt(args, options[, long_options]) -> opts, args Parses command line options and parameter list. args is the argument list to be parsed, without the leading reference to the running program. Typically, this means "sys.argv[1:]". shortopts is the string of option letters that the script wants to recognize, with options that require an argument followed by a colon (i.e., the same format that Unix getopt() uses). If specified, longopts is a list of strings with the names of the long options which should be supported. The leading '--' characters should not be included in the option name. Options which require an argument should be followed by an equal sign ('='). The return value consists of two elements: the first is a list of (option, value) pairs; the second is the list of program arguments left after the option list was stripped (this is a trailing slice of the first argument). Each option-and-value pair returned has the option as its first element, prefixed with a hyphen (e.g., '-x'), and the option argument as its second element, or an empty string if the option has no argument. The options occur in the list in the same order in which they were found, thus allowing multiple occurrences. Long and short options may be mixed. """ opts = [] if type(longopts) == type(""): longopts = [longopts] else: longopts = list(longopts) while args and args[0].startswith('-') and args[0] != '-': if args[0] == '--': args = args[1:] break if args[0].startswith('--'): opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) else: opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) return opts, args def gnu_getopt(args, shortopts, longopts = []): """getopt(args, options[, long_options]) -> opts, args This function works like getopt(), except that GNU style scanning mode is used by default. This means that option and non-option arguments may be intermixed. The getopt() function stops processing options as soon as a non-option argument is encountered. If the first character of the option string is `+', or if the environment variable POSIXLY_CORRECT is set, then option processing stops as soon as a non-option argument is encountered. """ opts = [] prog_args = [] if isinstance(longopts, str): longopts = [longopts] else: longopts = list(longopts) # Allow options after non-option arguments? if shortopts.startswith('+'): shortopts = shortopts[1:] all_options_first = True elif os.environ.get("POSIXLY_CORRECT"): all_options_first = True else: all_options_first = False while args: if args[0] == '--': prog_args += args[1:] break if args[0][:2] == '--': opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) elif args[0][:1] == '-' and args[0] != '-': opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) else: if all_options_first: prog_args += args break else: prog_args.append(args[0]) args = args[1:] return opts, prog_args def do_longs(opts, opt, longopts, args): try: i = opt.index('=') except ValueError: optarg = None else: opt, optarg = opt[:i], opt[i+1:] has_arg, opt = long_has_args(opt, longopts) if has_arg: if optarg is None: if not args: raise GetoptError(_('option --%s requires argument') % opt, opt) optarg, args = args[0], args[1:] elif optarg is not None: raise GetoptError(_('option --%s must not have an argument') % opt, opt) opts.append(('--' + opt, optarg or '')) return opts, args # Return: # has_arg? # full option name def long_has_args(opt, longopts): possibilities = [o for o in longopts if o.startswith(opt)] if not possibilities: raise GetoptError(_('option --%s not recognized') % opt, opt) # Is there an exact match? if opt in possibilities: return False, opt elif opt + '=' in possibilities: return True, opt # No exact match, so better be unique. if len(possibilities) > 1: # XXX since possibilities contains all valid continuations, might be # nice to work them into the error msg raise GetoptError(_('option --%s not a unique prefix') % opt, opt) assert len(possibilities) == 1 unique_match = possibilities[0] has_arg = unique_match.endswith('=') if has_arg: unique_match = unique_match[:-1] return has_arg, unique_match def do_shorts(opts, optstring, shortopts, args): while optstring != '': opt, optstring = optstring[0], optstring[1:] if short_has_arg(opt, shortopts): if optstring == '': if not args: raise GetoptError(_('option -%s requires argument') % opt, opt) optstring, args = args[0], args[1:] optarg, optstring = optstring, '' else: optarg = '' opts.append(('-' + opt, optarg)) return opts, args def short_has_arg(opt, shortopts): for i in range(len(shortopts)): if opt == shortopts[i] != ':': return shortopts.startswith(':', i+1) raise GetoptError(_('option -%s not recognized') % opt, opt) if __name__ == '__main__': import sys print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
gpl-3.0
alanjw/GreenOpenERP-Win-X86
python/Lib/Crypto/Util/number.py
127
95488
# # number.py : Number-theoretic functions # # Part of the Python Cryptography Toolkit # # Written by Andrew M. Kuchling, Barry A. Warsaw, and others # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== # __revision__ = "$Id$" from Crypto.pct_warnings import GetRandomNumber_DeprecationWarning, PowmInsecureWarning from warnings import warn as _warn import math import sys from Crypto.Util.py3compat import * bignum = long try: from Crypto.PublicKey import _fastmath except ImportError: # For production, we are going to let import issues due to gmp/mpir shared # libraries not loading slide silently and use slowmath. If you'd rather # see an exception raised if _fastmath exists but cannot be imported, # uncomment the below # # from distutils.sysconfig import get_config_var # import inspect, os # _fm_path = os.path.normpath(os.path.dirname(os.path.abspath( # inspect.getfile(inspect.currentframe()))) # +"/../../PublicKey/_fastmath"+get_config_var("SO")) # if os.path.exists(_fm_path): # raise ImportError("While the _fastmath module exists, importing "+ # "it failed. This may point to the gmp or mpir shared library "+ # "not being in the path. _fastmath was found at "+_fm_path) _fastmath = None # You need libgmp v5 or later to get mpz_powm_sec. Warn if it's not available. if _fastmath is not None and not _fastmath.HAVE_DECL_MPZ_POWM_SEC: _warn("Not using mpz_powm_sec. You should rebuild using libgmp >= 5 to avoid timing attack vulnerability.", PowmInsecureWarning) # New functions from _number_new import * # Commented out and replaced with faster versions below ## def long2str(n): ## s='' ## while n>0: ## s=chr(n & 255)+s ## n=n>>8 ## return s ## import types ## def str2long(s): ## if type(s)!=types.StringType: return s # Integers will be left alone ## return reduce(lambda x,y : x*256+ord(y), s, 0L) def size (N): """size(N:long) : int Returns the size of the number N in bits. """ bits = 0 while N >> bits: bits += 1 return bits def getRandomNumber(N, randfunc=None): """Deprecated. Use getRandomInteger or getRandomNBitInteger instead.""" warnings.warn("Crypto.Util.number.getRandomNumber has confusing semantics"+ "and has been deprecated. Use getRandomInteger or getRandomNBitInteger instead.", GetRandomNumber_DeprecationWarning) return getRandomNBitInteger(N, randfunc) def getRandomInteger(N, randfunc=None): """getRandomInteger(N:int, randfunc:callable):long Return a random number with at most N bits. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future. """ if randfunc is None: _import_Random() randfunc = Random.new().read S = randfunc(N>>3) odd_bits = N % 8 if odd_bits != 0: char = ord(randfunc(1)) >> (8-odd_bits) S = bchr(char) + S value = bytes_to_long(S) return value def getRandomRange(a, b, randfunc=None): """getRandomRange(a:int, b:int, randfunc:callable):long Return a random number n so that a <= n < b. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future. """ range_ = b - a - 1 bits = size(range_) value = getRandomInteger(bits, randfunc) while value > range_: value = getRandomInteger(bits, randfunc) return a + value def getRandomNBitInteger(N, randfunc=None): """getRandomInteger(N:int, randfunc:callable):long Return a random number with exactly N-bits, i.e. a random number between 2**(N-1) and (2**N)-1. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future. """ value = getRandomInteger (N-1, randfunc) value |= 2L ** (N-1) # Ensure high bit is set assert size(value) >= N return value def GCD(x,y): """GCD(x:long, y:long): long Return the GCD of x and y. """ x = abs(x) ; y = abs(y) while x > 0: x, y = y % x, x return y def inverse(u, v): """inverse(u:long, v:long):long Return the inverse of u mod v. """ u3, v3 = long(u), long(v) u1, v1 = 1L, 0L while v3 > 0: q=divmod(u3, v3)[0] u1, v1 = v1, u1 - v1*q u3, v3 = v3, u3 - v3*q while u1<0: u1 = u1 + v return u1 # Given a number of bits to generate and a random generation function, # find a prime number of the appropriate size. def getPrime(N, randfunc=None): """getPrime(N:int, randfunc:callable):long Return a random N-bit prime number. If randfunc is omitted, then Random.new().read is used. """ if randfunc is None: _import_Random() randfunc = Random.new().read number=getRandomNBitInteger(N, randfunc) | 1 while (not isPrime(number, randfunc=randfunc)): number=number+2 return number def _rabinMillerTest(n, rounds, randfunc=None): """_rabinMillerTest(n:long, rounds:int, randfunc:callable):int Tests if n is prime. Returns 0 when n is definitly composite. Returns 1 when n is probably prime. Returns 2 when n is definitly prime. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future. """ # check special cases (n==2, n even, n < 2) if n < 3 or (n & 1) == 0: return n == 2 # n might be very large so it might be beneficial to precalculate n-1 n_1 = n - 1 # determine m and b so that 2**b * m = n - 1 and b maximal b = 0 m = n_1 while (m & 1) == 0: b += 1 m >>= 1 tested = [] # we need to do at most n-2 rounds. for i in xrange (min (rounds, n-2)): # randomly choose a < n and make sure it hasn't been tested yet a = getRandomRange (2, n, randfunc) while a in tested: a = getRandomRange (2, n, randfunc) tested.append (a) # do the rabin-miller test z = pow (a, m, n) # (a**m) % n if z == 1 or z == n_1: continue composite = 1 for r in xrange (b): z = (z * z) % n if z == 1: return 0 elif z == n_1: composite = 0 break if composite: return 0 return 1 def getStrongPrime(N, e=0, false_positive_prob=1e-6, randfunc=None): """getStrongPrime(N:int, e:int, false_positive_prob:float, randfunc:callable):long Return a random strong N-bit prime number. In this context p is a strong prime if p-1 and p+1 have at least one large prime factor. N should be a multiple of 128 and > 512. If e is provided the returned prime p-1 will be coprime to e and thus suitable for RSA where e is the public exponent. The optional false_positive_prob is the statistical probability that true is returned even though it is not (pseudo-prime). It defaults to 1e-6 (less than 1:1000000). Note that the real probability of a false-positive is far less. This is just the mathematically provable limit. randfunc should take a single int parameter and return that many random bytes as a string. If randfunc is omitted, then Random.new().read is used. """ # This function was implemented following the # instructions found in the paper: # "FAST GENERATION OF RANDOM, STRONG RSA PRIMES" # by Robert D. Silverman # RSA Laboratories # May 17, 1997 # which by the time of writing could be freely downloaded here: # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.17.2713&rep=rep1&type=pdf # Use the accelerator if available if _fastmath is not None: return _fastmath.getStrongPrime(long(N), long(e), false_positive_prob, randfunc) if (N < 512) or ((N % 128) != 0): raise ValueError ("bits must be multiple of 128 and > 512") rabin_miller_rounds = int(math.ceil(-math.log(false_positive_prob)/math.log(4))) # calculate range for X # lower_bound = sqrt(2) * 2^{511 + 128*x} # upper_bound = 2^{512 + 128*x} - 1 x = (N - 512) >> 7; # We need to approximate the sqrt(2) in the lower_bound by an integer # expression because floating point math overflows with these numbers lower_bound = divmod(14142135623730950489L * (2L ** (511 + 128*x)), 10000000000000000000L)[0] upper_bound = (1L << (512 + 128*x)) - 1 # Randomly choose X in calculated range X = getRandomRange (lower_bound, upper_bound, randfunc) # generate p1 and p2 p = [0, 0] for i in (0, 1): # randomly choose 101-bit y y = getRandomNBitInteger (101, randfunc) # initialize the field for sieving field = [0] * 5 * len (sieve_base) # sieve the field for prime in sieve_base: offset = y % prime for j in xrange ((prime - offset) % prime, len (field), prime): field[j] = 1 # look for suitable p[i] starting at y result = 0 for j in range(len(field)): composite = field[j] # look for next canidate if composite: continue tmp = y + j result = _rabinMillerTest (tmp, rabin_miller_rounds) if result > 0: p[i] = tmp break if result == 0: raise RuntimeError ("Couln't find prime in field. " "Developer: Increase field_size") # Calculate R # R = (p2^{-1} mod p1) * p2 - (p1^{-1} mod p2) * p1 tmp1 = inverse (p[1], p[0]) * p[1] # (p2^-1 mod p1)*p2 tmp2 = inverse (p[0], p[1]) * p[0] # (p1^-1 mod p2)*p1 R = tmp1 - tmp2 # (p2^-1 mod p1)*p2 - (p1^-1 mod p2)*p1 # search for final prime number starting by Y0 # Y0 = X + (R - X mod p1p2) increment = p[0] * p[1] X = X + (R - (X % increment)) while 1: is_possible_prime = 1 # first check candidate against sieve_base for prime in sieve_base: if (X % prime) == 0: is_possible_prime = 0 break # if e is given make sure that e and X-1 are coprime # this is not necessarily a strong prime criterion but useful when # creating them for RSA where the p-1 and q-1 should be coprime to # the public exponent e if e and is_possible_prime: if e & 1: if GCD (e, X-1) != 1: is_possible_prime = 0 else: if GCD (e, divmod((X-1),2)[0]) != 1: is_possible_prime = 0 # do some Rabin-Miller-Tests if is_possible_prime: result = _rabinMillerTest (X, rabin_miller_rounds) if result > 0: break X += increment # abort when X has more bits than requested # TODO: maybe we shouldn't abort but rather start over. if X >= 1L << N: raise RuntimeError ("Couln't find prime in field. " "Developer: Increase field_size") return X def isPrime(N, false_positive_prob=1e-6, randfunc=None): """isPrime(N:long, false_positive_prob:float, randfunc:callable):bool Return true if N is prime. The optional false_positive_prob is the statistical probability that true is returned even though it is not (pseudo-prime). It defaults to 1e-6 (less than 1:1000000). Note that the real probability of a false-positive is far less. This is just the mathematically provable limit. If randfunc is omitted, then Random.new().read is used. """ if _fastmath is not None: return _fastmath.isPrime(long(N), false_positive_prob, randfunc) if N < 3 or N & 1 == 0: return N == 2 for p in sieve_base: if N == p: return 1 if N % p == 0: return 0 rounds = int(math.ceil(-math.log(false_positive_prob)/math.log(4))) return _rabinMillerTest(N, rounds, randfunc) # Improved conversion functions contributed by Barry Warsaw, after # careful benchmarking import struct def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # after much testing, this algorithm was deemed to be the fastest s = b('') n = long(n) pack = struct.pack while n > 0: s = pack('>I', n & 0xffffffffL) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b('\000')[0]: break else: # only happens when n == 0 s = b('\000') i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b('\000') + s return s def bytes_to_long(s): """bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes(). """ acc = 0L unpack = struct.unpack length = len(s) if length % 4: extra = (4 - length % 4) s = b('\000') * extra + s length = length + extra for i in range(0, length, 4): acc = (acc << 32) + unpack('>I', s[i:i+4])[0] return acc # For backwards compatibility... import warnings def long2str(n, blocksize=0): warnings.warn("long2str() has been replaced by long_to_bytes()") return long_to_bytes(n, blocksize) def str2long(s): warnings.warn("str2long() has been replaced by bytes_to_long()") return bytes_to_long(s) def _import_Random(): # This is called in a function instead of at the module level in order to # avoid problems with recursive imports global Random, StrongRandom from Crypto import Random from Crypto.Random.random import StrongRandom # The first 10000 primes used for checking primality. # This should be enough to eliminate most of the odd # numbers before needing to do a Rabin-Miller test at all. sieve_base = ( 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887, 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973, 10007, 10009, 10037, 10039, 10061, 10067, 10069, 10079, 10091, 10093, 10099, 10103, 10111, 10133, 10139, 10141, 10151, 10159, 10163, 10169, 10177, 10181, 10193, 10211, 10223, 10243, 10247, 10253, 10259, 10267, 10271, 10273, 10289, 10301, 10303, 10313, 10321, 10331, 10333, 10337, 10343, 10357, 10369, 10391, 10399, 10427, 10429, 10433, 10453, 10457, 10459, 10463, 10477, 10487, 10499, 10501, 10513, 10529, 10531, 10559, 10567, 10589, 10597, 10601, 10607, 10613, 10627, 10631, 10639, 10651, 10657, 10663, 10667, 10687, 10691, 10709, 10711, 10723, 10729, 10733, 10739, 10753, 10771, 10781, 10789, 10799, 10831, 10837, 10847, 10853, 10859, 10861, 10867, 10883, 10889, 10891, 10903, 10909, 10937, 10939, 10949, 10957, 10973, 10979, 10987, 10993, 11003, 11027, 11047, 11057, 11059, 11069, 11071, 11083, 11087, 11093, 11113, 11117, 11119, 11131, 11149, 11159, 11161, 11171, 11173, 11177, 11197, 11213, 11239, 11243, 11251, 11257, 11261, 11273, 11279, 11287, 11299, 11311, 11317, 11321, 11329, 11351, 11353, 11369, 11383, 11393, 11399, 11411, 11423, 11437, 11443, 11447, 11467, 11471, 11483, 11489, 11491, 11497, 11503, 11519, 11527, 11549, 11551, 11579, 11587, 11593, 11597, 11617, 11621, 11633, 11657, 11677, 11681, 11689, 11699, 11701, 11717, 11719, 11731, 11743, 11777, 11779, 11783, 11789, 11801, 11807, 11813, 11821, 11827, 11831, 11833, 11839, 11863, 11867, 11887, 11897, 11903, 11909, 11923, 11927, 11933, 11939, 11941, 11953, 11959, 11969, 11971, 11981, 11987, 12007, 12011, 12037, 12041, 12043, 12049, 12071, 12073, 12097, 12101, 12107, 12109, 12113, 12119, 12143, 12149, 12157, 12161, 12163, 12197, 12203, 12211, 12227, 12239, 12241, 12251, 12253, 12263, 12269, 12277, 12281, 12289, 12301, 12323, 12329, 12343, 12347, 12373, 12377, 12379, 12391, 12401, 12409, 12413, 12421, 12433, 12437, 12451, 12457, 12473, 12479, 12487, 12491, 12497, 12503, 12511, 12517, 12527, 12539, 12541, 12547, 12553, 12569, 12577, 12583, 12589, 12601, 12611, 12613, 12619, 12637, 12641, 12647, 12653, 12659, 12671, 12689, 12697, 12703, 12713, 12721, 12739, 12743, 12757, 12763, 12781, 12791, 12799, 12809, 12821, 12823, 12829, 12841, 12853, 12889, 12893, 12899, 12907, 12911, 12917, 12919, 12923, 12941, 12953, 12959, 12967, 12973, 12979, 12983, 13001, 13003, 13007, 13009, 13033, 13037, 13043, 13049, 13063, 13093, 13099, 13103, 13109, 13121, 13127, 13147, 13151, 13159, 13163, 13171, 13177, 13183, 13187, 13217, 13219, 13229, 13241, 13249, 13259, 13267, 13291, 13297, 13309, 13313, 13327, 13331, 13337, 13339, 13367, 13381, 13397, 13399, 13411, 13417, 13421, 13441, 13451, 13457, 13463, 13469, 13477, 13487, 13499, 13513, 13523, 13537, 13553, 13567, 13577, 13591, 13597, 13613, 13619, 13627, 13633, 13649, 13669, 13679, 13681, 13687, 13691, 13693, 13697, 13709, 13711, 13721, 13723, 13729, 13751, 13757, 13759, 13763, 13781, 13789, 13799, 13807, 13829, 13831, 13841, 13859, 13873, 13877, 13879, 13883, 13901, 13903, 13907, 13913, 13921, 13931, 13933, 13963, 13967, 13997, 13999, 14009, 14011, 14029, 14033, 14051, 14057, 14071, 14081, 14083, 14087, 14107, 14143, 14149, 14153, 14159, 14173, 14177, 14197, 14207, 14221, 14243, 14249, 14251, 14281, 14293, 14303, 14321, 14323, 14327, 14341, 14347, 14369, 14387, 14389, 14401, 14407, 14411, 14419, 14423, 14431, 14437, 14447, 14449, 14461, 14479, 14489, 14503, 14519, 14533, 14537, 14543, 14549, 14551, 14557, 14561, 14563, 14591, 14593, 14621, 14627, 14629, 14633, 14639, 14653, 14657, 14669, 14683, 14699, 14713, 14717, 14723, 14731, 14737, 14741, 14747, 14753, 14759, 14767, 14771, 14779, 14783, 14797, 14813, 14821, 14827, 14831, 14843, 14851, 14867, 14869, 14879, 14887, 14891, 14897, 14923, 14929, 14939, 14947, 14951, 14957, 14969, 14983, 15013, 15017, 15031, 15053, 15061, 15073, 15077, 15083, 15091, 15101, 15107, 15121, 15131, 15137, 15139, 15149, 15161, 15173, 15187, 15193, 15199, 15217, 15227, 15233, 15241, 15259, 15263, 15269, 15271, 15277, 15287, 15289, 15299, 15307, 15313, 15319, 15329, 15331, 15349, 15359, 15361, 15373, 15377, 15383, 15391, 15401, 15413, 15427, 15439, 15443, 15451, 15461, 15467, 15473, 15493, 15497, 15511, 15527, 15541, 15551, 15559, 15569, 15581, 15583, 15601, 15607, 15619, 15629, 15641, 15643, 15647, 15649, 15661, 15667, 15671, 15679, 15683, 15727, 15731, 15733, 15737, 15739, 15749, 15761, 15767, 15773, 15787, 15791, 15797, 15803, 15809, 15817, 15823, 15859, 15877, 15881, 15887, 15889, 15901, 15907, 15913, 15919, 15923, 15937, 15959, 15971, 15973, 15991, 16001, 16007, 16033, 16057, 16061, 16063, 16067, 16069, 16073, 16087, 16091, 16097, 16103, 16111, 16127, 16139, 16141, 16183, 16187, 16189, 16193, 16217, 16223, 16229, 16231, 16249, 16253, 16267, 16273, 16301, 16319, 16333, 16339, 16349, 16361, 16363, 16369, 16381, 16411, 16417, 16421, 16427, 16433, 16447, 16451, 16453, 16477, 16481, 16487, 16493, 16519, 16529, 16547, 16553, 16561, 16567, 16573, 16603, 16607, 16619, 16631, 16633, 16649, 16651, 16657, 16661, 16673, 16691, 16693, 16699, 16703, 16729, 16741, 16747, 16759, 16763, 16787, 16811, 16823, 16829, 16831, 16843, 16871, 16879, 16883, 16889, 16901, 16903, 16921, 16927, 16931, 16937, 16943, 16963, 16979, 16981, 16987, 16993, 17011, 17021, 17027, 17029, 17033, 17041, 17047, 17053, 17077, 17093, 17099, 17107, 17117, 17123, 17137, 17159, 17167, 17183, 17189, 17191, 17203, 17207, 17209, 17231, 17239, 17257, 17291, 17293, 17299, 17317, 17321, 17327, 17333, 17341, 17351, 17359, 17377, 17383, 17387, 17389, 17393, 17401, 17417, 17419, 17431, 17443, 17449, 17467, 17471, 17477, 17483, 17489, 17491, 17497, 17509, 17519, 17539, 17551, 17569, 17573, 17579, 17581, 17597, 17599, 17609, 17623, 17627, 17657, 17659, 17669, 17681, 17683, 17707, 17713, 17729, 17737, 17747, 17749, 17761, 17783, 17789, 17791, 17807, 17827, 17837, 17839, 17851, 17863, 17881, 17891, 17903, 17909, 17911, 17921, 17923, 17929, 17939, 17957, 17959, 17971, 17977, 17981, 17987, 17989, 18013, 18041, 18043, 18047, 18049, 18059, 18061, 18077, 18089, 18097, 18119, 18121, 18127, 18131, 18133, 18143, 18149, 18169, 18181, 18191, 18199, 18211, 18217, 18223, 18229, 18233, 18251, 18253, 18257, 18269, 18287, 18289, 18301, 18307, 18311, 18313, 18329, 18341, 18353, 18367, 18371, 18379, 18397, 18401, 18413, 18427, 18433, 18439, 18443, 18451, 18457, 18461, 18481, 18493, 18503, 18517, 18521, 18523, 18539, 18541, 18553, 18583, 18587, 18593, 18617, 18637, 18661, 18671, 18679, 18691, 18701, 18713, 18719, 18731, 18743, 18749, 18757, 18773, 18787, 18793, 18797, 18803, 18839, 18859, 18869, 18899, 18911, 18913, 18917, 18919, 18947, 18959, 18973, 18979, 19001, 19009, 19013, 19031, 19037, 19051, 19069, 19073, 19079, 19081, 19087, 19121, 19139, 19141, 19157, 19163, 19181, 19183, 19207, 19211, 19213, 19219, 19231, 19237, 19249, 19259, 19267, 19273, 19289, 19301, 19309, 19319, 19333, 19373, 19379, 19381, 19387, 19391, 19403, 19417, 19421, 19423, 19427, 19429, 19433, 19441, 19447, 19457, 19463, 19469, 19471, 19477, 19483, 19489, 19501, 19507, 19531, 19541, 19543, 19553, 19559, 19571, 19577, 19583, 19597, 19603, 19609, 19661, 19681, 19687, 19697, 19699, 19709, 19717, 19727, 19739, 19751, 19753, 19759, 19763, 19777, 19793, 19801, 19813, 19819, 19841, 19843, 19853, 19861, 19867, 19889, 19891, 19913, 19919, 19927, 19937, 19949, 19961, 19963, 19973, 19979, 19991, 19993, 19997, 20011, 20021, 20023, 20029, 20047, 20051, 20063, 20071, 20089, 20101, 20107, 20113, 20117, 20123, 20129, 20143, 20147, 20149, 20161, 20173, 20177, 20183, 20201, 20219, 20231, 20233, 20249, 20261, 20269, 20287, 20297, 20323, 20327, 20333, 20341, 20347, 20353, 20357, 20359, 20369, 20389, 20393, 20399, 20407, 20411, 20431, 20441, 20443, 20477, 20479, 20483, 20507, 20509, 20521, 20533, 20543, 20549, 20551, 20563, 20593, 20599, 20611, 20627, 20639, 20641, 20663, 20681, 20693, 20707, 20717, 20719, 20731, 20743, 20747, 20749, 20753, 20759, 20771, 20773, 20789, 20807, 20809, 20849, 20857, 20873, 20879, 20887, 20897, 20899, 20903, 20921, 20929, 20939, 20947, 20959, 20963, 20981, 20983, 21001, 21011, 21013, 21017, 21019, 21023, 21031, 21059, 21061, 21067, 21089, 21101, 21107, 21121, 21139, 21143, 21149, 21157, 21163, 21169, 21179, 21187, 21191, 21193, 21211, 21221, 21227, 21247, 21269, 21277, 21283, 21313, 21317, 21319, 21323, 21341, 21347, 21377, 21379, 21383, 21391, 21397, 21401, 21407, 21419, 21433, 21467, 21481, 21487, 21491, 21493, 21499, 21503, 21517, 21521, 21523, 21529, 21557, 21559, 21563, 21569, 21577, 21587, 21589, 21599, 21601, 21611, 21613, 21617, 21647, 21649, 21661, 21673, 21683, 21701, 21713, 21727, 21737, 21739, 21751, 21757, 21767, 21773, 21787, 21799, 21803, 21817, 21821, 21839, 21841, 21851, 21859, 21863, 21871, 21881, 21893, 21911, 21929, 21937, 21943, 21961, 21977, 21991, 21997, 22003, 22013, 22027, 22031, 22037, 22039, 22051, 22063, 22067, 22073, 22079, 22091, 22093, 22109, 22111, 22123, 22129, 22133, 22147, 22153, 22157, 22159, 22171, 22189, 22193, 22229, 22247, 22259, 22271, 22273, 22277, 22279, 22283, 22291, 22303, 22307, 22343, 22349, 22367, 22369, 22381, 22391, 22397, 22409, 22433, 22441, 22447, 22453, 22469, 22481, 22483, 22501, 22511, 22531, 22541, 22543, 22549, 22567, 22571, 22573, 22613, 22619, 22621, 22637, 22639, 22643, 22651, 22669, 22679, 22691, 22697, 22699, 22709, 22717, 22721, 22727, 22739, 22741, 22751, 22769, 22777, 22783, 22787, 22807, 22811, 22817, 22853, 22859, 22861, 22871, 22877, 22901, 22907, 22921, 22937, 22943, 22961, 22963, 22973, 22993, 23003, 23011, 23017, 23021, 23027, 23029, 23039, 23041, 23053, 23057, 23059, 23063, 23071, 23081, 23087, 23099, 23117, 23131, 23143, 23159, 23167, 23173, 23189, 23197, 23201, 23203, 23209, 23227, 23251, 23269, 23279, 23291, 23293, 23297, 23311, 23321, 23327, 23333, 23339, 23357, 23369, 23371, 23399, 23417, 23431, 23447, 23459, 23473, 23497, 23509, 23531, 23537, 23539, 23549, 23557, 23561, 23563, 23567, 23581, 23593, 23599, 23603, 23609, 23623, 23627, 23629, 23633, 23663, 23669, 23671, 23677, 23687, 23689, 23719, 23741, 23743, 23747, 23753, 23761, 23767, 23773, 23789, 23801, 23813, 23819, 23827, 23831, 23833, 23857, 23869, 23873, 23879, 23887, 23893, 23899, 23909, 23911, 23917, 23929, 23957, 23971, 23977, 23981, 23993, 24001, 24007, 24019, 24023, 24029, 24043, 24049, 24061, 24071, 24077, 24083, 24091, 24097, 24103, 24107, 24109, 24113, 24121, 24133, 24137, 24151, 24169, 24179, 24181, 24197, 24203, 24223, 24229, 24239, 24247, 24251, 24281, 24317, 24329, 24337, 24359, 24371, 24373, 24379, 24391, 24407, 24413, 24419, 24421, 24439, 24443, 24469, 24473, 24481, 24499, 24509, 24517, 24527, 24533, 24547, 24551, 24571, 24593, 24611, 24623, 24631, 24659, 24671, 24677, 24683, 24691, 24697, 24709, 24733, 24749, 24763, 24767, 24781, 24793, 24799, 24809, 24821, 24841, 24847, 24851, 24859, 24877, 24889, 24907, 24917, 24919, 24923, 24943, 24953, 24967, 24971, 24977, 24979, 24989, 25013, 25031, 25033, 25037, 25057, 25073, 25087, 25097, 25111, 25117, 25121, 25127, 25147, 25153, 25163, 25169, 25171, 25183, 25189, 25219, 25229, 25237, 25243, 25247, 25253, 25261, 25301, 25303, 25307, 25309, 25321, 25339, 25343, 25349, 25357, 25367, 25373, 25391, 25409, 25411, 25423, 25439, 25447, 25453, 25457, 25463, 25469, 25471, 25523, 25537, 25541, 25561, 25577, 25579, 25583, 25589, 25601, 25603, 25609, 25621, 25633, 25639, 25643, 25657, 25667, 25673, 25679, 25693, 25703, 25717, 25733, 25741, 25747, 25759, 25763, 25771, 25793, 25799, 25801, 25819, 25841, 25847, 25849, 25867, 25873, 25889, 25903, 25913, 25919, 25931, 25933, 25939, 25943, 25951, 25969, 25981, 25997, 25999, 26003, 26017, 26021, 26029, 26041, 26053, 26083, 26099, 26107, 26111, 26113, 26119, 26141, 26153, 26161, 26171, 26177, 26183, 26189, 26203, 26209, 26227, 26237, 26249, 26251, 26261, 26263, 26267, 26293, 26297, 26309, 26317, 26321, 26339, 26347, 26357, 26371, 26387, 26393, 26399, 26407, 26417, 26423, 26431, 26437, 26449, 26459, 26479, 26489, 26497, 26501, 26513, 26539, 26557, 26561, 26573, 26591, 26597, 26627, 26633, 26641, 26647, 26669, 26681, 26683, 26687, 26693, 26699, 26701, 26711, 26713, 26717, 26723, 26729, 26731, 26737, 26759, 26777, 26783, 26801, 26813, 26821, 26833, 26839, 26849, 26861, 26863, 26879, 26881, 26891, 26893, 26903, 26921, 26927, 26947, 26951, 26953, 26959, 26981, 26987, 26993, 27011, 27017, 27031, 27043, 27059, 27061, 27067, 27073, 27077, 27091, 27103, 27107, 27109, 27127, 27143, 27179, 27191, 27197, 27211, 27239, 27241, 27253, 27259, 27271, 27277, 27281, 27283, 27299, 27329, 27337, 27361, 27367, 27397, 27407, 27409, 27427, 27431, 27437, 27449, 27457, 27479, 27481, 27487, 27509, 27527, 27529, 27539, 27541, 27551, 27581, 27583, 27611, 27617, 27631, 27647, 27653, 27673, 27689, 27691, 27697, 27701, 27733, 27737, 27739, 27743, 27749, 27751, 27763, 27767, 27773, 27779, 27791, 27793, 27799, 27803, 27809, 27817, 27823, 27827, 27847, 27851, 27883, 27893, 27901, 27917, 27919, 27941, 27943, 27947, 27953, 27961, 27967, 27983, 27997, 28001, 28019, 28027, 28031, 28051, 28057, 28069, 28081, 28087, 28097, 28099, 28109, 28111, 28123, 28151, 28163, 28181, 28183, 28201, 28211, 28219, 28229, 28277, 28279, 28283, 28289, 28297, 28307, 28309, 28319, 28349, 28351, 28387, 28393, 28403, 28409, 28411, 28429, 28433, 28439, 28447, 28463, 28477, 28493, 28499, 28513, 28517, 28537, 28541, 28547, 28549, 28559, 28571, 28573, 28579, 28591, 28597, 28603, 28607, 28619, 28621, 28627, 28631, 28643, 28649, 28657, 28661, 28663, 28669, 28687, 28697, 28703, 28711, 28723, 28729, 28751, 28753, 28759, 28771, 28789, 28793, 28807, 28813, 28817, 28837, 28843, 28859, 28867, 28871, 28879, 28901, 28909, 28921, 28927, 28933, 28949, 28961, 28979, 29009, 29017, 29021, 29023, 29027, 29033, 29059, 29063, 29077, 29101, 29123, 29129, 29131, 29137, 29147, 29153, 29167, 29173, 29179, 29191, 29201, 29207, 29209, 29221, 29231, 29243, 29251, 29269, 29287, 29297, 29303, 29311, 29327, 29333, 29339, 29347, 29363, 29383, 29387, 29389, 29399, 29401, 29411, 29423, 29429, 29437, 29443, 29453, 29473, 29483, 29501, 29527, 29531, 29537, 29567, 29569, 29573, 29581, 29587, 29599, 29611, 29629, 29633, 29641, 29663, 29669, 29671, 29683, 29717, 29723, 29741, 29753, 29759, 29761, 29789, 29803, 29819, 29833, 29837, 29851, 29863, 29867, 29873, 29879, 29881, 29917, 29921, 29927, 29947, 29959, 29983, 29989, 30011, 30013, 30029, 30047, 30059, 30071, 30089, 30091, 30097, 30103, 30109, 30113, 30119, 30133, 30137, 30139, 30161, 30169, 30181, 30187, 30197, 30203, 30211, 30223, 30241, 30253, 30259, 30269, 30271, 30293, 30307, 30313, 30319, 30323, 30341, 30347, 30367, 30389, 30391, 30403, 30427, 30431, 30449, 30467, 30469, 30491, 30493, 30497, 30509, 30517, 30529, 30539, 30553, 30557, 30559, 30577, 30593, 30631, 30637, 30643, 30649, 30661, 30671, 30677, 30689, 30697, 30703, 30707, 30713, 30727, 30757, 30763, 30773, 30781, 30803, 30809, 30817, 30829, 30839, 30841, 30851, 30853, 30859, 30869, 30871, 30881, 30893, 30911, 30931, 30937, 30941, 30949, 30971, 30977, 30983, 31013, 31019, 31033, 31039, 31051, 31063, 31069, 31079, 31081, 31091, 31121, 31123, 31139, 31147, 31151, 31153, 31159, 31177, 31181, 31183, 31189, 31193, 31219, 31223, 31231, 31237, 31247, 31249, 31253, 31259, 31267, 31271, 31277, 31307, 31319, 31321, 31327, 31333, 31337, 31357, 31379, 31387, 31391, 31393, 31397, 31469, 31477, 31481, 31489, 31511, 31513, 31517, 31531, 31541, 31543, 31547, 31567, 31573, 31583, 31601, 31607, 31627, 31643, 31649, 31657, 31663, 31667, 31687, 31699, 31721, 31723, 31727, 31729, 31741, 31751, 31769, 31771, 31793, 31799, 31817, 31847, 31849, 31859, 31873, 31883, 31891, 31907, 31957, 31963, 31973, 31981, 31991, 32003, 32009, 32027, 32029, 32051, 32057, 32059, 32063, 32069, 32077, 32083, 32089, 32099, 32117, 32119, 32141, 32143, 32159, 32173, 32183, 32189, 32191, 32203, 32213, 32233, 32237, 32251, 32257, 32261, 32297, 32299, 32303, 32309, 32321, 32323, 32327, 32341, 32353, 32359, 32363, 32369, 32371, 32377, 32381, 32401, 32411, 32413, 32423, 32429, 32441, 32443, 32467, 32479, 32491, 32497, 32503, 32507, 32531, 32533, 32537, 32561, 32563, 32569, 32573, 32579, 32587, 32603, 32609, 32611, 32621, 32633, 32647, 32653, 32687, 32693, 32707, 32713, 32717, 32719, 32749, 32771, 32779, 32783, 32789, 32797, 32801, 32803, 32831, 32833, 32839, 32843, 32869, 32887, 32909, 32911, 32917, 32933, 32939, 32941, 32957, 32969, 32971, 32983, 32987, 32993, 32999, 33013, 33023, 33029, 33037, 33049, 33053, 33071, 33073, 33083, 33091, 33107, 33113, 33119, 33149, 33151, 33161, 33179, 33181, 33191, 33199, 33203, 33211, 33223, 33247, 33287, 33289, 33301, 33311, 33317, 33329, 33331, 33343, 33347, 33349, 33353, 33359, 33377, 33391, 33403, 33409, 33413, 33427, 33457, 33461, 33469, 33479, 33487, 33493, 33503, 33521, 33529, 33533, 33547, 33563, 33569, 33577, 33581, 33587, 33589, 33599, 33601, 33613, 33617, 33619, 33623, 33629, 33637, 33641, 33647, 33679, 33703, 33713, 33721, 33739, 33749, 33751, 33757, 33767, 33769, 33773, 33791, 33797, 33809, 33811, 33827, 33829, 33851, 33857, 33863, 33871, 33889, 33893, 33911, 33923, 33931, 33937, 33941, 33961, 33967, 33997, 34019, 34031, 34033, 34039, 34057, 34061, 34123, 34127, 34129, 34141, 34147, 34157, 34159, 34171, 34183, 34211, 34213, 34217, 34231, 34253, 34259, 34261, 34267, 34273, 34283, 34297, 34301, 34303, 34313, 34319, 34327, 34337, 34351, 34361, 34367, 34369, 34381, 34403, 34421, 34429, 34439, 34457, 34469, 34471, 34483, 34487, 34499, 34501, 34511, 34513, 34519, 34537, 34543, 34549, 34583, 34589, 34591, 34603, 34607, 34613, 34631, 34649, 34651, 34667, 34673, 34679, 34687, 34693, 34703, 34721, 34729, 34739, 34747, 34757, 34759, 34763, 34781, 34807, 34819, 34841, 34843, 34847, 34849, 34871, 34877, 34883, 34897, 34913, 34919, 34939, 34949, 34961, 34963, 34981, 35023, 35027, 35051, 35053, 35059, 35069, 35081, 35083, 35089, 35099, 35107, 35111, 35117, 35129, 35141, 35149, 35153, 35159, 35171, 35201, 35221, 35227, 35251, 35257, 35267, 35279, 35281, 35291, 35311, 35317, 35323, 35327, 35339, 35353, 35363, 35381, 35393, 35401, 35407, 35419, 35423, 35437, 35447, 35449, 35461, 35491, 35507, 35509, 35521, 35527, 35531, 35533, 35537, 35543, 35569, 35573, 35591, 35593, 35597, 35603, 35617, 35671, 35677, 35729, 35731, 35747, 35753, 35759, 35771, 35797, 35801, 35803, 35809, 35831, 35837, 35839, 35851, 35863, 35869, 35879, 35897, 35899, 35911, 35923, 35933, 35951, 35963, 35969, 35977, 35983, 35993, 35999, 36007, 36011, 36013, 36017, 36037, 36061, 36067, 36073, 36083, 36097, 36107, 36109, 36131, 36137, 36151, 36161, 36187, 36191, 36209, 36217, 36229, 36241, 36251, 36263, 36269, 36277, 36293, 36299, 36307, 36313, 36319, 36341, 36343, 36353, 36373, 36383, 36389, 36433, 36451, 36457, 36467, 36469, 36473, 36479, 36493, 36497, 36523, 36527, 36529, 36541, 36551, 36559, 36563, 36571, 36583, 36587, 36599, 36607, 36629, 36637, 36643, 36653, 36671, 36677, 36683, 36691, 36697, 36709, 36713, 36721, 36739, 36749, 36761, 36767, 36779, 36781, 36787, 36791, 36793, 36809, 36821, 36833, 36847, 36857, 36871, 36877, 36887, 36899, 36901, 36913, 36919, 36923, 36929, 36931, 36943, 36947, 36973, 36979, 36997, 37003, 37013, 37019, 37021, 37039, 37049, 37057, 37061, 37087, 37097, 37117, 37123, 37139, 37159, 37171, 37181, 37189, 37199, 37201, 37217, 37223, 37243, 37253, 37273, 37277, 37307, 37309, 37313, 37321, 37337, 37339, 37357, 37361, 37363, 37369, 37379, 37397, 37409, 37423, 37441, 37447, 37463, 37483, 37489, 37493, 37501, 37507, 37511, 37517, 37529, 37537, 37547, 37549, 37561, 37567, 37571, 37573, 37579, 37589, 37591, 37607, 37619, 37633, 37643, 37649, 37657, 37663, 37691, 37693, 37699, 37717, 37747, 37781, 37783, 37799, 37811, 37813, 37831, 37847, 37853, 37861, 37871, 37879, 37889, 37897, 37907, 37951, 37957, 37963, 37967, 37987, 37991, 37993, 37997, 38011, 38039, 38047, 38053, 38069, 38083, 38113, 38119, 38149, 38153, 38167, 38177, 38183, 38189, 38197, 38201, 38219, 38231, 38237, 38239, 38261, 38273, 38281, 38287, 38299, 38303, 38317, 38321, 38327, 38329, 38333, 38351, 38371, 38377, 38393, 38431, 38447, 38449, 38453, 38459, 38461, 38501, 38543, 38557, 38561, 38567, 38569, 38593, 38603, 38609, 38611, 38629, 38639, 38651, 38653, 38669, 38671, 38677, 38693, 38699, 38707, 38711, 38713, 38723, 38729, 38737, 38747, 38749, 38767, 38783, 38791, 38803, 38821, 38833, 38839, 38851, 38861, 38867, 38873, 38891, 38903, 38917, 38921, 38923, 38933, 38953, 38959, 38971, 38977, 38993, 39019, 39023, 39041, 39043, 39047, 39079, 39089, 39097, 39103, 39107, 39113, 39119, 39133, 39139, 39157, 39161, 39163, 39181, 39191, 39199, 39209, 39217, 39227, 39229, 39233, 39239, 39241, 39251, 39293, 39301, 39313, 39317, 39323, 39341, 39343, 39359, 39367, 39371, 39373, 39383, 39397, 39409, 39419, 39439, 39443, 39451, 39461, 39499, 39503, 39509, 39511, 39521, 39541, 39551, 39563, 39569, 39581, 39607, 39619, 39623, 39631, 39659, 39667, 39671, 39679, 39703, 39709, 39719, 39727, 39733, 39749, 39761, 39769, 39779, 39791, 39799, 39821, 39827, 39829, 39839, 39841, 39847, 39857, 39863, 39869, 39877, 39883, 39887, 39901, 39929, 39937, 39953, 39971, 39979, 39983, 39989, 40009, 40013, 40031, 40037, 40039, 40063, 40087, 40093, 40099, 40111, 40123, 40127, 40129, 40151, 40153, 40163, 40169, 40177, 40189, 40193, 40213, 40231, 40237, 40241, 40253, 40277, 40283, 40289, 40343, 40351, 40357, 40361, 40387, 40423, 40427, 40429, 40433, 40459, 40471, 40483, 40487, 40493, 40499, 40507, 40519, 40529, 40531, 40543, 40559, 40577, 40583, 40591, 40597, 40609, 40627, 40637, 40639, 40693, 40697, 40699, 40709, 40739, 40751, 40759, 40763, 40771, 40787, 40801, 40813, 40819, 40823, 40829, 40841, 40847, 40849, 40853, 40867, 40879, 40883, 40897, 40903, 40927, 40933, 40939, 40949, 40961, 40973, 40993, 41011, 41017, 41023, 41039, 41047, 41051, 41057, 41077, 41081, 41113, 41117, 41131, 41141, 41143, 41149, 41161, 41177, 41179, 41183, 41189, 41201, 41203, 41213, 41221, 41227, 41231, 41233, 41243, 41257, 41263, 41269, 41281, 41299, 41333, 41341, 41351, 41357, 41381, 41387, 41389, 41399, 41411, 41413, 41443, 41453, 41467, 41479, 41491, 41507, 41513, 41519, 41521, 41539, 41543, 41549, 41579, 41593, 41597, 41603, 41609, 41611, 41617, 41621, 41627, 41641, 41647, 41651, 41659, 41669, 41681, 41687, 41719, 41729, 41737, 41759, 41761, 41771, 41777, 41801, 41809, 41813, 41843, 41849, 41851, 41863, 41879, 41887, 41893, 41897, 41903, 41911, 41927, 41941, 41947, 41953, 41957, 41959, 41969, 41981, 41983, 41999, 42013, 42017, 42019, 42023, 42043, 42061, 42071, 42073, 42083, 42089, 42101, 42131, 42139, 42157, 42169, 42179, 42181, 42187, 42193, 42197, 42209, 42221, 42223, 42227, 42239, 42257, 42281, 42283, 42293, 42299, 42307, 42323, 42331, 42337, 42349, 42359, 42373, 42379, 42391, 42397, 42403, 42407, 42409, 42433, 42437, 42443, 42451, 42457, 42461, 42463, 42467, 42473, 42487, 42491, 42499, 42509, 42533, 42557, 42569, 42571, 42577, 42589, 42611, 42641, 42643, 42649, 42667, 42677, 42683, 42689, 42697, 42701, 42703, 42709, 42719, 42727, 42737, 42743, 42751, 42767, 42773, 42787, 42793, 42797, 42821, 42829, 42839, 42841, 42853, 42859, 42863, 42899, 42901, 42923, 42929, 42937, 42943, 42953, 42961, 42967, 42979, 42989, 43003, 43013, 43019, 43037, 43049, 43051, 43063, 43067, 43093, 43103, 43117, 43133, 43151, 43159, 43177, 43189, 43201, 43207, 43223, 43237, 43261, 43271, 43283, 43291, 43313, 43319, 43321, 43331, 43391, 43397, 43399, 43403, 43411, 43427, 43441, 43451, 43457, 43481, 43487, 43499, 43517, 43541, 43543, 43573, 43577, 43579, 43591, 43597, 43607, 43609, 43613, 43627, 43633, 43649, 43651, 43661, 43669, 43691, 43711, 43717, 43721, 43753, 43759, 43777, 43781, 43783, 43787, 43789, 43793, 43801, 43853, 43867, 43889, 43891, 43913, 43933, 43943, 43951, 43961, 43963, 43969, 43973, 43987, 43991, 43997, 44017, 44021, 44027, 44029, 44041, 44053, 44059, 44071, 44087, 44089, 44101, 44111, 44119, 44123, 44129, 44131, 44159, 44171, 44179, 44189, 44201, 44203, 44207, 44221, 44249, 44257, 44263, 44267, 44269, 44273, 44279, 44281, 44293, 44351, 44357, 44371, 44381, 44383, 44389, 44417, 44449, 44453, 44483, 44491, 44497, 44501, 44507, 44519, 44531, 44533, 44537, 44543, 44549, 44563, 44579, 44587, 44617, 44621, 44623, 44633, 44641, 44647, 44651, 44657, 44683, 44687, 44699, 44701, 44711, 44729, 44741, 44753, 44771, 44773, 44777, 44789, 44797, 44809, 44819, 44839, 44843, 44851, 44867, 44879, 44887, 44893, 44909, 44917, 44927, 44939, 44953, 44959, 44963, 44971, 44983, 44987, 45007, 45013, 45053, 45061, 45077, 45083, 45119, 45121, 45127, 45131, 45137, 45139, 45161, 45179, 45181, 45191, 45197, 45233, 45247, 45259, 45263, 45281, 45289, 45293, 45307, 45317, 45319, 45329, 45337, 45341, 45343, 45361, 45377, 45389, 45403, 45413, 45427, 45433, 45439, 45481, 45491, 45497, 45503, 45523, 45533, 45541, 45553, 45557, 45569, 45587, 45589, 45599, 45613, 45631, 45641, 45659, 45667, 45673, 45677, 45691, 45697, 45707, 45737, 45751, 45757, 45763, 45767, 45779, 45817, 45821, 45823, 45827, 45833, 45841, 45853, 45863, 45869, 45887, 45893, 45943, 45949, 45953, 45959, 45971, 45979, 45989, 46021, 46027, 46049, 46051, 46061, 46073, 46091, 46093, 46099, 46103, 46133, 46141, 46147, 46153, 46171, 46181, 46183, 46187, 46199, 46219, 46229, 46237, 46261, 46271, 46273, 46279, 46301, 46307, 46309, 46327, 46337, 46349, 46351, 46381, 46399, 46411, 46439, 46441, 46447, 46451, 46457, 46471, 46477, 46489, 46499, 46507, 46511, 46523, 46549, 46559, 46567, 46573, 46589, 46591, 46601, 46619, 46633, 46639, 46643, 46649, 46663, 46679, 46681, 46687, 46691, 46703, 46723, 46727, 46747, 46751, 46757, 46769, 46771, 46807, 46811, 46817, 46819, 46829, 46831, 46853, 46861, 46867, 46877, 46889, 46901, 46919, 46933, 46957, 46993, 46997, 47017, 47041, 47051, 47057, 47059, 47087, 47093, 47111, 47119, 47123, 47129, 47137, 47143, 47147, 47149, 47161, 47189, 47207, 47221, 47237, 47251, 47269, 47279, 47287, 47293, 47297, 47303, 47309, 47317, 47339, 47351, 47353, 47363, 47381, 47387, 47389, 47407, 47417, 47419, 47431, 47441, 47459, 47491, 47497, 47501, 47507, 47513, 47521, 47527, 47533, 47543, 47563, 47569, 47581, 47591, 47599, 47609, 47623, 47629, 47639, 47653, 47657, 47659, 47681, 47699, 47701, 47711, 47713, 47717, 47737, 47741, 47743, 47777, 47779, 47791, 47797, 47807, 47809, 47819, 47837, 47843, 47857, 47869, 47881, 47903, 47911, 47917, 47933, 47939, 47947, 47951, 47963, 47969, 47977, 47981, 48017, 48023, 48029, 48049, 48073, 48079, 48091, 48109, 48119, 48121, 48131, 48157, 48163, 48179, 48187, 48193, 48197, 48221, 48239, 48247, 48259, 48271, 48281, 48299, 48311, 48313, 48337, 48341, 48353, 48371, 48383, 48397, 48407, 48409, 48413, 48437, 48449, 48463, 48473, 48479, 48481, 48487, 48491, 48497, 48523, 48527, 48533, 48539, 48541, 48563, 48571, 48589, 48593, 48611, 48619, 48623, 48647, 48649, 48661, 48673, 48677, 48679, 48731, 48733, 48751, 48757, 48761, 48767, 48779, 48781, 48787, 48799, 48809, 48817, 48821, 48823, 48847, 48857, 48859, 48869, 48871, 48883, 48889, 48907, 48947, 48953, 48973, 48989, 48991, 49003, 49009, 49019, 49031, 49033, 49037, 49043, 49057, 49069, 49081, 49103, 49109, 49117, 49121, 49123, 49139, 49157, 49169, 49171, 49177, 49193, 49199, 49201, 49207, 49211, 49223, 49253, 49261, 49277, 49279, 49297, 49307, 49331, 49333, 49339, 49363, 49367, 49369, 49391, 49393, 49409, 49411, 49417, 49429, 49433, 49451, 49459, 49463, 49477, 49481, 49499, 49523, 49529, 49531, 49537, 49547, 49549, 49559, 49597, 49603, 49613, 49627, 49633, 49639, 49663, 49667, 49669, 49681, 49697, 49711, 49727, 49739, 49741, 49747, 49757, 49783, 49787, 49789, 49801, 49807, 49811, 49823, 49831, 49843, 49853, 49871, 49877, 49891, 49919, 49921, 49927, 49937, 49939, 49943, 49957, 49991, 49993, 49999, 50021, 50023, 50033, 50047, 50051, 50053, 50069, 50077, 50087, 50093, 50101, 50111, 50119, 50123, 50129, 50131, 50147, 50153, 50159, 50177, 50207, 50221, 50227, 50231, 50261, 50263, 50273, 50287, 50291, 50311, 50321, 50329, 50333, 50341, 50359, 50363, 50377, 50383, 50387, 50411, 50417, 50423, 50441, 50459, 50461, 50497, 50503, 50513, 50527, 50539, 50543, 50549, 50551, 50581, 50587, 50591, 50593, 50599, 50627, 50647, 50651, 50671, 50683, 50707, 50723, 50741, 50753, 50767, 50773, 50777, 50789, 50821, 50833, 50839, 50849, 50857, 50867, 50873, 50891, 50893, 50909, 50923, 50929, 50951, 50957, 50969, 50971, 50989, 50993, 51001, 51031, 51043, 51047, 51059, 51061, 51071, 51109, 51131, 51133, 51137, 51151, 51157, 51169, 51193, 51197, 51199, 51203, 51217, 51229, 51239, 51241, 51257, 51263, 51283, 51287, 51307, 51329, 51341, 51343, 51347, 51349, 51361, 51383, 51407, 51413, 51419, 51421, 51427, 51431, 51437, 51439, 51449, 51461, 51473, 51479, 51481, 51487, 51503, 51511, 51517, 51521, 51539, 51551, 51563, 51577, 51581, 51593, 51599, 51607, 51613, 51631, 51637, 51647, 51659, 51673, 51679, 51683, 51691, 51713, 51719, 51721, 51749, 51767, 51769, 51787, 51797, 51803, 51817, 51827, 51829, 51839, 51853, 51859, 51869, 51871, 51893, 51899, 51907, 51913, 51929, 51941, 51949, 51971, 51973, 51977, 51991, 52009, 52021, 52027, 52051, 52057, 52067, 52069, 52081, 52103, 52121, 52127, 52147, 52153, 52163, 52177, 52181, 52183, 52189, 52201, 52223, 52237, 52249, 52253, 52259, 52267, 52289, 52291, 52301, 52313, 52321, 52361, 52363, 52369, 52379, 52387, 52391, 52433, 52453, 52457, 52489, 52501, 52511, 52517, 52529, 52541, 52543, 52553, 52561, 52567, 52571, 52579, 52583, 52609, 52627, 52631, 52639, 52667, 52673, 52691, 52697, 52709, 52711, 52721, 52727, 52733, 52747, 52757, 52769, 52783, 52807, 52813, 52817, 52837, 52859, 52861, 52879, 52883, 52889, 52901, 52903, 52919, 52937, 52951, 52957, 52963, 52967, 52973, 52981, 52999, 53003, 53017, 53047, 53051, 53069, 53077, 53087, 53089, 53093, 53101, 53113, 53117, 53129, 53147, 53149, 53161, 53171, 53173, 53189, 53197, 53201, 53231, 53233, 53239, 53267, 53269, 53279, 53281, 53299, 53309, 53323, 53327, 53353, 53359, 53377, 53381, 53401, 53407, 53411, 53419, 53437, 53441, 53453, 53479, 53503, 53507, 53527, 53549, 53551, 53569, 53591, 53593, 53597, 53609, 53611, 53617, 53623, 53629, 53633, 53639, 53653, 53657, 53681, 53693, 53699, 53717, 53719, 53731, 53759, 53773, 53777, 53783, 53791, 53813, 53819, 53831, 53849, 53857, 53861, 53881, 53887, 53891, 53897, 53899, 53917, 53923, 53927, 53939, 53951, 53959, 53987, 53993, 54001, 54011, 54013, 54037, 54049, 54059, 54083, 54091, 54101, 54121, 54133, 54139, 54151, 54163, 54167, 54181, 54193, 54217, 54251, 54269, 54277, 54287, 54293, 54311, 54319, 54323, 54331, 54347, 54361, 54367, 54371, 54377, 54401, 54403, 54409, 54413, 54419, 54421, 54437, 54443, 54449, 54469, 54493, 54497, 54499, 54503, 54517, 54521, 54539, 54541, 54547, 54559, 54563, 54577, 54581, 54583, 54601, 54617, 54623, 54629, 54631, 54647, 54667, 54673, 54679, 54709, 54713, 54721, 54727, 54751, 54767, 54773, 54779, 54787, 54799, 54829, 54833, 54851, 54869, 54877, 54881, 54907, 54917, 54919, 54941, 54949, 54959, 54973, 54979, 54983, 55001, 55009, 55021, 55049, 55051, 55057, 55061, 55073, 55079, 55103, 55109, 55117, 55127, 55147, 55163, 55171, 55201, 55207, 55213, 55217, 55219, 55229, 55243, 55249, 55259, 55291, 55313, 55331, 55333, 55337, 55339, 55343, 55351, 55373, 55381, 55399, 55411, 55439, 55441, 55457, 55469, 55487, 55501, 55511, 55529, 55541, 55547, 55579, 55589, 55603, 55609, 55619, 55621, 55631, 55633, 55639, 55661, 55663, 55667, 55673, 55681, 55691, 55697, 55711, 55717, 55721, 55733, 55763, 55787, 55793, 55799, 55807, 55813, 55817, 55819, 55823, 55829, 55837, 55843, 55849, 55871, 55889, 55897, 55901, 55903, 55921, 55927, 55931, 55933, 55949, 55967, 55987, 55997, 56003, 56009, 56039, 56041, 56053, 56081, 56087, 56093, 56099, 56101, 56113, 56123, 56131, 56149, 56167, 56171, 56179, 56197, 56207, 56209, 56237, 56239, 56249, 56263, 56267, 56269, 56299, 56311, 56333, 56359, 56369, 56377, 56383, 56393, 56401, 56417, 56431, 56437, 56443, 56453, 56467, 56473, 56477, 56479, 56489, 56501, 56503, 56509, 56519, 56527, 56531, 56533, 56543, 56569, 56591, 56597, 56599, 56611, 56629, 56633, 56659, 56663, 56671, 56681, 56687, 56701, 56711, 56713, 56731, 56737, 56747, 56767, 56773, 56779, 56783, 56807, 56809, 56813, 56821, 56827, 56843, 56857, 56873, 56891, 56893, 56897, 56909, 56911, 56921, 56923, 56929, 56941, 56951, 56957, 56963, 56983, 56989, 56993, 56999, 57037, 57041, 57047, 57059, 57073, 57077, 57089, 57097, 57107, 57119, 57131, 57139, 57143, 57149, 57163, 57173, 57179, 57191, 57193, 57203, 57221, 57223, 57241, 57251, 57259, 57269, 57271, 57283, 57287, 57301, 57329, 57331, 57347, 57349, 57367, 57373, 57383, 57389, 57397, 57413, 57427, 57457, 57467, 57487, 57493, 57503, 57527, 57529, 57557, 57559, 57571, 57587, 57593, 57601, 57637, 57641, 57649, 57653, 57667, 57679, 57689, 57697, 57709, 57713, 57719, 57727, 57731, 57737, 57751, 57773, 57781, 57787, 57791, 57793, 57803, 57809, 57829, 57839, 57847, 57853, 57859, 57881, 57899, 57901, 57917, 57923, 57943, 57947, 57973, 57977, 57991, 58013, 58027, 58031, 58043, 58049, 58057, 58061, 58067, 58073, 58099, 58109, 58111, 58129, 58147, 58151, 58153, 58169, 58171, 58189, 58193, 58199, 58207, 58211, 58217, 58229, 58231, 58237, 58243, 58271, 58309, 58313, 58321, 58337, 58363, 58367, 58369, 58379, 58391, 58393, 58403, 58411, 58417, 58427, 58439, 58441, 58451, 58453, 58477, 58481, 58511, 58537, 58543, 58549, 58567, 58573, 58579, 58601, 58603, 58613, 58631, 58657, 58661, 58679, 58687, 58693, 58699, 58711, 58727, 58733, 58741, 58757, 58763, 58771, 58787, 58789, 58831, 58889, 58897, 58901, 58907, 58909, 58913, 58921, 58937, 58943, 58963, 58967, 58979, 58991, 58997, 59009, 59011, 59021, 59023, 59029, 59051, 59053, 59063, 59069, 59077, 59083, 59093, 59107, 59113, 59119, 59123, 59141, 59149, 59159, 59167, 59183, 59197, 59207, 59209, 59219, 59221, 59233, 59239, 59243, 59263, 59273, 59281, 59333, 59341, 59351, 59357, 59359, 59369, 59377, 59387, 59393, 59399, 59407, 59417, 59419, 59441, 59443, 59447, 59453, 59467, 59471, 59473, 59497, 59509, 59513, 59539, 59557, 59561, 59567, 59581, 59611, 59617, 59621, 59627, 59629, 59651, 59659, 59663, 59669, 59671, 59693, 59699, 59707, 59723, 59729, 59743, 59747, 59753, 59771, 59779, 59791, 59797, 59809, 59833, 59863, 59879, 59887, 59921, 59929, 59951, 59957, 59971, 59981, 59999, 60013, 60017, 60029, 60037, 60041, 60077, 60083, 60089, 60091, 60101, 60103, 60107, 60127, 60133, 60139, 60149, 60161, 60167, 60169, 60209, 60217, 60223, 60251, 60257, 60259, 60271, 60289, 60293, 60317, 60331, 60337, 60343, 60353, 60373, 60383, 60397, 60413, 60427, 60443, 60449, 60457, 60493, 60497, 60509, 60521, 60527, 60539, 60589, 60601, 60607, 60611, 60617, 60623, 60631, 60637, 60647, 60649, 60659, 60661, 60679, 60689, 60703, 60719, 60727, 60733, 60737, 60757, 60761, 60763, 60773, 60779, 60793, 60811, 60821, 60859, 60869, 60887, 60889, 60899, 60901, 60913, 60917, 60919, 60923, 60937, 60943, 60953, 60961, 61001, 61007, 61027, 61031, 61043, 61051, 61057, 61091, 61099, 61121, 61129, 61141, 61151, 61153, 61169, 61211, 61223, 61231, 61253, 61261, 61283, 61291, 61297, 61331, 61333, 61339, 61343, 61357, 61363, 61379, 61381, 61403, 61409, 61417, 61441, 61463, 61469, 61471, 61483, 61487, 61493, 61507, 61511, 61519, 61543, 61547, 61553, 61559, 61561, 61583, 61603, 61609, 61613, 61627, 61631, 61637, 61643, 61651, 61657, 61667, 61673, 61681, 61687, 61703, 61717, 61723, 61729, 61751, 61757, 61781, 61813, 61819, 61837, 61843, 61861, 61871, 61879, 61909, 61927, 61933, 61949, 61961, 61967, 61979, 61981, 61987, 61991, 62003, 62011, 62017, 62039, 62047, 62053, 62057, 62071, 62081, 62099, 62119, 62129, 62131, 62137, 62141, 62143, 62171, 62189, 62191, 62201, 62207, 62213, 62219, 62233, 62273, 62297, 62299, 62303, 62311, 62323, 62327, 62347, 62351, 62383, 62401, 62417, 62423, 62459, 62467, 62473, 62477, 62483, 62497, 62501, 62507, 62533, 62539, 62549, 62563, 62581, 62591, 62597, 62603, 62617, 62627, 62633, 62639, 62653, 62659, 62683, 62687, 62701, 62723, 62731, 62743, 62753, 62761, 62773, 62791, 62801, 62819, 62827, 62851, 62861, 62869, 62873, 62897, 62903, 62921, 62927, 62929, 62939, 62969, 62971, 62981, 62983, 62987, 62989, 63029, 63031, 63059, 63067, 63073, 63079, 63097, 63103, 63113, 63127, 63131, 63149, 63179, 63197, 63199, 63211, 63241, 63247, 63277, 63281, 63299, 63311, 63313, 63317, 63331, 63337, 63347, 63353, 63361, 63367, 63377, 63389, 63391, 63397, 63409, 63419, 63421, 63439, 63443, 63463, 63467, 63473, 63487, 63493, 63499, 63521, 63527, 63533, 63541, 63559, 63577, 63587, 63589, 63599, 63601, 63607, 63611, 63617, 63629, 63647, 63649, 63659, 63667, 63671, 63689, 63691, 63697, 63703, 63709, 63719, 63727, 63737, 63743, 63761, 63773, 63781, 63793, 63799, 63803, 63809, 63823, 63839, 63841, 63853, 63857, 63863, 63901, 63907, 63913, 63929, 63949, 63977, 63997, 64007, 64013, 64019, 64033, 64037, 64063, 64067, 64081, 64091, 64109, 64123, 64151, 64153, 64157, 64171, 64187, 64189, 64217, 64223, 64231, 64237, 64271, 64279, 64283, 64301, 64303, 64319, 64327, 64333, 64373, 64381, 64399, 64403, 64433, 64439, 64451, 64453, 64483, 64489, 64499, 64513, 64553, 64567, 64577, 64579, 64591, 64601, 64609, 64613, 64621, 64627, 64633, 64661, 64663, 64667, 64679, 64693, 64709, 64717, 64747, 64763, 64781, 64783, 64793, 64811, 64817, 64849, 64853, 64871, 64877, 64879, 64891, 64901, 64919, 64921, 64927, 64937, 64951, 64969, 64997, 65003, 65011, 65027, 65029, 65033, 65053, 65063, 65071, 65089, 65099, 65101, 65111, 65119, 65123, 65129, 65141, 65147, 65167, 65171, 65173, 65179, 65183, 65203, 65213, 65239, 65257, 65267, 65269, 65287, 65293, 65309, 65323, 65327, 65353, 65357, 65371, 65381, 65393, 65407, 65413, 65419, 65423, 65437, 65447, 65449, 65479, 65497, 65519, 65521, 65537, 65539, 65543, 65551, 65557, 65563, 65579, 65581, 65587, 65599, 65609, 65617, 65629, 65633, 65647, 65651, 65657, 65677, 65687, 65699, 65701, 65707, 65713, 65717, 65719, 65729, 65731, 65761, 65777, 65789, 65809, 65827, 65831, 65837, 65839, 65843, 65851, 65867, 65881, 65899, 65921, 65927, 65929, 65951, 65957, 65963, 65981, 65983, 65993, 66029, 66037, 66041, 66047, 66067, 66071, 66083, 66089, 66103, 66107, 66109, 66137, 66161, 66169, 66173, 66179, 66191, 66221, 66239, 66271, 66293, 66301, 66337, 66343, 66347, 66359, 66361, 66373, 66377, 66383, 66403, 66413, 66431, 66449, 66457, 66463, 66467, 66491, 66499, 66509, 66523, 66529, 66533, 66541, 66553, 66569, 66571, 66587, 66593, 66601, 66617, 66629, 66643, 66653, 66683, 66697, 66701, 66713, 66721, 66733, 66739, 66749, 66751, 66763, 66791, 66797, 66809, 66821, 66841, 66851, 66853, 66863, 66877, 66883, 66889, 66919, 66923, 66931, 66943, 66947, 66949, 66959, 66973, 66977, 67003, 67021, 67033, 67043, 67049, 67057, 67061, 67073, 67079, 67103, 67121, 67129, 67139, 67141, 67153, 67157, 67169, 67181, 67187, 67189, 67211, 67213, 67217, 67219, 67231, 67247, 67261, 67271, 67273, 67289, 67307, 67339, 67343, 67349, 67369, 67391, 67399, 67409, 67411, 67421, 67427, 67429, 67433, 67447, 67453, 67477, 67481, 67489, 67493, 67499, 67511, 67523, 67531, 67537, 67547, 67559, 67567, 67577, 67579, 67589, 67601, 67607, 67619, 67631, 67651, 67679, 67699, 67709, 67723, 67733, 67741, 67751, 67757, 67759, 67763, 67777, 67783, 67789, 67801, 67807, 67819, 67829, 67843, 67853, 67867, 67883, 67891, 67901, 67927, 67931, 67933, 67939, 67943, 67957, 67961, 67967, 67979, 67987, 67993, 68023, 68041, 68053, 68059, 68071, 68087, 68099, 68111, 68113, 68141, 68147, 68161, 68171, 68207, 68209, 68213, 68219, 68227, 68239, 68261, 68279, 68281, 68311, 68329, 68351, 68371, 68389, 68399, 68437, 68443, 68447, 68449, 68473, 68477, 68483, 68489, 68491, 68501, 68507, 68521, 68531, 68539, 68543, 68567, 68581, 68597, 68611, 68633, 68639, 68659, 68669, 68683, 68687, 68699, 68711, 68713, 68729, 68737, 68743, 68749, 68767, 68771, 68777, 68791, 68813, 68819, 68821, 68863, 68879, 68881, 68891, 68897, 68899, 68903, 68909, 68917, 68927, 68947, 68963, 68993, 69001, 69011, 69019, 69029, 69031, 69061, 69067, 69073, 69109, 69119, 69127, 69143, 69149, 69151, 69163, 69191, 69193, 69197, 69203, 69221, 69233, 69239, 69247, 69257, 69259, 69263, 69313, 69317, 69337, 69341, 69371, 69379, 69383, 69389, 69401, 69403, 69427, 69431, 69439, 69457, 69463, 69467, 69473, 69481, 69491, 69493, 69497, 69499, 69539, 69557, 69593, 69623, 69653, 69661, 69677, 69691, 69697, 69709, 69737, 69739, 69761, 69763, 69767, 69779, 69809, 69821, 69827, 69829, 69833, 69847, 69857, 69859, 69877, 69899, 69911, 69929, 69931, 69941, 69959, 69991, 69997, 70001, 70003, 70009, 70019, 70039, 70051, 70061, 70067, 70079, 70099, 70111, 70117, 70121, 70123, 70139, 70141, 70157, 70163, 70177, 70181, 70183, 70199, 70201, 70207, 70223, 70229, 70237, 70241, 70249, 70271, 70289, 70297, 70309, 70313, 70321, 70327, 70351, 70373, 70379, 70381, 70393, 70423, 70429, 70439, 70451, 70457, 70459, 70481, 70487, 70489, 70501, 70507, 70529, 70537, 70549, 70571, 70573, 70583, 70589, 70607, 70619, 70621, 70627, 70639, 70657, 70663, 70667, 70687, 70709, 70717, 70729, 70753, 70769, 70783, 70793, 70823, 70841, 70843, 70849, 70853, 70867, 70877, 70879, 70891, 70901, 70913, 70919, 70921, 70937, 70949, 70951, 70957, 70969, 70979, 70981, 70991, 70997, 70999, 71011, 71023, 71039, 71059, 71069, 71081, 71089, 71119, 71129, 71143, 71147, 71153, 71161, 71167, 71171, 71191, 71209, 71233, 71237, 71249, 71257, 71261, 71263, 71287, 71293, 71317, 71327, 71329, 71333, 71339, 71341, 71347, 71353, 71359, 71363, 71387, 71389, 71399, 71411, 71413, 71419, 71429, 71437, 71443, 71453, 71471, 71473, 71479, 71483, 71503, 71527, 71537, 71549, 71551, 71563, 71569, 71593, 71597, 71633, 71647, 71663, 71671, 71693, 71699, 71707, 71711, 71713, 71719, 71741, 71761, 71777, 71789, 71807, 71809, 71821, 71837, 71843, 71849, 71861, 71867, 71879, 71881, 71887, 71899, 71909, 71917, 71933, 71941, 71947, 71963, 71971, 71983, 71987, 71993, 71999, 72019, 72031, 72043, 72047, 72053, 72073, 72077, 72089, 72091, 72101, 72103, 72109, 72139, 72161, 72167, 72169, 72173, 72211, 72221, 72223, 72227, 72229, 72251, 72253, 72269, 72271, 72277, 72287, 72307, 72313, 72337, 72341, 72353, 72367, 72379, 72383, 72421, 72431, 72461, 72467, 72469, 72481, 72493, 72497, 72503, 72533, 72547, 72551, 72559, 72577, 72613, 72617, 72623, 72643, 72647, 72649, 72661, 72671, 72673, 72679, 72689, 72701, 72707, 72719, 72727, 72733, 72739, 72763, 72767, 72797, 72817, 72823, 72859, 72869, 72871, 72883, 72889, 72893, 72901, 72907, 72911, 72923, 72931, 72937, 72949, 72953, 72959, 72973, 72977, 72997, 73009, 73013, 73019, 73037, 73039, 73043, 73061, 73063, 73079, 73091, 73121, 73127, 73133, 73141, 73181, 73189, 73237, 73243, 73259, 73277, 73291, 73303, 73309, 73327, 73331, 73351, 73361, 73363, 73369, 73379, 73387, 73417, 73421, 73433, 73453, 73459, 73471, 73477, 73483, 73517, 73523, 73529, 73547, 73553, 73561, 73571, 73583, 73589, 73597, 73607, 73609, 73613, 73637, 73643, 73651, 73673, 73679, 73681, 73693, 73699, 73709, 73721, 73727, 73751, 73757, 73771, 73783, 73819, 73823, 73847, 73849, 73859, 73867, 73877, 73883, 73897, 73907, 73939, 73943, 73951, 73961, 73973, 73999, 74017, 74021, 74027, 74047, 74051, 74071, 74077, 74093, 74099, 74101, 74131, 74143, 74149, 74159, 74161, 74167, 74177, 74189, 74197, 74201, 74203, 74209, 74219, 74231, 74257, 74279, 74287, 74293, 74297, 74311, 74317, 74323, 74353, 74357, 74363, 74377, 74381, 74383, 74411, 74413, 74419, 74441, 74449, 74453, 74471, 74489, 74507, 74509, 74521, 74527, 74531, 74551, 74561, 74567, 74573, 74587, 74597, 74609, 74611, 74623, 74653, 74687, 74699, 74707, 74713, 74717, 74719, 74729, 74731, 74747, 74759, 74761, 74771, 74779, 74797, 74821, 74827, 74831, 74843, 74857, 74861, 74869, 74873, 74887, 74891, 74897, 74903, 74923, 74929, 74933, 74941, 74959, 75011, 75013, 75017, 75029, 75037, 75041, 75079, 75083, 75109, 75133, 75149, 75161, 75167, 75169, 75181, 75193, 75209, 75211, 75217, 75223, 75227, 75239, 75253, 75269, 75277, 75289, 75307, 75323, 75329, 75337, 75347, 75353, 75367, 75377, 75389, 75391, 75401, 75403, 75407, 75431, 75437, 75479, 75503, 75511, 75521, 75527, 75533, 75539, 75541, 75553, 75557, 75571, 75577, 75583, 75611, 75617, 75619, 75629, 75641, 75653, 75659, 75679, 75683, 75689, 75703, 75707, 75709, 75721, 75731, 75743, 75767, 75773, 75781, 75787, 75793, 75797, 75821, 75833, 75853, 75869, 75883, 75913, 75931, 75937, 75941, 75967, 75979, 75983, 75989, 75991, 75997, 76001, 76003, 76031, 76039, 76079, 76081, 76091, 76099, 76103, 76123, 76129, 76147, 76157, 76159, 76163, 76207, 76213, 76231, 76243, 76249, 76253, 76259, 76261, 76283, 76289, 76303, 76333, 76343, 76367, 76369, 76379, 76387, 76403, 76421, 76423, 76441, 76463, 76471, 76481, 76487, 76493, 76507, 76511, 76519, 76537, 76541, 76543, 76561, 76579, 76597, 76603, 76607, 76631, 76649, 76651, 76667, 76673, 76679, 76697, 76717, 76733, 76753, 76757, 76771, 76777, 76781, 76801, 76819, 76829, 76831, 76837, 76847, 76871, 76873, 76883, 76907, 76913, 76919, 76943, 76949, 76961, 76963, 76991, 77003, 77017, 77023, 77029, 77041, 77047, 77069, 77081, 77093, 77101, 77137, 77141, 77153, 77167, 77171, 77191, 77201, 77213, 77237, 77239, 77243, 77249, 77261, 77263, 77267, 77269, 77279, 77291, 77317, 77323, 77339, 77347, 77351, 77359, 77369, 77377, 77383, 77417, 77419, 77431, 77447, 77471, 77477, 77479, 77489, 77491, 77509, 77513, 77521, 77527, 77543, 77549, 77551, 77557, 77563, 77569, 77573, 77587, 77591, 77611, 77617, 77621, 77641, 77647, 77659, 77681, 77687, 77689, 77699, 77711, 77713, 77719, 77723, 77731, 77743, 77747, 77761, 77773, 77783, 77797, 77801, 77813, 77839, 77849, 77863, 77867, 77893, 77899, 77929, 77933, 77951, 77969, 77977, 77983, 77999, 78007, 78017, 78031, 78041, 78049, 78059, 78079, 78101, 78121, 78137, 78139, 78157, 78163, 78167, 78173, 78179, 78191, 78193, 78203, 78229, 78233, 78241, 78259, 78277, 78283, 78301, 78307, 78311, 78317, 78341, 78347, 78367, 78401, 78427, 78437, 78439, 78467, 78479, 78487, 78497, 78509, 78511, 78517, 78539, 78541, 78553, 78569, 78571, 78577, 78583, 78593, 78607, 78623, 78643, 78649, 78653, 78691, 78697, 78707, 78713, 78721, 78737, 78779, 78781, 78787, 78791, 78797, 78803, 78809, 78823, 78839, 78853, 78857, 78877, 78887, 78889, 78893, 78901, 78919, 78929, 78941, 78977, 78979, 78989, 79031, 79039, 79043, 79063, 79087, 79103, 79111, 79133, 79139, 79147, 79151, 79153, 79159, 79181, 79187, 79193, 79201, 79229, 79231, 79241, 79259, 79273, 79279, 79283, 79301, 79309, 79319, 79333, 79337, 79349, 79357, 79367, 79379, 79393, 79397, 79399, 79411, 79423, 79427, 79433, 79451, 79481, 79493, 79531, 79537, 79549, 79559, 79561, 79579, 79589, 79601, 79609, 79613, 79621, 79627, 79631, 79633, 79657, 79669, 79687, 79691, 79693, 79697, 79699, 79757, 79769, 79777, 79801, 79811, 79813, 79817, 79823, 79829, 79841, 79843, 79847, 79861, 79867, 79873, 79889, 79901, 79903, 79907, 79939, 79943, 79967, 79973, 79979, 79987, 79997, 79999, 80021, 80039, 80051, 80071, 80077, 80107, 80111, 80141, 80147, 80149, 80153, 80167, 80173, 80177, 80191, 80207, 80209, 80221, 80231, 80233, 80239, 80251, 80263, 80273, 80279, 80287, 80309, 80317, 80329, 80341, 80347, 80363, 80369, 80387, 80407, 80429, 80447, 80449, 80471, 80473, 80489, 80491, 80513, 80527, 80537, 80557, 80567, 80599, 80603, 80611, 80621, 80627, 80629, 80651, 80657, 80669, 80671, 80677, 80681, 80683, 80687, 80701, 80713, 80737, 80747, 80749, 80761, 80777, 80779, 80783, 80789, 80803, 80809, 80819, 80831, 80833, 80849, 80863, 80897, 80909, 80911, 80917, 80923, 80929, 80933, 80953, 80963, 80989, 81001, 81013, 81017, 81019, 81023, 81031, 81041, 81043, 81047, 81049, 81071, 81077, 81083, 81097, 81101, 81119, 81131, 81157, 81163, 81173, 81181, 81197, 81199, 81203, 81223, 81233, 81239, 81281, 81283, 81293, 81299, 81307, 81331, 81343, 81349, 81353, 81359, 81371, 81373, 81401, 81409, 81421, 81439, 81457, 81463, 81509, 81517, 81527, 81533, 81547, 81551, 81553, 81559, 81563, 81569, 81611, 81619, 81629, 81637, 81647, 81649, 81667, 81671, 81677, 81689, 81701, 81703, 81707, 81727, 81737, 81749, 81761, 81769, 81773, 81799, 81817, 81839, 81847, 81853, 81869, 81883, 81899, 81901, 81919, 81929, 81931, 81937, 81943, 81953, 81967, 81971, 81973, 82003, 82007, 82009, 82013, 82021, 82031, 82037, 82039, 82051, 82067, 82073, 82129, 82139, 82141, 82153, 82163, 82171, 82183, 82189, 82193, 82207, 82217, 82219, 82223, 82231, 82237, 82241, 82261, 82267, 82279, 82301, 82307, 82339, 82349, 82351, 82361, 82373, 82387, 82393, 82421, 82457, 82463, 82469, 82471, 82483, 82487, 82493, 82499, 82507, 82529, 82531, 82549, 82559, 82561, 82567, 82571, 82591, 82601, 82609, 82613, 82619, 82633, 82651, 82657, 82699, 82721, 82723, 82727, 82729, 82757, 82759, 82763, 82781, 82787, 82793, 82799, 82811, 82813, 82837, 82847, 82883, 82889, 82891, 82903, 82913, 82939, 82963, 82981, 82997, 83003, 83009, 83023, 83047, 83059, 83063, 83071, 83077, 83089, 83093, 83101, 83117, 83137, 83177, 83203, 83207, 83219, 83221, 83227, 83231, 83233, 83243, 83257, 83267, 83269, 83273, 83299, 83311, 83339, 83341, 83357, 83383, 83389, 83399, 83401, 83407, 83417, 83423, 83431, 83437, 83443, 83449, 83459, 83471, 83477, 83497, 83537, 83557, 83561, 83563, 83579, 83591, 83597, 83609, 83617, 83621, 83639, 83641, 83653, 83663, 83689, 83701, 83717, 83719, 83737, 83761, 83773, 83777, 83791, 83813, 83833, 83843, 83857, 83869, 83873, 83891, 83903, 83911, 83921, 83933, 83939, 83969, 83983, 83987, 84011, 84017, 84047, 84053, 84059, 84061, 84067, 84089, 84121, 84127, 84131, 84137, 84143, 84163, 84179, 84181, 84191, 84199, 84211, 84221, 84223, 84229, 84239, 84247, 84263, 84299, 84307, 84313, 84317, 84319, 84347, 84349, 84377, 84389, 84391, 84401, 84407, 84421, 84431, 84437, 84443, 84449, 84457, 84463, 84467, 84481, 84499, 84503, 84509, 84521, 84523, 84533, 84551, 84559, 84589, 84629, 84631, 84649, 84653, 84659, 84673, 84691, 84697, 84701, 84713, 84719, 84731, 84737, 84751, 84761, 84787, 84793, 84809, 84811, 84827, 84857, 84859, 84869, 84871, 84913, 84919, 84947, 84961, 84967, 84977, 84979, 84991, 85009, 85021, 85027, 85037, 85049, 85061, 85081, 85087, 85091, 85093, 85103, 85109, 85121, 85133, 85147, 85159, 85193, 85199, 85201, 85213, 85223, 85229, 85237, 85243, 85247, 85259, 85297, 85303, 85313, 85331, 85333, 85361, 85363, 85369, 85381, 85411, 85427, 85429, 85439, 85447, 85451, 85453, 85469, 85487, 85513, 85517, 85523, 85531, 85549, 85571, 85577, 85597, 85601, 85607, 85619, 85621, 85627, 85639, 85643, 85661, 85667, 85669, 85691, 85703, 85711, 85717, 85733, 85751, 85781, 85793, 85817, 85819, 85829, 85831, 85837, 85843, 85847, 85853, 85889, 85903, 85909, 85931, 85933, 85991, 85999, 86011, 86017, 86027, 86029, 86069, 86077, 86083, 86111, 86113, 86117, 86131, 86137, 86143, 86161, 86171, 86179, 86183, 86197, 86201, 86209, 86239, 86243, 86249, 86257, 86263, 86269, 86287, 86291, 86293, 86297, 86311, 86323, 86341, 86351, 86353, 86357, 86369, 86371, 86381, 86389, 86399, 86413, 86423, 86441, 86453, 86461, 86467, 86477, 86491, 86501, 86509, 86531, 86533, 86539, 86561, 86573, 86579, 86587, 86599, 86627, 86629, 86677, 86689, 86693, 86711, 86719, 86729, 86743, 86753, 86767, 86771, 86783, 86813, 86837, 86843, 86851, 86857, 86861, 86869, 86923, 86927, 86929, 86939, 86951, 86959, 86969, 86981, 86993, 87011, 87013, 87037, 87041, 87049, 87071, 87083, 87103, 87107, 87119, 87121, 87133, 87149, 87151, 87179, 87181, 87187, 87211, 87221, 87223, 87251, 87253, 87257, 87277, 87281, 87293, 87299, 87313, 87317, 87323, 87337, 87359, 87383, 87403, 87407, 87421, 87427, 87433, 87443, 87473, 87481, 87491, 87509, 87511, 87517, 87523, 87539, 87541, 87547, 87553, 87557, 87559, 87583, 87587, 87589, 87613, 87623, 87629, 87631, 87641, 87643, 87649, 87671, 87679, 87683, 87691, 87697, 87701, 87719, 87721, 87739, 87743, 87751, 87767, 87793, 87797, 87803, 87811, 87833, 87853, 87869, 87877, 87881, 87887, 87911, 87917, 87931, 87943, 87959, 87961, 87973, 87977, 87991, 88001, 88003, 88007, 88019, 88037, 88069, 88079, 88093, 88117, 88129, 88169, 88177, 88211, 88223, 88237, 88241, 88259, 88261, 88289, 88301, 88321, 88327, 88337, 88339, 88379, 88397, 88411, 88423, 88427, 88463, 88469, 88471, 88493, 88499, 88513, 88523, 88547, 88589, 88591, 88607, 88609, 88643, 88651, 88657, 88661, 88663, 88667, 88681, 88721, 88729, 88741, 88747, 88771, 88789, 88793, 88799, 88801, 88807, 88811, 88813, 88817, 88819, 88843, 88853, 88861, 88867, 88873, 88883, 88897, 88903, 88919, 88937, 88951, 88969, 88993, 88997, 89003, 89009, 89017, 89021, 89041, 89051, 89057, 89069, 89071, 89083, 89087, 89101, 89107, 89113, 89119, 89123, 89137, 89153, 89189, 89203, 89209, 89213, 89227, 89231, 89237, 89261, 89269, 89273, 89293, 89303, 89317, 89329, 89363, 89371, 89381, 89387, 89393, 89399, 89413, 89417, 89431, 89443, 89449, 89459, 89477, 89491, 89501, 89513, 89519, 89521, 89527, 89533, 89561, 89563, 89567, 89591, 89597, 89599, 89603, 89611, 89627, 89633, 89653, 89657, 89659, 89669, 89671, 89681, 89689, 89753, 89759, 89767, 89779, 89783, 89797, 89809, 89819, 89821, 89833, 89839, 89849, 89867, 89891, 89897, 89899, 89909, 89917, 89923, 89939, 89959, 89963, 89977, 89983, 89989, 90001, 90007, 90011, 90017, 90019, 90023, 90031, 90053, 90059, 90067, 90071, 90073, 90089, 90107, 90121, 90127, 90149, 90163, 90173, 90187, 90191, 90197, 90199, 90203, 90217, 90227, 90239, 90247, 90263, 90271, 90281, 90289, 90313, 90353, 90359, 90371, 90373, 90379, 90397, 90401, 90403, 90407, 90437, 90439, 90469, 90473, 90481, 90499, 90511, 90523, 90527, 90529, 90533, 90547, 90583, 90599, 90617, 90619, 90631, 90641, 90647, 90659, 90677, 90679, 90697, 90703, 90709, 90731, 90749, 90787, 90793, 90803, 90821, 90823, 90833, 90841, 90847, 90863, 90887, 90901, 90907, 90911, 90917, 90931, 90947, 90971, 90977, 90989, 90997, 91009, 91019, 91033, 91079, 91081, 91097, 91099, 91121, 91127, 91129, 91139, 91141, 91151, 91153, 91159, 91163, 91183, 91193, 91199, 91229, 91237, 91243, 91249, 91253, 91283, 91291, 91297, 91303, 91309, 91331, 91367, 91369, 91373, 91381, 91387, 91393, 91397, 91411, 91423, 91433, 91453, 91457, 91459, 91463, 91493, 91499, 91513, 91529, 91541, 91571, 91573, 91577, 91583, 91591, 91621, 91631, 91639, 91673, 91691, 91703, 91711, 91733, 91753, 91757, 91771, 91781, 91801, 91807, 91811, 91813, 91823, 91837, 91841, 91867, 91873, 91909, 91921, 91939, 91943, 91951, 91957, 91961, 91967, 91969, 91997, 92003, 92009, 92033, 92041, 92051, 92077, 92083, 92107, 92111, 92119, 92143, 92153, 92173, 92177, 92179, 92189, 92203, 92219, 92221, 92227, 92233, 92237, 92243, 92251, 92269, 92297, 92311, 92317, 92333, 92347, 92353, 92357, 92363, 92369, 92377, 92381, 92383, 92387, 92399, 92401, 92413, 92419, 92431, 92459, 92461, 92467, 92479, 92489, 92503, 92507, 92551, 92557, 92567, 92569, 92581, 92593, 92623, 92627, 92639, 92641, 92647, 92657, 92669, 92671, 92681, 92683, 92693, 92699, 92707, 92717, 92723, 92737, 92753, 92761, 92767, 92779, 92789, 92791, 92801, 92809, 92821, 92831, 92849, 92857, 92861, 92863, 92867, 92893, 92899, 92921, 92927, 92941, 92951, 92957, 92959, 92987, 92993, 93001, 93047, 93053, 93059, 93077, 93083, 93089, 93097, 93103, 93113, 93131, 93133, 93139, 93151, 93169, 93179, 93187, 93199, 93229, 93239, 93241, 93251, 93253, 93257, 93263, 93281, 93283, 93287, 93307, 93319, 93323, 93329, 93337, 93371, 93377, 93383, 93407, 93419, 93427, 93463, 93479, 93481, 93487, 93491, 93493, 93497, 93503, 93523, 93529, 93553, 93557, 93559, 93563, 93581, 93601, 93607, 93629, 93637, 93683, 93701, 93703, 93719, 93739, 93761, 93763, 93787, 93809, 93811, 93827, 93851, 93871, 93887, 93889, 93893, 93901, 93911, 93913, 93923, 93937, 93941, 93949, 93967, 93971, 93979, 93983, 93997, 94007, 94009, 94033, 94049, 94057, 94063, 94079, 94099, 94109, 94111, 94117, 94121, 94151, 94153, 94169, 94201, 94207, 94219, 94229, 94253, 94261, 94273, 94291, 94307, 94309, 94321, 94327, 94331, 94343, 94349, 94351, 94379, 94397, 94399, 94421, 94427, 94433, 94439, 94441, 94447, 94463, 94477, 94483, 94513, 94529, 94531, 94541, 94543, 94547, 94559, 94561, 94573, 94583, 94597, 94603, 94613, 94621, 94649, 94651, 94687, 94693, 94709, 94723, 94727, 94747, 94771, 94777, 94781, 94789, 94793, 94811, 94819, 94823, 94837, 94841, 94847, 94849, 94873, 94889, 94903, 94907, 94933, 94949, 94951, 94961, 94993, 94999, 95003, 95009, 95021, 95027, 95063, 95071, 95083, 95087, 95089, 95093, 95101, 95107, 95111, 95131, 95143, 95153, 95177, 95189, 95191, 95203, 95213, 95219, 95231, 95233, 95239, 95257, 95261, 95267, 95273, 95279, 95287, 95311, 95317, 95327, 95339, 95369, 95383, 95393, 95401, 95413, 95419, 95429, 95441, 95443, 95461, 95467, 95471, 95479, 95483, 95507, 95527, 95531, 95539, 95549, 95561, 95569, 95581, 95597, 95603, 95617, 95621, 95629, 95633, 95651, 95701, 95707, 95713, 95717, 95723, 95731, 95737, 95747, 95773, 95783, 95789, 95791, 95801, 95803, 95813, 95819, 95857, 95869, 95873, 95881, 95891, 95911, 95917, 95923, 95929, 95947, 95957, 95959, 95971, 95987, 95989, 96001, 96013, 96017, 96043, 96053, 96059, 96079, 96097, 96137, 96149, 96157, 96167, 96179, 96181, 96199, 96211, 96221, 96223, 96233, 96259, 96263, 96269, 96281, 96289, 96293, 96323, 96329, 96331, 96337, 96353, 96377, 96401, 96419, 96431, 96443, 96451, 96457, 96461, 96469, 96479, 96487, 96493, 96497, 96517, 96527, 96553, 96557, 96581, 96587, 96589, 96601, 96643, 96661, 96667, 96671, 96697, 96703, 96731, 96737, 96739, 96749, 96757, 96763, 96769, 96779, 96787, 96797, 96799, 96821, 96823, 96827, 96847, 96851, 96857, 96893, 96907, 96911, 96931, 96953, 96959, 96973, 96979, 96989, 96997, 97001, 97003, 97007, 97021, 97039, 97073, 97081, 97103, 97117, 97127, 97151, 97157, 97159, 97169, 97171, 97177, 97187, 97213, 97231, 97241, 97259, 97283, 97301, 97303, 97327, 97367, 97369, 97373, 97379, 97381, 97387, 97397, 97423, 97429, 97441, 97453, 97459, 97463, 97499, 97501, 97511, 97523, 97547, 97549, 97553, 97561, 97571, 97577, 97579, 97583, 97607, 97609, 97613, 97649, 97651, 97673, 97687, 97711, 97729, 97771, 97777, 97787, 97789, 97813, 97829, 97841, 97843, 97847, 97849, 97859, 97861, 97871, 97879, 97883, 97919, 97927, 97931, 97943, 97961, 97967, 97973, 97987, 98009, 98011, 98017, 98041, 98047, 98057, 98081, 98101, 98123, 98129, 98143, 98179, 98207, 98213, 98221, 98227, 98251, 98257, 98269, 98297, 98299, 98317, 98321, 98323, 98327, 98347, 98369, 98377, 98387, 98389, 98407, 98411, 98419, 98429, 98443, 98453, 98459, 98467, 98473, 98479, 98491, 98507, 98519, 98533, 98543, 98561, 98563, 98573, 98597, 98621, 98627, 98639, 98641, 98663, 98669, 98689, 98711, 98713, 98717, 98729, 98731, 98737, 98773, 98779, 98801, 98807, 98809, 98837, 98849, 98867, 98869, 98873, 98887, 98893, 98897, 98899, 98909, 98911, 98927, 98929, 98939, 98947, 98953, 98963, 98981, 98993, 98999, 99013, 99017, 99023, 99041, 99053, 99079, 99083, 99089, 99103, 99109, 99119, 99131, 99133, 99137, 99139, 99149, 99173, 99181, 99191, 99223, 99233, 99241, 99251, 99257, 99259, 99277, 99289, 99317, 99347, 99349, 99367, 99371, 99377, 99391, 99397, 99401, 99409, 99431, 99439, 99469, 99487, 99497, 99523, 99527, 99529, 99551, 99559, 99563, 99571, 99577, 99581, 99607, 99611, 99623, 99643, 99661, 99667, 99679, 99689, 99707, 99709, 99713, 99719, 99721, 99733, 99761, 99767, 99787, 99793, 99809, 99817, 99823, 99829, 99833, 99839, 99859, 99871, 99877, 99881, 99901, 99907, 99923, 99929, 99961, 99971, 99989, 99991, 100003, 100019, 100043, 100049, 100057, 100069, 100103, 100109, 100129, 100151, 100153, 100169, 100183, 100189, 100193, 100207, 100213, 100237, 100267, 100271, 100279, 100291, 100297, 100313, 100333, 100343, 100357, 100361, 100363, 100379, 100391, 100393, 100403, 100411, 100417, 100447, 100459, 100469, 100483, 100493, 100501, 100511, 100517, 100519, 100523, 100537, 100547, 100549, 100559, 100591, 100609, 100613, 100621, 100649, 100669, 100673, 100693, 100699, 100703, 100733, 100741, 100747, 100769, 100787, 100799, 100801, 100811, 100823, 100829, 100847, 100853, 100907, 100913, 100927, 100931, 100937, 100943, 100957, 100981, 100987, 100999, 101009, 101021, 101027, 101051, 101063, 101081, 101089, 101107, 101111, 101113, 101117, 101119, 101141, 101149, 101159, 101161, 101173, 101183, 101197, 101203, 101207, 101209, 101221, 101267, 101273, 101279, 101281, 101287, 101293, 101323, 101333, 101341, 101347, 101359, 101363, 101377, 101383, 101399, 101411, 101419, 101429, 101449, 101467, 101477, 101483, 101489, 101501, 101503, 101513, 101527, 101531, 101533, 101537, 101561, 101573, 101581, 101599, 101603, 101611, 101627, 101641, 101653, 101663, 101681, 101693, 101701, 101719, 101723, 101737, 101741, 101747, 101749, 101771, 101789, 101797, 101807, 101833, 101837, 101839, 101863, 101869, 101873, 101879, 101891, 101917, 101921, 101929, 101939, 101957, 101963, 101977, 101987, 101999, 102001, 102013, 102019, 102023, 102031, 102043, 102059, 102061, 102071, 102077, 102079, 102101, 102103, 102107, 102121, 102139, 102149, 102161, 102181, 102191, 102197, 102199, 102203, 102217, 102229, 102233, 102241, 102251, 102253, 102259, 102293, 102299, 102301, 102317, 102329, 102337, 102359, 102367, 102397, 102407, 102409, 102433, 102437, 102451, 102461, 102481, 102497, 102499, 102503, 102523, 102533, 102539, 102547, 102551, 102559, 102563, 102587, 102593, 102607, 102611, 102643, 102647, 102653, 102667, 102673, 102677, 102679, 102701, 102761, 102763, 102769, 102793, 102797, 102811, 102829, 102841, 102859, 102871, 102877, 102881, 102911, 102913, 102929, 102931, 102953, 102967, 102983, 103001, 103007, 103043, 103049, 103067, 103069, 103079, 103087, 103091, 103093, 103099, 103123, 103141, 103171, 103177, 103183, 103217, 103231, 103237, 103289, 103291, 103307, 103319, 103333, 103349, 103357, 103387, 103391, 103393, 103399, 103409, 103421, 103423, 103451, 103457, 103471, 103483, 103511, 103529, 103549, 103553, 103561, 103567, 103573, 103577, 103583, 103591, 103613, 103619, 103643, 103651, 103657, 103669, 103681, 103687, 103699, 103703, 103723, 103769, 103787, 103801, 103811, 103813, 103837, 103841, 103843, 103867, 103889, 103903, 103913, 103919, 103951, 103963, 103967, 103969, 103979, 103981, 103991, 103993, 103997, 104003, 104009, 104021, 104033, 104047, 104053, 104059, 104087, 104089, 104107, 104113, 104119, 104123, 104147, 104149, 104161, 104173, 104179, 104183, 104207, 104231, 104233, 104239, 104243, 104281, 104287, 104297, 104309, 104311, 104323, 104327, 104347, 104369, 104381, 104383, 104393, 104399, 104417, 104459, 104471, 104473, 104479, 104491, 104513, 104527, 104537, 104543, 104549, 104551, 104561, 104579, 104593, 104597, 104623, 104639, 104651, 104659, 104677, 104681, 104683, 104693, 104701, 104707, 104711, 104717, 104723, 104729, )
agpl-3.0
aycanirican/nixops
nixops/resources/azure_reserved_ip_address.py
6
6605
# -*- coding: utf-8 -*- # Automatic provisioning of Azure reserved IP addresses. import os import azure import time from nixops.util import attr_property from nixops.azure_common import ResourceDefinition, ResourceState, normalize_location from azure.mgmt.network import * class AzureReservedIPAddressDefinition(ResourceDefinition): """Definition of an Azure Reserved IP Address""" @classmethod def get_type(cls): return "azure-reserved-ip-address" @classmethod def get_resource_type(cls): return "azureReservedIPAddresses" def __init__(self, xml): ResourceDefinition.__init__(self, xml) self.reserved_ip_address_name = self.get_option_value(xml, 'name', str) self.copy_option(xml, 'resourceGroup', 'resource') self.copy_location(xml) self.copy_tags(xml) self.copy_option(xml, 'idleTimeout', int) self.copy_option(xml, 'domainNameLabel', str, optional = True) self.copy_option(xml, 'reverseFqdn', str, optional = True) self.allocation_method = 'Static' def show_type(self): return "{0} [{1}]".format(self.get_type(), self.location) class AzureReservedIPAddressState(ResourceState): """State of an Azure Reserved IP Address""" reserved_ip_address_name = attr_property("azure.name", None) resource_group = attr_property("azure.resourceGroup", None) location = attr_property("azure.location", None) tags = attr_property("azure.tags", {}, 'json') idle_timeout = attr_property("azure.idleTimeout", None, int) domain_name_label = attr_property("azure.domainNameLabel", None) allocation_method = attr_property("azure.allocationMethod", None) fqdn = attr_property("azure.fqdn", None) reverse_fqdn = attr_property("azure.reverseFqdn", None) ip_address = attr_property("azure.ipAddress", None) @classmethod def get_type(cls): return "azure-reserved-ip-address" def show_type(self): s = super(AzureReservedIPAddressState, self).show_type() if self.state == self.UP: s = "{0} [{1}]".format(s, self.location) return s @property def resource_id(self): return self.reserved_ip_address_name @property def full_name(self): return "Azure reserved IP address '{0}'".format(self.reserved_ip_address_name) @property def public_ipv4(self): return self.ip_address def get_resource(self): try: return self.nrpc().public_ip_addresses.get( self.resource_group, self.resource_id).public_ip_address except azure.common.AzureMissingResourceHttpError: return None def destroy_resource(self): self.nrpc().public_ip_addresses.delete(self.resource_group, self.resource_id) defn_properties = [ 'location', 'tags', 'idle_timeout', 'allocation_method', 'domain_name_label', 'reverse_fqdn' ] def create_or_update(self, defn): dns_settings = PublicIpAddressDnsSettings( domain_name_label = defn.domain_name_label, reverse_fqdn = defn.reverse_fqdn ) if defn.domain_name_label or defn.reverse_fqdn else None self.nrpc().public_ip_addresses.create_or_update( defn.resource_group, defn.reserved_ip_address_name, PublicIpAddress( location = defn.location, public_ip_allocation_method = defn.allocation_method, idle_timeout_in_minutes = defn.idle_timeout, tags = defn.tags, dns_settings = dns_settings )) self.state = self.UP self.copy_properties(defn) address = self.get_settled_resource() self.ip_address = address.ip_address self.fqdn = address.dns_settings and address.dns_settings.fqdn self.log("reserved IP address: {0}".format(self.ip_address)) if self.fqdn: self.log("got domain name: {0}".format(self.fqdn)) def create(self, defn, check, allow_reboot, allow_recreate): self.no_subscription_id_change(defn) self.no_location_change(defn) self.no_property_change(defn, 'resource_group') self.copy_mgmt_credentials(defn) self.reserved_ip_address_name = defn.reserved_ip_address_name self.resource_group = defn.resource_group if check: address = self.get_settled_resource() if not address: self.warn_missing_resource() elif self.state == self.UP: self.warn_if_failed(address) self.handle_changed_property('location', normalize_location(address.location), can_fix = False) self.handle_changed_property('tags', address.tags) self.handle_changed_property('ip_address', address.ip_address, property_name = '') self.handle_changed_property('idle_timeout', address.idle_timeout_in_minutes) self.handle_changed_property('allocation_method', address.public_ip_allocation_method) _dns = address.dns_settings self.handle_changed_property('domain_name_label', _dns and _dns.domain_name_label) self.handle_changed_property('reverse_fqdn', _dns and _dns.reverse_fqdn) self.handle_changed_property('fqdn', _dns and _dns.fqdn) else: self.warn_not_supposed_to_exist(valuable_resource = True) self.confirm_destroy() if self.state != self.UP: if self.get_settled_resource(): raise Exception("tried creating a reserved IP address that already exists; " "please run 'deploy --check' to fix this") self.log("creating {0} in {1}...".format(self.full_name, defn.location)) self.create_or_update(defn) if self.properties_changed(defn): self.log("updating properties of {0}...".format(self.full_name)) self.get_settled_resource_assert_exists() self.create_or_update(defn) def create_after(self, resources, defn): from nixops.resources.azure_resource_group import AzureResourceGroupState return {r for r in resources if isinstance(r, AzureResourceGroupState)}
lgpl-3.0
EDUlib/edx-platform
openedx/core/djangoapps/credit/routers.py
9
1339
""" DRF routers. """ from rest_framework import routers class SimpleRouter(routers.SimpleRouter): """ Simple DRF router. """ # Note (CCB): This is a retrofit of a DRF 2.4 feature onto DRF 2.3. This is, sadly, simpler than # updating edx-ora2 to work with DRF 2.4. See https://github.com/tomchristie/django-rest-framework/pull/1333 # for details on this specific DRF 2.4 feature. def get_lookup_regex(self, viewset, lookup_prefix=''): """ Given a viewset, return the portion of URL regex that is used to match against a single instance. Note that lookup_prefix is not used directly inside REST rest_framework itself, but is required in order to nicely support nested router implementations, such as drf-nested-routers. https://github.com/alanjds/drf-nested-routers """ base_regex = '(?P<{lookup_prefix}{lookup_field}>{lookup_value})' lookup_field = getattr(viewset, 'lookup_field', 'pk') try: lookup_value = viewset.lookup_value_regex except AttributeError: # Don't consume `.json` style suffixes lookup_value = '[^/.]+' return base_regex.format( lookup_prefix=lookup_prefix, lookup_field=lookup_field, lookup_value=lookup_value )
agpl-3.0
abendleiter/Django-facebook
docs/docs_env/Lib/encodings/unicode_escape.py
852
1184
""" Python 'unicode-escape' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """ import codecs ### Codec APIs class Codec(codecs.Codec): # Note: Binding these as C functions will result in the class not # converting them to methods. This is intended. encode = codecs.unicode_escape_encode decode = codecs.unicode_escape_decode class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.unicode_escape_encode(input, self.errors)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.unicode_escape_decode(input, self.errors)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='unicode-escape', encode=Codec.encode, decode=Codec.decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, )
bsd-3-clause
jacksonwilliams/arsenalsuite
cpp/lib/PyQt4/doc/sphinx/conf.py
4
6459
# -*- coding: utf-8 -*- # # PyQt documentation build configuration file, created by # sphinx-quickstart on Sat May 30 14:28:55 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. #extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PyQt' copyright = u'2011 Riverbank Computing Limited' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '4.8.6' # The full version, including alpha/beta/rc tags. release = 'snapshot-4.8.6-4726879563e5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. #exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "PyQt snapshot-4.8.6-4726879563e5 Reference Guide" # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'static/logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'logo_tn.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'PyQtdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'PyQt.tex', u'PyQt Documentation', u'Riverbank Computing Limited', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True def setup(app): """ Define roles specific to PyQt. """ pass
gpl-2.0
pancentric/django-cms
menus/migrations/0001_initial.py
36
1235
# -*- coding: utf-8 -*- from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CacheKey' db.create_table('menus_cachekey', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('language', self.gf('django.db.models.fields.CharField')(max_length=255)), ('site', self.gf('django.db.models.fields.PositiveIntegerField')()), ('key', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal('menus', ['CacheKey']) def backwards(self, orm): # Deleting model 'CacheKey' db.delete_table('menus_cachekey') models = { 'menus.cachekey': { 'Meta': {'object_name': 'CacheKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'site': ('django.db.models.fields.PositiveIntegerField', [], {}) } } complete_apps = ['menus']
bsd-3-clause
google-code-export/pychess
lib/pychess/widgets/TaskerManager.py
20
11005
from __future__ import absolute_import from gi.repository import Gtk from gi.repository import Pango from gi.repository import GObject from gi.repository import GdkPixbuf import math import random #from Gtk.gdk import pixbuf_new_from_file from pychess.Players.Human import Human from pychess.Players.engineNest import discoverer from pychess.System import uistuff, conf from pychess.System.glock import glock_connect_after from pychess.System.prefix import addDataPrefix from pychess.Utils.GameModel import GameModel from pychess.Utils.IconLoader import load_icon from pychess.Utils.TimeModel import TimeModel from pychess.Utils.const import LOCAL, ARTIFICIAL, WHITE, BLACK, NORMALCHESS from pychess.Variants import variants from pychess.ic import ICLogon from pychess.widgets import ionest, newGameDialog from .Background import giveBackground from .ToggleComboBox import ToggleComboBox import traceback class TaskerManager (Gtk.Table): def __init__ (self): GObject.GObject.__init__(self) self.border = 20 giveBackground(self) self.connect("draw", self.expose) #self.set_homogeneous(True) def expose (self, widget, ctx): cr = widget.get_window().cairo_create() for widget in self.widgets: x = widget.get_allocation().x y = widget.get_allocation().y width = widget.get_allocation().width height = widget.get_allocation().height cr.move_to (x-self.border, y) cr.curve_to (x-self.border, y-self.border/2., x-self.border/2., y-self.border, x, y-self.border) cr.line_to (x+width, y-self.border) cr.curve_to (x+width+self.border/2., y-self.border, x+width+self.border, y-self.border/2., x+width+self.border, y) cr.line_to (x+width+self.border, y+height) cr.curve_to (x+width+self.border, y+height+self.border/2., x+width+self.border/2., y+height+self.border, x+width, y+height+self.border) cr.line_to (x, y+height+self.border) cr.curve_to (x-self.border/2., y+height+self.border, x-self.border, y+height+self.border/2., x-self.border, y+height) sc = self.get_style_context() bool1, bgcolor = sc.lookup_color("p_bg_color") bool1, darkcolor = sc.lookup_color("p_dark_color") cr.set_source_rgba(bgcolor.red, bgcolor.green, bgcolor.blue, bgcolor.alpha) cr.fill() cr.rectangle (x-self.border, y+height-30, width+self.border*2, 30) cr.set_source_rgba(darkcolor.red, darkcolor.green, darkcolor.blue, darkcolor.alpha) cr.fill() def calcSpacings (self, n): """ Will yield ranges like ((.50,.50),) ((.66,.33), (.33,.66)) ((.75,.25), (.50,.50), (.25,.75)) ((.80,.20), (.60,.40), (.40,.60), (.20,.80)) Used to create the centering in the table """ first = next = (n)/float(n+1) for i in range(n): yield (next, 1-next) next = first-(1-next) def packTaskers (self, *widgets): self.widgets = widgets for widget in widgets: widget.connect("size-allocate", lambda *a: self.get_window().invalidate_rect(self.get_allocation(), False)) root = math.sqrt(len(widgets)) # Calculate number of rows rows = int(math.ceil(root)) # Calculate number of filled out rows rrows = int(math.floor(root)) # Calculate number of cols in filled out rows cols = int(math.ceil( len(widgets)/float(rows) )) # Calculate spacings vspac = [s[0] for s in self.calcSpacings(rows)] hspac = [s[0] for s in self.calcSpacings(cols)] # Clear and set up new size for child in self.get_children(): self.remove(child) self.props.n_columns = cols self.props.n_rows = rows # Add filled out rows for row in range(rrows): for col in range(cols): widget = widgets[row*cols + col] alignment = Gtk.Alignment.new(hspac[col], vspac[row], 0, 0) alignment.add(widget) self.attach(alignment, col, col+1, row, row+1) # Add last row if rows > rrows: lastrow = Gtk.HBox() # Calculate number of widgets in last row numw = len(widgets) - cols*rrows hspac = [s[0] for s in self.calcSpacings(numw)] for col, widget in enumerate(widgets[-numw:]): alignment = Gtk.Alignment.new(hspac[col], vspac[-1], 0, 0) alignment.add(widget) alignment.set_padding(self.border, self.border, self.border, self.border) lastrow.pack_start(alignment, True, True, 0) self.attach(lastrow, 0, cols, rrows, rrows+1) class NewGameTasker (Gtk.Alignment): def __init__ (self): #GObject.GObject.__init__(self,0,0,0,0) GObject.GObject.__init__(self) self.widgets = widgets = uistuff.GladeWidgets("taskers.glade") tasker = widgets["newGameTasker"] tasker.unparent() self.add(tasker) combo = ToggleComboBox() combo.addItem(_("White"), GdkPixbuf.Pixbuf.new_from_file(addDataPrefix("glade/white.png"))) combo.addItem(_("Black"), GdkPixbuf.Pixbuf.new_from_file(addDataPrefix("glade/black.png"))) combo.addItem(_("Random"), GdkPixbuf.Pixbuf.new_from_file(addDataPrefix("glade/random.png"))) combo.setMarkup("<b>", "</b>") widgets["colorDock"].add(combo) uistuff.keep(combo, "newgametasker_colorcombo") widgets['yourColorLabel'].set_mnemonic_widget(combo) # We need to wait until after engines have been discovered, to init the # playerCombos. We use connect_after to make sure, that newGameDialog # has also had time to init the constants we share with them. self.playerCombo = ToggleComboBox() widgets["opponentDock"].add(self.playerCombo) glock_connect_after(discoverer, "all_engines_discovered", self.__initPlayerCombo, widgets) widgets['opponentLabel'].set_mnemonic_widget(self.playerCombo) def on_skill_changed (scale): pix = newGameDialog.skillToIconLarge[int(scale.get_value())] widgets["skillImage"].set_from_pixbuf(pix) widgets["skillSlider"].connect("value-changed", on_skill_changed) on_skill_changed(widgets["skillSlider"]) widgets["startButton"].connect("clicked", self.startClicked) self.widgets["opendialog1"].connect("clicked", self.openDialogClicked) def __initPlayerCombo (self, discoverer, widgets): combo = self.playerCombo combo.update(newGameDialog.smallPlayerItems[0]) if combo.active < 0: combo.label.set_ellipsize(Pango.EllipsizeMode.MIDDLE) combo.setMarkup("<b>", "</b>") combo.active = 1 uistuff.keep(self.playerCombo, "newgametasker_playercombo") def on_playerCombobox_changed (widget, event): widgets["skillSlider"].props.visible = widget.active > 0 combo.connect("changed", on_playerCombobox_changed) uistuff.keep(widgets["skillSlider"], "taskerSkillSlider") widgets["skillSlider"].set_no_show_all(True) on_playerCombobox_changed(self.playerCombo, None) def openDialogClicked (self, button): newGameDialog.NewGameMode.run() def startClicked (self, button): color = self.widgets["colorDock"].get_child().active if color == 2: color = random.choice([WHITE, BLACK]) opponent = self.widgets["opponentDock"].get_child().active difficulty = int(self.widgets["skillSlider"].get_value()) gamemodel = GameModel(TimeModel(5*60, 0)) name = conf.get("firstName", _("You")) player0tup = (LOCAL, Human, (color, name), name) if opponent == 0: name = conf.get("secondName", _("Guest")) player1tup = (LOCAL, Human, (1-color, name), name) else: engine = discoverer.getEngineN (opponent-1) name = discoverer.getName(engine) player1tup = (ARTIFICIAL, discoverer.initPlayerEngine, (engine, 1-color, difficulty, variants[NORMALCHESS], 5*60, 0), name) if color == WHITE: ionest.generalStart(gamemodel, player0tup, player1tup) else: ionest.generalStart(gamemodel, player1tup, player0tup) big_start = load_icon(48, "stock_init", "gnome-globe", "applications-internet") class InternetGameTasker (Gtk.Alignment): def __init__ (self): #GObject.GObject.__init__(self,0,0,0,0) GObject.GObject.__init__(self) self.widgets = uistuff.GladeWidgets("taskers.glade") tasker = self.widgets["internetGameTasker"] tasker.unparent() self.add(tasker) def asGuestCallback (checkbutton): for widget in (self.widgets["passwordLabel"], self.widgets["passwordEntry"]): widget.set_sensitive(not checkbutton.get_active()) self.widgets["asGuestCheck"].connect("toggled", asGuestCallback) uistuff.keep(self.widgets["asGuestCheck"], "asGuestCheck") uistuff.keep(self.widgets["usernameEntry"], "usernameEntry") uistuff.keep(self.widgets["passwordEntry"], "passwordEntry") self.widgets["connectButton"].connect("clicked", self.connectClicked) self.widgets["opendialog2"].connect("clicked", self.openDialogClicked) self.widgets["startIcon"].set_from_pixbuf(big_start) def openDialogClicked (self, button): ICLogon.run() def connectClicked (self, button): asGuest = self.widgets["asGuestCheck"].get_active() username = self.widgets["usernameEntry"].get_text() password = self.widgets["passwordEntry"].get_text() ICLogon.run() if not ICLogon.dialog.connection: ICLogon.dialog.widgets["logOnAsGuest"].set_active(asGuest) ICLogon.dialog.widgets["nameEntry"].set_text(username) ICLogon.dialog.widgets["passEntry"].set_text(password) ICLogon.dialog.widgets["connectButton"].clicked()
gpl-3.0
vortex-ape/scikit-learn
sklearn/externals/joblib/_store_backends.py
16
14502
"""Storage providers backends for Memory caching.""" import re import os import os.path import datetime import json import shutil import warnings import collections import operator import threading from abc import ABCMeta, abstractmethod from ._compat import with_metaclass, _basestring from .backports import concurrency_safe_rename from .disk import mkdirp, memstr_to_bytes, rm_subdirs from . import numpy_pickle CacheItemInfo = collections.namedtuple('CacheItemInfo', 'path size last_access') def concurrency_safe_write(object_to_write, filename, write_func): """Writes an object into a unique file in a concurrency-safe way.""" thread_id = id(threading.current_thread()) temporary_filename = '{}.thread-{}-pid-{}'.format( filename, thread_id, os.getpid()) write_func(object_to_write, temporary_filename) return temporary_filename class StoreBackendBase(with_metaclass(ABCMeta)): """Helper Abstract Base Class which defines all methods that a StorageBackend must implement.""" location = None @abstractmethod def _open_item(self, f, mode): """Opens an item on the store and return a file-like object. This method is private and only used by the StoreBackendMixin object. Parameters ---------- f: a file-like object The file-like object where an item is stored and retrieved mode: string, optional the mode in which the file-like object is opened allowed valued are 'rb', 'wb' Returns ------- a file-like object """ @abstractmethod def _item_exists(self, location): """Checks if an item location exists in the store. This method is private and only used by the StoreBackendMixin object. Parameters ---------- location: string The location of an item. On a filesystem, this corresponds to the absolute path, including the filename, of a file. Returns ------- True if the item exists, False otherwise """ @abstractmethod def _move_item(self, src, dst): """Moves an item from src to dst in the store. This method is private and only used by the StoreBackendMixin object. Parameters ---------- src: string The source location of an item dst: string The destination location of an item """ @abstractmethod def create_location(self, location): """Creates a location on the store. Parameters ---------- location: string The location in the store. On a filesystem, this corresponds to a directory. """ @abstractmethod def clear_location(self, location): """Clears a location on the store. Parameters ---------- location: string The location in the store. On a filesystem, this corresponds to a directory or a filename absolute path """ @abstractmethod def get_items(self): """Returns the whole list of items available in the store. Returns ------- The list of items identified by their ids (e.g filename in a filesystem). """ @abstractmethod def configure(self, location, verbose=0, backend_options=dict()): """Configures the store. Parameters ---------- location: string The base location used by the store. On a filesystem, this corresponds to a directory. verbose: int The level of verbosity of the store backend_options: dict Contains a dictionnary of named paremeters used to configure the store backend. """ class StoreBackendMixin(object): """Class providing all logic for managing the store in a generic way. The StoreBackend subclass has to implement 3 methods: create_location, clear_location and configure. The StoreBackend also has to provide a private _open_item, _item_exists and _move_item methods. The _open_item method has to have the same signature as the builtin open and return a file-like object. """ def load_item(self, path, verbose=1, msg=None): """Load an item from the store given its path as a list of strings.""" full_path = os.path.join(self.location, *path) if verbose > 1: if verbose < 10: print('{0}...'.format(msg)) else: print('{0} from {1}'.format(msg, full_path)) mmap_mode = (None if not hasattr(self, 'mmap_mode') else self.mmap_mode) filename = os.path.join(full_path, 'output.pkl') if not self._item_exists(filename): raise KeyError("Non-existing item (may have been " "cleared).\nFile %s does not exist" % filename) # file-like object cannot be used when mmap_mode is set if mmap_mode is None: with self._open_item(filename, "rb") as f: item = numpy_pickle.load(f) else: item = numpy_pickle.load(filename, mmap_mode=mmap_mode) return item def dump_item(self, path, item, verbose=1): """Dump an item in the store at the path given as a list of strings.""" try: item_path = os.path.join(self.location, *path) if not self._item_exists(item_path): self.create_location(item_path) filename = os.path.join(item_path, 'output.pkl') if verbose > 10: print('Persisting in %s' % item_path) def write_func(to_write, dest_filename): with self._open_item(dest_filename, "wb") as f: numpy_pickle.dump(to_write, f, compress=self.compress) self._concurrency_safe_write(item, filename, write_func) except: # noqa: E722 " Race condition in the creation of the directory " def clear_item(self, path): """Clear the item at the path, given as a list of strings.""" item_path = os.path.join(self.location, *path) if self._item_exists(item_path): self.clear_location(item_path) def contains_item(self, path): """Check if there is an item at the path, given as a list of strings""" item_path = os.path.join(self.location, *path) filename = os.path.join(item_path, 'output.pkl') return self._item_exists(filename) def get_item_info(self, path): """Return information about item.""" return {'location': os.path.join(self.location, *path)} def get_metadata(self, path): """Return actual metadata of an item.""" try: item_path = os.path.join(self.location, *path) filename = os.path.join(item_path, 'metadata.json') with self._open_item(filename, 'rb') as f: return json.loads(f.read().decode('utf-8')) except: # noqa: E722 return {} def store_metadata(self, path, metadata): """Store metadata of a computation.""" try: item_path = os.path.join(self.location, *path) self.create_location(item_path) filename = os.path.join(item_path, 'metadata.json') def write_func(to_write, dest_filename): with self._open_item(dest_filename, "wb") as f: f.write(json.dumps(to_write).encode('utf-8')) self._concurrency_safe_write(metadata, filename, write_func) except: # noqa: E722 pass def contains_path(self, path): """Check cached function is available in store.""" func_path = os.path.join(self.location, *path) return self.object_exists(func_path) def clear_path(self, path): """Clear all items with a common path in the store.""" func_path = os.path.join(self.location, *path) if self._item_exists(func_path): self.clear_location(func_path) def store_cached_func_code(self, path, func_code=None): """Store the code of the cached function.""" func_path = os.path.join(self.location, *path) if not self._item_exists(func_path): self.create_location(func_path) if func_code is not None: filename = os.path.join(func_path, "func_code.py") with self._open_item(filename, 'wb') as f: f.write(func_code.encode('utf-8')) def get_cached_func_code(self, path): """Store the code of the cached function.""" path += ['func_code.py', ] filename = os.path.join(self.location, *path) try: with self._open_item(filename, 'rb') as f: return f.read().decode('utf-8') except: # noqa: E722 raise def get_cached_func_info(self, path): """Return information related to the cached function if it exists.""" return {'location': os.path.join(self.location, *path)} def clear(self): """Clear the whole store content.""" self.clear_location(self.location) def reduce_store_size(self, bytes_limit): """Reduce store size to keep it under the given bytes limit.""" items_to_delete = self._get_items_to_delete(bytes_limit) for item in items_to_delete: if self.verbose > 10: print('Deleting item {0}'.format(item)) try: self.clear_location(item.path) except OSError: # Even with ignore_errors=True can shutil.rmtree # can raise OSErrror with [Errno 116] Stale file # handle if another process has deleted the folder # already. pass def _get_items_to_delete(self, bytes_limit): """Get items to delete to keep the store under a size limit.""" if isinstance(bytes_limit, _basestring): bytes_limit = memstr_to_bytes(bytes_limit) items = self.get_items() size = sum(item.size for item in items) to_delete_size = size - bytes_limit if to_delete_size < 0: return [] # We want to delete first the cache items that were accessed a # long time ago items.sort(key=operator.attrgetter('last_access')) items_to_delete = [] size_so_far = 0 for item in items: if size_so_far > to_delete_size: break items_to_delete.append(item) size_so_far += item.size return items_to_delete def _concurrency_safe_write(self, to_write, filename, write_func): """Writes an object into a file in a concurrency-safe way.""" temporary_filename = concurrency_safe_write(to_write, filename, write_func) self._move_item(temporary_filename, filename) def __repr__(self): """Printable representation of the store location.""" return '{class_name}(location="{location}")'.format( class_name=self.__class__.__name__, location=self.location) class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin): """A StoreBackend used with local or network file systems.""" _open_item = staticmethod(open) _item_exists = staticmethod(os.path.exists) _move_item = staticmethod(concurrency_safe_rename) def clear_location(self, location): """Delete location on store.""" if (location == self.location): rm_subdirs(location) else: shutil.rmtree(location, ignore_errors=True) def create_location(self, location): """Create object location on store""" mkdirp(location) def get_items(self): """Returns the whole list of items available in the store.""" items = [] for dirpath, _, filenames in os.walk(self.location): is_cache_hash_dir = re.match('[a-f0-9]{32}', os.path.basename(dirpath)) if is_cache_hash_dir: output_filename = os.path.join(dirpath, 'output.pkl') try: last_access = os.path.getatime(output_filename) except OSError: try: last_access = os.path.getatime(dirpath) except OSError: # The directory has already been deleted continue last_access = datetime.datetime.fromtimestamp(last_access) try: full_filenames = [os.path.join(dirpath, fn) for fn in filenames] dirsize = sum(os.path.getsize(fn) for fn in full_filenames) except OSError: # Either output_filename or one of the files in # dirpath does not exist any more. We assume this # directory is being cleaned by another process already continue items.append(CacheItemInfo(dirpath, dirsize, last_access)) return items def configure(self, location, verbose=1, backend_options=None): """Configure the store backend. For this backend, valid store options are 'compress' and 'mmap_mode' """ if backend_options is None: backend_options = {} # setup location directory self.location = location if not os.path.exists(self.location): mkdirp(self.location) # item can be stored compressed for faster I/O self.compress = backend_options.get('compress', False) # FileSystemStoreBackend can be used with mmap_mode options under # certain conditions. mmap_mode = backend_options.get('mmap_mode') if self.compress and mmap_mode is not None: warnings.warn('Compressed items cannot be memmapped in a ' 'filesystem store. Option will be ignored.', stacklevel=2) self.mmap_mode = mmap_mode self.verbose = verbose
bsd-3-clause
jtbandes/swift
utils/swift_build_support/tests/test_workspace.py
58
7061
# tests/test_workspace.py ---------------------------------------*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors # # ---------------------------------------------------------------------------- import argparse import itertools import os import shutil import tempfile import unittest from swift_build_support.workspace import ( Workspace, compute_build_subdir, ) class WorkspaceTestCase(unittest.TestCase): def test_workspace(self): tmpdir1 = os.path.realpath(tempfile.mkdtemp()) tmpdir2 = os.path.realpath(tempfile.mkdtemp()) os.makedirs(os.path.join(tmpdir1, 'foo')) workspace = Workspace(source_root=tmpdir1, build_root=tmpdir2) self.assertEqual(workspace.source_root, tmpdir1) self.assertEqual(workspace.build_root, tmpdir2) # source_dir self.assertEqual(workspace.source_dir('foo'), os.path.join(tmpdir1, 'foo')) # build_dir self.assertEqual(workspace.build_dir('target', 'product'), os.path.join(tmpdir2, 'product-target')) shutil.rmtree(tmpdir1) shutil.rmtree(tmpdir2) class ComputeBuildSubdirTestCase(unittest.TestCase): def create_basic_args(self, generator, variant, assertions, enable_asan=False, enable_ubsan=False, enable_tsan=False): return argparse.Namespace( cmake_generator=generator, cmark_build_variant=variant, llvm_build_variant=variant, swift_build_variant=variant, swift_stdlib_build_variant=variant, swift_analyze_code_coverage="false", cmark_assertions=assertions, llvm_assertions=assertions, swift_assertions=assertions, swift_stdlib_assertions=assertions, enable_asan=enable_asan, enable_ubsan=enable_ubsan, enable_tsan=enable_tsan) def test_Ninja_ReleaseAssert_asan(self): # noqa (N802 function name should be lowercase) args = self.create_basic_args( "Ninja", variant="Release", assertions=True, enable_asan=True) self.assertEqual(compute_build_subdir(args), "Ninja-ReleaseAssert+asan") def test_Ninja_ReleaseAssert_ubsan(self): # noqa (N802 function name should be lowercase) args = self.create_basic_args( "Ninja", variant="Release", assertions=True, enable_ubsan=True) self.assertEqual(compute_build_subdir(args), "Ninja-ReleaseAssert+ubsan") def test_Ninja_ReleaseAssert_tsan(self): # noqa (N802 function name should be lowercase) args = self.create_basic_args( "Ninja", variant="Release", assertions=True, enable_tsan=True) self.assertEqual(compute_build_subdir(args), "Ninja-ReleaseAssert+tsan") def test_Ninja_ReleaseAssert(self): # noqa (N802 function name should be lowercase) # build-script -R args = self.create_basic_args( "Ninja", variant="Release", assertions=True) self.assertEqual(compute_build_subdir(args), "Ninja-ReleaseAssert") def test_Ninja_Release(self): # noqa (N802 function name should be lowercase) # build-script -R --no-assertions args = self.create_basic_args( "Ninja", variant="Release", assertions=False) self.assertEqual(compute_build_subdir(args), "Ninja-Release") def test_Ninja_Release_stdlib_ReleaseAssert(self): # noqa (N802 function name should be lowercase) # build-script -R --no-assertions --swift-stdlib-assertions args = self.create_basic_args( "Ninja", variant="Release", assertions=False) args.swift_stdlib_assertions = True self.assertEqual(compute_build_subdir(args), "Ninja-Release+stdlib-ReleaseAssert") def test_Ninja_mixed(self): # noqa (N802 function name should be lowercase) # build-script -R --no-assertions # --llvm-build-variant=RelWithDebInfo # --swift-analyze-code-coverage="merged" # --swift-stdlib-assertions args = self.create_basic_args( "Ninja", variant="Release", assertions=False) args.llvm_build_variant = "RelWithDebInfo" args.swift_analyze_code_coverage = "merged" args.swift_stdlib_assertions = True self.assertEqual(compute_build_subdir(args), "Ninja+cmark-Release+llvm-RelWithDebInfo" "+swift-ReleaseCoverage+stdlib-ReleaseAssert") def test_Unix_Makefiles_ReleaseAssert(self): # noqa (N802 function name should be lowercase) # build-script -R -m args = self.create_basic_args( "Unix Makefiles", variant="Release", assertions=True) self.assertEqual(compute_build_subdir(args), "Unix_Makefiles-ReleaseAssert") def test_all_combinations_are_unique(self): productions = itertools.product( ["Release", "Debug"], # cmark_build_variant ["Release", "Debug"], # llvm_build_variant ["Release", "Debug"], # swift_build_variant ["Release", "Debug"], # swift_stdlib_build_variant ["false", "true"], # swift_analyze_code_coverage [True, False], # cmark_assertions [True, False], # llvm_assertions [True, False], # swift_assertions [True, False], # swift_stdlib_assertions ) keys = [ "cmark_build_variant", "llvm_build_variant", "swift_build_variant", "swift_stdlib_build_variant", "swift_analyze_code_coverage", "cmark_assertions", "llvm_assertions", "swift_assertions", "swift_stdlib_assertions", ] def generate(): for c in productions: args = argparse.Namespace(cmake_generator="Ninja", enable_asan=False, enable_ubsan=False, enable_tsan=False) for key, val in zip(keys, c): setattr(args, key, val) yield compute_build_subdir(args) seen = set() for line in generate(): self.assertIsInstance(line, str) self.assertNotIn(line, seen) seen.add(line) self.assertEqual(len(seen), 1 << 9) # Iterated all productions.
apache-2.0
dentaku65/pelisalacarta
python/main-classic/servers/lumfile.py
43
1270
# -*- coding: utf-8 -*- #------------------------------------------------------------ # pelisalacarta - XBMC Plugin # Conector para lumfile # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os from core import scrapertools from core import logger from core import config def get_video_url( page_url , premium = False , user="" , password="", video_password="" ): logger.info("[lumfile.py] get_video_url(page_url='%s')" % page_url) video_urls = [] return video_urls # Encuentra vídeos del servidor en el texto pasado def find_videos(data): encontrados = set() devuelve = [] # http://lumfile.com/cwdddpi2d/aaa.html patronvideos = '(lumfile.com/[a-z0-9]+/.*?\.html)' logger.info("[lumfile.py] find_videos #"+patronvideos+"#") matches = re.compile(patronvideos,re.DOTALL).findall(data) for match in matches: titulo = "[lumfile]" url = "http://"+match if url not in encontrados: logger.info(" url="+url) devuelve.append( [ titulo , url , 'lumfile' ] ) encontrados.add(url) else: logger.info(" url duplicada="+url) return devuelve
gpl-3.0
christinahedges/PyKE
pyke/tests/test_utils.py
2
1732
import argparse from numpy.testing import assert_almost_equal from ..utils import PyKEArgumentHelpFormatter from ..utils import module_output_to_channel, channel_to_module_output from ..utils import running_mean def test_PyKEArgumentHelpFormatter(): parser = argparse.ArgumentParser( description=('Test PyKEArgumentHelpFormatter'), formatter_class=PyKEArgumentHelpFormatter) parser.add_argument('--str_arg', help='string type argument', default='oi') parser.add_argument('--bool_arg', help='bool type argument', action='store_true') formatter = parser._get_formatter() for actions in parser._action_groups: formatter.add_arguments(actions._group_actions) ans = ["-h, --help show this help message and exit", "--str_arg STR_ARG string type argument (default: oi)", "--bool_arg bool type argument", ''] assert ans == formatter.format_help().split('\n') def test_channel_to_module_output(): assert channel_to_module_output(1) == (2, 1) assert channel_to_module_output(42) == (13, 2) assert channel_to_module_output(84) == (24, 4) assert channel_to_module_output(33) == (11, 1) def test_module_output_to_channel(): assert module_output_to_channel(2, 1) == 1 assert module_output_to_channel(13, 2) == 42 assert module_output_to_channel(24, 4) == 84 assert module_output_to_channel(11, 1) == 33 def test_running_mean(): assert_almost_equal(running_mean([1, 2, 3], window_size=1), [1, 2, 3]) assert_almost_equal(running_mean([1, 2, 3], window_size=2), [1.5, 2.5]) assert_almost_equal(running_mean([2, 2, 2], window_size=3), [2])
mit
madjam/mxnet
example/module/python_loss.py
26
2816
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: skip-file import numpy as np import mxnet as mx import numba import logging # We use numba.jit to implement the loss gradient. @numba.jit def mc_hinge_grad(scores, labels): scores = scores.asnumpy() labels = labels.asnumpy() n, _ = scores.shape grad = np.zeros_like(scores) for i in range(n): score = 1 + scores[i] - scores[i, labels[i]] score[labels[i]] = 0 ind_pred = score.argmax() grad[i, labels[i]] -= 1 grad[i, ind_pred] += 1 return grad if __name__ == '__main__': n_epoch = 10 batch_size = 100 num_gpu = 2 contexts = mx.context.cpu() if num_gpu < 1 else [mx.context.gpu(i) for i in range(num_gpu)] # build a MLP module data = mx.symbol.Variable('data') fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128) act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu") fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64) act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu") fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10) mlp = mx.mod.Module(fc3, context=contexts) loss = mx.mod.PythonLossModule(grad_func=mc_hinge_grad) mod = mx.mod.SequentialModule() \ .add(mlp) \ .add(loss, take_labels=True, auto_wiring=True) train_dataiter = mx.io.MNISTIter( image="data/train-images-idx3-ubyte", label="data/train-labels-idx1-ubyte", data_shape=(784,), batch_size=batch_size, shuffle=True, flat=True, silent=False, seed=10) val_dataiter = mx.io.MNISTIter( image="data/t10k-images-idx3-ubyte", label="data/t10k-labels-idx1-ubyte", data_shape=(784,), batch_size=batch_size, shuffle=True, flat=True, silent=False) logging.basicConfig(level=logging.DEBUG) mod.fit(train_dataiter, eval_data=val_dataiter, optimizer_params={'learning_rate':0.01, 'momentum': 0.9}, num_epoch=n_epoch)
apache-2.0
kennho/oppia
core/storage/base_model/gae_models_test.py
18
3158
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from core.platform import models from core.tests import test_utils (base_models,) = models.Registry.import_models([models.NAMES.base_model]) class BaseModelUnitTests(test_utils.GenericTestBase): """Test the generic base model.""" def tearDown(self): """Deletes all model entities.""" for entity in base_models.BaseModel.get_all(): entity.delete() super(BaseModelUnitTests, self).tearDown() def test_error_cases_for_get_method(self): with self.assertRaises(base_models.BaseModel.EntityNotFoundError): base_models.BaseModel.get('Invalid id') with self.assertRaises(base_models.BaseModel.EntityNotFoundError): base_models.BaseModel.get('Invalid id', strict=True) self.assertIsNone( base_models.BaseModel.get('Invalid id', strict=False)) def test_generic_query_put_get_and_delete_operations(self): model = base_models.BaseModel() all_models = [m for m in base_models.BaseModel.get_all()] self.assertEqual(len(all_models), 0) model.put() all_models = [m for m in base_models.BaseModel.get_all()] self.assertEqual(len(all_models), 1) self.assertEqual(all_models[0], model) model_id = all_models[0].id self.assertEqual(model, base_models.BaseModel.get(model_id)) model.delete() all_models = [m for m in base_models.BaseModel.get_all()] self.assertEqual(len(all_models), 0) with self.assertRaises(base_models.BaseModel.EntityNotFoundError): model.get(model_id) def test_get_multi(self): model1 = base_models.BaseModel() model2 = base_models.BaseModel() model2.deleted = True model1.put() model2.put() model1_id = model1.id model2_id = model2.id result = base_models.BaseModel.get_multi([model1_id, model2_id, 'none']) self.assertEqual(result, [model1, None, None]) def test_get_new_id_method_returns_unique_ids(self): ids = set([]) for _ in range(100): new_id = base_models.BaseModel.get_new_id('') self.assertNotIn(new_id, ids) base_models.BaseModel(id=new_id).put() ids.add(new_id) def test_get_new_id_method_does_not_fail_with_bad_names(self): base_models.BaseModel.get_new_id(None) base_models.BaseModel.get_new_id('¡Hola!') base_models.BaseModel.get_new_id(12345) base_models.BaseModel.get_new_id({'a': 'b'})
apache-2.0
ClearCorp-dev/odoo-clearcorp
TODO-8.0/sale_order_ccorp_report/ccorp_sale.py
3
2450
# -*- encoding: utf-8 -*- ############################################################################## # # ccorp_account.py # ccorp_account # First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.) # Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are # permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of # conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list # of conditions and the following disclaimer in the documentation and/or other materials # provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are those of the # authors and should not be interpreted as representing official policies, either expressed # or implied, of ClearCorp S.A.. # ############################################################################## from openerp.osv import osv,fields class account_invoice(osv.osv): _inherit = 'account.invoice' def name_get(self, cr, uid, ids, context={}): if not len(ids): return [] reads = self.read(cr, uid, ids, ['name', 'number'], context) res = [] for record in reads: name = "" if record['number']: name = record['number'] if record['name']: name = name + ' ' + record['name'] res.append((record['id'], name)) return res account_invoice()
agpl-3.0
openpgh/askpgh
askbot/user_messages/context_processors.py
10
1947
""" Context processor for lightweight session messages. Time-stamp: <2008-07-19 23:16:19 carljm context_processors.py> """ from django.conf import settings as django_settings from django.utils.encoding import StrAndUnicode from askbot.user_messages import get_and_delete_messages def user_messages(request): """ Returns session messages for the current session. """ if not request.path.startswith('/' + django_settings.ASKBOT_URL): #todo: a hack, for real we need to remove this middleware #and switch to the new-style session messages return {} #the get_and_delete_messages is added to anonymous user by the #ConnectToSessionMessages middleware by the process_request, #however - if the user is logging out via /admin/logout/ #the AnonymousUser is installed in the response and thus #the Askbot's session messages hack will fail, so we have #an extra if statement here. if hasattr(request.user, 'get_and_delete_messages'): messages = request.user.get_and_delete_messages() return { 'user_messages': messages } return {} class LazyMessages(StrAndUnicode): """ Lazy message container, so messages aren't actually retrieved from session and deleted until the template asks for them. """ def __init__(self, request): self.request = request def __iter__(self): return iter(self.messages) def __len__(self): return len(self.messages) def __nonzero__(self): return bool(self.messages) def __unicode__(self): return unicode(self.messages) def __getitem__(self, *args, **kwargs): return self.messages.__getitem__(*args, **kwargs) def _get_messages(self): if hasattr(self, '_messages'): return self._messages self._messages = get_and_delete_messages(self.request) return self._messages messages = property(_get_messages)
gpl-3.0
krunal3103/servo
tests/wpt/web-platform-tests/tools/pytest/testing/test_resultlog.py
171
7770
import os import _pytest._code import py import pytest from _pytest.main import Node, Item, FSCollector from _pytest.resultlog import generic_path, ResultLog, \ pytest_configure, pytest_unconfigure def test_generic_path(testdir): from _pytest.main import Session config = testdir.parseconfig() session = Session(config) p1 = Node('a', config=config, session=session) #assert p1.fspath is None p2 = Node('B', parent=p1) p3 = Node('()', parent = p2) item = Item('c', parent = p3) res = generic_path(item) assert res == 'a.B().c' p0 = FSCollector('proj/test', config=config, session=session) p1 = FSCollector('proj/test/a', parent=p0) p2 = Node('B', parent=p1) p3 = Node('()', parent = p2) p4 = Node('c', parent=p3) item = Item('[1]', parent = p4) res = generic_path(item) assert res == 'test/a:B().c[1]' def test_write_log_entry(): reslog = ResultLog(None, None) reslog.logfile = py.io.TextIO() reslog.write_log_entry('name', '.', '') entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 1 assert entry_lines[0] == '. name' reslog.logfile = py.io.TextIO() reslog.write_log_entry('name', 's', 'Skipped') entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 2 assert entry_lines[0] == 's name' assert entry_lines[1] == ' Skipped' reslog.logfile = py.io.TextIO() reslog.write_log_entry('name', 's', 'Skipped\n') entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 2 assert entry_lines[0] == 's name' assert entry_lines[1] == ' Skipped' reslog.logfile = py.io.TextIO() longrepr = ' tb1\n tb 2\nE tb3\nSome Error' reslog.write_log_entry('name', 'F', longrepr) entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 5 assert entry_lines[0] == 'F name' assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()] class TestWithFunctionIntegration: # XXX (hpk) i think that the resultlog plugin should # provide a Parser object so that one can remain # ignorant regarding formatting details. def getresultlog(self, testdir, arg): resultlog = testdir.tmpdir.join("resultlog") testdir.plugins.append("resultlog") args = ["--resultlog=%s" % resultlog] + [arg] testdir.runpytest(*args) return [x for x in resultlog.readlines(cr=0) if x] def test_collection_report(self, testdir): ok = testdir.makepyfile(test_collection_ok="") skip = testdir.makepyfile(test_collection_skip= "import pytest ; pytest.skip('hello')") fail = testdir.makepyfile(test_collection_fail="XXX") lines = self.getresultlog(testdir, ok) assert not lines lines = self.getresultlog(testdir, skip) assert len(lines) == 2 assert lines[0].startswith("S ") assert lines[0].endswith("test_collection_skip.py") assert lines[1].startswith(" ") assert lines[1].endswith("test_collection_skip.py:1: Skipped: hello") lines = self.getresultlog(testdir, fail) assert lines assert lines[0].startswith("F ") assert lines[0].endswith("test_collection_fail.py"), lines[0] for x in lines[1:]: assert x.startswith(" ") assert "XXX" in "".join(lines[1:]) def test_log_test_outcomes(self, testdir): mod = testdir.makepyfile(test_mod=""" import pytest def test_pass(): pass def test_skip(): pytest.skip("hello") def test_fail(): raise ValueError("FAIL") @pytest.mark.xfail def test_xfail(): raise ValueError("XFAIL") @pytest.mark.xfail def test_xpass(): pass """) lines = self.getresultlog(testdir, mod) assert len(lines) >= 3 assert lines[0].startswith(". ") assert lines[0].endswith("test_pass") assert lines[1].startswith("s "), lines[1] assert lines[1].endswith("test_skip") assert lines[2].find("hello") != -1 assert lines[3].startswith("F ") assert lines[3].endswith("test_fail") tb = "".join(lines[4:8]) assert tb.find('raise ValueError("FAIL")') != -1 assert lines[8].startswith('x ') tb = "".join(lines[8:14]) assert tb.find('raise ValueError("XFAIL")') != -1 assert lines[14].startswith('X ') assert len(lines) == 15 @pytest.mark.parametrize("style", ("native", "long", "short")) def test_internal_exception(self, style): # they are produced for example by a teardown failing # at the end of the run or a failing hook invocation try: raise ValueError except ValueError: excinfo = _pytest._code.ExceptionInfo() reslog = ResultLog(None, py.io.TextIO()) reslog.pytest_internalerror(excinfo.getrepr(style=style)) entry = reslog.logfile.getvalue() entry_lines = entry.splitlines() assert entry_lines[0].startswith('! ') if style != "native": assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class assert entry_lines[-1][0] == ' ' assert 'ValueError' in entry def test_generic(testdir, LineMatcher): testdir.plugins.append("resultlog") testdir.makepyfile(""" import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") @pytest.mark.xfail def test_xfail(): assert 0 @pytest.mark.xfail(run=False) def test_xfail_norun(): assert 0 """) testdir.runpytest("--resultlog=result.log") lines = testdir.tmpdir.join("result.log").readlines(cr=0) LineMatcher(lines).fnmatch_lines([ ". *:test_pass", "F *:test_fail", "s *:test_skip", "x *:test_xfail", "x *:test_xfail_norun", ]) def test_makedir_for_resultlog(testdir, LineMatcher): """--resultlog should automatically create directories for the log file""" testdir.plugins.append("resultlog") testdir.makepyfile(""" import pytest def test_pass(): pass """) testdir.runpytest("--resultlog=path/to/result.log") lines = testdir.tmpdir.join("path/to/result.log").readlines(cr=0) LineMatcher(lines).fnmatch_lines([ ". *:test_pass", ]) def test_no_resultlog_on_slaves(testdir): config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog") assert not hasattr(config, '_resultlog') pytest_configure(config) assert hasattr(config, '_resultlog') pytest_unconfigure(config) assert not hasattr(config, '_resultlog') config.slaveinput = {} pytest_configure(config) assert not hasattr(config, '_resultlog') pytest_unconfigure(config) assert not hasattr(config, '_resultlog') def test_failure_issue380(testdir): testdir.makeconftest(""" import pytest class MyCollector(pytest.File): def collect(self): raise ValueError() def repr_failure(self, excinfo): return "somestring" def pytest_collect_file(path, parent): return MyCollector(parent=parent, fspath=path) """) testdir.makepyfile(""" def test_func(): pass """) result = testdir.runpytest("--resultlog=log") assert result.ret == 1
mpl-2.0
heytcass/homeassistant-config
deps/requests/packages/chardet/codingstatemachine.py
2931
2318
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .constants import eStart from .compat import wrap_ord class CodingStateMachine: def __init__(self, sm): self._mModel = sm self._mCurrentBytePos = 0 self._mCurrentCharLen = 0 self.reset() def reset(self): self._mCurrentState = eStart def next_state(self, c): # for each byte we get its class # if it is first byte, we also get byte length # PY3K: aBuf is a byte stream, so c is an int, not a byte byteCls = self._mModel['classTable'][wrap_ord(c)] if self._mCurrentState == eStart: self._mCurrentBytePos = 0 self._mCurrentCharLen = self._mModel['charLenTable'][byteCls] # from byte's class and stateTable, we get its next state curr_state = (self._mCurrentState * self._mModel['classFactor'] + byteCls) self._mCurrentState = self._mModel['stateTable'][curr_state] self._mCurrentBytePos += 1 return self._mCurrentState def get_current_charlen(self): return self._mCurrentCharLen def get_coding_state_machine(self): return self._mModel['name']
mit
pamoakoy/invenio
modules/bibformat/lib/elements/bfe_fulltext.py
1
15271
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - Prints a links to fulltext """ __revision__ = "$Id$" import re from invenio.bibdocfile import BibRecDocs, file_strip_ext from invenio.messages import gettext_set_language from invenio.config import CFG_SITE_URL, CFG_CERN_SITE, CFG_SITE_RECORD from invenio.websubmit_config import CFG_WEBSUBMIT_ICON_SUBFORMAT_RE from cgi import escape, parse_qs from urlparse import urlparse from os.path import basename import urllib cern_arxiv_categories = ["astro-ph", "chao-dyn", "cond-mat", "gr-qc", "hep-ex", "hep-lat", "hep-ph", "hep-th", "math-ph", "math", "nucl-ex", "nucl-th", "out", "physics", "quant-ph", "q-alg", "cs", "adap-org", "comp-gas", "chem-ph", "cs", "math", "neuro-sys", "patt-sol", "solv-int", "acc-phys", "alg-geom", "ao-sci", "atom-ph", "cmp-lg", "dg-ga", "funct-an", "mtrl-th", "plasm-ph", "q-alg", "supr-con"] def format_element(bfo, style, separator='; ', show_icons='no', focus_on_main_file='no', show_subformat_icons='no'): """ This is the default format for formatting fulltext links. When possible, it returns only the main file(s) (+ link to additional files if needed). If no distinction is made at submission time between main and additional files, returns all the files @param separator: the separator between urls. @param style: CSS class of the link @param show_icons: if 'yes', print icons for fulltexts @param focus_on_main_file: if 'yes' and a doctype 'Main' is found, prominently display this doctype. In that case other doctypes are summarized with a link to the Files tab, named "Additional files" @param show_subformat_icons: shall we display subformats considered as icons? """ _ = gettext_set_language(bfo.lang) out = '' # Retrieve files (parsed_urls, old_versions, additionals) = get_files(bfo, \ distinguish_main_and_additional_files=focus_on_main_file.lower() == 'yes', include_subformat_icons=show_subformat_icons == 'yes') main_urls = parsed_urls['main_urls'] others_urls = parsed_urls['others_urls'] if parsed_urls.has_key('cern_urls'): cern_urls = parsed_urls['cern_urls'] # Prepare style and icon if style != "": style = 'class="'+style+'"' if show_icons.lower() == 'yes': file_icon = '<img style="border:none" src="%s/img/file-icon-text-12x16.gif" alt="%s"/>' % (CFG_SITE_URL, _("Download fulltext")) else: file_icon = '' # Build urls list. # Escape special chars for <a> tag value. additional_str = '' if additionals: additional_str = ' <small>(<a '+style+' href="'+CFG_SITE_URL+'/%s/' % CFG_SITE_RECORD + str(bfo.recID)+'/files/">%s</a>)</small>' % _("additional files") versions_str = '' #if old_versions: #versions_str = ' <small>(<a '+style+' href="'+CFG_SITE_URL+'/CFG_SITE_RECORD/'+str(bfo.recID)+'/files/">%s</a>)</small>' % _("older versions") if main_urls: main_urls_keys = sort_alphanumerically(main_urls.keys()) for descr in main_urls_keys: urls = main_urls[descr] if re.match(r'^\d+\s', descr) and urls[0][2] == 'png': # FIXME: we have probably hit a Plot (as link # description looks like '0001 This is Caption'), so # do not take it. This test is not ideal, we should # rather study doc type, and base ourselves on # Main/Additional/Plot etc. continue out += "<strong>%s:</strong> " % descr urls_dict = {} for url, name, url_format in urls: if name not in urls_dict: urls_dict[name] = [(url, url_format)] else: urls_dict[name].append((url, url_format)) for name, urls_and_format in urls_dict.items(): if len(urls_dict) > 1: print_name = "<em>%s</em> - " % name url_list = [print_name] else: url_list = [] for url, url_format in urls_and_format: if CFG_CERN_SITE and url_format == 'ps.gz' and len(urls_and_format) > 1: ## We skip old PS.GZ files continue url_list.append('<a %(style)s href="%(url)s">%(file_icon)s%(url_format)s</a>' % { 'style': style, 'url': escape(url, True), 'file_icon': file_icon, 'url_format': escape(url_format.upper()) }) out += " ".join(url_list) + additional_str + versions_str + separator if CFG_CERN_SITE and cern_urls: link_word = len(cern_urls) == 1 and _('%(x_sitename)s link') or _('%(x_sitename)s links') out += '<strong>%s</strong>: ' % (link_word % {'x_sitename': 'CERN'}) url_list = [] for url, descr in cern_urls: url_list.append('<a '+style+' href="'+escape(url)+ '''" onclick="try{var r;if(typeof XMLHttpRequest=='undefined'){r=new ActiveXObject('Msxml2.XMLHTTP.3.0');}else{r=new XMLHttpRequest();}r.open('GET', \''''+CFG_SITE_URL+"/record/"+str(bfo.recID)+'''/extlink/?url=' + escape(this.href), false);r.send(null);}catch(e){}return true;">'''+ \ file_icon + escape(str(descr))+'</a>') out += separator.join(url_list) if others_urls: external_link = len(others_urls) == 1 and _('external link') or _('external links') out += '<strong>%s</strong>: ' % external_link.capitalize() url_list = [] for url, descr in others_urls: url_list.append('<a '+style+' href="'+escape(url)+ '''" onclick="try{var r;if(typeof XMLHttpRequest=='undefined'){r=new ActiveXObject('Msxml2.XMLHTTP.3.0');}else{r=new XMLHttpRequest();}r.open('GET', \''''+CFG_SITE_URL+"/record/"+str(bfo.recID)+'''/extlink/?url=' + escape(this.href), false);r.send(null);}catch(e){}return true;">'''+ \ file_icon + escape(str(descr))+'</a>') out += separator.join(url_list) + '<br />' if out.endswith('<br />'): out = out[:-len('<br />')] # When exported to text (eg. in WebAlert emails) we do not want to # display the link to the fulltext: if out: out = '<!--START_NOT_FOR_TEXT-->' + out + '<!--END_NOT_FOR_TEXT-->' return out def escape_values(bfo): """ Called by BibFormat in order to check if output of this element should be escaped. """ return 0 def get_files(bfo, distinguish_main_and_additional_files=True, include_subformat_icons=False): """ Returns the files available for the given record. Returned structure is a tuple (parsed_urls, old_versions, additionals): - parsed_urls: contains categorized URLS (see details below) - old_versions: set to True if we can have access to old versions - additionals: set to True if we have other documents than the 'main' document Parameter 'include_subformat_icons' decides if subformat considered as icons should be returned 'parsed_urls' is a dictionary in the form: {'main_urls' : {'Main' : [('http://CFG_SITE_URL/CFG_SITE_RECORD/1/files/aFile.pdf', 'aFile', 'PDF'), ('http://CFG_SITE_URL/CFG_SITE_RECORD/1/files/aFile.gif', 'aFile', 'GIF')], 'Additional': [('http://CFG_SITE_URL/CFG_SITE_RECORD/1/files/bFile.pdf', 'bFile', 'PDF')]}, 'other_urls': [('http://externalurl.com/aFile.pdf', 'Fulltext'), # url(8564_u), description(8564_z/y) ('http://externalurl.com/bFile.pdf', 'Fulltext')], 'cern_urls' : [('http://cern.ch/aFile.pdf', 'Fulltext'), # url(8564_u), description(8564_z/y) ('http://cern.ch/bFile.pdf', 'Fulltext')], } Some notes about returned structure: - key 'cern_urls' is only available on CERN site - keys in main_url dictionaries are defined by the BibDoc. - older versions are not part of the parsed urls - returns only main files when possible, that is when doctypes make a distinction between 'Main' files and other files. Otherwise returns all the files as main. This is only enabled if distinguish_main_and_additional_files is set to True """ _ = gettext_set_language(bfo.lang) urls = bfo.fields("8564_") bibarchive = BibRecDocs(bfo.recID) old_versions = False # We can provide link to older files. Will be # set to True if older files are found. additionals = False # We have additional files. Will be set to # True if additional files are found. # Prepare object to return parsed_urls = {'main_urls':{}, # Urls hosted by Invenio (bibdocs) 'others_urls':[] # External urls } if CFG_CERN_SITE: parsed_urls['cern_urls'] = [] # cern.ch urls # Doctypes can of any type, but when there is one file marked as # 'Main', we consider that there is a distinction between "main" # and "additional" files. Otherwise they will all be considered # equally as main files distinct_main_and_additional_files = False if len(bibarchive.list_bibdocs(doctype='Main')) > 0 and \ distinguish_main_and_additional_files: distinct_main_and_additional_files = True # Parse URLs for complete_url in urls: if complete_url.has_key('u'): url = complete_url['u'] (dummy, host, path, dummy, params, dummy) = urlparse(url) subformat = complete_url.get('x', '') filename = urllib.unquote(basename(path)) name = file_strip_ext(filename) url_format = filename[len(name):] if url_format.startswith('.'): url_format = url_format[1:] descr = _("Fulltext") if complete_url.has_key('y'): descr = complete_url['y'] if descr == 'Fulltext': descr = _("Fulltext") if not url.startswith(CFG_SITE_URL): # Not a bibdoc? if not descr: # For not bibdoc let's have a description # Display the URL in full: descr = url if CFG_CERN_SITE and 'cern.ch' in host and \ ('/setlink?' in url or \ 'cms' in host or \ 'documents.cern.ch' in url or \ 'doc.cern.ch' in url or \ 'preprints.cern.ch' in url): url_params_dict = dict([part.split('=') for part in params.split('&') if len(part.split('=')) == 2]) if url_params_dict.has_key('categ') and \ (url_params_dict['categ'].split('.', 1)[0] in cern_arxiv_categories) and \ url_params_dict.has_key('id'): # Old arXiv links, used to be handled by # setlink. Provide direct links to arXiv for file_format, label in [('pdf', "PDF")]:#, #('ps', "PS"), #('e-print', "Source (generally TeX or LaTeX)"), #('abs', "Abstract")]: url = "http://arxiv.org/%(format)s/%(category)s/%(id)s" % \ {'format': file_format, 'category': url_params_dict['categ'], 'id': url_params_dict['id']} parsed_urls['others_urls'].append((url, "%s/%s %s" % \ (url_params_dict['categ'], url_params_dict['id'], label))) else: parsed_urls['others_urls'].append((url, descr)) # external url else: # It's a bibdoc! assigned = False for doc in bibarchive.list_bibdocs(): if int(doc.get_latest_version()) > 1: old_versions = True if True in [f.fullname.startswith(filename) \ for f in doc.list_all_files()]: assigned = True if not include_subformat_icons and \ CFG_WEBSUBMIT_ICON_SUBFORMAT_RE.match(subformat): # This is an icon and we want to skip it continue if not doc.doctype == 'Main' and \ distinct_main_and_additional_files == True: # In that case we record that there are # additional files, but don't add them to # returned structure. additionals = True else: if not descr: descr = _('Fulltext') if not parsed_urls['main_urls'].has_key(descr): parsed_urls['main_urls'][descr] = [] params = parse_qs(params) if 'subformat' in params: url_format += ' (%s)' % params['subformat'][0] parsed_urls['main_urls'][descr].append((url, name, url_format)) if not assigned: # Url is not a bibdoc :-S if not descr: descr = filename parsed_urls['others_urls'].append((url, descr)) # Let's put it in a general other url return (parsed_urls, old_versions, additionals) _RE_SPLIT = re.compile(r"\d+|\D+") def sort_alphanumerically(elements): elements = [([not token.isdigit() and token or int(token) for token in _RE_SPLIT.findall(element)], element) for element in elements] elements.sort() return [element[1] for element in elements]
gpl-2.0
reinout/django
django/core/management/commands/loaddata.py
2
14253
import functools import glob import gzip import os import sys import warnings import zipfile from itertools import product from django.apps import apps from django.conf import settings from django.core import serializers from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand, CommandError from django.core.management.color import no_style from django.core.management.utils import parse_apps_and_model_labels from django.db import ( DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router, transaction, ) from django.utils.functional import cached_property try: import bz2 has_bz2 = True except ImportError: has_bz2 = False READ_STDIN = '-' class Command(BaseCommand): help = 'Installs the named fixture(s) in the database.' missing_args_message = ( "No database fixture specified. Please provide the path of at least " "one fixture in the command line." ) def add_arguments(self, parser): parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.') parser.add_argument( '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load fixtures into. Defaults to the "default" database.', ) parser.add_argument( '--app', action='store', dest='app_label', default=None, help='Only look for fixtures in the specified app.', ) parser.add_argument( '--ignorenonexistent', '-i', action='store_true', dest='ignore', help='Ignores entries in the serialized data for fields that do not ' 'currently exist on the model.', ) parser.add_argument( '-e', '--exclude', dest='exclude', action='append', default=[], help='An app_label or app_label.ModelName to exclude. Can be used multiple times.', ) parser.add_argument( '--format', action='store', dest='format', default=None, help='Format of serialized data when reading from stdin.', ) def handle(self, *fixture_labels, **options): self.ignore = options['ignore'] self.using = options['database'] self.app_label = options['app_label'] self.verbosity = options['verbosity'] self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude']) self.format = options['format'] with transaction.atomic(using=self.using): self.loaddata(fixture_labels) # Close the DB connection -- unless we're still in a transaction. This # is required as a workaround for an edge case in MySQL: if the same # connection is used to create tables, load data, and query, the query # can return incorrect results. See Django #7572, MySQL #37735. if transaction.get_autocommit(self.using): connections[self.using].close() def loaddata(self, fixture_labels): connection = connections[self.using] # Keep a count of the installed objects and fixtures self.fixture_count = 0 self.loaded_object_count = 0 self.fixture_object_count = 0 self.models = set() self.serialization_formats = serializers.get_public_serializer_formats() # Forcing binary mode may be revisited after dropping Python 2 support (see #22399) self.compression_formats = { None: (open, 'rb'), 'gz': (gzip.GzipFile, 'rb'), 'zip': (SingleZipReader, 'r'), 'stdin': (lambda *args: sys.stdin, None), } if has_bz2: self.compression_formats['bz2'] = (bz2.BZ2File, 'r') # Django's test suite repeatedly tries to load initial_data fixtures # from apps that don't have any fixtures. Because disabling constraint # checks can be expensive on some database (especially MSSQL), bail # out early if no fixtures are found. for fixture_label in fixture_labels: if self.find_fixtures(fixture_label): break else: return with connection.constraint_checks_disabled(): for fixture_label in fixture_labels: self.load_label(fixture_label) # Since we disabled constraint checks, we must manually check for # any invalid keys that might have been added table_names = [model._meta.db_table for model in self.models] try: connection.check_constraints(table_names=table_names) except Exception as e: e.args = ("Problem installing fixtures: %s" % e,) raise # If we found even one object in a fixture, we need to reset the # database sequences. if self.loaded_object_count > 0: sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models) if sequence_sql: if self.verbosity >= 2: self.stdout.write("Resetting sequences\n") with connection.cursor() as cursor: for line in sequence_sql: cursor.execute(line) if self.verbosity >= 1: if self.fixture_object_count == self.loaded_object_count: self.stdout.write( "Installed %d object(s) from %d fixture(s)" % (self.loaded_object_count, self.fixture_count) ) else: self.stdout.write( "Installed %d object(s) (of %d) from %d fixture(s)" % (self.loaded_object_count, self.fixture_object_count, self.fixture_count) ) def load_label(self, fixture_label): """Load fixtures files for a given label.""" show_progress = self.verbosity >= 3 for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label): _, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file)) open_method, mode = self.compression_formats[cmp_fmt] fixture = open_method(fixture_file, mode) try: self.fixture_count += 1 objects_in_fixture = 0 loaded_objects_in_fixture = 0 if self.verbosity >= 2: self.stdout.write( "Installing %s fixture '%s' from %s." % (ser_fmt, fixture_name, humanize(fixture_dir)) ) objects = serializers.deserialize( ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore, ) for obj in objects: objects_in_fixture += 1 if (obj.object._meta.app_config in self.excluded_apps or type(obj.object) in self.excluded_models): continue if router.allow_migrate_model(self.using, obj.object.__class__): loaded_objects_in_fixture += 1 self.models.add(obj.object.__class__) try: obj.save(using=self.using) if show_progress: self.stdout.write( '\rProcessed %i object(s).' % loaded_objects_in_fixture, ending='' ) except (DatabaseError, IntegrityError) as e: e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % { 'app_label': obj.object._meta.app_label, 'object_name': obj.object._meta.object_name, 'pk': obj.object.pk, 'error_msg': e, },) raise if objects and show_progress: self.stdout.write('') # add a newline after progress indicator self.loaded_object_count += loaded_objects_in_fixture self.fixture_object_count += objects_in_fixture except Exception as e: if not isinstance(e, CommandError): e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),) raise finally: fixture.close() # Warn if the fixture we loaded contains 0 objects. if objects_in_fixture == 0: warnings.warn( "No fixture data found for '%s'. (File format may be " "invalid.)" % fixture_name, RuntimeWarning ) @functools.lru_cache(maxsize=None) def find_fixtures(self, fixture_label): """Find fixture files for a given label.""" if fixture_label == READ_STDIN: return [(READ_STDIN, None, READ_STDIN)] fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label) databases = [self.using, None] cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt] ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt] if self.verbosity >= 2: self.stdout.write("Loading '%s' fixtures..." % fixture_name) if os.path.isabs(fixture_name): fixture_dirs = [os.path.dirname(fixture_name)] fixture_name = os.path.basename(fixture_name) else: fixture_dirs = self.fixture_dirs if os.path.sep in os.path.normpath(fixture_name): fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name)) for dir_ in fixture_dirs] fixture_name = os.path.basename(fixture_name) suffixes = ( '.'.join(ext for ext in combo if ext) for combo in product(databases, ser_fmts, cmp_fmts) ) targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes} fixture_files = [] for fixture_dir in fixture_dirs: if self.verbosity >= 2: self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir)) fixture_files_in_dir = [] path = os.path.join(fixture_dir, fixture_name) for candidate in glob.iglob(glob.escape(path) + '*'): if os.path.basename(candidate) in targets: # Save the fixture_dir and fixture_name for future error messages. fixture_files_in_dir.append((candidate, fixture_dir, fixture_name)) if self.verbosity >= 2 and not fixture_files_in_dir: self.stdout.write("No fixture '%s' in %s." % (fixture_name, humanize(fixture_dir))) # Check kept for backwards-compatibility; it isn't clear why # duplicates are only allowed in different directories. if len(fixture_files_in_dir) > 1: raise CommandError( "Multiple fixtures named '%s' in %s. Aborting." % (fixture_name, humanize(fixture_dir))) fixture_files.extend(fixture_files_in_dir) if not fixture_files: raise CommandError("No fixture named '%s' found." % fixture_name) return fixture_files @cached_property def fixture_dirs(self): """ Return a list of fixture directories. The list contains the 'fixtures' subdirectory of each installed application, if it exists, the directories in FIXTURE_DIRS, and the current directory. """ dirs = [] fixture_dirs = settings.FIXTURE_DIRS if len(fixture_dirs) != len(set(fixture_dirs)): raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.") for app_config in apps.get_app_configs(): app_label = app_config.label app_dir = os.path.join(app_config.path, 'fixtures') if app_dir in fixture_dirs: raise ImproperlyConfigured( "'%s' is a default fixture directory for the '%s' app " "and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label) ) if self.app_label and app_label != self.app_label: continue if os.path.isdir(app_dir): dirs.append(app_dir) dirs.extend(list(fixture_dirs)) dirs.append('') dirs = [os.path.abspath(os.path.realpath(d)) for d in dirs] return dirs def parse_name(self, fixture_name): """ Split fixture name in name, serialization format, compression format. """ if fixture_name == READ_STDIN: if not self.format: raise CommandError('--format must be specified when reading from stdin.') return READ_STDIN, self.format, 'stdin' parts = fixture_name.rsplit('.', 2) if len(parts) > 1 and parts[-1] in self.compression_formats: cmp_fmt = parts[-1] parts = parts[:-1] else: cmp_fmt = None if len(parts) > 1: if parts[-1] in self.serialization_formats: ser_fmt = parts[-1] parts = parts[:-1] else: raise CommandError( "Problem installing fixture '%s': %s is not a known " "serialization format." % (''.join(parts[:-1]), parts[-1])) else: ser_fmt = None name = '.'.join(parts) return name, ser_fmt, cmp_fmt class SingleZipReader(zipfile.ZipFile): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if len(self.namelist()) != 1: raise ValueError("Zip-compressed fixtures must contain one file.") def read(self): return zipfile.ZipFile.read(self, self.namelist()[0]) def humanize(dirname): return "'%s'" % dirname if dirname else 'absolute path'
bsd-3-clause
renzon/antigonovo
antigonovo/urls.py
1
1328
"""antigonovo URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from antigonovo.core import views as core_views urlpatterns = [ path('admin/', admin.site.urls), path('', core_views.home), path('contato/', core_views.contact), path('moveis/', include('antigonovo.moveis.urls')), path('accounts/', include('django.contrib.auth.urls')), ] if not settings.AWS_ACCESS_KEY_ID: urlpatterns.extend(static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)) if settings.DEBUG: import debug_toolbar urlpatterns.append( path('__debug__/', include(debug_toolbar.urls)) )
agpl-3.0
ryanoberto/homevolution
server.py
1
11051
#!/usr/bin/python # all the imports import sqlite3 import os.path from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash from homevolution.db import init_db from flask_cors import CORS from contextlib import closing from homevolution.device import action, get_devices, list_devices import homevolution.zoneminder as zoneminder import homevolution.kodi as kodi import homevolution.schedule as schedule #import homevolution.schedules as schedules #import homevolution.kodiapi as kodiapi #import homevolution.modules as modules # configuration DATABASE = 'homevolution.db' DEBUG = True SECRET_KEY = 'DeeXPDogs5svAQ7giGwaGgpYA154jFfxoqRw' USERNAME = 'admin' PASSWORD = 'admin' # create our little application :) app = Flask(__name__, static_url_path='') app.config.from_object(__name__) def connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db(): with closing(connect_db()) as db: with app.open_resource('database.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() """ Check to see if there is a database else create on """ if not os.path.exists("homevolution.db"): init_db() @app.before_request def before_request(): g.db = connect_db() @app.teardown_request def teardown_request(exception): db = getattr(g, 'db', None) if db is not None: db.close() @app.route('/') def index(): if not session.get('logged_in'): return render_template('login.html') else: templateData={} return render_template('index.html', **templateData) @app.route('/kodi') def kodis(): # Pass the template data into the template dashboard.html and return it to the user if not session.get('logged_in'): flash('Need to login') return render_template('login.html') else: updated_nowplaying = kodi.nowplaying(True) templateData = { 'kodi' : kodi.list(), 'now_playing' : updated_nowplaying['now_playing'], 'now_playing_image' : updated_nowplaying['now_playing_image'], } return render_template('kodi.html', **templateData) @app.route('/schedules') def schedules(): # Pass the template data into the template dashboard.html and return it to the user update_schedule = schedule.gettime("sname") update_run = schedule.getrun("sname") #SCHEDULE_TIME = config['SCHEDULE'] templateData = { 'schedule' : schedule.SCHEDULE, 'schedules' : update_schedule, 'schedulerun' : update_run, } return render_template('schedules.html', **templateData) @app.route('/devices') def show_devices(): if not session.get('logged_in'): flash('Need to login') return render_template('login.html') else: get_device = get_devices() for result in get_device.itervalues(): if "error" in str(result): error = get_devices() else: error = None templateData={ 'error' : error, 'devices' : get_devices() } return render_template('devices.html', **templateData) @app.route('/<action>/<module>',methods=['POST']) def add(action, module): if not session.get('logged_in'): abort(401) if action == "add": if module == "device": g.db.execute('insert into slaves (node, key) values (?, ?)', [request.form['node'], request.form['key']]) g.db.commit() flash('New device was successfully added') if module == "kodi": g.db.execute('insert into kodi (name, port) values (?,?)', [request.form['name'], request.form['port']]) g.db.commit() flash('New server was successfully added') if module == "zoneminder": g.db.execute('insert into zoneminder (name, url, port) values (?, ?, ?)', [request.form['name'], request.form['url'], request.form['port']]) g.db.commit() flash('New server was successfully added') if module == "schedule": print request.form['name'],request.form['month'],request.form['dayofweek'],request.form['hour'],request.form['minute'] g.db.execute('insert into schedule (name, month, dayofweek, hour, minute) values (?, ?, ?, ?, ?)', [request.form['name'], request.form['month'], request.form['dayofweek'], request.form['hour'], request.form['minute']]) g.db.commit() flash('New schedule was successfully added') if module == "schedules": print request.form['name'],request.form['host'],request.form['device'],request.form['action'] g.db.execute('insert into schedules (name, host, device, action) values (?, ?, ?, ?)', [request.form['name'], request.form['host'], request.form['device'], request.form['action']]) g.db.commit() flash('New schedule was successfully added') elif action == "del": if module == "device": g.db.execute('delete from slaves where node=?', [request.form['node']]) g.db.commit() flash(request.form['node']+' was deleted') if module == "kodi": g.db.execute('delete from kodi where name=?', [request.form['name']]) g.db.commit() flash(request.form['name']+' was deleted') if module == "zoneminder": g.db.execute('delete from zoneminder where name=?', [request.form['name']]) g.db.commit() flash(request.form['name']+' was deleted') if module == "schedule": g.db.execute('delete from schedule where name=?', [request.form['name']]) g.db.commit() flash(request.form['name']+' was deleted') if module == "schedules": g.db.execute('delete from schedules where name=?', [request.form['name']]) g.db.commit() flash(request.form['name']+' was deleted') return redirect(url_for('settings')) @app.route('/dashboard') def dashboards(): templateData={ } return render_template('dashboard.html', **templateData) #@app.route('/add/modules', methods=['POST']) #def add_module(): # if not session.get('logged_in'): # abort(401) # g.db.execute('insert into modules (name, url, enabled) values (?, ?, ?)', # [request.form['name'], request.form['url'], request.form['enabled']]) # g.db.commit() # flash('New module was successfully added') # return redirect(url_for('settings')) @app.route('/show') def show_entries(): cur = g.db.execute('select title, text from entries order by id desc') entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()] return render_template('show_entries.html', entries=entries) # Settings Stuff add edit and delete @app.route('/add', methods=['POST']) def add_entry(): if not session.get('logged_in'): abort(401) g.db.execute('insert into slaves (node, key) values (?, ?)', [request.form['title'], request.form['text']]) g.db.commit() flash('New entry was successfully posted') return redirect(url_for('show_entries')) @app.route('/cameras') def cameras(): # Pass the template data into the template dashboard.html and return it to the user if not session.get('logged_in'): flash('Need to login') return render_template('login.html') else: if "error" in str(zoneminder.get_cameras()): error = zoneminder.get_cameras() else: error = None templateData = { 'error' : error, 'zmhost' : zoneminder.ZMHOST, 'zoneminder' : zoneminder.get_cameras(), } return render_template('cameras.html', **templateData) @app.route('/settings',methods=['GET','POST']) def settings(): # Pass the template data into the template dashboard.html and return it to the user if not session.get('logged_in'): flash('Need to login') return render_template('login.html') else: templateData = { 'nodes' : list_devices(), 'kodi' : kodi.list(), 'zoneminder' : zoneminder.list(), 'devices' : get_devices(), 'schedule' : schedule.list(), 'schedules' : schedule.lists() } return render_template('settings.html', **templateData) @app.route('/login', methods=['GET', 'POST']) def login(): error = None if request.method == 'POST': if request.form['username'] != app.config['USERNAME']: error = 'Invalid username' elif request.form['password'] != app.config['PASSWORD']: error = 'Invalid password' else: session['logged_in'] = True flash('You were logged in') return redirect(url_for('index')) return render_template('login.html', error=error) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were logged out') return redirect(url_for('index')) #API stuff @app.route('/api/<plugin>/<action>', methods = ['GET']) def service(plugin, action): if plugin == "kodi": if action == "options": return jsonify(KODI_API_OPT) if action == "playing": updated_nowplaying = thing(KODI) return jsonify( { 'now_playing': updated_nowplaying['now_playing'] } ) if action == "play": return "play" if action == "stop": return "stop" if action == "rewind": return "rewind" if action == "begining": return "begining" if action == "fastforward": return "fastforward" if action == "end": return "end" if action == "volup": return "volup" if action == "voldwn": return "voldwn" if action == "info": return "info" else: return abort(404) else: return abort(404) if __name__ == '__main__': app.run( debug=True, host="0.0.0.0", port=int("80") )
mit
microresearch/diana
weight-test.py
1
5228
import nltk,random from pyevolve import G1DList from pyevolve import GSimpleGA, Consts from pyevolve import Selectors from pyevolve import Initializators, Mutators, Crossovers import math from textclean.textclean import textclean train_txt = open("/root/diana/chapters/3_glass-crash/texts/allnvfmold").read() #train_txt = open("crash.txt").read() #train_txt = open("allnvfmold").read() train_txt = textclean.clean(train_txt) train_sens = nltk.sent_tokenize(train_txt) train_txt = [] for sen in train_sens: train_txt += nltk.pos_tag(nltk.word_tokenize(sen)) fdist1 = nltk.FreqDist(train_txt) vocab = fdist1.keys() # list of tuples of weights wordposweightlist=[] #for word in train_txt: # wordposweightlist.append((word, fdist1[word]/float(fdist1[vocab[0]]))) #takes a weighted list and returns a random selection #assuming the second item in the tuple is the weight def choose_weighted(wlist): if isinstance(wlist, list): wlist.sort(key=lambda x: x[1]) choice = random.random() * sum(j for i, j in wlist) for i, w in wlist: choice -= w if choice < 0: return i else: return wlist[0] mm = {} def insertTuple(d, k, tup): if k not in d: d[k] = tup elif type(d[k]) == tuple: d[k] = [ d[k], tup ] elif not(tup in d[k]): d[k].append(tup) # fixed for repeats for tword in train_txt: insertTuple(mm, tword[1], ([tword[0]],fdist1[tword])) #print choose_weighted(mm["VBN"]) ll=mm.keys() ll.sort() #print ll sentence="Almost every conceivable violent confrontation between the automobile and its occupants was listed: mechanisms of passenger ejection, the geometry of kneecap and hip-joint injuries, deformation of passenger compartments in head-on and rear-end collisions, injuries sustained in accidents at roundabouts, at trunkroad intersections, at the junctions between access roads and motorway intersections, the telescoping mechanisms of car-bodies in front-end collisions, abrasive injuries formed in roll-overs, the amputation of limbs by roof assemblies and door sills during roll-over, facial injuries caused by dashboard and window trim, scalp and cranial injuries caused by rear-view mirrors and sun-visors, whiplash injuries in rear-end collisions, first and second-degree burns in accidents involving the rupture and detonation of fuel tanks, chest injuries caused by steering column impalements, abdominal injuries caused by faulty seat-belt adjustment, second-order collisions between front-seat and rear-seat passengers, cranial and spinal injuries caused by ejection through windshields, the graded injuries to the skull caused by variable windshield glasses, injuries to minors, both children and infants in arms, injuries caused by prosthetic limbs, injuries caused within cars fitted with invalid controls, the complex self-amplifying injuries of single and double amputees, injuries caused by specialist automobile accessories such as record players, cocktail cabinets and radiotelephones, the injuries caused by manufacturers' medallions, safety belt pinions and quarter-window latches." # convert Ballard sentence to these numbers sens = nltk.sent_tokenize(sentence) crash = [] indexcrash=[] randomcrash=[] for sen in sens: crash += nltk.pos_tag(nltk.word_tokenize(sen)) # start with random sequence of same length for word in crash: pos = word[1] # print pos # print position of pos in ll indexcrash+=ll.index(pos), randomcrash.append(random.randint(0,len(ll))) numeric_sentence = indexcrash def evolve_callback(ga_engine): generation = ga_engine.getCurrentGeneration() if generation%50==0: indiv = ga_engine.bestIndividual() out = "" for posnum in indiv: newword = "" # choose random word from pos key poskey=ll[posnum] #print mm[poskey] # newword = random.choice(mm[poskey]) # choose from POS key newword=''.join(choose_weighted(mm[poskey])) # choose from POS key - weighted out += newword + " " print out+"\n" # print newword return False def run_main(): genome = G1DList.G1DList(len(sentence)) genome.setParams(rangemin=min(numeric_sentence), rangemax=max(numeric_sentence), bestrawscore=0.00, gauss_mu=1, gauss_sigma=4) genome.initializator.set(Initializators.G1DListInitializatorInteger) genome.mutator.set(Mutators.G1DListMutatorIntegerGaussian) genome.evaluator.set(lambda genome: sum( [abs(a-b) for a, b in zip(genome, numeric_sentence)] )) # genome.evaluator.set(eval_func) ga = GSimpleGA.GSimpleGA(genome) ga.stepCallback.set(evolve_callback) ga.setMinimax(Consts.minimaxType["minimize"]) ga.terminationCriteria.set(GSimpleGA.RawScoreCriteria) ga.setPopulationSize(60) ga.setMutationRate(0.02) ga.setCrossoverRate(0.9) ga.setGenerations(5000) ga.evolve(freq_stats=100) best = ga.bestIndividual() print "Best individual score: %.2f" % (best.score,) # print ''.join(map(chr, best)) # prints junk if __name__ == "__main__": run_main()
gpl-2.0
WilliamDiakite/ExperimentationsACA
processing/image_processing/main.py
1
1560
import numpy as np import matplotlib.pyplot as plt from PIL import Image import pytesseract from scipy import misc from sklearn.cluster import KMeans def remove_dominant_color(image): # Reshape image for kmeans image = np.reshape(image, (image.shape[0]*image.shape[1])) image = np.reshape(image, (-1, 1)) # Fit and get clusters from kmeans predictions = KMeans(n_clusters=2).fit_predict(image) clusters = list(set(predictions.tolist())).sort() # Reshape image back. Stack image and predictions # so image is shape (pixel, cluster) image = np.reshape(image, (image.shape[0], )) image = np.vstack((image, predictions)).T # Remove dominant color for i in range(image.shape[0]): if image[i,1] == 0: image[i,0] = 254 image = image.T return image[0] def apply_ocr(file): text = pytesseract.image_to_string(Image.open(file), lang='fra') print(text) if __name__ == '__main__': # Load image img = misc.imread('AICA53-Com2-Thomas_Mac_Greevy-fre-1.jpg') original_shape = img.shape[:-1] print('[ + ] Image shape is', original_shape) final_image = [] # For all channels remove dominant color print('[...] Removing dominant color (background), this might take a while') for i in range(3): tmp_img = img[:,:,i] tmp_img = remove_dominant_color(tmp_img) tmp_img = np.reshape(tmp_img, original_shape) final_image.append(tmp_img) final_image = np.asarray(final_image, dtype=np.uint8) final_image = np.transpose(final_image, (1,2,0)) misc.imsave('new.tif',final_image) print('[ + ] The new image have been saved !')
mit
bruckhaus/challenges
python_challenges/project_euler/p032_pandigital_products.py
1
2935
# coding=utf-8 import os import sys current_path = os.path.dirname(os.path.abspath(__file__)) lib_path = os.path.join(current_path, '..') sys.path.append(lib_path) from lib.pandigital import Pandigital class PandigitalProducts: """ Pandigital products Problem 32 We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1 through 5 pandigital. The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital. Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital. HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum. """ def __init__(self, digits=None): if digits is None: digits = [1, 2, 3, 4, 5, 6, 7, 8, 9] self.digits = digits self.pandigital = None self.factor_1 = None self.factor_2 = None self.product = None self.products = set() self.all_products = set() def find(self): p = Pandigital(self.digits) self.pandigital = p.get() while True: self.check_products() self.all_products |= self.products if p.has_next(): self.pandigital = p.next() else: break return sum(self.all_products) def check_products(self): p = self.pandigital l = len(self.pandigital) for factor_1_len in range(1, l): l1 = factor_1_len self.factor_1 = self.get_int(p[:l1]) for factor_2_len in range(l1 + 1, l): l2 = factor_2_len self.factor_2 = self.get_int(p[l1:l2]) self.product = self.get_int(p[l2:]) if self.is_match(): self.products.add(self.product) self.show() return self.products def is_match(self): return (self.product > 0) and (self.factor_1 * self.factor_2 == self.product) @staticmethod def get_int(l): return int("".join([str(d) for d in l])) def show(self): if __name__ == '__main__': expected = self.factor_1 * self.factor_2 print "pandigital:", self.pandigital, \ ", factor 1:", self.factor_1, \ ", factor 2:", self.factor_2, \ ", product", self.product, \ "(", expected, "-", self.is_match(), ")" \ ", products:", self.products, \ ", all products:", self.all_products if __name__ == '__main__': p = PandigitalProducts([1, 2, 3, 4, 5, 6, 7, 8, 9]).find() print "The sum of all products whose product identity can be written as a 1 through 9 pandigital is", p
mit
VeryLonelyCocks/shiva
shiva/__init__.py
1
1038
from .core import Core from .config import config import datetime from .commands import TelegramCommands core = Core(config) telegramHandler = TelegramCommands(core) # def log_event(event): # table = core.db['events'] # date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") # event_model = { # 'date': date, # 'event': event # } # table.insert_one(event_model) # # def return_chat(chat_data): # table = core.db['chats'] # chat = table.find_one({'id': chat_data['id']}) # if not chat: # chat = table.insert_one(chat_data) # chat = table.find_one({'id': chat_data['id']}) # return chat # async def tg_callback(request): # try: # ## get event and logs it # # event = await request.json() # log_event(event) # core.logger.info('Got a new event: %s' % event) # ### telegram = core.telegram telegram.create_webhook(config['host']) core.server.hooks.add(telegram.URI, telegramHandler.telegram_callback) core.server.run()
gpl-3.0
dvliman/jaikuengine
.google_appengine/lib/django-1.5/django/contrib/auth/tests/remote_user.py
91
7817
from datetime import datetime from django.conf import settings from django.contrib.auth import authenticate from django.contrib.auth.backends import RemoteUserBackend from django.contrib.auth.models import User, AnonymousUser from django.contrib.auth.tests.utils import skipIfCustomUser from django.test import TestCase from django.utils import timezone @skipIfCustomUser class RemoteUserTest(TestCase): urls = 'django.contrib.auth.tests.urls' middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware' backend = 'django.contrib.auth.backends.RemoteUserBackend' # Usernames to be passed in REMOTE_USER for the test_known_user test case. known_user = 'knownuser' known_user2 = 'knownuser2' def setUp(self): self.curr_middleware = settings.MIDDLEWARE_CLASSES self.curr_auth = settings.AUTHENTICATION_BACKENDS settings.MIDDLEWARE_CLASSES += (self.middleware,) settings.AUTHENTICATION_BACKENDS += (self.backend,) def test_no_remote_user(self): """ Tests requests where no remote user is specified and insures that no users get created. """ num_users = User.objects.count() response = self.client.get('/remote_user/') self.assertTrue(response.context['user'].is_anonymous()) self.assertEqual(User.objects.count(), num_users) response = self.client.get('/remote_user/', REMOTE_USER=None) self.assertTrue(response.context['user'].is_anonymous()) self.assertEqual(User.objects.count(), num_users) response = self.client.get('/remote_user/', REMOTE_USER='') self.assertTrue(response.context['user'].is_anonymous()) self.assertEqual(User.objects.count(), num_users) def test_unknown_user(self): """ Tests the case where the username passed in the header does not exist as a User. """ num_users = User.objects.count() response = self.client.get('/remote_user/', REMOTE_USER='newuser') self.assertEqual(response.context['user'].username, 'newuser') self.assertEqual(User.objects.count(), num_users + 1) User.objects.get(username='newuser') # Another request with same user should not create any new users. response = self.client.get('/remote_user/', REMOTE_USER='newuser') self.assertEqual(User.objects.count(), num_users + 1) def test_known_user(self): """ Tests the case where the username passed in the header is a valid User. """ User.objects.create(username='knownuser') User.objects.create(username='knownuser2') num_users = User.objects.count() response = self.client.get('/remote_user/', REMOTE_USER=self.known_user) self.assertEqual(response.context['user'].username, 'knownuser') self.assertEqual(User.objects.count(), num_users) # Test that a different user passed in the headers causes the new user # to be logged in. response = self.client.get('/remote_user/', REMOTE_USER=self.known_user2) self.assertEqual(response.context['user'].username, 'knownuser2') self.assertEqual(User.objects.count(), num_users) def test_last_login(self): """ Tests that a user's last_login is set the first time they make a request but not updated in subsequent requests with the same session. """ user = User.objects.create(username='knownuser') # Set last_login to something so we can determine if it changes. default_login = datetime(2000, 1, 1) if settings.USE_TZ: default_login = default_login.replace(tzinfo=timezone.utc) user.last_login = default_login user.save() response = self.client.get('/remote_user/', REMOTE_USER=self.known_user) self.assertNotEqual(default_login, response.context['user'].last_login) user = User.objects.get(username='knownuser') user.last_login = default_login user.save() response = self.client.get('/remote_user/', REMOTE_USER=self.known_user) self.assertEqual(default_login, response.context['user'].last_login) def test_header_disappears(self): """ Tests that a logged in user is logged out automatically when the REMOTE_USER header disappears during the same browser session. """ User.objects.create(username='knownuser') # Known user authenticates response = self.client.get('/remote_user/', REMOTE_USER=self.known_user) self.assertEqual(response.context['user'].username, 'knownuser') # During the session, the REMOTE_USER header disappears. Should trigger logout. response = self.client.get('/remote_user/') self.assertEqual(response.context['user'].is_anonymous(), True) # verify the remoteuser middleware will not remove a user # authenticated via another backend User.objects.create_user(username='modeluser', password='foo') self.client.login(username='modeluser', password='foo') authenticate(username='modeluser', password='foo') response = self.client.get('/remote_user/') self.assertEqual(response.context['user'].username, 'modeluser') def tearDown(self): """Restores settings to avoid breaking other tests.""" settings.MIDDLEWARE_CLASSES = self.curr_middleware settings.AUTHENTICATION_BACKENDS = self.curr_auth class RemoteUserNoCreateBackend(RemoteUserBackend): """Backend that doesn't create unknown users.""" create_unknown_user = False @skipIfCustomUser class RemoteUserNoCreateTest(RemoteUserTest): """ Contains the same tests as RemoteUserTest, but using a custom auth backend class that doesn't create unknown users. """ backend =\ 'django.contrib.auth.tests.remote_user.RemoteUserNoCreateBackend' def test_unknown_user(self): num_users = User.objects.count() response = self.client.get('/remote_user/', REMOTE_USER='newuser') self.assertTrue(response.context['user'].is_anonymous()) self.assertEqual(User.objects.count(), num_users) class CustomRemoteUserBackend(RemoteUserBackend): """ Backend that overrides RemoteUserBackend methods. """ def clean_username(self, username): """ Grabs username before the @ character. """ return username.split('@')[0] def configure_user(self, user): """ Sets user's email address. """ user.email = 'user@example.com' user.save() return user @skipIfCustomUser class RemoteUserCustomTest(RemoteUserTest): """ Tests a custom RemoteUserBackend subclass that overrides the clean_username and configure_user methods. """ backend =\ 'django.contrib.auth.tests.remote_user.CustomRemoteUserBackend' # REMOTE_USER strings with email addresses for the custom backend to # clean. known_user = 'knownuser@example.com' known_user2 = 'knownuser2@example.com' def test_known_user(self): """ The strings passed in REMOTE_USER should be cleaned and the known users should not have been configured with an email address. """ super(RemoteUserCustomTest, self).test_known_user() self.assertEqual(User.objects.get(username='knownuser').email, '') self.assertEqual(User.objects.get(username='knownuser2').email, '') def test_unknown_user(self): """ The unknown user created should be configured with an email address. """ super(RemoteUserCustomTest, self).test_unknown_user() newuser = User.objects.get(username='newuser') self.assertEqual(newuser.email, 'user@example.com')
apache-2.0