Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
yandex-research/shifts
[ "12c8ca805ff4d18bdc1300611c318b264d79fdec" ]
[ "translation/assessment/evaluate_remote.py" ]
[ "import json\nimport numpy as np\nimport sacrebleu\nfrom nltk.translate import gleu_score\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc, f1_score, fbeta_score\nfrom sklearn.metrics import precision_recall_curve\nimport pandas as pd\n\nimport numpy as np\nfrom sklearn.metrics import auc\nfrom sklearn.utils.multiclass import type_of_target\nfrom sklearn.utils import check_consistent_length, column_or_1d, check_array, assert_all_finite\nfrom sklearn.utils.extmath import stable_cumsum\nimport argparse\n\n\ndef _check_pos_label_consistency(pos_label, y_true):\n # ensure binary classification if pos_label is not specified\n # classes.dtype.kind in ('O', 'U', 'S') is required to avoid\n # triggering a FutureWarning by calling np.array_equal(a, b)\n # when elements in the two arrays are not comparable.\n classes = np.unique(y_true)\n if (pos_label is None and (\n classes.dtype.kind in 'OUS' or\n not (np.array_equal(classes, [0, 1]) or\n np.array_equal(classes, [-1, 1]) or\n np.array_equal(classes, [0]) or\n np.array_equal(classes, [-1]) or\n np.array_equal(classes, [1])))):\n classes_repr = \", \".join(repr(c) for c in classes)\n raise ValueError(\n f\"y_true takes value in {{{classes_repr}}} and pos_label is not \"\n f\"specified: either make y_true take value in {{0, 1}} or \"\n f\"{{-1, 1}} or pass pos_label explicitly.\"\n )\n elif pos_label is None:\n pos_label = 1.0\n\n return pos_label\n\n\ndef _binary_clf_curve_ret(y_true, y_score, pos_label=None, sample_weight=None):\n # Check to make sure y_true is valid\n y_type = type_of_target(y_true)\n if not (y_type == \"binary\" or\n (y_type == \"multiclass\" and pos_label is not None)):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n check_consistent_length(y_true, y_score, sample_weight)\n y_true = column_or_1d(y_true)\n y_score = column_or_1d(y_score)\n assert_all_finite(y_true)\n assert_all_finite(y_score)\n\n if sample_weight is not None:\n sample_weight = column_or_1d(sample_weight)\n\n pos_label = _check_pos_label_consistency(pos_label, y_true)\n\n # make y_true a boolean vector\n y_true = (y_true == pos_label)\n\n # sort scores and corresponding truth values\n desc_score_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_score = y_score[desc_score_indices]\n y_true = y_true[desc_score_indices]\n if sample_weight is not None:\n weight = sample_weight[desc_score_indices]\n else:\n weight = 1.\n\n # y_score typically has many tied values. Here we extract\n # the indices associated with the distinct values. 
We also\n # concatenate a value for the end of the curve.\n # distinct_value_indices = np.where(np.diff(y_score))[0]\n # threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]\n\n # accumulate the true positives with decreasing threshold\n tps = stable_cumsum(y_true * weight) # [threshold_idxs]\n if sample_weight is not None:\n # express fps as a cumsum to ensure fps is increasing even in\n # the presence of floating point errors\n fps = stable_cumsum((1 - y_true) * weight) # [threshold_idxs]\n else:\n fps = stable_cumsum((1 - y_true)) # [threshold_idxs]\n return fps, tps, y_score # [threshold_idxs]\n\n\ndef precision_recall_curve_retention(y_true, probas_pred, *, pos_label=None,\n sample_weight=None):\n fps, tps, thresholds = _binary_clf_curve_ret(y_true, probas_pred,\n pos_label=pos_label,\n sample_weight=sample_weight)\n\n precision = tps / (tps + fps)\n precision[np.isnan(precision)] = 0\n recall = tps / tps[-1]\n\n # stop when full recall attained\n # and reverse the outputs so recall is decreasing\n last_ind = tps.searchsorted(tps[-1])\n sl = slice(-1, None, -1)\n return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]\n\n\ndef acceptable_error(errors, threshold):\n return np.asarray(errors <= threshold, dtype=np.float32)\n\n\ndef calc_fbeta_regection_curve(errors, uncertainty, threshold, beta=1.0, group_by_uncertainty=True, eps=1e-10):\n ae = acceptable_error(errors, threshold)\n pr, rec, _ = precision_recall_curve_retention(ae, -uncertainty)\n pr = np.asarray(pr)\n rec = np.asarray(rec)\n f_scores = (1 + beta ** 2) * pr * rec / (pr * beta ** 2 + rec + eps)\n\n return f_scores, pr, rec\n\n\ndef f_beta_metrics(errors, uncertainty, threshold, beta=1.0):\n \"\"\"\n\n :param errors: Per sample errors - array [n_samples]\n :param uncertainty: Uncertainties associated with each prediction. rray [n_samples]\n :param threshold: The error threshold below which we consider the prediction acceptable\n :param beta: The beta value for the F_beta metric. 
Defaults to 1\n :return: fbeta_auc, fbeta_95, retention\n \"\"\"\n f_scores, pr, rec = calc_fbeta_regection_curve(errors, uncertainty, threshold, beta)\n ret = np.arange(pr.shape[0]) / pr.shape[0]\n\n f_auc = auc(ret[::-1], f_scores)\n f95 = f_scores[::-1][np.int(0.95 * pr.shape[0])]\n\n return f_auc, f95, f_scores[::-1]\n\n\ndef calc_uncertainty_regection_curve(errors, uncertainty, group_by_uncertainty=True):\n n_objects = errors.shape[0]\n if group_by_uncertainty:\n data = pd.DataFrame(dict(\n errors=errors,\n uncertainty=uncertainty\n ))\n mean_errors = data.groupby(\"uncertainty\").mean()\n mean_errors.rename(columns={\"errors\": \"mean_errors\"}, inplace=True)\n data = data.join(mean_errors, \"uncertainty\")\n data.drop(\"errors\", axis=1, inplace=True)\n\n uncertainty_order = data[\"uncertainty\"].argsort()\n errors = data[\"mean_errors\"][uncertainty_order]\n else:\n uncertainty_order = uncertainty.argsort()\n errors = errors[uncertainty_order]\n\n error_rates = np.zeros(n_objects + 1)\n error_rates[:-1] = np.cumsum(errors)[::-1] / n_objects\n return error_rates\n\n\ndef calc_aucs(errors, uncertainty):\n uncertainty_rejection_curve = calc_uncertainty_regection_curve(errors, uncertainty)\n uncertainty_rejection_auc = uncertainty_rejection_curve.mean()\n random_rejection_auc = uncertainty_rejection_curve[0] / 2\n ideal_rejection_auc = calc_uncertainty_regection_curve(errors, errors).mean()\n\n rejection_ratio = (uncertainty_rejection_auc - random_rejection_auc) / (\n ideal_rejection_auc - random_rejection_auc) * 100.0\n return rejection_ratio, uncertainty_rejection_auc\n\n\ndef ood_detect(domain_labels, measure):\n scores = np.asarray(measure, dtype=np.float128)\n\n roc_auc = roc_auc_score(domain_labels, scores)\n return roc_auc\n\n\ndef eval():\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"preds\")\n parser.add_argument(\"refs\")\n parser.add_argument(\"domain_labels\")\n try:\n args = parser.parse_args()\n except:\n raise Exception('Missing sumbmit or target.')\n\n decoder = json.JSONDecoder()\n refs = []\n preds = []\n\n hypo_len = None\n preds_ids = []\n refs_ids = []\n\n domain_labels_orig = np.loadtxt(args.domain_labels, dtype=np.int32)\n with open(args.preds, 'r') as jp, open(args.refs, 'r') as jr:\n for line in jp.readlines():\n pred = decoder.decode(line)\n preds.append(pred)\n if not hypo_len:\n hypo_len = len(pred['hypos'])\n assert hypo_len < 11, 'The number of hypotheses exceeds 10.'\n else:\n assert hypo_len == len(pred['hypos']), 'The number of hypotheses is not constant.'\n assert abs(np.sum([hypo['confidence'] for hypo in\n pred['hypos']]) - 1) < 1e-5, 'The sum of the confidence scores is not equal to 1.'\n for line in jr.readlines():\n ref = decoder.decode(line)\n refs.append(ref)\n\n refs = sorted(refs, key=lambda x: x['id'])\n preds = sorted(preds, key=lambda x: x['id'])\n\n assert len(refs) == len(preds), 'Missing some IDs.'\n\n for r, p in zip(refs, preds):\n assert r['id'] == p['id'], 'Wrong order of predictions.'\n\n refsb = [ref['ref'] for ref in refs]\n hyposb = [hypo['hypos'][0]['text'] for hypo in preds]\n\n refsg = [[ref['ref'].split()] for ref in refs]\n gleus = []\n for r, pr in zip(refsg, preds):\n score = 0\n for h in pr['hypos']:\n hypo = h['text'].split()\n score += h['confidence'] * gleu_score.sentence_gleu(references=r, hypothesis=hypo) * 100\n gleus.append(score)\n gleu_errors = np.asarray([100.0 - g for g in gleus])\n\n uncertainties = np.asarray([hypo['uncertainty'] for hypo in preds])\n\n domain_labels = 
np.asarray([domain_labels_orig[r['id']] for r in refs])\n\n bleu = sacrebleu.corpus_bleu(sys_stream=hyposb, ref_streams=[refsb], force=True).score\n gleu = np.mean(gleus)\n prr, auc = calc_aucs(errors=gleu_errors, uncertainty=uncertainties)\n roc_auc = ood_detect(domain_labels, uncertainties)\n\n f_auc, f95, _ = f_beta_metrics(gleu_errors, np.asarray([hypo['uncertainty'] for hypo in preds]), threshold=60.0)\n\n scores = json.dumps({\n 'BLEU': bleu,\n 'sGLEU': gleu,\n 'AUC-F1': f_auc,\n 'F1 @ 95%': f95,\n 'ROC-AUC': roc_auc * 100,\n })\n\n print(auc, scores)\n except Exception as err:\n print('$ ' + str(err) + ' $')\n\n\neval()\n" ]
[ [ "sklearn.metrics.roc_auc_score", "sklearn.utils.check_consistent_length", "numpy.asarray", "numpy.cumsum", "numpy.int", "numpy.mean", "sklearn.utils.assert_all_finite", "numpy.unique", "numpy.arange", "sklearn.utils.column_or_1d", "numpy.zeros", "numpy.isnan", "numpy.argsort", "sklearn.metrics.auc", "numpy.sum", "numpy.array_equal", "sklearn.utils.extmath.stable_cumsum", "sklearn.utils.multiclass.type_of_target", "numpy.loadtxt" ] ]
YotamElor/sagemaker-scikit-learn-extension
[ "930e0ad5b2ef3caa1d7565850f63b3ce4a39b146" ]
[ "test/test_date_time.py" ]
[ "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport numpy as np\nimport pytest\n\nfrom dateutil import parser\n\nfrom sagemaker_sklearn_extension.feature_extraction.date_time import DateTimeVectorizer, DateTimeDefinition\n\n\ndata_array = [\n [parser.parse(\"Jan 5th, 2012, 12:34am\")],\n [parser.parse(\"Feb 2, 2011, 2:34:04am\")],\n [parser.parse(\"Jan 1st, 2012, 11:59:59pm\")],\n [parser.parse(\"Dec 2th, 2012, 12:00am\")],\n [parser.parse(\"Jan 3th, 2012, 12:34am\")],\n [parser.parse(\"Jan 3th, 2018, 1:34am\")],\n]\n\ndata = np.array(data_array)\n\n\n@pytest.mark.parametrize(\"data_shape\", [(2, 3), (2, 3, 4), (2,)])\ndef test_cyclic_transform_outputs_correct_shape(data_shape):\n size = int(np.prod(data_shape))\n data = np.arange(size).reshape(data_shape)\n ret = DateTimeVectorizer._cyclic_transform(data, low=0, high=size - 1)\n\n new_shape = list(data_shape)\n new_shape[-1] *= 2\n new_shape = tuple(new_shape)\n assert ret.shape == new_shape\n\n ret = ret.reshape((-1, 2))\n ret = ret ** 2\n assert np.linalg.norm(np.sum(ret, axis=1) - 1) < 1e-8\n\n\n@pytest.mark.parametrize(\"mode\", [\"ordinal\", \"cyclic\"])\ndef test_fit_transform_works_with_non_np_input(mode):\n dtv = DateTimeVectorizer(\n mode=mode,\n extract=[\n DateTimeDefinition.HOUR.value,\n DateTimeDefinition.SECOND.value,\n DateTimeDefinition.YEAR.value,\n DateTimeDefinition.MONTH.value,\n ],\n )\n output = dtv.fit_transform(data_array)\n assert output.shape[0] == len(data_array)\n assert output.shape[1] > 1\n\n\n@pytest.mark.parametrize(\"data_shape\", [(2, 3), (2, 3, 4), (2,)])\ndef test_cyclic_transform_outputs_correct_cyclic_values(data_shape):\n size = int(np.prod(data_shape))\n data = np.arange(size).reshape(data_shape)\n ret = DateTimeVectorizer._cyclic_transform(data, low=0, high=size - 1)\n ret = ret.reshape((-1, 2))\n ret = ret ** 2\n assert np.linalg.norm(np.sum(ret, axis=1) - 1) < 1e-8\n\n\ndef test_fit_eliminates_constant_columns():\n dtv = DateTimeVectorizer(\n mode=\"ordinal\",\n extract=[\n DateTimeDefinition.HOUR.value,\n DateTimeDefinition.SECOND.value,\n DateTimeDefinition.YEAR.value,\n DateTimeDefinition.MONTH.value,\n ],\n )\n # taking only odd items. Year and month are always the same.\n cur_data = data.reshape((-1, 2))[:, 0].reshape((-1, 1))\n dtv = dtv.fit(cur_data)\n # Year and month are constants, make sure they are out\n assert dtv.extract_ == [DateTimeDefinition.HOUR.value, DateTimeDefinition.SECOND.value]\n\n\n@pytest.mark.parametrize(\"mode\", [\"ordinal\", \"cyclic\"])\ndef test_fit_eliminates_constant_columns_multicol_input(mode):\n # set up data. 
Properties:\n # Hour: Constant thrghout - eliminate\n # Year: Constant in both, but has different value accross columns - should eliminate\n # Month: Constant in column 2, not in 1 - should not eliminate\n # Day of month: not constant in both columns - should not eliminate\n col1 = [\n parser.parse(\"Jan 5th, 2012\"),\n parser.parse(\"Feb 2, 2012\"),\n parser.parse(\"Jan 1st, 2012\"),\n ]\n col2 = [\n parser.parse(\"Dec 2th, 2013\"),\n parser.parse(\"Dec 3th, 2013\"),\n parser.parse(\"Dec 3th, 2013\"),\n ]\n\n cur_data = np.array([col1, col2]).T\n\n dtv = DateTimeVectorizer(\n mode=mode,\n extract=[\n DateTimeDefinition.HOUR.value,\n DateTimeDefinition.DAY_OF_MONTH.value,\n DateTimeDefinition.YEAR.value,\n DateTimeDefinition.MONTH.value,\n ],\n )\n # taking only odd items. Year and month are always the same.\n dtv = dtv.fit(cur_data)\n # Year and month are constants, make sure they are out\n assert dtv.extract_ == [DateTimeDefinition.DAY_OF_MONTH.value, DateTimeDefinition.MONTH.value]\n\n\ndef test_transform_categorical():\n extract_keys = [k for k in dir(DateTimeDefinition) if not k.startswith(\"_\")]\n extract = [DateTimeDefinition.__dict__[k].value for k in extract_keys]\n dtv = DateTimeVectorizer(mode=\"ordinal\", extract=extract, ignore_constant_columns=False)\n dtv.fit(data)\n output = dtv.transform(data)\n\n assert np.all(output >= 0)\n\n loc_year = extract_keys.index(\"YEAR\")\n np.testing.assert_array_equal(output[:, loc_year], np.array([2012, 2011, 2012, 2012, 2012, 2018]))\n\n loc_month = extract_keys.index(\"MONTH\")\n np.testing.assert_array_equal(output[:, loc_month], np.array([0, 1, 0, 11, 0, 0]))\n\n\ndef test_transform_cyclic_leaves_year():\n extract_keys = [k for k in dir(DateTimeDefinition) if not k.startswith(\"_\")]\n extract = [DateTimeDefinition.__dict__[k].value for k in extract_keys]\n\n dtv = DateTimeVectorizer(mode=\"cyclic\", extract=extract, ignore_constant_columns=False)\n dtv.fit(data)\n output = dtv.transform(data)\n\n loc_year = extract_keys.index(\"YEAR\")\n loc_year *= 2\n np.testing.assert_array_equal(output[:, loc_year], np.array([2012, 2011, 2012, 2012, 2012, 2018]))\n\n assert output.shape[1] == len(extract) * 2 - 1\n\n\ndef test_fit_transform_cyclic_leaves_year():\n extract_keys = [k for k in dir(DateTimeDefinition) if not k.startswith(\"_\")]\n extract = [DateTimeDefinition.__dict__[k].value for k in extract_keys]\n\n dtv = DateTimeVectorizer(mode=\"cyclic\", extract=extract, ignore_constant_columns=False)\n output = dtv.fit_transform(data)\n\n loc_year = extract_keys.index(\"YEAR\")\n loc_year *= 2\n np.testing.assert_array_equal(output[:, loc_year], np.array([2012, 2011, 2012, 2012, 2012, 2018]))\n\n assert output.shape[1] == len(dtv.extract_) * 2 - 1\n\n\ndef test_fit_transform_accepts_mixed_str_datetime():\n cur_data_array = data_array + [[\"Feb 12th, 15:33, 2011\"], [\"Nov 5th, 1am, 1975\"], [432], [None], [\"Feb 45th, 2018\"]]\n\n dtv = DateTimeVectorizer(mode=\"ordinal\")\n processed = dtv.fit_transform(cur_data_array)\n year_location = dtv.extract_.index(DateTimeDefinition.YEAR.value)\n assert processed[0, year_location] == 2012\n assert processed[-4, year_location] == 1975\n assert np.isnan(processed[-3, year_location])\n assert np.isnan(processed[-2, year_location])\n assert np.isnan(processed[-1, year_location])\n\n dtv = DateTimeVectorizer(mode=\"cyclic\")\n processed = dtv.fit_transform(cur_data_array)\n assert all(np.isnan(processed[-1]))\n assert not any(np.isnan(processed[-4]))\n assert not any(np.isnan(processed[0]))\n" ]
[ [ "numpy.isnan", "numpy.arange", "numpy.all", "numpy.prod", "numpy.array", "numpy.sum" ] ]
vyraun/TinyShakespeare-Speaks
[ "a412041f8c60a95004f3d4acebd3193db6ff3341" ]
[ "train.py" ]
[ "from __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\n\nimport argparse\nimport time\nimport os\nfrom six.moves import cPickle\n\nfrom utils import TextLoader\nfrom model import Model\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='data/tinyshakespeare',\n help='data directory containing input.txt')\n parser.add_argument('--save_dir', type=str, default='save',\n help='directory to store checkpointed models')\n parser.add_argument('--rnn_size', type=int, default=128,\n help='size of RNN hidden state')\n parser.add_argument('--num_layers', type=int, default=2,\n help='number of layers in the RNN')\n parser.add_argument('--model', type=str, default='lstm',\n help='rnn, gru, or lstm')\n parser.add_argument('--batch_size', type=int, default=50,\n help='minibatch size')\n parser.add_argument('--seq_length', type=int, default=50,\n help='RNN sequence length')\n parser.add_argument('--num_epochs', type=int, default=50,\n help='number of epochs')\n parser.add_argument('--save_every', type=int, default=1000,\n help='save frequency')\n parser.add_argument('--grad_clip', type=float, default=5.,\n help='clip gradients at this value')\n parser.add_argument('--learning_rate', type=float, default=0.002,\n help='learning rate')\n parser.add_argument('--decay_rate', type=float, default=0.97,\n help='decay rate for rmsprop') \n parser.add_argument('--init_from', type=str, default=None,\n help=\"\"\"continue training from saved model at this path. Path must contain files saved by previous training process: \n 'config.pkl' : configuration;\n 'chars_vocab.pkl' : vocabulary definitions;\n 'checkpoint' : paths to model file(s) (created by tf).\n Note: this file contains absolute paths, be careful when moving files around;\n 'model.ckpt-*' : file(s) with model definition (created by tf)\n \"\"\")\n args = parser.parse_args()\n train(args)\n\ndef train(args):\n data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)\n args.vocab_size = data_loader.vocab_size\n \n # check compatibility if training is continued from previously saved model\n if args.init_from is not None:\n # check if all necessary files exist \n assert os.path.isdir(args.init_from),\" %s must be a a path\" % args.init_from\n assert os.path.isfile(os.path.join(args.init_from,\"config.pkl\")),\"config.pkl file does not exist in path %s\"%args.init_from\n assert os.path.isfile(os.path.join(args.init_from,\"chars_vocab.pkl\")),\"chars_vocab.pkl.pkl file does not exist in path %s\" % args.init_from\n ckpt = tf.train.get_checkpoint_state(args.init_from)\n assert ckpt,\"No checkpoint found\"\n assert ckpt.model_checkpoint_path,\"No model path found in checkpoint\"\n\n # open old config and check if models are compatible\n with open(os.path.join(args.init_from, 'config.pkl'), 'rb') as f:\n saved_model_args = cPickle.load(f)\n need_be_same=[\"model\",\"rnn_size\",\"num_layers\",\"seq_length\"]\n for checkme in need_be_same:\n assert vars(saved_model_args)[checkme]==vars(args)[checkme],\"Command line argument and saved model disagree on '%s' \"%checkme\n \n # open saved vocab/dict and check if vocabs/dicts are compatible\n with open(os.path.join(args.init_from, 'chars_vocab.pkl'), 'rb') as f:\n saved_chars, saved_vocab = cPickle.load(f)\n assert saved_chars==data_loader.chars, \"Data and loaded model disagree on character set!\"\n assert saved_vocab==data_loader.vocab, \"Data and loaded model disagree on dictionary mappings!\"\n \n with 
open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:\n cPickle.dump(args, f)\n with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:\n cPickle.dump((data_loader.chars, data_loader.vocab), f)\n \n model = Model(args)\n\n with tf.Session() as sess:\n tf.initialize_all_variables().run()\n saver = tf.train.Saver(tf.all_variables())\n # restore model\n if args.init_from is not None:\n saver.restore(sess, ckpt.model_checkpoint_path)\n for e in range(args.num_epochs):\n sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))\n data_loader.reset_batch_pointer()\n state = sess.run(model.initial_state)\n for b in range(data_loader.num_batches):\n start = time.time()\n x, y = data_loader.next_batch()\n feed = {model.input_data: x, model.targets: y}\n for i, (c, h) in enumerate(model.initial_state):\n feed[c] = state[i].c\n feed[h] = state[i].h\n train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)\n end = time.time()\n print(\"{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(e * data_loader.num_batches + b,\n args.num_epochs * data_loader.num_batches,\n e, train_loss, end - start))\n if (e * data_loader.num_batches + b) % args.save_every == 0\\\n or (e==args.num_epochs-1 and b == data_loader.num_batches-1): # save for the last result\n checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step = e * data_loader.num_batches + b)\n print(\"model saved to {}\".format(checkpoint_path))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.all_variables", "tensorflow.assign", "tensorflow.initialize_all_variables", "tensorflow.Session" ] ]
stellaraccident/iree-jax
[ "cc22664e5ea9d8120c080fd9417a636539105cd7" ]
[ "tests/module_api_test.py" ]
[ "# RUN: %PYTHON %s\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nfrom iree.jax import module_api\n\nfrom iree.jax.module_api import (\n Module\n)\n\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass ModuleApiTest(unittest.TestCase):\n\n def test_base_class_omits_info(self):\n with self.assertRaises(KeyError):\n Module.get_class_info(Module)\n\n def test_info(self):\n\n class MySubclass(Module):\n ...\n\n class_info = Module.get_class_info(MySubclass)\n self.assertEqual(class_info.export_name, \"my_subclass\")\n inst1 = MySubclass(import_only=True)\n inst2 = MySubclass(import_only=True)\n info1 = Module.get_info(inst1)\n info2 = Module.get_info(inst2)\n self.assertIsNot(info1, info2)\n self.assertEqual(info1.class_info.export_name, \"my_subclass\")\n\n def test_explicit_export_name(self):\n\n class MySubclass(Module, export_name=\"Foobar\"):\n ...\n\n class_info = Module.get_class_info(MySubclass)\n self.assertEqual(class_info.export_name, \"Foobar\")\n\n def test_def_function(self):\n\n class Nullary(Module):\n\n def f(self):\n ...\n\n class Unary(Module):\n\n def f(self, a=Module.like(np.asarray(0))):\n ...\n\n self.assertEqual(repr(Unary.f), \"<def f([ShapedArray(int32[])])>\")\n\n def test_global(self):\n\n class Global(Module):\n my_global = np.asarray(0)\n\n self.assertEqual(\n repr(Global.my_global),\n \"<global my_global: initialize=True, mutable=True, value=0>\")\n\n def test_builtins_hidden(self):\n\n class Hidden(Module):\n # Should be able to define something with a builtin name.\n def export_global(self):\n ...\n\n instance = Hidden(import_only=True)\n\n self.assertTrue(callable(instance.export_global))\n\n # Verify that everything except 'export_global' defined above raises\n # AttributeError.\n for key in module_api._STATIC_MODULE_ATTRIBUTES:\n if key != \"export_global\":\n with self.assertRaises(AttributeError):\n _ = getattr(instance, key)\n\n def test_export_function_requires_self(self):\n with self.assertRaisesRegex(\n TypeError,\n \"export function 'missing_self' is expected to have at least a 'self' parameter\"\n ):\n\n class Error(Module):\n\n def missing_self():\n ...\n\n def test_export_function_requires_positional(self):\n with self.assertRaisesRegex(\n TypeError,\n \"export function 'do_something' can only have positional parameters\"):\n\n class Error(Module):\n\n def do_something(self, *, a):\n ...\n\n def test_export_function_requires_aval(self):\n with self.assertRaisesRegex(\n TypeError, \"expected tree of abstract values but got: False\"):\n\n class Error(Module):\n\n def do_something(self, a=False):\n ...\n\n def test_export_illegal_global(self):\n with self.assertRaisesRegex(\n TypeError, \"cannot set arbitrary Python value 'foobar' on module:\"):\n\n class Error(Module):\n foobar = object()\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.asarray" ] ]
xxxnhb/fewshot-egnn
[ "205fa80ec7cb12550f7b52a63f921171f92dac4c" ]
[ "data.py" ]
[ "from __future__ import print_function\nfrom torchtools import *\nimport torch.utils.data as data\nimport random\nimport os\nimport numpy as np\nfrom PIL import Image as pil_image\nimport pickle\nfrom itertools import islice\nfrom torchvision import transforms\n\n\nclass MiniImagenetLoader(data.Dataset):\n def __init__(self, root, partition='train'):\n super(MiniImagenetLoader, self).__init__()\n # set dataset information\n self.root = root\n self.partition = partition\n self.data_size = [3, 84, 84]\n\n # set normalizer\n mean_pix = [x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]\n std_pix = [x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]\n normalize = transforms.Normalize(mean=mean_pix, std=std_pix)\n\n # set transformer\n if self.partition == 'train':\n self.transform = transforms.Compose([transforms.RandomCrop(84, padding=4),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize])\n else: # 'val' or 'test' ,\n self.transform = transforms.Compose([lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize])\n\n # load data\n self.data = self.load_dataset()\n\n def load_dataset(self):\n # load data\n dataset_path = os.path.join(self.root, 'mini-imagenet/compacted_datasets', 'mini_imagenet_%s.pickle' % self.partition)\n with open(dataset_path, 'rb') as handle:\n data = pickle.load(handle)\n\n # for each class\n for c_idx in data:\n # for each image\n for i_idx in range(len(data[c_idx])):\n # resize\n image_data = pil_image.fromarray(np.uint8(data[c_idx][i_idx]))\n image_data = image_data.resize((self.data_size[2], self.data_size[1]))\n #image_data = np.array(image_data, dtype='float32')\n\n #image_data = np.transpose(image_data, (2, 0, 1))\n\n # save\n data[c_idx][i_idx] = image_data\n return data\n\n def get_task_batch(self,\n num_tasks=5,\n num_ways=20,\n num_shots=1,\n num_queries=1,\n seed=None):\n\n if seed is not None:\n random.seed(seed)\n\n # init task batch data\n support_data, support_label, query_data, query_label = [], [], [], []\n for _ in range(num_ways * num_shots):\n data = np.zeros(shape=[num_tasks] + self.data_size,\n dtype='float32')\n label = np.zeros(shape=[num_tasks],\n dtype='float32')\n support_data.append(data)\n support_label.append(label)\n for _ in range(num_ways * num_queries):\n data = np.zeros(shape=[num_tasks] + self.data_size,\n dtype='float32')\n label = np.zeros(shape=[num_tasks],\n dtype='float32')\n query_data.append(data)\n query_label.append(label)\n\n # get full class list in dataset\n full_class_list = list(self.data.keys())\n\n # for each task\n for t_idx in range(num_tasks):\n # define task by sampling classes (num_ways)\n task_class_list = random.sample(full_class_list, num_ways)\n\n # for each sampled class in task\n for c_idx in range(num_ways):\n # sample data for support and query (num_shots + num_queries)\n class_data_list = random.sample(self.data[task_class_list[c_idx]], num_shots + num_queries)\n\n\n # load sample for support set\n for i_idx in range(num_shots):\n # set data\n support_data[i_idx + c_idx * num_shots][t_idx] = self.transform(class_data_list[i_idx])\n support_label[i_idx + c_idx * num_shots][t_idx] = c_idx\n\n # load sample for query set\n for i_idx in range(num_queries):\n query_data[i_idx + c_idx * num_queries][t_idx] = self.transform(class_data_list[num_shots + i_idx])\n query_label[i_idx + c_idx * num_queries][t_idx] = c_idx\n\n # convert to tensor (num_tasks x (num_ways * (num_supports + num_queries)) x ...)\n support_data = 
torch.stack([torch.from_numpy(data).float().to(tt.arg.device) for data in support_data], 1)\n support_label = torch.stack([torch.from_numpy(label).float().to(tt.arg.device) for label in support_label], 1)\n query_data = torch.stack([torch.from_numpy(data).float().to(tt.arg.device) for data in query_data], 1)\n query_label = torch.stack([torch.from_numpy(label).float().to(tt.arg.device) for label in query_label], 1)\n\n return [support_data, support_label, query_data, query_label]\n\n\n\nclass TieredImagenetLoader(data.Dataset):\n def __init__(self, root, partition='train'):\n self.root = root\n self.partition = partition # train/val/test\n #self.preprocess()\n self.data_size = [3, 84, 84]\n\n # load data\n self.data = self.load_dataset()\n\n # if not self._check_exists_():\n # self._init_folders_()\n # if self.check_decompress():\n # self._decompress_()\n # self._preprocess_()\n\n\n def get_image_paths(self, file):\n images_path, class_names = [], []\n with open(file, 'r') as f:\n f.readline()\n for line in f:\n name, class_ = line.split(',')\n class_ = class_[0:(len(class_)-1)]\n path = self.root + '/tiered-imagenet/images/'+name\n images_path.append(path)\n class_names.append(class_)\n return class_names, images_path\n\n def preprocess(self):\n print('\\nPreprocessing Tiered-Imagenet images...')\n (class_names_train, images_path_train) = self.get_image_paths('%s/tiered-imagenet/train.csv' % self.root)\n (class_names_test, images_path_test) = self.get_image_paths('%s/tiered-imagenet/test.csv' % self.root)\n (class_names_val, images_path_val) = self.get_image_paths('%s/tiered-imagenet/val.csv' % self.root)\n\n keys_train = list(set(class_names_train))\n keys_test = list(set(class_names_test))\n keys_val = list(set(class_names_val))\n label_encoder = {}\n label_decoder = {}\n for i in range(len(keys_train)):\n label_encoder[keys_train[i]] = i\n label_decoder[i] = keys_train[i]\n for i in range(len(keys_train), len(keys_train)+len(keys_test)):\n label_encoder[keys_test[i-len(keys_train)]] = i\n label_decoder[i] = keys_test[i-len(keys_train)]\n for i in range(len(keys_train)+len(keys_test), len(keys_train)+len(keys_test)+len(keys_val)):\n label_encoder[keys_val[i-len(keys_train) - len(keys_test)]] = i\n label_decoder[i] = keys_val[i-len(keys_train)-len(keys_test)]\n\n counter = 0\n train_set = {}\n\n for class_, path in zip(class_names_train, images_path_train):\n img = pil_image.open(path)\n img = img.convert('RGB')\n img = img.resize((84, 84), pil_image.ANTIALIAS)\n img = np.array(img, dtype='float32')\n if label_encoder[class_] not in train_set:\n train_set[label_encoder[class_]] = []\n train_set[label_encoder[class_]].append(img)\n counter += 1\n if counter % 1000 == 0:\n print(\"Counter \"+str(counter) + \" from \" + str(len(images_path_train)))\n\n test_set = {}\n for class_, path in zip(class_names_test, images_path_test):\n img = pil_image.open(path)\n img = img.convert('RGB')\n img = img.resize((84, 84), pil_image.ANTIALIAS)\n img = np.array(img, dtype='float32')\n\n if label_encoder[class_] not in test_set:\n test_set[label_encoder[class_]] = []\n test_set[label_encoder[class_]].append(img)\n counter += 1\n if counter % 1000 == 0:\n print(\"Counter \" + str(counter) + \" from \"+str(len(class_names_test)))\n\n val_set = {}\n for class_, path in zip(class_names_val, images_path_val):\n img = pil_image.open(path)\n img = img.convert('RGB')\n img = img.resize((84, 84), pil_image.ANTIALIAS)\n img = np.array(img, dtype='float32')\n\n if label_encoder[class_] not in val_set:\n 
val_set[label_encoder[class_]] = []\n val_set[label_encoder[class_]].append(img)\n counter += 1\n if counter % 1000 == 0:\n print(\"Counter \"+str(counter) + \" from \" + str(len(class_names_val)))\n\n partition_count = 0\n for item in self.chunks(train_set, 20):\n partition_count = partition_count + 1\n with open(os.path.join(self.root, 'tiered-imagenet/compacted_datasets', 'tiered_imagenet_train_{}.pickle'.format(partition_count)), 'wb') as handle:\n pickle.dump(item, handle, protocol=2)\n\n partition_count = 0\n for item in self.chunks(test_set, 20):\n partition_count = partition_count + 1\n with open(os.path.join(self.root, 'tiered-imagenet/compacted_datasets', 'tiered_imagenet_test_{}.pickle'.format(partition_count)), 'wb') as handle:\n pickle.dump(item, handle, protocol=2)\n\n partition_count = 0\n for item in self.chunks(val_set, 20):\n partition_count = partition_count + 1\n with open(os.path.join(self.root, 'tiered-imagenet/compacted_datasets', 'tiered_imagenet_val_{}.pickle'.format(partition_count)), 'wb') as handle:\n pickle.dump(item, handle, protocol=2)\n\n\n\n label_encoder = {}\n keys = list(train_set.keys()) + list(test_set.keys())\n for id_key, key in enumerate(keys):\n label_encoder[key] = id_key\n with open(os.path.join(self.root, 'tiered-imagenet/compacted_datasets', 'tiered_imagenet_label_encoder.pickle'), 'wb') as handle:\n pickle.dump(label_encoder, handle, protocol=2)\n\n print('Images preprocessed')\n\n def load_dataset(self):\n print(\"Loading dataset\")\n data = {}\n if self.partition == 'train':\n num_partition = 18\n elif self.partition == 'val':\n num_partition = 5\n elif self.partition == 'test':\n num_partition = 8\n\n partition_count = 0\n for i in range(num_partition):\n partition_count = partition_count +1\n with open(os.path.join(self.root, 'tiered-imagenet/compacted_datasets', 'tiered_imagenet_{}_{}.pickle'.format(self.partition, partition_count)), 'rb') as handle:\n data.update(pickle.load(handle))\n\n # Resize images and normalize\n for class_ in data:\n for i in range(len(data[class_])):\n image2resize = pil_image.fromarray(np.uint8(data[class_][i]))\n image_resized = image2resize.resize((self.data_size[2], self.data_size[1]))\n image_resized = np.array(image_resized, dtype='float32')\n\n # Normalize\n image_resized = np.transpose(image_resized, (2, 0, 1))\n image_resized[0, :, :] -= 120.45 # R\n image_resized[1, :, :] -= 115.74 # G\n image_resized[2, :, :] -= 104.65 # B\n image_resized /= 127.5\n\n data[class_][i] = image_resized\n\n print(\"Num classes \" + str(len(data)))\n num_images = 0\n for class_ in data:\n num_images += len(data[class_])\n print(\"Num images \" + str(num_images))\n return data\n\n def chunks(self, data, size=10000):\n it = iter(data)\n for i in range(0, len(data), size):\n yield {k: data[k] for k in islice(it, size)}\n\n def get_task_batch(self,\n num_tasks=5,\n num_ways=20,\n num_shots=1,\n num_queries=1,\n seed=None):\n if seed is not None:\n random.seed(seed)\n\n # init task batch data\n support_data, support_label, query_data, query_label = [], [], [], []\n for _ in range(num_ways * num_shots):\n data = np.zeros(shape=[num_tasks] + self.data_size,\n dtype='float32')\n label = np.zeros(shape=[num_tasks],\n dtype='float32')\n support_data.append(data)\n support_label.append(label)\n for _ in range(num_ways * num_queries):\n data = np.zeros(shape=[num_tasks] + self.data_size,\n dtype='float32')\n label = np.zeros(shape=[num_tasks],\n dtype='float32')\n query_data.append(data)\n query_label.append(label)\n\n # get full class 
list in dataset\n full_class_list = list(self.data.keys())\n\n # for each task\n for t_idx in range(num_tasks):\n # define task by sampling classes (num_ways)\n task_class_list = random.sample(full_class_list, num_ways)\n\n # for each sampled class in task\n for c_idx in range(num_ways):\n # sample data for support and query (num_shots + num_queries)\n class_data_list = random.sample(self.data[task_class_list[c_idx]], num_shots + num_queries)\n\n # load sample for support set\n for i_idx in range(num_shots):\n # set data\n support_data[i_idx + c_idx * num_shots][t_idx] = class_data_list[i_idx]\n support_label[i_idx + c_idx * num_shots][t_idx] = c_idx\n\n # load sample for query set\n for i_idx in range(num_queries):\n query_data[i_idx + c_idx * num_queries][t_idx] = class_data_list[num_shots + i_idx]\n query_label[i_idx + c_idx * num_queries][t_idx] = c_idx\n\n\n\n # convert to tensor (num_tasks x (num_ways * (num_supports + num_queries)) x ...)\n support_data = torch.stack([torch.from_numpy(data).float().to(tt.arg.device) for data in support_data], 1)\n support_label = torch.stack([torch.from_numpy(label).float().to(tt.arg.device) for label in support_label], 1)\n query_data = torch.stack([torch.from_numpy(data).float().to(tt.arg.device) for data in query_data], 1)\n query_label = torch.stack([torch.from_numpy(label).float().to(tt.arg.device) for label in query_label], 1)\n\n return [support_data, support_label, query_data, query_label]" ]
[ [ "numpy.asarray", "numpy.uint8", "numpy.transpose", "numpy.array", "numpy.zeros" ] ]
BarryLiu97/SEEG_Scripts
[ "fd0a79cfedc7a18f9995d808ab608a64facd5fe6" ]
[ "example/electrodes_analysis.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 12 14:28:00 2021\n\n@author: barryliu\n\"\"\"\n\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom visbrain.objects import SourceObj, RoiObj\n\nroot = 'E:\\Projects\\Video\\data'\nload_path = os.path.join(root, 'Electrodes\\\\')\nsave_path = os.path.join(root, 'Electrodes_Region\\\\')\nsubject = glob.glob(load_path + '/*.txt')\n\nen_re_name = pd.read_table('E:/Projects/Video/scripts/en_ch_region.txt', \n header=None)\nen_re_name = dict(zip(en_re_name[1], en_re_name[2]))\n\n\n# region = {sub[34:-4]:None for sub in subject}\nregion = {}\nfor i in range(len(subject)):\n elec = pd.read_table(subject[i], header=None)\n ch_name = list(elec[0])\n xyz = np.array(elec[[1, 2, 3]])\n \n subject_name = subject[i][34:-4]\n try: \n os.mkdir(save_path + subject_name)\n except:\n continue\n sub_path = os.path.join(save_path, subject_name) + '\\\\'\n \n src = SourceObj('source', xyz, text=ch_name)\n analysis = src.analyse_sources(roi_obj='aal')\n analysis = analysis.rename(columns={'Text':'Name'})\n out_chan = list(analysis[analysis['label'].isin(['Not found'])]['Name'])\n \n roi = RoiObj('aal')\n region = roi.get_labels()\n region = region[region.index < 91]['label']\n ch_names = analysis['Name']\n ch_region = analysis['label']\n ch_region = pd.DataFrame([ch_names, ch_region]).T\n ch_region = ch_region.drop(ch_region[ch_region['label'] == 'Not found'].index)\n ch_region.to_csv(sub_path + subject_name + '_elec.txt', sep='\\t')\n \n region = list(set(ch_region['label']))\n try:\n l_region = []\n [l_region.append(re) for re in region if 'L' == re[-1]]\n l_region.sort()\n except:\n pass\n \n try:\n r_region = []\n [r_region.append(re) for re in region if 'R' == re[-1]]\n r_region.sort()\n except:\n pass\n \n residual = abs(len(l_region)-len(r_region))\n if len(l_region) > len(r_region):\n for i in range(residual):\n r_region.append(None)\n elif len(l_region) < len(r_region):\n for i in range(residual):\n l_region.append(None)\n \n en_re_all = pd.DataFrame({'Left Hemi':l_region, 'Right Hemi':r_region})\n en_re_all.to_csv(sub_path + subject_name + '_en_region.txt', sep='\\t')\n \n chi_l_region = []\n for ch in en_re_all['Left Hemi']:\n try:\n chi_l_region.append(en_re_name[ch])\n except:\n chi_l_region.append(None)\n chi_r_region = []\n for ch in en_re_all['Right Hemi']:\n try:\n chi_r_region.append(en_re_name[ch])\n except:\n chi_r_region.append(None)\n chi_re_all = pd.DataFrame({'左侧半球':chi_l_region, '右侧半球':chi_r_region})\n chi_re_all.to_csv(sub_path + subject_name + '_chi_region.txt', sep='\\t', encoding='ansi')\n\n\nprint('There are {:} subjects'.format(len(subject)))\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_table", "numpy.array", "pandas.DataFrame" ] ]
visinf/ppac_refinement
[ "04a8676f5eb96c41ec6b1125c6bcad430218ef30" ]
[ "bin/train_flow_refined.py" ]
[ "# Author: Anne Wannenwetsch, TU Darmstadt (anne.wannenwetsch@visinf.tu-darmstadt.de)\n# Parts of this code were adapted from https://github.com/ucbdrive/hd3\nimport argparse\nimport logging\nimport os\nimport shutil\n\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom datasets import datasets_flow\nfrom models_refine.refinement_networks import PPacNet\nimport prob_utils\nimport refinement_models\nfrom utils import utils\n\nLOGGER = logging.getLogger(__name__)\n\n\n# Setup\ndef get_parser():\n parser = argparse.ArgumentParser(description='PPAC Flow Training')\n\n # Parameters for PPAC refinement network.\n parser.add_argument(\n '--depth_layers_guidance',\n nargs=\"+\",\n type=int,\n default=[],\n help='Depth of guidance layers in PPAC refinement network')\n parser.add_argument(\n '--depth_layers_prob',\n nargs=\"+\",\n type=int,\n default=[],\n help='Depth of probability layers in PPAC refinement network')\n parser.add_argument(\n '--kernel_size_preprocessing',\n type=int,\n default=5,\n help='Kernel size of guidance and probability branches')\n parser.add_argument(\n '--conv_specification',\n nargs=\"+\",\n type=str,\n default=[],\n help='Type of joint layers in PPAC refinement network')\n parser.add_argument(\n '--depth_layers_joint',\n nargs=\"+\",\n type=int,\n default=[],\n help='Depth of joint layers in PPAC refinement network')\n parser.add_argument(\n '--shared_filters',\n action='store_true',\n default=False,\n help='Use shared filters in combination branch?')\n parser.add_argument(\n '--kernel_size_joint',\n type=int,\n default=7,\n help='Kernel size in combination branch')\n parser.add_argument(\n '--pretrained_model_refine',\n type=str,\n help='Path to pretrained PPAC refinement model')\n\n # Parameters for data loading.\n parser.add_argument(\n '--dataset_name', type=str, help='Name of train dataset')\n parser.add_argument(\n '--data_root',\n type=str,\n help='Root directory of train/validation data')\n parser.add_argument(\n '--flow_root',\n type=str,\n help='Root directory of saved flow input data')\n parser.add_argument('--train_list', type=str, help='List of train data')\n parser.add_argument('--val_list', type=str, help='List of validation data')\n parser.add_argument(\n '--workers',\n type=int,\n default=8,\n help='Number of workers for data loader')\n\n # Parameters for learning\n parser.add_argument(\n '--epochs', type=int, default=500, help='Number of training epochs')\n parser.add_argument(\n '--batch_size',\n type=int,\n default=8,\n help='Batch size used for training')\n parser.add_argument(\n '--base_lr', type=float, default=1e-3, help='Learning rate')\n parser.add_argument(\n '--preprocessing_lr',\n type=float,\n default=None,\n help='Learning rate used for guidance/probability branch')\n parser.add_argument(\n '--weight_decay',\n type=float,\n default=0.0,\n help='Weight decay used in training')\n\n # Parameters for validation during training\n parser.add_argument(\n '--batch_size_val',\n type=int,\n default=1,\n help='Batch size for validation during training')\n parser.add_argument(\n '--evaluation-frequency',\n type=int,\n default=1,\n help='Evaluate every x epochs')\n parser.add_argument(\n '--evaluate_only',\n action='store_true',\n default=False,\n help='Perform only a single evaluation cycle, no training?')\n\n # Parameters for outputs\n parser.add_argument(\n '--save_step', type=int, default=50, help='Save model every x epochs')\n parser.add_argument(\n '--save_folder',\n type=str,\n default='model',\n help='Folder to save model and 
training summary')\n\n return parser\n\n\ndef main():\n global args, writer\n args = get_parser().parse_args()\n writer = SummaryWriter(args.save_folder)\n LOGGER.info(args)\n\n refinement_network = PPacNet(\n args.kernel_size_preprocessing, args.kernel_size_joint,\n args.conv_specification, args.shared_filters, args.depth_layers_prob,\n args.depth_layers_guidance, args.depth_layers_joint)\n model_refine = refinement_models.EpeNet(refinement_network).cuda()\n model_refine = torch.nn.DataParallel(model_refine).cuda()\n LOGGER.info('Used PPAC refinement model:')\n LOGGER.info(model_refine)\n\n if args.pretrained_model_refine:\n name_refinement_model = args.pretrained_model_refine\n if os.path.isfile(name_refinement_model):\n checkpoint = torch.load(name_refinement_model)\n model_refine.load_state_dict(checkpoint['state_dict'])\n LOGGER.info(\"Loaded pretrained PPAC checkpoint '{}'\".format(\n name_refinement_model))\n else:\n LOGGER.info(\n \"No checkpoint found at '{}'\".format(name_refinement_model))\n\n # Prepare data.\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n crop_shape = get_crop_shape(args.dataset_name)\n train_transform = datasets_flow.AugmenterFlow(mean, std, crop_shape)\n val_transform = datasets_flow.AugmenterFlow(mean, std)\n train_data = datasets_flow.FlowDataset(\n data_root=args.data_root,\n data_list=args.train_list,\n flow_root=args.flow_root,\n transform=train_transform)\n train_loader = torch.utils.data.DataLoader(\n train_data,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers,\n pin_memory=True)\n val_data = datasets_flow.FlowDataset(\n data_root=args.data_root,\n data_list=args.val_list,\n flow_root=args.flow_root,\n transform=val_transform)\n val_loader = torch.utils.data.DataLoader(\n val_data,\n batch_size=args.batch_size_val,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=True)\n\n if args.evaluate_only:\n epe_val, outliers_val = validate(val_loader, model_refine)\n LOGGER.info(\n 'Validation EPE: {:.4f}, Validation outliers: {:.4f}'.format(\n epe_val, outliers_val))\n return\n\n # Prepare learning.\n if args.preprocessing_lr:\n optimizer = torch.optim.Adam(\n [{\n 'params':\n model_refine.module.refinement_net.layers_joint.parameters()\n },\n {\n 'params':\n model_refine.module.refinement_net.network_guidance.\n parameters(),\n 'lr':\n args.preprocessing_lr\n },\n {\n 'params':\n model_refine.module.refinement_net.network_prob.parameters(),\n 'lr':\n args.preprocessing_lr\n }],\n lr=args.base_lr,\n weight_decay=args.weight_decay)\n else:\n optimizer = torch.optim.Adam(\n model_refine.parameters(),\n lr=args.base_lr,\n weight_decay=args.weight_decay)\n scheduler = get_lr_scheduler(optimizer, args.dataset_name)\n\n best_epe = 1e9\n\n # Start learning.\n for epoch in range(1, args.epochs + 1):\n scheduler.step()\n loss_train = train(train_loader, model_refine, optimizer, epoch,\n args.batch_size)\n writer.add_scalar('loss_train', loss_train, epoch)\n\n is_best = False\n if epoch % args.evaluation_frequency == 0:\n torch.cuda.empty_cache()\n epe_val, outliers_val = validate(val_loader, model_refine)\n LOGGER.info(\n 'Epoch {}. 
Validation EPE: {:.4f}, Validation outliers: {:.4f}'\n .format(epoch, epe_val, outliers_val))\n writer.add_scalar('epe_val', epe_val, epoch)\n writer.add_scalar('outliers_val', outliers_val, epoch)\n is_best = epe_val < best_epe\n best_epe = min(epe_val, best_epe)\n\n filename = os.path.join(args.save_folder, 'model_refine_latest.pth')\n torch.save({\n 'epoch': epoch,\n 'state_dict': model_refine.cpu().state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_epe': best_epe\n }, filename)\n model_refine.cuda()\n if is_best:\n shutil.copyfile(\n filename,\n os.path.join(args.save_folder, 'model_refine_best.pth'))\n if epoch % args.save_step == 0:\n shutil.copyfile(\n filename, args.save_folder + '/train_refine_epoch_' +\n str(epoch) + '.pth')\n\n\ndef train(train_loader, model_refine, optimizer, epoch, batch_size):\n \"\"\"Performs one training pass.\"\"\"\n loss_meter = utils.AverageMeter()\n model_refine.train()\n\n for i, (image, input_flow, probabilities,\n label) in enumerate(train_loader):\n if image.size(0) < batch_size:\n continue\n\n image = image.to(torch.device(\"cuda\"))\n input_flow = input_flow.to(torch.device(\"cuda\"))\n probabilities = probabilities.to(torch.device(\"cuda\"))\n label = label.to(torch.device(\"cuda\"))\n\n probabilities = prob_utils.safe_log(probabilities)\n output_refine = model_refine(\n input_flow,\n probabilities,\n image,\n label_list=[label],\n get_loss=True)\n total_loss = output_refine['loss'].sum()\n\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n loss_meter.update(total_loss.mean().data, image.size(0))\n current_iter = (epoch - 1) * len(train_loader) + i + 1\n writer.add_scalar('loss_train_batch',\n loss_meter.val.cpu().numpy(), current_iter)\n\n return loss_meter.avg.cpu().numpy()\n\n\ndef validate(val_loader, model_refine):\n \"\"\"Performs one validation pass.\"\"\"\n epe_meter = utils.AverageMeter()\n outlier_meter = utils.AverageMeter()\n model_refine.eval()\n\n with torch.no_grad():\n for (image, input_flow, probabilities, label) in val_loader:\n image = image.to(torch.device(\"cuda\"))\n input_flow = input_flow.to(torch.device(\"cuda\"))\n probabilities = probabilities.to(torch.device(\"cuda\"))\n label = label.to(torch.device(\"cuda\"))\n\n probabilities = prob_utils.safe_log(probabilities)\n output_refine = model_refine(\n input_flow,\n probabilities,\n image,\n label_list=[label],\n get_loss=False,\n get_epe=True,\n get_outliers=True)\n\n epe_meter.update(output_refine['epe'].mean().data, image.size(0))\n outlier_meter.update(output_refine['outliers'].mean().data,\n image.size(0))\n\n return epe_meter.avg, outlier_meter.avg\n\n\ndef get_crop_shape(dataset_name):\n \"\"\"Returns cropping shape corresponding to dataset_name.\"\"\"\n if 'MPISintel' in dataset_name:\n return (384, 768)\n elif 'KITTI' in dataset_name:\n return (320, 896)\n else:\n raise ValueError('Unknown dataset name {}'.format(dataset_name))\n\n\ndef get_lr_scheduler(optimizer, dataset_name=None):\n \"\"\"Returns learning rate scheduler corresponding to dataset_name.\"\"\"\n if dataset_name == 'KITTI':\n milestones = [100, 200, 300, 400]\n elif dataset_name == 'KITTI_full':\n milestones = [80, 160, 240, 320]\n elif dataset_name == 'MPISintel':\n milestones = [100, 200, 300, 400]\n elif dataset_name == 'MPISintel_full':\n milestones = [82, 164, 246, 328]\n else:\n raise ValueError('Unknown dataset name {}'.format(dataset_name))\n LOGGER.info('Milestones: {}'.format(str(milestones)))\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, 
milestones=milestones, gamma=0.5)\n return scheduler\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n main()\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.load", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.no_grad", "torch.device", "torch.nn.DataParallel" ] ]
lynncyrin/galaxySim
[ "e649d2fc560189fdda2be96d1c1df42a8182cdea" ]
[ "custom.py" ]
[ "#custom.py\n\nfrom __future__ import division\nimport math\nimport numpy\nimport sys\n\nclass partitionData (object):\n '''\n Partitions data into 2d or 3d boxes.\n \n [Input]\n data\n type: numpy ndarray\n a collection of the data to be partitioned\n \n [API]\n self.partitionToPoints[partition]\n type: dictionary\n all the points this partition holds\n self.pointToPartition[point]\n type: dictionary\n the partition this point is held in\n self.maxPartition\n type: int\n the number of the last partition\n self.data\n type: numpy ndarray\n you can add new data without reseting the partitions\n this is useful to moving averages\n \n self.partitionAverage[partition]\n type: dictionary\n stores the average for each partition\n self.calculateAverage()\n stores the average of each partition's data into self.partitionAverage\n\n self.partitionMass[partition]\n type: dictionary\n stores the mass for each partition \n self.partitionCenterOfMass[partition]\n type: dictionary\n stores the center of mass for each partition\n self.calculateCenterOfMass()\n stores the center of mass in self.partitionCenterOfMass\n '''\n def __init__ (self, data):\n self.data = data\n self.partitionToPoints = dict()\n self.pointToPartition = dict()\n #check input\n assert type(data) is numpy.ndarray, \"incorrect data type\"\n try:\n sizeX, sizeY, sizeZ = data.shape\n boxSize = math.floor(((sizeX+sizeY+sizeZ)/3)**0.5)\n for (x, y, z), v in numpy.ndenumerate(data):\n boxX = x//boxSize\n boxY = y//boxSize\n boxZ = z//boxSize\n box = boxX+boxSize*boxY+(boxSize**2)*boxZ\n self.pointToPartition[(x,y,z)] = box\n try:\n self.partitionToPoints[box].append((x,y,z))\n except KeyError:\n self.partitionToPoints[box] = list()\n self.partitionToPoints[box].append((x,y,z))\n except ValueError:\n sizeX, sizeY = data.shape\n boxSize = math.floor(((sizeX+sizeY)/2)**0.5)\n for (x, y), v in numpy.ndenumerate(data):\n boxX = x//boxSize\n boxY = y//boxSize\n box = boxX+boxSize*boxY\n self.pointToPartition[(x,y)] = box\n try:\n self.partitionToPoints[box].append((x,y))\n except KeyError:\n self.partitionToPoints[box] = list()\n self.partitionToPoints[box].append((x,y))\n inBox = 0\n for k, v in self.partitionToPoints.items(): \n inBox += len(v)\n #checks for some sort of obscure error\n assert inBox == data.size, \"all data not placed in boxes\"\n self.maxPartition = len(self.partitionToPoints)-1\n print(str(len(self.pointToPartition))+\" data points -> \"+str(len(self.partitionToPoints))+\" partitions\")\n\n def calculateAvereage (self, param=0):\n '''\n given a data set where data[i] = value, averages that value\n\n but if given a param, calculates the average of data[i][param]\n '''\n self.partitionAverage = dict()\n for partition, points in self.partitionToPoints.items():\n valueSum = 0\n locationSum = [0,0]\n numPoints = len(points)\n for point in points:\n if not param:\n valueSum += self.data[point]\n elif param == \"index\":\n locationSum[0] += point[0]\n locationSum[1] += point[1] \n else:\n valueSum += self.data[point][param]\n if param == \"index\":\n self.partitionAverage[partition] = (locationSum[0]/numPoints,locationSum[1]/numPoints)\n else:\n self.partitionAverage[partition] = valueSum/numPoints\n\n def calculateCenterOfMass (self):\n '''\n assumes the input data set is of format data[i] = mass\n '''\n self.partitionCenterOfMass = dict()\n self.partitionMass = dict()\n for partition, points in self.partitionToPoints.items():\n weightedPostition = [0,0]\n totalMass = 0\n for point in points:\n mass = self.data[point]\n 
totalMass += mass\n weightedPostition[0] += mass*point[0]\n weightedPostition[1] += mass*point[1]\n COM = [weightedPostition[0]/totalMass, weightedPostition[1]/totalMass]\n self.partitionCenterOfMass[partition] = COM\n self.partitionMass[partition] = totalMass\n\nclass loopProgress (object):\n '''\n simple two line loop progress indicator\n\n [Example]\n pb = loopProgress(100-1) #initilize indicator\n for i in range(100):\n pb.update(i) #update value\n '''\n def __init__ (self, maxVal=0):\n self.maxVal = maxVal-1\n print(\"Loop progress\")\n def update (self, counter):\n sys.stdout.flush()\n if self.maxVal:\n sys.stdout.write(\"\\r{}/{}\".format(counter,self.maxVal))\n else:\n sys.stdout.write(\"\\r{}\".format(counter))\n\n\ndef rotate (point_x, point_y, rads, center=(0,0)):\n newX = center[0] + (point_x-center[0])*math.cos(rads) - (point_y-center[1])*math.sin(rads)\n newY = center[1] + (point_x-center[0])*math.sin(rads) + (point_y-center[1])*math.cos(rads)\n return newX,newY\n\ndef build_distance_matrix (size):\n distance_matrix = dict()\n known_distances = dict()\n pb = loopProgress(size)\n for x1 in range(size):\n for y1 in range(size):\n distance_matrix[x1, y1] = numpy.empty((size, size))\n for (x2, y2), dont_need in numpy.ndenumerate(distance_matrix[x1, y1]):\n dx, dy = abs(x1-x2), abs(y1-x2)\n if (dx,dy) in known_distances.keys():\n distance_matrix[x1, y1][x2, y2] = known_distances[dx,dy]\n else:\n dist = math.hypot(dx, dy)\n known_distances[dx,dy] = dist\n known_distances[dy,dx] = dist\n distance_matrix[x1, y1][x2, y2] = dist\n distance_matrix[x1, y1][x2, y2] = math.hypot(dx, dy)\n pb.update(x1)\n return distance_matrix\n\ndef sortkeys (data):\n out = list()\n for entry in data.keys(): out.append(entry)\n return sorted(out)\n\ndef make_sphere (r): \n inr = set()\n for x in range(r+1):\n for y in range(r+1):\n if math.hypot(x,y)<=r:\n inr.add((x,y))\n inr.add((-x,y))\n inr.add((x,-y))\n inr.add((-x,-y))\n dmap = dict()\n for x,y in inr:\n dmod = r+1-math.hypot(x,y)\n dmap[x,y] = dmod\n return dmap" ]
[ [ "numpy.ndenumerate", "numpy.empty" ] ]
nongroup-lanl/riv-processing
[ "2fce3a4741a61a9fa94c813322f5d4f51002a5fe" ]
[ "process_legacy_data.py" ]
[ "\"\"\"\nScript to compare results with Ucayali classifications and channel masks\n\"\"\"\n\nimport os\nimport rasterio\nimport numpy as np\nfrom scipy.io import loadmat\n\n\nIMAGERY_PATH = '/Users/rmsare/data/Ucayali/images/'\nMASK_PATH = '/Users/rmsare/data/Ucayali/masks/'\nSTRUCT_FIELD_NAME = 'cmap'\n\n\ndef get_raster_profile(directory):\n files = os.listdir(directory)\n available_tif_files = [f for f in files if '.tif' in f]\n in_file = available_tif_files[0]\n filename = os.path.join(directory, in_file)\n with rasterio.open(filename) as r:\n profile = r.profile\n profile.update(count=1)\n return profile\n\n\ndef read_data(filename, shape):\n struct = loadmat(filename)\n data = np.array(struct[STRUCT_FIELD_NAME])\n data = data.reshape(shape, order='F')\n return data\n\n\ndef save_file_as_tiff(in_filename, profile, shape):\n out_filename = in_filename.replace('mat', 'tif')\n data = read_data(in_filename, shape)\n with rasterio.open(out_filename, 'w', **profile) as out:\n out.write(data.astype(profile['dtype']), 1)\n\n\ndef process_directory(directory):\n profile = get_raster_profile(directory)\n shape = (profile['height'], profile['width'])\n\n all_files = os.listdir(directory)\n in_files = [f for f in all_files if '.mat' in f and f[0] == 'C']\n done_files = [f for f in all_files if '.tif' in f and f[0] == 'C']\n done_files = [f.replace('tif', 'mat') for f in done_files]\n in_files = list(set(in_files) - set(done_files))\n\n for f in in_files:\n filename = os.path.join(directory, f)\n save_file_as_tiff(filename, profile, shape)\n\n\ndef main():\n directories = ['R3', 'R4', 'R5', 'R6']\n for directory in directories:\n print('Processing {}...'.format(directory))\n directory = os.path.join(IMAGERY_PATH, directory)\n process_directory(directory)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "scipy.io.loadmat" ] ]
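process_legacy_data.py above hinges on one pattern: borrow the georeferencing profile from an existing GeoTIFF, then write each column-major MATLAB array out with that profile. A condensed sketch of the round trip, with hypothetical file names standing in for the real inputs:

import numpy as np
import rasterio
from scipy.io import loadmat

mat_file = "C_example.mat"      # hypothetical input containing a 'cmap' field
template_tif = "template.tif"   # hypothetical GeoTIFF supplying the profile

with rasterio.open(template_tif) as src:
    profile = src.profile
profile.update(count=1)
shape = (profile["height"], profile["width"])

# MATLAB arrays are column-major, hence order='F' when reshaping.
data = np.array(loadmat(mat_file)["cmap"]).reshape(shape, order="F")
with rasterio.open(mat_file.replace("mat", "tif"), "w", **profile) as dst:
    dst.write(data.astype(profile["dtype"]), 1)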
geophysics-ubonn/REDA
[ "8f0399031121f5a937171231a25f9ab03a3c8873" ]
[ "lib/reda/utils/eit_fzj_utils.py" ]
[ "# various utility functions used in conjunction with the FZ EIT systems\nimport numpy as np\nimport pandas as pd\n\nimport pylab as plt\nimport scipy.io as sio\n\nimport reda\nimport reda.utils.geometric_factors as geometric_factors\nimport reda.utils.fix_sign_with_K as fixK\nimport reda.importers.eit_fzj as eit_fzj\n\n\ndef compute_correction_factors(data, true_conductivity, elem_file, elec_file):\n \"\"\"Compute correction factors for 2D rhizotron geometries, following\n Weigand and Kemna, 2017, Biogeosciences\n\n https://doi.org/10.5194/bg-14-921-2017\n\n Parameters\n ----------\n data : :py:class:`pandas.DataFrame`\n measured data\n true_conductivity : float\n Conductivity in S/m\n elem_file : string\n path to CRTomo FE mesh file (elem.dat)\n elec_file : string\n path to CRTomo FE electrode file (elec.dat)\n\n Returns\n -------\n correction_factors : Nx5 :py:class.`numpy.ndarray`\n measurement configurations and correction factors\n (a,b,m,n,correction_factor)\n \"\"\"\n settings = {\n 'rho': 100,\n 'pha': 0,\n 'elem': 'elem.dat',\n 'elec': 'elec.dat',\n '2D': True,\n 'sink_node': 100,\n\n }\n K = geometric_factors.compute_K_numerical(data, settings=settings)\n\n data = geometric_factors.apply_K(data, K)\n data = fixK.fix_sign_with_K(data)\n\n frequency = 100\n\n data_onef = data.query('frequency == {}'.format(frequency))\n rho_measured = data_onef['r'] * data_onef['k']\n\n rho_true = 1 / true_conductivity * 1e4\n correction_factors = rho_true / rho_measured\n\n collection = np.hstack((\n data_onef[['a', 'b', 'm', 'n']].values,\n np.abs(correction_factors)[:, np.newaxis]\n ))\n\n return collection\n\n\ndef apply_correction_factors(df, correction_file):\n \"\"\"Apply correction factors for a pseudo-2D measurement setup. See Weigand\n and Kemna, 2017, Biogeosciences, for more information:\n\n https://doi.org/10.5194/bg-14-921-2017\n\n Parameters\n ----------\n df : :py:class:`pandas.DataFrame`\n Data container\n correction_file : string\n Path to correction file. The file must have 5 columns:\n a,b,m,n,correction_factor\n\n Returns\n -------\n\n corr_data : Nx5 :py:class:`numpy.ndarray`\n Correction files as imported from the file. Columns:\n a,b,m,n,correction_factor\n \"\"\"\n if isinstance(correction_file, (list, tuple)):\n corr_data_raw = np.vstack(\n [np.loadtxt(x) for x in correction_file]\n )\n else:\n corr_data_raw = np.loadtxt(correction_file)\n A = (corr_data_raw[:, 0] / 1e4).astype(int)\n B = (corr_data_raw[:, 0] % 1e4).astype(int)\n M = (corr_data_raw[:, 1] / 1e4).astype(int)\n N = (corr_data_raw[:, 1] % 1e4).astype(int)\n\n corr_data = np.vstack((A, B, M, N, corr_data_raw[:, 2])).T\n corr_data[:, 0:2] = np.sort(corr_data[:, 0:2], axis=1)\n corr_data[:, 2:4] = np.sort(corr_data[:, 2:4], axis=1)\n\n # if 'frequency' not in df.columns:\n # raise Exception(\n # 'No frequency data found. 
Are you sure this is a seit data set?'\n # )\n\n gf = df.groupby(['a', 'b', 'm', 'n'])\n for key, item in gf.groups.items():\n # print('key', key)\n # print(item)\n item_norm = np.hstack((np.sort(key[0:2]), np.sort(key[2:4])))\n # print(item_norm)\n index = np.where(\n (corr_data[:, 0] == item_norm[0]) &\n (corr_data[:, 1] == item_norm[1]) &\n (corr_data[:, 2] == item_norm[2]) &\n (corr_data[:, 3] == item_norm[3])\n )[0]\n # print(index, corr_data[index])\n if len(index) == 0:\n print(key)\n # import IPython\n # IPython.embed()\n raise Exception(\n 'No correction factor found for this configuration: {}'.format(\n key\n )\n )\n\n factor = corr_data[index, 4]\n # apply correction factor\n for col in ('r', 'Zt', 'Vmn', 'rho_a'):\n if col in df.columns:\n df.iloc[item, df.columns.get_loc(col)] *= factor\n return corr_data\n\n\n# this is data for the first test board. As far as I know nobody else has such\n# an EIT system, and therefore I think it's ok to include the data here.\n_resistor_data = np.array((\n (1, 4, 2, 3, 980, 10, 20),\n (2, 3, 1, 4, 980, 10, 20),\n))\n\n\ndef check_resistor_board_measurements(data_file, reference_data_file=None,\n create_plot=True, **kwargs):\n \"\"\" To check basic system function a test board was built with multiple\n resistors attached to for connectors each. Measurements can thus be\n validated against known electrical (ohmic) resistances.\n\n Note that the normal-reciprocal difference is not yet analyzed!\n\n The referenc_data_file should have the following structure:\n The file contains the four-point spreads to be imported from\n the measurement. This file is a text file with four columns (A, B, M, N),\n separated by spaces or tabs. Each line denotes one measurement and its\n expected resistance, the allowed variation, and its allow difference\n towards its reciprocal counterpart: ::\n\n 1 2 4 3 1000 1 20\n 4 3 2 1 1000 1 20\n\n test frequency: 1Hz\n\n Parameters\n ----------\n data_file : string\n path to mnu0 data file\n reference_data_file: string, optional\n path to reference data file with structure as describe above. 
Default\n data is used if set to None\n create_plot : bool, optional\n if True, create a plot with measured and expected resistances\n kwargs : dict, optional\n kwargs will be redirected to the sEIT.import_eit_fzj call\n\n Returns\n -------\n fig : figure object, optional\n if create_plot is True, return a matplotlib figure\n \"\"\"\n # reference_data = np.loadtxt(reference_data_file)\n # configs = reference_data[:, 0:4]\n column_names = [\n 'a', 'b', 'm', 'n', 'expected_r', 'variation_r', 'variation_diffr'\n ]\n if reference_data_file is None:\n ref_data = pd.DataFrame(_resistor_data, columns=column_names)\n else:\n ref_data = pd.read_csv(\n reference_data_file,\n names=column_names,\n delim_whitespace=True,\n )\n print(ref_data)\n configs = ref_data[['a', 'b', 'm', 'n']].values.astype(int)\n\n seit = reda.sEIT()\n seit.import_eit_fzj(data_file, configs, **kwargs)\n seit.data = seit.data.merge(ref_data, on=('a', 'b', 'm', 'n'))\n\n # iterate through the test configurations\n test_frequency = 1\n failing = []\n for nr, row in enumerate(ref_data.values):\n print(nr, row)\n key = tuple(row[0:4].astype(int))\n group_abmn = seit.abmn\n if key not in group_abmn.keys:\n continue\n else:\n item = seit.abmn.get_group(key)\n\n expected_r = row[4]\n allowed_variation = row[5]\n # expected_r_diff = row[6]\n\n measured_r, measured_rdiff = item.query(\n 'frequency == {}'.format(test_frequency)\n )[['r', 'rdiff']].values.squeeze()\n minr = expected_r - allowed_variation\n maxr = expected_r + allowed_variation\n if not (minr <= measured_r <= maxr):\n print(' ', 'not passing', row)\n print(' ', minr, maxr)\n print(' ', measured_r)\n failing.append((nr, measured_r))\n if len(failing) == 0:\n failing = None\n else:\n failing = np.atleast_2d(np.array(failing))\n\n if create_plot:\n fig, ax = plt.subplots(1, 1, figsize=(16 / 2.54, 8 / 2.54))\n data = seit.data.query('frequency == 1')\n x = np.arange(0, data.shape[0])\n\n ax.plot(\n x,\n data['r'],\n '.-',\n label='data',\n )\n ax.fill_between(\n x,\n data['expected_r'] - data['variation_r'],\n data['expected_r'] + data['variation_r'],\n color='green',\n alpha=0.8,\n label='allowed limits',\n )\n if failing is not None:\n ax.scatter(\n failing[:, 0],\n failing[:, 1],\n color='r',\n label='not passing',\n s=40,\n )\n\n ax.legend()\n ax.set_xticks(x)\n xticklabels = [\n '{}-{} {}-{}'.format(*row) for row\n in data[['a', 'b', 'm', 'n']].values.astype(int)\n ]\n ax.set_xticklabels(xticklabels, rotation=45)\n\n ax.set_ylabel(r'resistance $[\\Omega]$')\n ax.set_xlabel('configuration a-b m-n')\n if failing is None:\n suffix = ' PASSED'\n else:\n suffix = ''\n ax.set_title('Resistor-check for FZJ-EIT systems' + suffix)\n\n fig.tight_layout()\n # fig.savefig('out.pdf')\n return fig\n\n\ndef get_md_data_2018a(filename):\n \"\"\"Return the md data of a given FZJ EIT 2018a LI calibration data file.\n\n This function should probably go into the importers, but for now will\n reside here until it can be properly integrated.\n\n Parameters\n ----------\n filename : str\n Path to eit_data.mat file generated for an LI-'calibration' run\n\n Returns\n -------\n md : pandas.DataFrame\n MD data\n\n \"\"\"\n mat = sio.loadmat(filename, squeeze_me=True)\n importer = eit_fzj.mat_version_importers['FZJ-EZ-2018A']\n md = importer._extract_md(mat, multiplexer_group=1)\n return md\n\n\ndef testboard_evaluation(datapath, configdat,\n outputname, frequencies=np.logspace(-1, 4, 40),\n error_percentage=1):\n \"\"\"\n A testboard with resistors and capacitors was built to 
test the\n basic operation performance of eit-systems from FZJ. This function plots\n the results of measurements on this board in terms of impedance magnitude\n and phase.\n\n Parameters\n ----------\n datapath : str\n Path to the eit_data_mnu0.mat file containing the measurements.\n\n configdat: np.ndarray or txt-file\n input configuration of the used testboard configurations,\n e.g. for first two rows of the board:\n 1 4 2 3\n 2 3 1 4\n 5 8 6 7\n 6 7 5 8\n Note that normal and reciprocal measurements have to be measured.\n\n outputname: str\n output name of plot in png-format\n\n frequencies: numpy array\n frequency range (in log10-space) to compare the measurements to;\n default range is from 0.1 Hz to 10 kHz\n\n error_percentage: float\n percentage of allowed measurement error. The range inside this\n limit will be shown as a grey shadow in the plot.\n\n Returns\n -------\n fig: figure object\n Saves the plot with the given output name in the execution location of\n the script.\n\n \"\"\"\n\n def calc_response(frequencies):\n # calculates theoretical |Z| and Zpha of the testboard for given\n # frequencies\n omega = 2 * np.pi * frequencies\n # settings of the specific testboard; if a new testboard with different\n # resistors/capacitors is built, parameters can be changed here\n rs = 1000\n r1 = 500\n\n #\n c1 = 330e-9\n\n r2 = 500\n c2 = 47e-6\n\n cp = 5e-12\n\n # the terms\n term1 = (r1 - 1j * omega * r1 ** 2 * c1) / \\\n (1 + omega ** 2 * c1 ** 2 * r1 ** 2)\n term2 = (r2 - 1j * omega * r2 ** 2 * c2) / \\\n (1 + omega ** 2 * c2 ** 2 * r2 ** 2)\n\n z1 = rs + term1 + term2\n z2 = - 1j / (omega * cp)\n\n z = 1 / (1 / z1 + 1 / z2)\n\n rmag = np.abs(z)\n rpha = np.arctan2(z.imag, z.real) * 1000\n\n return rmag, rpha\n\n # load configurations\n if type(configdat) == np.ndarray:\n configs = configdat\n else:\n configs = np.loadtxt(configdat)\n\n # load measurements\n seit = reda.sEIT()\n seit.import_eit_fzj(datapath, configs)\n\n # append measurements to either the \"normal\" or \"reciprocal\" list\n nor = []\n rec = []\n for i in configs:\n data = seit.abmn.get_group((i[0], i[1], i[2], i[3]))\n if (data['norrec'] == 'nor').all():\n nor.append(data)\n else:\n rec.append(data)\n\n # calculate theoretical testboard response and error\n rmag, rpha = calc_response(frequencies)\n error_rmag = rmag * error_percentage / 100\n error_rpha = rpha * error_percentage / 100\n\n # plot results\n assert len(nor) > 0 or len(rec) > 0, \\\n 'we got neither normal or reciprocal data'\n nr_y = max((len(nor), len(rec)))\n fig, axes = plt.subplots(\n nr_y, 2, figsize=(12, 3*len(nor)), sharex=True)\n\n # in case of only one measurement\n if len(nor) <= 1:\n # plot normal measurements and theoretical response\n for num, n in enumerate(nor):\n axes[0].set_title('Magnitude {} {} {} {}'.format(\n n.iloc[0]['a'], n.iloc[0]['b'],\n n.iloc[0]['m'], n.iloc[0]['n']))\n axes[0].plot(n[\"frequency\"], n['r'],\n marker='o', linestyle=' ', label='nor')\n axes[0].plot(frequencies, rmag, label='calculated')\n axes[0].fill_between(frequencies, rmag+error_rmag,\n rmag-error_rmag, color='grey', alpha=0.3)\n axes[0].set_ylabel(r'|Z| [$\\Omega$]')\n axes[1].set_title('Phase {} {} {} {}'.format(\n n.iloc[0]['a'], n.iloc[0]['b'],\n n.iloc[0]['m'], n.iloc[0]['n']))\n axes[1].plot(n[\"frequency\"], -1*n['rpha'],\n marker='o', linestyle=' ', label='nor')\n axes[1].plot(frequencies, -1*rpha, label='calculated')\n axes[1].fill_between(\n frequencies, -1*rpha + error_rpha, -1*rpha-error_rpha,\n color='grey', alpha=0.3)\n 
axes[1].set_ylabel(r'-$\\varphi_{Z}$ [mrad]')\n\n # plot reciprocal measurements\n for num, r in enumerate(rec):\n axes[0].plot(r[\"frequency\"], r['r'],\n marker='x', linestyle=' ', label='rec')\n axes[1].plot(r[\"frequency\"], -1*r['rpha'],\n marker='x', linestyle=' ', label='rec')\n\n # axis labels for two plots\n axes[0].set_xlabel(\"frequency [Hz]\")\n axes[1].set_xlabel(\"frequency [Hz]\")\n\n # in case of several measurements\n else:\n # plot normal measurements and theoretical response\n for num, n in enumerate(nor):\n axes[num-1][0].set_title('Magnitude {} {} {} {}'.format(\n n.iloc[0]['a'], n.iloc[0]['b'],\n n.iloc[0]['m'], n.iloc[0]['n']))\n axes[num-1][0].plot(n[\"frequency\"], n['r'],\n marker='o', linestyle=' ', label='nor')\n axes[num-1][0].plot(frequencies, rmag, label='calculated')\n axes[num-1][0].fill_between(frequencies, rmag+error_rmag,\n rmag-error_rmag, color='grey',\n alpha=0.3)\n axes[num-1][0].set_ylabel(r'|Z| [$\\Omega$]')\n axes[num-1][1].set_title('Phase {} {} {} {}'.format(\n n.iloc[0]['a'], n.iloc[0]['b'],\n n.iloc[0]['m'], n.iloc[0]['n']))\n axes[num-1][1].plot(n[\"frequency\"], -1*n['rpha'],\n marker='o', linestyle=' ', label='nor')\n axes[num-1][1].plot(frequencies, -1*rpha, label='calculated')\n axes[num-1][1].fill_between(\n frequencies, -1*rpha + error_rpha, -1*rpha-error_rpha,\n color='grey', alpha=0.3)\n axes[num-1][1].set_ylabel(r'-$\\varphi_{Z}$ [mrad]')\n\n # plot reciprocal measurements\n for num, r in enumerate(rec):\n axes[num-1][0].plot(r[\"frequency\"], r['r'],\n marker='x', linestyle=' ', label='rec')\n axes[num-1][1].plot(r[\"frequency\"], -1*r['rpha'],\n marker='x', linestyle=' ', label='rec')\n\n # axis labels for two bottom plots\n axes[len(nor)-1][0].set_xlabel(\"frequency [Hz]\")\n axes[len(nor)-1][1].set_xlabel(\"frequency [Hz]\")\n\n # axis scaling and legends\n for ax in axes.reshape(-1):\n ax.grid()\n ax.legend()\n ax.set_xscale(\"log\")\n ax.set_xlim(min(frequencies), max(frequencies))\n\n fig.tight_layout()\n fig.savefig('{}.png'.format(outputname), dpi=300)\n" ]
[ [ "pandas.read_csv", "numpy.abs", "numpy.logspace", "numpy.arange", "numpy.vstack", "scipy.io.loadmat", "numpy.sort", "pandas.DataFrame", "numpy.arctan2", "numpy.array", "numpy.where", "numpy.loadtxt" ] ]
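apply_correction_factors in the REDA record above receives each electrode pair packed into a single number, A*1e4 + B. A standalone sketch of that unpacking on a made-up row; integer floor division replaces the original's `/ 1e4` plus astype(int), which truncates the same way for positive values:

import numpy as np

# One packed row: AB = 1*1e4 + 4, MN = 2*1e4 + 3, correction factor 0.98.
raw = np.array([[10004.0, 20003.0, 0.98]])

A = (raw[:, 0] // 1e4).astype(int)  # integer part -> current electrode a
B = (raw[:, 0] % 1e4).astype(int)   # remainder    -> current electrode b
M = (raw[:, 1] // 1e4).astype(int)
N = (raw[:, 1] % 1e4).astype(int)
print(A, B, M, N)  # [1] [4] [2] [3]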
yusin2it/SARoptical_fusion
[ "896da9f436b90b8eb6609e981ea0ba8f495be278" ]
[ "datasets/superpixels_seg.py" ]
[ "import os\r\nimport glob\r\nimport rasterio\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\n\r\nimport torch.utils.data as data\r\nfrom skimage.segmentation import felzenszwalb, slic, mark_boundaries\r\n\r\ndef normalization(data):\r\n    _range = np.max(data) - np.min(data)\r\n    return (data - np.min(data)) / _range\r\n\r\n# indices of sentinel-2 high-/medium-/low-resolution bands\r\nS2_BANDS_HR = [2, 3, 4]\r\nS2_BANDS_MR = [5, 6, 7, 9, 12, 13]\r\nS2_BANDS_LR = [1, 10, 11]\r\n\r\n\r\n# util function for reading s2 data\r\ndef load_s2(path, use_hr, use_mr, use_lr):\r\n bands_selected = []\r\n if use_hr:\r\n bands_selected = bands_selected + S2_BANDS_HR\r\n if use_mr:\r\n bands_selected = bands_selected + S2_BANDS_MR\r\n if use_lr:\r\n bands_selected = bands_selected + S2_BANDS_LR\r\n bands_selected = sorted(bands_selected)\r\n with rasterio.open(path) as data:\r\n s2 = data.read(bands_selected)\r\n s2 = s2.astype(np.float32)\r\n s2 = np.clip(s2, 0, 10000)\r\n return s2\r\n\r\n\r\n# util function for reading s1 data\r\ndef load_s1(path):\r\n with rasterio.open(path) as data:\r\n s1 = data.read()\r\n s1 = s1.astype(np.float32)\r\n s1 = np.nan_to_num(s1)\r\n s1 = np.clip(s1, -25, 0)\r\n # normalize to 0~1\r\n s1 /= 25\r\n s1 += 1\r\n s1 = s1.astype(np.float32)\r\n return s1\r\n\r\n\r\n# this function is for classification and, most importantly, for weak supervision\r\ndef load_sample(sample, use_s1, use_s2hr, use_s2mr, use_s2lr, superpixel=True,\r\n no_savanna=False, igbp=True, unlabeled=False, n_segments=100, sigma=2):\r\n\r\n use_s2 = use_s2hr or use_s2mr or use_s2lr\r\n\r\n # load s2 data\r\n if use_s2:\r\n img = load_s2(sample[\"s2\"], use_s2hr, use_s2mr, use_s2lr)\r\n s2 = normalization(img) # normalize to 0~1\r\n s2 = s2.astype(np.float32)\r\n # s2 = s2.swapaxes(2, 0)  # this was wrong, completely wrong\r\n s2 = np.rollaxis(s2, 0, 3)\r\n segments = felzenszwalb(s2, scale=200, sigma=0.50, min_size=30)\r\n #segments = felzenszwalb(s2, scale=50, sigma=0.80, min_size=30)\r\n segments = segments + 1  # shift labels to start at 1, for consistency with slic\r\n #segments = slic(s2, n_segments=1000, sigma=1, start_label=1, multichannel=True)\r\n print(segments.max())\r\n print(sample[\"s2\"].replace(\"tif\", \"npy\").replace(\"s2_\", \"se_\"))\r\n #print(os.path.split(sample[\"s2\"].replace(\"tif\", \"npy\").replace(\"s2_\", \"se_\"))[0])\r\n if not os.path.isdir(os.path.dirname(sample[\"s2\"].replace(\"tif\", \"npy\").replace(\"s2_\", \"se_\"))):\r\n os.makedirs(os.path.dirname(sample[\"s2\"].replace(\"tif\", \"npy\").replace(\"s2_\", \"se_\")))\r\n np.save(sample[\"s2\"].replace(\"tif\", \"npy\").replace(\"s2_\", \"se_\"), segments)\r\n\r\n # superpixels were already saved to disk above; none are returned either way\r\n segments = None\r\n\r\n # load label; labeled and unlabeled samples currently return the same dict\r\n return {'image': img, 'segments': segments, 'id': sample[\"id\"]}\r\n\r\n\r\n\r\nclass DFC2020(data.Dataset):\r\n \"\"\"PyTorch dataset class for the DFC2020 dataset\"\"\"\r\n\r\n def __init__(self,\r\n path,\r\n subset=\"val\",\r\n no_savanna=False,\r\n use_s2hr=False,\r\n use_s2mr=False,\r\n use_s2lr=False,\r\n use_s1=False,\r\n train_index=None):\r\n \"\"\"Initialize the dataset\"\"\"\r\n\r\n # initialize\r\n super(DFC2020, self).__init__()\r\n\r\n # make sure parameters are okay\r\n if not (use_s2hr or use_s2mr or use_s2lr or use_s1):\r\n raise ValueError(\"No input specified, set at least one of use_[s2hr, s2mr, s2lr, s1] to True!\")\r\n self.use_s2hr = use_s2hr\r\n self.use_s2mr = use_s2mr\r\n 
self.use_s2lr = use_s2lr\r\n self.use_s1 = use_s1\r\n self.train_index = train_index\r\n assert subset in [\"val\", \"train\", \"test\"]\r\n self.no_savanna = no_savanna\r\n # make sure parent dir exists\r\n assert os.path.exists(path)\r\n # build list of sample paths\r\n if subset == \"train\":\r\n train_list = []\r\n for seasonfolder in ['ROIs0000_autumn', 'ROIs0000_spring',\r\n 'ROIs0000_winter', 'ROIs0000_summer']:\r\n train_list += [os.path.join(seasonfolder, x) for x in\r\n os.listdir(os.path.join(path, seasonfolder))]\r\n train_list = [x for x in train_list if \"s2_\" in x]\r\n sample_dirs = train_list\r\n else:\r\n path = os.path.join(path, \"ROIs0000_test\", \"s2_0\")\r\n sample_dirs = []\r\n\r\n self.samples = []\r\n for folder in sample_dirs:\r\n s2_locations = glob.glob(os.path.join(path, f\"{folder}/*.tif\"), recursive=True)\r\n for s2_loc in tqdm(s2_locations, desc=\"[Load]\"):\r\n self.samples.append({\"s2\": s2_loc, \"id\": os.path.basename(s2_loc)})\r\n # sort list of samples\r\n #self.samples = sorted(self.samples, key=lambda i: i['id'])\r\n # sort list of samples\r\n if self.train_index:\r\n Tindex = np.load(self.train_index)\r\n self.samples = [self.samples[i] for i in Tindex]\r\n\r\n print(\"loaded\", len(self.samples),\r\n \"samples from the dfc2020 subset\", subset)\r\n\r\n def __getitem__(self, index):\r\n \"\"\"Get a single example from the dataset\"\"\"\r\n\r\n # get and load sample from index file\r\n sample = self.samples[index]\r\n return load_sample(sample, self.use_s1, self.use_s2hr, self.use_s2mr,\r\n self.use_s2lr, no_savanna=self.no_savanna, igbp=False)\r\n\r\n def __len__(self):\r\n \"\"\"Get number of samples in the dataset\"\"\"\r\n return len(self.samples)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"\\n\\nDFC2020 test\")\r\n data_dir = '/workplace/DFC2020'\r\n ds = DFC2020(data_dir, subset=\"train\", use_s1=False, use_s2hr=True, use_s2mr=False, use_s2lr=False, no_savanna=True, train_index='../utils/train_100.npy')\r\n for i in range(len(ds)):\r\n s = ds.__getitem__(i)\r\n print(\"id:\", s[\"id\"], \"\\n\", \"input shape:\", s[\"image\"].shape)\r\n" ]
[ [ "numpy.rollaxis", "numpy.min", "numpy.clip", "numpy.nan_to_num", "numpy.max", "numpy.load", "torch.utils.data.read" ] ]
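load_sample above runs Felzenszwalb superpixels on a channels-last tile and shifts the labels to start at 1, matching slic's start_label. The same call on a random stand-in image, as a self-contained sketch:

import numpy as np
from skimage.segmentation import felzenszwalb

rng = np.random.default_rng(0)
tile = rng.random((64, 64, 3)).astype(np.float32)  # stand-in for a normalized S2 patch

segments = felzenszwalb(tile, scale=200, sigma=0.50, min_size=30)
segments = segments + 1  # labels now start at 1, as in the loader above
print(segments.min(), segments.max())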
ZuowenWang0000/GRUBERT-A-GRU-Based-Method-to-Fuse-BERT-Hidden-Layers
[ "992967fe102493eadf37423de5710761f007bcb1" ]
[ "train.py" ]
[ "import sys\nimport os\nimport random\nimport torch\nimport numpy as np\nif __name__ == \"__main__\":\n try:\n # Try to set the random seed, have to do this here instead of in main()\n seed = int(sys.argv[sys.argv.index(\"--seed\") + 1])\n print(\"Using seed: %d\" % seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n except:\n seed = None\n print(\"WARNING: Seed not set\")\n\nimport time\nimport sys\nimport copy\nimport json\nimport click\n\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.backends.cudnn as cudnn\n\nfrom lstm_model import LstmModel\nfrom bert_model import BertMixModel, BertBaseModel, BertWSModel, BertLastFourModel, BertMixLinearModel, \\\n BertMixLSTMModel\nfrom flair_model import GSFlairMixModel\nfrom dataset import BertTwitterDataset, RobertaTwitterDataset\nfrom utils import *\nfrom test import *\nfrom embeddings import *\n\ndef main(config, seed=None, embedding=\"bert-mix\"):\n \"\"\"\n Training and validation.\n \"\"\"\n # Get config\n config_dict = get_config(config)\n config = config_to_namedtuple(config_dict)\n\n print(config)\n model_type = eval(config.model.architecture) # Find out which type of model to instantiate\n\n n_classes = config.model.n_classes\n # fine_tune_embeddings = config.model.fine_tune_embeddings # fine-tune word embeddings?\n sentence_length_cut = config.model.sentence_length_cut #set fixed sentence length\n\n # Training parameters\n start_epoch = config.training.start_epoch # start at this epoch\n batch_size = config.training.batch_size # batch size\n lr = config.training.lr # learning rate\n momentum = config.training.momentum # momentum\n workers = config.training.workers # number of workers for loading data in the DataLoader\n epochs = config.training.epochs # number of epochs to run\n checkpoint = config.training.checkpoint # path to saved model checkpoint, None if none\n save_checkpoint_freq_epoch = config.training.save_checkpoint_freq_epoch\n train_without_val = config.training.train_without_val\n # Replace __USER__ with actual username, append seed for uniqueness\n save_checkpoint_path = config.training.save_checkpoint_path.replace(\"__USER__\", os.popen(\"whoami\").read().strip()) + f\"_seed{seed}\"\n weight_decay = config.training.weight_decay # weight decay\n lr_decay = config.training.lr_decay # learning rate decay\n\n # Dataset parameters\n dataset_path = config.dataset.dataset_dir\n train_file_path = config.dataset.rel_train_path\n val_file_path = config.dataset.rel_val_path\n test_file_path = config.dataset.rel_test_path\n\n setattr(config.model, \"embedding_type\", embedding) # Add embedding type to model config\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n setattr(config.model, \"device\", device) # Add device to model config\n\n print(\"Checkpoints will be saved in: %s\" % save_checkpoint_path, flush=True)\n\n print(f\"[{embedding}] initializing embedder\", flush=True)\n\n if embedding in [\"gs-flair\", \"flair\", \"gs-bert\", \"gs-elmo\", \"elmo\", \"glove\", \"syngcn\", \"gs-only\", \"twitter\"]:\n import flair\n from flair.datasets import CSVClassificationDataset\n print(f\"[{embedding}] initializing dataset\", flush=True)\n\n # Initialize datasets\n train_dataset = CSVClassificationDataset(os.path.join(dataset_path, train_file_path), {0: \"text\", 1: \"label\"}, 
max_tokens_per_doc=sentence_length_cut, tokenizer=False, in_memory=False, skip_header=True)\n val_dataset = CSVClassificationDataset(os.path.join(dataset_path, val_file_path), {0: \"text\", 1: \"label\"}, max_tokens_per_doc=sentence_length_cut, tokenizer=False, in_memory=False, skip_header=True)\n \n # Initialize data loaders\n train_loader = flair.datasets.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)\n val_loader = flair.datasets.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)\n\n # Tell training loop how to prepare embeddings\n prepare_embeddings_fn = prepare_embeddings_flair\n print(f\"[{embedding}] entering training loop\", flush=True)\n \n elif embedding in [\"bert-base\", \"bert-mix\", \"bert-last-four\", \"roberta-mix\"]:\n print(\"[\" + embedding + \"]\" + \" initializing embeddings+dataset\", flush=True)\n\n # Initialize datasets\n if embedding == \"roberta-mix\":\n train_dataset = RobertaTwitterDataset(csv_file=os.path.join(dataset_path, train_file_path),\n sentence_length_cut=sentence_length_cut)\n val_dataset = RobertaTwitterDataset(csv_file=os.path.join(dataset_path, val_file_path),\n sentence_length_cut=sentence_length_cut)\n else: # using bert class embedding\n train_dataset = BertTwitterDataset(csv_file=os.path.join(dataset_path, train_file_path), sentence_length_cut=sentence_length_cut)\n val_dataset = BertTwitterDataset(csv_file=os.path.join(dataset_path, val_file_path), sentence_length_cut=sentence_length_cut)\n\n # Initialize data loaders\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=workers, shuffle=False) # should shuffle really be false? copying from the notebook\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, num_workers=workers, shuffle=False)\n\n # Tell training loop how to prepare embeddings\n prepare_embeddings_fn = eval(\"prepare_embeddings_\" + embedding.replace(\"-\", \"_\"))\n print(\"[\" + embedding + \"]\" + \" entering training loop\", flush=True)\n\n else:\n raise NotImplementedError(\"Unsupported embedding: \" + embedding)\n\n # set up tensorboard writer\n writer = SummaryWriter(save_checkpoint_path)\n\n # Initialize model or load checkpoint\n if checkpoint != \"none\":\n checkpoint = torch.load(checkpoint)\n model = checkpoint['model']\n optimizer = checkpoint['optimizer']\n start_epoch = checkpoint['epoch'] + 1\n print('\\nLoaded checkpoint from epoch %d.\\n' % (start_epoch - 1), flush=True)\n else:\n model = model_type(n_classes=n_classes, model_config=config.model)\n print(\"Instantiated new model\", flush=True)\n optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay)\n \n if \"elmo\" in embedding: # can't save elmo embedder somehow, so have to use it outside the model\n print(\"Using elmo, overriding model embedder\", flush=True)\n model.embedder = None\n embedder = initialize_embeddings(\"elmo\", device, fine_tune_embeddings=False)\n elif hasattr(model, \"embedder\"):\n print(\"Model has built-in embedder, using it\", flush=True)\n embedder = model.embedder # Use embedder inside the model, this allows saving it (e.g. 
in case it is fine-tuned)\n else:\n # Use embedder from outside the model\n print(\"Using user-defined embedder\", flush=True)\n\n # Loss functions\n criterion = nn.CrossEntropyLoss()\n\n # Move to device\n model = model.to(device)\n criterion = criterion.to(device)\n\n # Epochs\n train_start_time = time.time()\n for epoch in range(start_epoch, epochs):\n epoch_start = time.time()\n # One epoch's training\n train(train_loader=train_loader,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n epoch=epoch,\n device=device,\n config=config,\n tf_writer=writer,\n prepare_embeddings_fn=prepare_embeddings_fn,\n embedder=embedder)\n\n # Decay learning rate every epoch\n adjust_learning_rate(optimizer, lr_decay)\n\n # Save checkpoint and perform validation\n if epoch % save_checkpoint_freq_epoch == 0:\n save_checkpoint(epoch, model, optimizer, save_checkpoint_path)\n if not train_without_val:\n test(val_loader, model, criterion, epoch, device, config, writer, prepare_embeddings_fn, embedder)\n epoch_end = time.time()\n print(\"Per epoch time = {}\".format(epoch_end-epoch_start), flush=True)\n\n train_end_time = time.time()\n print(\"Total training time: {} minutes\".format((train_end_time-train_start_time)/60.0), flush=True)\n\n print(\"Final evaluation:\", flush=True)\n test(val_loader, model, criterion, epoch, device, config, writer, prepare_embeddings_fn, embedder)\n writer.close()\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, device, config, tf_writer, prepare_embeddings_fn, embedder):\n \"\"\"\n Performs one epoch's training.\n\n :param train_loader: DataLoader for training data\n :param model: model\n :param criterion: cross entropy loss layer\n :param optimizer: optimizer\n :param epoch: epoch number\n :param device: device on which to perform training\n :param config: config dict read in from JSON\n :param tf_writer: TensorBoard writer for logging\n :param prepare_embeddings_fn: function to perform embedding that should take the following arguments (1) data as returned by data loader,\n (2) embedder to use for performing embedding, (3) device on which to perform embedding, (4) params (config is passed here)\n :param embedder: embedder to use for embedding, passed to prepare_embeddings_fn\n \"\"\"\n model.train() # training mode enables dropout\n\n batch_time = AverageMeter() # forward prop. + back prop. 
time per batch\n data_time = AverageMeter() # data loading time per batch\n losses = AverageMeter() # cross entropy loss\n accs = AverageMeter() # accuracies\n\n start = time.time()\n # Batches\n for i, data in enumerate(train_loader):\n\n # Perform embedding + padding if necessary\n embeddings, labels = prepare_embeddings_fn(data, embedder, device, config)\n data_time.update(time.time() - start)\n\n # Forward prop.\n output = model(embeddings)\n\n # Regularization on embedding weights -- not all models support this\n if config.model.use_regularization == \"none\":\n loss = criterion(output[\"logits\"].to(device), labels)\n elif config.model.use_regularization == \"l1\":\n # Regularization on embedding weights\n emb_weights_norm = torch.norm(model.emb_weights, p=1)\n # Loss\n loss = criterion(output[\"logits\"].to(device), labels) + config.model.regularization_lambda * emb_weights_norm # scalar\n else:\n raise NotImplementedError(\"Regularization other than 'none' or 'l1' not supported\")\n\n # Back prop.\n optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n if config.training.grad_clip != \"none\":\n clip_gradient(optimizer, config.grad_clip)\n\n # Update\n optimizer.step()\n\n # Find accuracy\n _, predictions = output[\"logits\"].max(dim=1) # (n_documents)\n correct_predictions = torch.eq(predictions, labels).sum().item()\n accuracy = correct_predictions / labels.size(0)\n\n # Keep track of metrics\n losses.update(loss.item(), labels.size(0))\n batch_time.update(time.time() - start)\n accs.update(accuracy, labels.size(0))\n\n start = time.time()\n\n # Print training status\n if i % config.training.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(epoch, i, len(train_loader),\n batch_time=batch_time,\n data_time=data_time, loss=losses,\n acc=accs), flush=True)\n\n # Delete embeddings from flair, somehow it runs out of memory otherwise\n try:\n for sentence in data:\n sentence.clear_embeddings()\n except:\n pass\n\n # Log the running loss, accuracy\n tf_writer.add_scalar('training loss (avg. epoch)', losses.avg, epoch)\n tf_writer.add_scalar('training accuracy (avg. epoch)', accs.avg, epoch)\n tf_writer.add_scalar('learning rate', optimizer.param_groups[0]['lr'], epoch)\n\n\n@click.command()\n@click.option('-c', '--config', required=True, type=str, help=\"JSON file specifying model and training parameters\")\n@click.option('-s', '--seed', default=0, type=int, help=\"Random seed to use for producing reproducible results\")\n@click.option('-e', '--embedding', required=True, type=str, help=\"Embedding to use for training\")\n\ndef main_cli(config, seed, embedding):\n main(config, seed, embedding)\n\n\nif __name__ == '__main__':\n main_cli()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.norm", "numpy.random.seed", "torch.load", "torch.eq", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available" ] ]
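train.py above seeds Python, NumPy, and PyTorch before anything else is imported so that runs are reproducible. The same knobs folded into a helper, as a sketch (the function name is not from the original):

import os
import random

import numpy as np
import torch

def set_seed(seed: int) -> None:
    # The reproducibility settings train.py derives from --seed.
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(0)
print(torch.rand(2))  # identical on every run with the same seed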
JunjieHu/ReCo-RL
[ "4406f6eec2d6bee4aa12c8b22494f2d167c570c1" ]
[ "src/StackedRNN.py" ]
[ "\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef dot_prod_attention(h_t, src_encoding, src_encoding_att_linear, mask=None):\n \"\"\"\n :param h_t: (batch_size, hidden_size)\n :param src_encoding: (batch_size, src_sent_len, hidden_size * 2)\n :param src_encoding_att_linear: (batch_size, src_sent_len, hidden_size)\n :param mask: (batch_size, src_sent_len)\n \"\"\"\n # (batch_size, src_sent_len)\n att_weight = torch.bmm(src_encoding_att_linear, h_t.unsqueeze(2)).squeeze(2)\n if mask:\n att_weight.data.masked_fill_(mask, -float('inf'))\n att_weight = F.softmax(att_weight)\n\n att_view = (att_weight.size(0), 1, att_weight.size(1))\n # (batch_size, hidden_size)\n ctx_vec = torch.bmm(att_weight.view(*att_view), src_encoding).squeeze(1)\n\n return ctx_vec, att_weight\n\n\nclass StackedAttentionLSTM(nn.Module):\n \"\"\"\n stacked LSTM.\n Needed for the decoder, because we do input feeding.\n \"\"\"\n def __init__(self, num_layers, input_size, rnn_size, dropout, ctx_vec_size=None):\n super(StackedAttentionLSTM, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n # Map the concatenated [h_t; ctx_t] vector to the rnn_size vector \n self.ctx_vec_size = rnn_size * 2 if ctx_vec_size is None else ctx_vec_size\n self.att_vec_linear = nn.Linear(rnn_size + self.ctx_vec_size, rnn_size, bias=False)\n\n for i in range(num_layers):\n self.layers.append(nn.LSTMCell(input_size, rnn_size))\n input_size = rnn_size\n #print('input size', input_size, rnn_size)\n\n def forward(self, input, hidden, src_encoding, src_encoding_att_linear):\n \"\"\"\n :param input: (batch_size, input_size)\n :param hidden : (num_layer, batch_size, hidden_size)\n :param src_encoding: (batch_size, src_len, ctx_vec_size)\n :param src_encoding_att_linear: (batch_size, src_len, hidden_size)\n return: input: (batch_size, hidden_size)\n h_1, c_1: (num_layers, batch_size, hidden_size)\n \"\"\"\n h_0, c_0 = hidden\n #print('layer 0', len(hidden), h_0.size(), c_0.size())\n #print('input', input.size())\n #print('self.layers[0]', self.layers[0])\n h_1_0, c_1_0 = self.layers[0](input, (h_0[0], c_0[0]))\n h_1, c_1 = [h_1_0], [c_1_0]\n # Only use the first decoding outputs to do attention and copy the context vectors\n # to the subsequent decoding layers\n ctx_t, alpha_t = dot_prod_attention(h_1_0, src_encoding, src_encoding_att_linear)\n input = self.att_vec_linear(torch.cat([h_1_0, ctx_t], 1)) # (batch, hidden_size)\n\n for i in range(1, len(self.layers)):\n layer = self.layers[i]\n h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))\n input = self.att_vec_linear(torch.cat([h_1_i, ctx_t], 1))\n if i + 1 != self.num_layers:\n input = self.dropout(input)\n h_1 += [h_1_i]\n c_1 += [c_1_i]\n input = F.tanh(input)\n h_1 = torch.stack(h_1)\n c_1 = torch.stack(c_1)\n h_1 = self.dropout(h_1)\n return input, (h_1, c_1)\n\n\nclass StackedAttentionGRU(nn.Module):\n\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedAttentionGRU, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n self.layers.append(nn.GRUCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input, hidden, src_encoding, src_encoding_att_linear):\n \"\"\"\n :param input: (batch_size, input_size)\n :param hidden : (num_layer, batch_size, hidden_size)\n :param src_encoding: (batch_size, src_len, hidden_size * 2)\n :param src_encoding_att_linear: (batch_size, 
src_len, hidden_size)\n return: input: (batch_size, hidden_size)\n h_1, c_1: (num_layers, batch_size, hidden_size)\n \"\"\"\n h_0, c_0 = hidden\n h_1_0, c_1_0 = self.layers[0](input, (h_0[0], c_0[0]))\n h_1, c_1 = [h_1_0], [c_1_0]\n # Only use the first decoding outputs to do attention and copy the context vectors\n # to the subsequent decoding layers\n ctx_t, alpha_t = dot_prod_attention(h_1_0, src_encoding, src_encoding_att_linear)\n input = self.att_vec_linear(torch.cat([h_1_0, ctx_t], 1)) # (batch, hidden_size)\n\n for i in range(1, len(self.layers)):\n layer = self.layers[i]\n h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))\n input = self.att_vec_linear(torch.cat([h_1_i, ctx_t], 1))\n if i + 1 != self.num_layers:\n input = self.dropout(input)\n h_1 += [h_1_i]\n c_1 += [c_1_i]\n input = F.tanh(input)\n h_1 = torch.stack(h_1)\n c_1 = torch.stack(c_1)\n h_1 = self.dropout(h_1)\n return input, (h_1, c_1)\n\n\nclass StackedLSTM(nn.Module):\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedLSTM, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n self.layers.append(nn.LSTMCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input, hidden):\n h_0, c_0 = hidden\n h_1, c_1 = [], []\n for i, layer in enumerate(self.layers):\n h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))\n input = h_1_i\n if i + 1 != self.num_layers:\n input = self.dropout(input)\n h_1 += [h_1_i]\n c_1 += [c_1_i]\n\n h_1 = torch.stack(h_1)\n c_1 = torch.stack(c_1)\n\n return input, (h_1, c_1)\n\n\nclass StackedGRU(nn.Module):\n\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedGRU, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n self.layers.append(nn.GRUCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input, hidden):\n h_1 = []\n for i, layer in enumerate(self.layers):\n h_1_i = layer(input, hidden[0][i])\n input = h_1_i\n if i + 1 != self.num_layers:\n input = self.dropout(input)\n h_1 += [h_1_i]\n\n h_1 = torch.stack(h_1)\n return input, (h_1,)\n\n\nclass AttentionLSTM(nn.Module):\n \"\"\"\n stacked LSTM.\n Needed for the decoder, because we do input feeding.\n \"\"\"\n def __init__(self, input_size, rnn_size, dropout, ctx_vec_size=None):\n super(StackedAttentionLSTM, self).__init__()\n self.dropout = nn.Dropout(dropout)\n\n # Map the concatenated [h_t; ctx_t] vector to the rnn_size vector \n self.ctx_vec_size = rnn_size * 2 if ctx_vec_size is None else ctx_vec_size\n self.att_vec_linear = nn.Linear(rnn_size + self.ctx_vec_size, rnn_size, bias=False)\n\n self.lstm = nn.LSTMCell(input_size, rnn_size)\n # for i in range(num_layers):\n # self.layers.append(nn.LSTMCell(input_size, rnn_size))\n # input_size = rnn_size\n #print('input size', input_size, rnn_size)\n\n def forward(self, input, hidden, src_encoding, src_encoding_att_linear):\n \"\"\"\n :param input: (batch_size, input_size)\n :param hidden : (batch_size, hidden_size)\n :param src_encoding: (batch_size, src_len, ctx_vec_size)\n :param src_encoding_att_linear: (batch_size, src_len, hidden_size)\n return: input: (batch_size, hidden_size)\n h_1, c_1: (batch_size, hidden_size)\n \"\"\"\n # h_0, c_0 = hidden\n h_1, c_1 = self.lstm(input, hidden)\n # h_1, c_1 = h_1_0, [c_1_0]\n ctx_t, alpha_t = dot_prod_attention(h_1, src_encoding, src_encoding_att_linear)\n input = 
self.att_vec_linear(torch.cat([h_1, ctx_t], 1))\n input = F.tanh(input)\n return input, (h_1, c_1) \n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.cat", "torch.nn.ModuleList", "torch.nn.LSTMCell", "torch.nn.Linear", "torch.stack", "torch.nn.GRUCell", "torch.nn.functional.tanh" ] ]
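dot_prod_attention in the record above scores each source position with one batched matrix multiply and pools the encodings with a second. A shape-annotated sketch of the same computation; note it passes an explicit dim to softmax and would test a mask with `is not None`, two places where the original is fragile:

import torch
import torch.nn.functional as F

batch, src_len, hidden = 2, 5, 8
h_t = torch.randn(batch, hidden)
src_encoding = torch.randn(batch, src_len, hidden * 2)
src_encoding_att_linear = torch.randn(batch, src_len, hidden)

# (batch, src_len): one attention score per source position.
att_weight = torch.bmm(src_encoding_att_linear, h_t.unsqueeze(2)).squeeze(2)
att_weight = F.softmax(att_weight, dim=-1)
# (batch, hidden * 2): attention-weighted sum of the source encodings.
ctx_vec = torch.bmm(att_weight.unsqueeze(1), src_encoding).squeeze(1)
print(ctx_vec.shape)  # torch.Size([2, 16])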
csukuangfj/transducer-loss-benchmarking
[ "8373897fd86a64425baa198247cde439f10b1bd2" ]
[ "generate_shape_info.py" ]
[ "#!/usr/bin/env python3\n#\n# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)\n#\n# See ../LICENSE for clarification regarding multiple authors\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis script takes the following two files as input:\n\n - cuts_train-clean-100.json.gz\n - bpe.model\n\nto generate the shape information for benchmarking.\n\nThe above two files can be generated by\nhttps://github.com/k2-fsa/icefall/blob/master/egs/librispeech/ASR/prepare.sh\n\nThe generated shape information is used to set the shape of randomly generated\ndata during benchmarking so that the benchmarking results look more realistic.\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\n\nimport sentencepiece as spm\nimport torch\nfrom lhotse import load_manifest\n\nDEFAULT_MANIFEST = \"/ceph-fj/fangjun/open-source-2/icefall-multi-datasets/egs/librispeech/ASR/data/fbank/cuts_train-clean-100.json.gz\" # noqa\nDEFAULT_BPE_MODEL_FILE = \"/ceph-fj/fangjun/open-source-2/icefall-multi-datasets/egs/librispeech/ASR/data/lang_bpe_500/bpe.model\" # noqa\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\n \"--manifest\",\n type=Path,\n default=DEFAULT_MANIFEST,\n help=\"\"\"Path to `cuts_train-clean-100.json.gz`.\n It can be generated using\n https://github.com/k2-fsa/icefall/blob/master/egs/librispeech/ASR/prepare.sh\n \"\"\",\n )\n\n parser.add_argument(\n \"--bpe-model\",\n type=Path,\n default=DEFAULT_BPE_MODEL_FILE,\n help=\"\"\"Path to the BPE model.\n It can be generated using\n https://github.com/k2-fsa/icefall/blob/master/egs/librispeech/ASR/prepare.sh\n \"\"\",\n )\n\n return parser\n\n\ndef main():\n args = get_parser().parse_args()\n assert args.manifest.is_file(), f\"{args.manifest} does not exist\"\n assert args.bpe_model.is_file(), f\"{args.bpe_model} does not exist\"\n\n sp = spm.SentencePieceProcessor()\n sp.load(str(args.bpe_model))\n\n cuts = load_manifest(args.manifest)\n\n TU_list = []\n\n for i, c in enumerate(cuts):\n tokens = sp.encode(c.supervisions[0].text)\n num_frames = c.features.num_frames\n U = len(tokens)\n\n # We assume the encoder has a subsampling factor of 4\n T = ((num_frames - 1) // 2 - 1) // 2\n TU_list.append([T, U])\n # TU_tensor has two columns.\n # column 0 - T\n # column 1 - U\n TU_tensor = torch.tensor(TU_list, dtype=torch.int32)\n print(\"TU_tensor.shape\", TU_tensor.shape)\n torch.save(TU_tensor, \"./shape_info.pt\")\n print(\"Generated ./shape_info.pt successfully\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.save", "torch.tensor" ] ]
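generate_shape_info.py above derives T from the feature frame count assuming a subsampling factor of 4, i.e. two successive stride-2 reductions. A tiny check of that formula:

def subsampled_frames(num_frames: int) -> int:
    # Two halvings, as in T = ((num_frames - 1) // 2 - 1) // 2 above.
    return ((num_frames - 1) // 2 - 1) // 2

for n in (100, 101, 103):
    print(n, "->", subsampled_frames(n))  # 100 -> 24, 101 -> 24, 103 -> 25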
sanskar107/NeRD-Neural-Reflectance-Decomposition
[ "f4408645cdc49033acb9eaf0f6a58bacc549a774" ]
[ "train_nerd.py" ]
[ "import os\nfrom typing import Callable, List, Dict\n\nimport imageio\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nimport dataflow.nerd as data\nimport nn_utils.math_utils as math_utils\nimport utils.training_setup_utils as train_utils\nfrom models.nerd_net import NerdModel\nfrom nn_utils.nerf_layers import get_full_image_eval_grid\nfrom nn_utils.tensorboard_visualization import hdr_to_tb, horizontal_image_log, to_8b\n\n\ndef add_args(parser):\n parser.add_argument(\n \"--log_step\",\n type=int,\n default=100,\n help=\"frequency of tensorboard metric logging\",\n )\n parser.add_argument(\n \"--weights_epoch\", type=int, default=10, help=\"save weights every x epochs\"\n )\n parser.add_argument(\n \"--validation_epoch\",\n type=int,\n default=5,\n help=\"render validation every x epochs\",\n )\n parser.add_argument(\n \"--testset_epoch\",\n type=int,\n default=300,\n help=\"render testset every x epochs\",\n )\n parser.add_argument(\n \"--video_epoch\",\n type=int,\n default=300,\n help=\"render video every x epochs\",\n )\n\n parser.add_argument(\n \"--lrate_decay\",\n type=int,\n default=250,\n help=\"exponential learning rate decay (in 1000s)\",\n )\n\n parser.add_argument(\n \"--envmap_path\",\n type=str,\n default=None,\n help=\"envmap path for relighting\"\n )\n\n parser.add_argument(\"--render_only\", action=\"store_true\")\n\n return parser\n\n\ndef parse_args():\n parser = add_args(\n data.add_args(\n NerdModel.add_args(\n train_utils.setup_parser(),\n ),\n ),\n )\n return train_utils.parse_args_file_without_nones(parser)\n\ndef get_envmap(path):\n import cv2\n img = cv2.cvtColor(\n cv2.imread(path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB\n ).astype(np.float32)\n img = cv2.resize(img, (256, 128), cv2.INTER_AREA)\n cv2.imwrite('spotlight_gt.png', img.astype(np.uint8))\n\n if img.min() < 0:\n img = img + img.min()\n\n return tf.convert_to_tensor(np.clip(np.nan_to_num(img, nan=0, posinf=np.max(img), neginf=0), 0, None))\n\ndef eval_datasets(\n strategy,\n df,\n nerd,\n hwf,\n near,\n far,\n sgs_optimizer,\n steps: int,\n chunk_size: int,\n is_single_env: bool,\n envmap_path=None,\n):\n # Build lists to save all individual images\n gt_rgbs = []\n gt_masks = []\n\n predictions = {}\n to_extract_coarse = [(\"rgb\", 3), (\"acc_alpha\", 1)]\n to_extract_fine = [\n (\"rgb\", 3),\n (\"acc_alpha\", 1),\n (\"basecolor\", 3),\n (\"metallic\", 1),\n (\"roughness\", 1),\n (\"normal\", 3),\n (\"depth\", 1),\n ]\n\n illumination_context_override = None\n if envmap_path:\n illumination_context_override = np.load(envmap_path)\n print(\"new context = \", illumination_context_override)\n print(\"new context = \", illumination_context_override.shape)\n illumination_context_override = tf.convert_to_tensor(illumination_context_override)\n # envmap = get_envmap(envmap_path)\n # illumination_context_override = get_illum_override_context(envmap)\n\n H, W, _ = hwf\n\n # Go over validation dataset\n with strategy.scope():\n for dp in tqdm(df):\n img_idx, rays_o, rays_d, pose, mask, ev100, _, _, target = dp\n\n gt_rgbs.append(tf.reshape(target, (H, W, 3)))\n gt_masks.append(tf.reshape(mask, (H, W, 1)))\n\n # Optimize SGs first - only if we have varying illumination\n if not is_single_env:\n wb_input_value = tf.convert_to_tensor(\n [[0.8, 0.8, 0.8]], dtype=tf.float32\n )\n strategy.run(\n nerd.sgs_store.apply_whitebalance_to_idx,\n (img_idx, wb_input_value, rays_o, ev100),\n ) # Start by actually ensuring the value range fits\n sgs_loss = nerd.illumination_steps(\n rays_o,\n 
rays_d,\n pose,\n near,\n far,\n img_idx,\n ev100,\n sgs_optimizer,\n target,\n steps,\n chunk_size,\n strategy,\n )\n print(\n \"Illumination estimation done. Remaining error:\", sgs_loss.numpy()\n )\n\n # Render image.\n coarse_result, fine_result = nerd.distributed_call(\n strategy,\n chunk_size,\n rays_o,\n rays_d,\n pose,\n near,\n far,\n img_idx,\n ev100,\n training=False,\n illumination_context_override=illumination_context_override,\n high_quality=True,\n )\n\n # Extract values and reshape them to the image dimensions\n new_shape: Callable[[int], List[int]] = lambda x: [H, W, x]\n\n for name, channels in to_extract_coarse:\n predictions[\"coarse_%s\" % name] = predictions.get(\n \"coarse_%s\" % name, []\n ) + [tf.reshape(coarse_result[name], new_shape(channels))]\n\n for name, channels in to_extract_fine:\n if name in fine_result:\n predictions[\"fine_%s\" % name] = predictions.get(\n \"fine_%s\" % name, []\n ) + [tf.reshape(fine_result[name], new_shape(channels))]\n\n # Also render the environment illumination\n img_idx = img_idx[:1] # only first needed. Others are duplications\n if illumination_context_override is None:\n sgs = nerd.sgs_store(img_idx)\n else:\n sgs = illumination_context_override\n\n env_map = nerd.renderer.visualize_fit((64, 128), sgs)\n\n predictions[\"fine_env_map\"] = predictions.get(\"fine_env_map\", []) + [\n env_map\n ]\n\n # Stack all images in dataset in batch dimension\n ret = {}\n ret[\"gt_rgb\"] = tf.stack(gt_rgbs, 0)\n ret[\"gt_mask\"] = tf.stack(gt_masks, 0)\n\n for pname, vals in predictions.items():\n ret[pname] = tf.stack(vals, 0)\n\n # Calculate losses\n fine_ssim = tf.reduce_mean(\n tf.image.ssim(\n math_utils.white_background_compose(ret[\"gt_rgb\"], ret[\"gt_mask\"]),\n math_utils.white_background_compose(ret[\"fine_rgb\"], ret[\"fine_acc_alpha\"]),\n max_val=1.0,\n )\n )\n fine_psnr = tf.reduce_mean(\n tf.image.psnr(\n math_utils.white_background_compose(ret[\"gt_rgb\"], ret[\"gt_mask\"]),\n math_utils.white_background_compose(ret[\"fine_rgb\"], ret[\"fine_acc_alpha\"]),\n max_val=1.0,\n )\n )\n\n return ret, fine_ssim, fine_psnr\n\n\ndef run_validation(\n strategy,\n val_df,\n nerd,\n hwf,\n near,\n far,\n sgs_optimizer,\n chunk_size: int,\n is_single_env: bool,\n):\n ret, fine_ssim, fine_psnr = eval_datasets(\n strategy,\n val_df,\n nerd,\n hwf,\n near,\n far,\n sgs_optimizer,\n 20,\n chunk_size,\n is_single_env,\n )\n\n # Log validation dataset\n horizontal_image_log(\"val/coarse_rgb\", ret[\"gt_rgb\"], ret[\"coarse_rgb\"])\n horizontal_image_log(\"val/fine_rgb\", ret[\"gt_rgb\"], ret[\"fine_rgb\"])\n\n horizontal_image_log(\"val/coarse_alpha\", ret[\"gt_mask\"], ret[\"coarse_acc_alpha\"])\n horizontal_image_log(\"val/fine_alpha\", ret[\"gt_mask\"], ret[\"fine_acc_alpha\"])\n\n for n, t in ret.items():\n filters = [\"rgb\", \"acc_alpha\"]\n if \"fine\" in n and not any(f in n for f in filters):\n if \"normal\" in n:\n tf.summary.image(\"val/\" + n, t * 0.5 + 0.5)\n elif \"env_map\" in n:\n hdr_to_tb(\"val/env_map\", t)\n else:\n tf.summary.image(\"val/\" + n, t)\n\n tf.summary.scalar(\"val/ssim\", fine_ssim)\n tf.summary.scalar(\"val/psnr\", fine_psnr)\n\n\ndef main(args):\n # Setup directories, logging etc.\n with train_utils.SetupDirectory(\n args,\n copy_files=not args.render_only,\n main_script=__file__,\n copy_data=\"data/nerd\",\n ):\n strategy = (\n tf.distribute.get_strategy()\n if train_utils.get_num_gpus() <= 1\n else tf.distribute.MirroredStrategy()\n )\n\n # Setup dataflow\n (\n hwf,\n near,\n far,\n render_poses,\n 
num_images,\n _,\n train_df,\n val_df,\n test_df,\n ) = data.create_dataflow(args)\n\n print(f\"Rendering between near {near} and far {far}\")\n\n # Optimizer and models\n with strategy.scope():\n # Setup models\n nerd = NerdModel(num_images, args)\n lrate = train_utils.adjust_learning_rate_to_replica(args)\n if args.lrate_decay > 0:\n lrate = tf.keras.optimizers.schedules.ExponentialDecay(\n lrate, decay_steps=args.lrate_decay * 1000, decay_rate=0.1\n )\n optimizer = tf.keras.optimizers.Adam(lrate)\n\n sgs_optimizer = tf.keras.optimizers.Adam(1e-3)\n\n # Restore if possible\n start_step = nerd.restore()\n tf.summary.experimental.set_step(start_step)\n\n train_dist_df = strategy.experimental_distribute_dataset(train_df)\n\n start_epoch = start_step // len(train_df)\n\n print(\n \"Starting training in epoch {} at step {}\".format(start_epoch, start_step)\n )\n\n # Will be 1 magnitude lower after advanced_loss_done steps\n advanced_loss_lambda = tf.Variable(1.0, dtype=tf.float32)\n color_loss_lambda = tf.Variable(1.0, dtype=tf.float32)\n # Run the actual optimization for x epochs\n\n for epoch in range(start_epoch + 1, args.epochs + (2 if args.render_only else 1)):\n pbar = tf.keras.utils.Progbar(len(train_df))\n\n # Iterate over the train dataset\n if not args.render_only:\n with strategy.scope():\n for dp in train_dist_df:\n (\n img_idx,\n rays_o,\n rays_d,\n pose,\n mask,\n ev100,\n wb,\n wb_ref_image,\n target,\n ) = dp\n\n advanced_loss_lambda.assign(\n 1 * 0.9 ** (tf.summary.experimental.get_step() / 5000)\n ) # Starts with 1 goes to 0\n color_loss_lambda.assign(\n 1 * 0.75 ** (tf.summary.experimental.get_step() / 1500)\n ) # Starts with 1 goes to 0\n\n # Execute train the train step\n (\n fine_payload,\n _,\n loss_per_replica,\n coarse_losses_per_replica,\n fine_losses_per_replica,\n ) = strategy.run(\n nerd.train_step,\n (\n rays_o,\n rays_d,\n pose,\n near,\n far,\n img_idx,\n ev100,\n wb_ref_image,\n wb,\n optimizer,\n target,\n mask,\n advanced_loss_lambda,\n color_loss_lambda,\n (tf.summary.experimental.get_step() < 1000),\n ),\n )\n\n loss = strategy.reduce(\n tf.distribute.ReduceOp.SUM, loss_per_replica, axis=None\n )\n coarse_losses = {}\n for k, v in coarse_losses_per_replica.items():\n coarse_losses[k] = strategy.reduce(\n tf.distribute.ReduceOp.SUM, v, axis=None\n )\n fine_losses = {}\n for k, v in fine_losses_per_replica.items():\n fine_losses[k] = strategy.reduce(\n tf.distribute.ReduceOp.SUM, v, axis=None\n )\n\n losses_for_pbar = [\n (\"loss\", loss.numpy()),\n (\"coarse_loss\", coarse_losses[\"loss\"].numpy()),\n (\"fine_loss\", fine_losses[\"loss\"].numpy()),\n (\"fine_image_loss\", fine_losses[\"image_loss\"].numpy()),\n ]\n\n pbar.add(\n 1,\n values=losses_for_pbar,\n )\n\n # Log to tensorboard\n with tf.summary.record_if(\n tf.summary.experimental.get_step() % args.log_step == 0\n ):\n tf.summary.scalar(\"loss\", loss)\n for k, v in coarse_losses.items():\n tf.summary.scalar(\"coarse_%s\" % k, v)\n for k, v in fine_losses.items():\n tf.summary.scalar(\"fine_%s\" % k, v)\n tf.summary.scalar(\n \"lambda_advanced_loss\", advanced_loss_lambda\n )\n\n # tf.summary.histogram(\n # \"brdf_embedding\", fine_payload[\"brdf_embedding\"]\n # )\n\n tf.summary.experimental.set_step(\n tf.summary.experimental.get_step() + 1\n )\n\n # Show last dp and render to tensorboard\n if train_utils.get_num_gpus() > 1:\n dp = [d.values[0] for d in dp]\n\n render_test_example(dp, hwf, nerd, near, far, strategy)\n\n # Save when a weight epoch arrives\n if epoch % args.weights_epoch == 0:\n 
nerd.save(\n tf.summary.experimental.get_step()\n ) # Step was already incremented\n\n # Render validation if a validation epoch arrives\n if epoch % args.validation_epoch == 0:\n print(\"RENDERING VALIDATION...\")\n # Build lists to save all individual images\n run_validation(\n strategy,\n val_df,\n nerd,\n hwf,\n near,\n far,\n sgs_optimizer,\n args.batch_size,\n args.single_env,\n )\n\n # Render test set when a test epoch arrives\n if epoch % args.testset_epoch == 0 or args.render_only:\n print(\"RENDERING TESTSET...\")\n ret, fine_ssim, fine_psnr = eval_datasets(\n strategy,\n test_df,\n nerd,\n hwf,\n near,\n far,\n sgs_optimizer,\n 100,\n args.batch_size,\n args.single_env,\n args.envmap_path,\n )\n\n if not args.single_env:\n nerd.save(\n tf.summary.experimental.get_step() + 1\n ) # Save the illumination optimization\n\n if args.envmap_path is None:\n testimgdir = os.path.join(\n args.basedir,\n args.expname,\n \"test_imgs_{:06d}\".format(tf.summary.experimental.get_step() - 1),\n )\n else:\n testimgdir = os.path.join(\n args.basedir,\n args.expname,\n args.envmap_path.split('/')[-1].replace('.npy', '_') + \"test_imgs_{:06d}\".format(tf.summary.experimental.get_step() - 1),\n )\n\n # testimgdir = os.path.join(\n # args.basedir,\n # args.expname,\n # \"test_imgs_{:06d}\".format(tf.summary.experimental.get_step() - 1),\n # )\n\n alpha = ret[\"fine_acc_alpha\"]\n print(\"Mean PSNR:\", fine_psnr, \"Mean SSIM:\", fine_ssim)\n os.makedirs(testimgdir, exist_ok=True)\n for n, t in ret.items():\n for b in range(t.shape[0]):\n to_save = t[b]\n if \"normal\" in n:\n to_save = (t[b] * 0.5 + 0.5) * alpha[b] + (1 - alpha[b])\n\n if \"env_map\" in n:\n imageio.imwrite(\n os.path.join(testimgdir, \"{:d}_{}.png\".format(b, n)),\n to_8b(\n math_utils.linear_to_srgb(to_save / (1 + to_save))\n ).numpy(),\n )\n imageio.imwrite(\n os.path.join(testimgdir, \"{:d}_{}.exr\".format(b, n)),\n to_save.numpy(),\n )\n elif \"normal\" in n or \"depth\" in n:\n imageio.imwrite(\n os.path.join(testimgdir, \"{:d}_{}.exr\".format(b, n)),\n to_save.numpy(),\n )\n if \"normal\" in n:\n imageio.imwrite(\n os.path.join(\n testimgdir, \"{:d}_{}.png\".format(b, n)\n ),\n to_8b(to_save).numpy(),\n )\n else:\n imageio.imwrite(\n os.path.join(testimgdir, \"{:d}_{}.png\".format(b, n)),\n to_8b(to_save).numpy(),\n )\n\n # Render video when a video epoch arrives\n if epoch % args.video_epoch == 0 or args.render_only:\n print(\"RENDERING VIDEO...\")\n video_dir = os.path.join(\n args.basedir,\n args.expname,\n \"video_{:06d}\".format(tf.summary.experimental.get_step()),\n )\n video_img_dir = os.path.join(\n video_dir,\n \"images\",\n )\n os.makedirs(video_img_dir, exist_ok=True)\n\n render_video(\n hwf,\n test_df,\n render_poses,\n strategy,\n near,\n far,\n nerd,\n args,\n video_img_dir,\n video_dir,\n )\n\n if args.render_only:\n return\n\n\ndef render_video(\n hwf,\n test_df,\n render_poses,\n strategy,\n near,\n far,\n nerd,\n args,\n video_img_dir,\n video_dir,\n):\n return\n H, W, F = hwf\n fine_results = {}\n\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = (\n tf.data.experimental.AutoShardPolicy.DATA\n )\n\n # switch between illuminations\n num_illuminations = 6 + 2 # Start and end with same latent\n num_seconds = 6\n num_fps = 30\n\n for d in test_df: # Get the first illumination from test set\n img_idx, _, _, _, _, ev100_video, _, _, _ = d\n\n break\n\n pose_df = tf.data.Dataset.from_tensor_slices(render_poses[:, :3, :4])\n\n def render_pose(pose):\n rays_o, rays_d = 
get_full_image_eval_grid(H, W, F, tf.reshape(pose, (3, 4)))\n\n _, fine_result = nerd.distributed_call(\n strategy=strategy,\n chunk_size=args.batch_size,\n ray_origins=tf.reshape(rays_o, (-1, 3)),\n ray_directions=tf.reshape(rays_d, (-1, 3)),\n camera_pose=pose,\n near_bound=near,\n far_bound=far,\n sg_illumination_idx=img_idx,\n ev100=ev100_video,\n training=False,\n )\n\n return fine_result\n\n for pose_dp in tqdm(pose_df):\n cur_pose = pose_dp\n\n fine_result = render_pose(pose_dp)\n\n fine_result[\"rgb\"] = math_utils.white_background_compose(\n math_utils.linear_to_srgb(\n math_utils.uncharted2_filmic(fine_result[\"hdr_rgb\"])\n ),\n fine_result[\"acc_alpha\"][..., None]\n * (\n tf.where(\n fine_result[\"depth\"] < (far * 0.9),\n tf.ones_like(fine_result[\"depth\"]),\n tf.zeros_like(fine_result[\"depth\"]),\n )[..., None]\n ),\n )\n\n for k, v in fine_result.items():\n fine_results[k] = fine_results.get(k, []) + [v.numpy()]\n\n total_frames = num_seconds * num_fps\n frames_per_illumination = total_frames // (num_illuminations - 1)\n total_frames = frames_per_illumination * (\n num_illuminations - 1\n ) # Make sure that everything fits\n\n illuminations_path = \"data/nerd/video_sgs\"\n illuminations = [\n np.load(os.path.join(illuminations_path, f))[None, ...]\n for f in os.listdir(illuminations_path)\n ]\n\n # Always start and end with main video SGs\n main_video_sgs = nerd.sgs_store(img_idx).numpy()\n illuminations = [main_video_sgs] + illuminations + [main_video_sgs]\n\n frame_sgs = []\n frame_env_idx = 0\n imageio.plugins.freeimage.download()\n\n env_maps = []\n\n for sgs0, sgs1 in zip(illuminations, illuminations[1:]):\n for frame in range(frames_per_illumination):\n blend_alpha = frame / (frames_per_illumination - 1)\n cur_sgs = nerd.sgs_store.validate_sgs(\n sgs0 * (1 - blend_alpha) + sgs1 * blend_alpha\n ).numpy()\n\n frame_sgs.append(cur_sgs)\n\n env_map = nerd.renderer.visualize_fit((128, 256), cur_sgs)\n env_maps.append(env_map.numpy())\n\n imageio.imwrite(\n os.path.join(video_img_dir, \"env_{:06d}.exr\".format(frame_env_idx)),\n env_map.numpy()[0],\n )\n frame_env_idx += 1\n\n number_of_sgs_frames = len(frame_sgs)\n # pad frame latents if required\n div_remain = np.ceil(number_of_sgs_frames / train_utils.get_num_gpus())\n mod_remain = int((div_remain * train_utils.get_num_gpus()) - number_of_sgs_frames)\n for _ in range(mod_remain):\n frame_sgs.append(frame_sgs[-1]) # Clone last\n\n frame_sgs_pad = np.concatenate(frame_sgs, 0)\n print(\n frame_sgs_pad.shape,\n number_of_sgs_frames,\n mod_remain,\n train_utils.get_num_gpus(),\n )\n\n sgs_df = (\n tf.data.Dataset.from_tensor_slices(\n tf.convert_to_tensor(frame_sgs_pad, dtype=tf.float32)\n )\n .batch(train_utils.get_num_gpus())\n .with_options(options)\n )\n sgs_dist_df = strategy.experimental_distribute_dataset(sgs_df)\n\n # Render all sgs\n with strategy.scope():\n # Use last pose\n rays_o, rays_d = get_full_image_eval_grid(H, W, F, tf.reshape(cur_pose, (3, 4)))\n\n def render_sgs(rays_o, rays_d, fres, sgs):\n tf.debugging.assert_shapes(\n [\n (rays_o, (H, W, 3)),\n (rays_d, (H, W, 3)),\n (\n sgs,\n (1, 24, 7),\n ),\n ]\n )\n\n view_direction = math_utils.normalize(-1 * tf.reshape(rays_d, (-1, 3)))\n\n hdr_rgb = nerd.renderer(\n sg_illuminations=sgs,\n basecolor=fres[\"basecolor\"],\n metallic=fres[\"metallic\"],\n roughness=fres[\"roughness\"],\n normal=fres[\"normal\"],\n alpha=fres[\"acc_alpha\"],\n view_dir=view_direction,\n )\n\n rgb = math_utils.white_background_compose(\n 
math_utils.linear_to_srgb(math_utils.uncharted2_filmic(hdr_rgb)),\n fres[\"acc_alpha\"][..., None],\n )\n\n return rgb\n\n for sgs_dp in tqdm(sgs_dist_df):\n rgb_per_replica = strategy.run(\n render_sgs, (rays_o, rays_d, fine_result, sgs_dp)\n )\n rgb_result = strategy.gather(rgb_per_replica, 0).numpy()\n rgb_results = np.split(rgb_result, train_utils.get_num_gpus(), 0)\n fine_results[\"rgb\"] = fine_results.get(\"rgb\", []) + rgb_results\n\n # Everything is now a numpy\n fine_result_np = {\n k: np.stack(v, 0)[: render_poses.shape[0] + number_of_sgs_frames]\n for k, v in fine_results.items()\n }\n # reshape and extract\n rgb = fine_result_np[\"rgb\"]\n rgb = rgb.reshape((-1, H, W, 3))\n\n # save individual images and video\n imageio.mimwrite(\n os.path.join(video_dir, \"rgb.mp4\"),\n (rgb * 255).astype(np.uint8),\n fps=30,\n quality=8,\n )\n\n for i in range(rgb.shape[0]):\n imageio.imwrite(\n os.path.join(video_img_dir, \"rgb_{:06d}.png\".format(i)),\n (rgb[i] * 255).astype(np.uint8),\n )\n\n alpha = fine_result_np[\"acc_alpha\"].reshape((-1, H, W, 1))\n parameters = {}\n parameters[\"basecolor\"] = math_utils.linear_to_srgb(\n (fine_result_np[\"basecolor\"].reshape((-1, H, W, 3)) * alpha) + (1 - alpha)\n ).numpy()\n parameters[\"metallic\"] = math_utils.linear_to_srgb(\n (fine_result_np[\"metallic\"].reshape((-1, H, W, 1)) * alpha) + (1 - alpha)\n ).numpy()\n parameters[\"roughness\"] = (\n fine_result_np[\"roughness\"].reshape((-1, H, W, 1)) * alpha\n ) + (1 - alpha)\n parameters[\"normal\"] = math_utils.linear_to_srgb(\n ((fine_result_np[\"normal\"].reshape((-1, H, W, 3)) * 0.5 + 0.5) * alpha)\n + (1 - alpha)\n ).numpy()\n\n for n, imgs in parameters.items():\n imageio.mimwrite(\n os.path.join(video_dir, \"{}.mp4\".format(n)),\n (imgs * 255).astype(np.uint8),\n fps=30,\n quality=8,\n )\n\n for i in range(imgs.shape[0]):\n imageio.imwrite(\n os.path.join(video_img_dir, \"{}_{:06d}.png\".format(n, i)),\n (imgs[i] * 255).astype(np.uint8),\n )\n\n\ndef render_test_example(dp, hwf, nerd, near, far, strategy):\n (\n img_idx,\n _,\n _,\n pose,\n _,\n ev100,\n _,\n _,\n _,\n ) = dp\n\n H, W, F = hwf\n rays_o, rays_d = get_full_image_eval_grid(H, W, F, pose[0])\n\n coarse_result, fine_result = nerd.distributed_call(\n strategy=strategy,\n chunk_size=1024,\n ray_origins=tf.reshape(rays_o, (-1, 3)),\n ray_directions=tf.reshape(rays_d, (-1, 3)),\n camera_pose=pose,\n near_bound=near,\n far_bound=far,\n sg_illumination_idx=img_idx,\n ev100=ev100,\n training=False,\n high_quality=True,\n )\n\n horizontal_image_log(\n \"train/rgb\",\n tf.reshape(coarse_result[\"rgb\"], (1, H, W, 3)),\n tf.reshape(fine_result[\"rgb\"], (1, H, W, 3)),\n )\n horizontal_image_log(\n \"train/alpha\",\n tf.reshape(coarse_result[\"acc_alpha\"], (1, H, W, 1)),\n tf.reshape(fine_result[\"acc_alpha\"], (1, H, W, 1)),\n )\n\n for n, t in fine_result.items():\n filters = [\"rgb\", \"alpha\"]\n if not any(f in n for f in filters):\n if \"normal\" in n:\n tf.summary.image(\"train/\" + n, tf.reshape(t * 0.5 + 0.5, (1, H, W, 3)))\n elif \"brdf_embedding\" in n:\n min_val = tf.reduce_min(t)\n max_val = tf.reduce_max(t)\n t_scaled = (t - min_val) / (max_val - min_val)\n\n pad = 3 - t.shape[-1]\n t_pad = tf.concat(\n [\n t_scaled,\n math_utils.repeat(tf.zeros_like(t_scaled[..., :1]), pad, -1),\n ],\n -1,\n )\n\n t_mask = tf.reshape(t_pad, (1, H, W, 3)) * tf.reshape(\n fine_result[\"acc_alpha\"], (1, H, W, 1)\n )\n\n tf.summary.image(\"train/\" + n, t_mask)\n else:\n if len(t.shape) == 1:\n t = t[:, None]\n tf.summary.image(\"train/\" + 
n, tf.reshape(t, (1, H, W, t.shape[-1])))\n\n sgs = nerd.sgs_store(img_idx)\n env_map = nerd.renderer.visualize_fit((64, 128), sgs)\n hdr_to_tb(\"train/env_map\", env_map[None, :])\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n print(args)\n\n main(args)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.stack", "numpy.concatenate", "numpy.max", "tensorflow.summary.scalar", "tensorflow.Variable", "tensorflow.summary.image", "numpy.stack", "numpy.load", "tensorflow.data.Options", "tensorflow.zeros_like", "tensorflow.summary.experimental.get_step", "tensorflow.debugging.assert_shapes", "tensorflow.distribute.get_strategy", "tensorflow.summary.experimental.set_step", "tensorflow.reduce_max", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.reshape", "tensorflow.keras.optimizers.schedules.ExponentialDecay", "tensorflow.ones_like", "tensorflow.reduce_min", "tensorflow.keras.optimizers.Adam", "tensorflow.distribute.MirroredStrategy" ] ]
X-CCS/mandarin_tacotron2_world
[ "46065c4b6735ed66cdb6b66433b85cd74e838df2" ]
[ "datasets/audio.py" ]
[ "import librosa\nimport numpy as np\nimport pysptk\nimport pyworld\nimport soundfile as sf\nimport tensorflow as tf\n\n\ndef load_wav(path, hparams):\n\twav, _ = sf.read(path)\n\treturn wav\n\ndef save_wav(wav, path, hparams):\n\tsf.write(path, wav, hparams.sample_rate)\n\ndef trim_silence(wav, hparams):\n\treturn librosa.effects.trim(wav, top_db= hparams.trim_top_db,\n\t\tframe_length=hparams.trim_fft_size, hop_length=hparams.trim_hop_size)[0]\n\ndef feature_extract(wav, hp):\n\tfs = hp.sample_rate\n\tif hp.use_harvest:\n\t\tf0, timeaxis = pyworld.harvest(wav, fs, frame_period=hp.frame_period)\n\telse:\n\t\tf0, timeaxis = pyworld.dio(wav, fs, frame_period=hp.frame_period)\n\t\tf0 = pyworld.stonemask(wav, f0, timeaxis, fs)\n\n\tspectrogram = pyworld.cheaptrick(wav, f0, timeaxis, fs)\n\taperiodicity = pyworld.d4c(wav, f0, timeaxis, fs)\n\tbap = pyworld.code_aperiodicity(aperiodicity, fs)\n\thp.num_bap = bap.shape[1]\n\talpha = pysptk.util.mcepalpha(fs)\n\tmgc = pysptk.sp2mc(spectrogram, order=hp.num_mgc - 1, alpha=alpha)\n\tf0 = f0[:, None]\n\tlf0 = f0.copy()\n\tnonzero_indices = np.nonzero(f0)\n\tlf0[nonzero_indices] = np.log(f0[nonzero_indices])\n\tif hp.use_harvest:\n\t\t# https://github.com/mmorise/World/issues/35#issuecomment-306521887\n\t\tvuv = (aperiodicity[:, 0] < 0.5).astype(np.float32)[:, None]\n\telse:\n\t\tvuv = (lf0 != 0).astype(np.float32)\n\n\tfeatures = np.hstack((mgc, lf0, vuv, bap))\n\treturn features.astype(np.float32)\n\ndef synthesize(feature, hparams):\n\tmgc_idx = 0\n\tlf0_idx = mgc_idx + hparams.num_mgc\n\tvuv_idx = lf0_idx + hparams.num_lf0\n\tbap_idx = vuv_idx + hparams.num_vuv\n\n\tmgc = feature[:, mgc_idx : mgc_idx + hparams.num_mgc]\n\tlf0 = feature[:, lf0_idx : lf0_idx + hparams.num_lf0]\n\tvuv = feature[:, vuv_idx : vuv_idx + hparams.num_vuv]\n\tbap = feature[:, bap_idx : bap_idx + hparams.num_bap]\n\n\tfs = hparams.sample_rate\n\talpha = pysptk.util.mcepalpha(fs)\n\tfftlen = pyworld.get_cheaptrick_fft_size(fs)\n\n\tspectrogram = pysptk.mc2sp(mgc, fftlen=fftlen, alpha=alpha)\n\n\tindexes = (vuv < 0.5).flatten()\n\tbap[indexes] = np.zeros(hparams.num_bap)\n\taperiodicity = pyworld.decode_aperiodicity(bap.astype(np.float64), fs, fftlen)\n\n\tf0 = lf0.copy()\n\tf0[vuv < 0.5] = 0\n\tf0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)])\n\n\treturn pyworld.synthesize(f0.flatten().astype(np.float64),\n\t\t\t\tspectrogram.astype(np.float64),\n\t\t\t\taperiodicity.astype(np.float64),\n\t\t\t\tfs, hparams.frame_period)\n" ]
[ [ "numpy.hstack", "numpy.log", "numpy.zeros", "numpy.nonzero" ] ]
pabloi09/physionet-challenge-2020
[ "8e18e326bbad70bdcac11b27c65951dde698e2f3" ]
[ "webtool/apiserver/get_12ECG_features.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nfrom scipy.signal import butter, lfilter, resample\nfrom scipy import stats\nlead_names = np.array([\"I\",\"II\", \"III\", \"aVR\", \"aVL\", \"aVF\", \"V1\", \"V2\", \"V3\", \"V4\", \"V5\", \"V6\"])\n\ndef get_slices(signal):\n signals = []\n while len(signal) > 2999:\n signals.append(resample(signal[:3000],256))\n signal = signal[3001:]\n\n return signals\n\ndef bandpass_filter(data, lowcut, highcut, signal_freq, filter_order):\n \n nyquist_freq = 0.5 * signal_freq\n low = lowcut / nyquist_freq\n high = highcut / nyquist_freq\n b, a = butter(filter_order, [low, high], btype=\"band\")\n y = lfilter(b, a, data)\n y[:5] = y[5]\n \n return y\n\ndef normalize(signal):\n signal -= np.mean(signal)\n minimum = np.amin(signal)\n maximum = np.amax(signal)\n \n return (signal - minimum) / ( maximum - minimum)\n\ndef get_features(classes,data, header_data):\n \n #data_dict {fs, n_samples,\"age\",\"sex\",\"output\":[0 1 0 1 0],\"prescript\",\"hist\",\"sympt\", \n #leads:[{\"gain\",\"name\",samples:[]}]}\n data_dict = {}\n _,n_leads,fs,n_samples,_,_ = header_data[0].split()\n n_leads,fs,n_samples = map(int,[n_leads,fs,n_samples])\n \n data_dict[\"n_leads\"] = n_leads\n data_dict[\"fs\"] = fs\n data_dict[\"n_samples\"] = n_samples\n data_dict[\"leads\"] = []\n \n for i in range(n_leads):\n tmp = header_data[i + 1].split()\n lead_name = tmp[-1].replace(\"\\n\",\"\")\n gain_mv = int(tmp[2].replace(\"/mV\",\"\"))\n lead = {}\n lead[\"namestr\"] = lead_name\n lead[\"name\"] = np.where(lead_names == lead_name)[0]\n lead[\"gain_mv\"] = gain_mv\n lead[\"samples\"] = data[i]\n data_dict[\"leads\"].append(lead)\n \n for line in header_data:\n if \"#Age\" in line:\n age = line.split(\": \")[1]\n data_dict[\"age\"] = int(age if not \"NaN\" in age else 57)\n elif \"#Sex\" in line:\n data_dict[\"sex\"] = 0 if line.split(\": \")[1].replace(\"\\n\",\"\") == \"Male\" else 1\n elif \"#Dx\" in line:\n data_dict[\"output\"] = np.zeros((1,9))\n for c in line.split(\": \")[1].replace(\"\\n\",\"\").split(\",\"):\n data_dict[\"output\"] += (classes == c)\n\n elif \"#Rx\" in line:\n data_dict[\"Rx\"] = line.split(\": \")[1].replace(\"\\n\",\"\")\n elif \"#Hx\" in line:\n data_dict[\"Hx\"] = line.split(\": \")[1].replace(\"\\n\",\"\")\n elif \"#Sx\" in line:\n data_dict[\"Sx\"] = line.split(\": \")[1].replace(\"\\n\",\"\")\n return data_dict\n\ndef get_for_web(data_dict):\n filter_lowcut = 0.001\n filter_highcut = 15.0\n filter_order = 1\n result = []\n for lead in data_dict[\"leads\"]:\n filtered = bandpass_filter(lead[\"samples\"], lowcut=filter_lowcut, highcut=filter_highcut, signal_freq = data_dict[\"fs\"], filter_order = filter_order)\n normalized = normalize(filtered)\n result.append({\"name\" : lead[\"namestr\"], \"signal\" : normalized.tolist()})\n return result\n\ndef get_x(data_dict):\n filter_lowcut = 0.001\n filter_highcut = 15.0\n filter_order = 1\n signals = []\n data_tags = []\n for lead in data_dict[\"leads\"]:\n filtered = bandpass_filter(lead[\"samples\"], lowcut=filter_lowcut, highcut=filter_highcut, signal_freq = data_dict[\"fs\"], filter_order = filter_order)\n for s in get_slices(filtered):\n try:\n normalized = normalize(s)\n signals.append(normalized)\n except:\n continue\n data_tags.append([data_dict[\"age\"] if data_dict[\"age\"] > 0 else 57 , data_dict[\"sex\"], data_dict[\"Rx\"], data_dict[\"Hx\"], data_dict[\"Sx\"], lead[\"name\"]])\n x = np.asarray(signals,dtype=np.float32)\n x = np.reshape(x,(x.shape[0],x.shape[1],1))\n tags = []\n for tag in 
data_tags:\n tags.append([tag[0],tag[1],tag[5]])\n tags = np.asarray(tags,dtype=np.float32)\n return x,tags" ]
[ [ "numpy.amax", "numpy.amin", "numpy.asarray", "numpy.reshape", "scipy.signal.resample", "scipy.signal.butter", "numpy.mean", "scipy.signal.lfilter", "numpy.array", "numpy.where", "numpy.zeros" ] ]
younes-h/keras-video-classifier
[ "f4eeec2bc8e19b695a72ff00b9c8a556b54d505c" ]
[ "demo/vgg16_lstm_train.py" ]
[ "import numpy as np\nfrom keras import backend as K\nimport sys\nimport os\n\n\ndef main():\n #K.set_image_dim_ordering('tf')\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\n from keras_video_classifier.library.utility.plot_utils import plot_and_save_history\n from keras_video_classifier.library.recurrent_networks import VGG16LSTMVideoClassifier\n from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf\n\n data_set_name = 'UCF-101'\n input_dir_path = os.path.join(os.path.dirname(__file__), 'very_large_data')\n output_dir_path = os.path.join(os.path.dirname(__file__), 'models', data_set_name)\n report_dir_path = os.path.join(os.path.dirname(__file__), 'reports', data_set_name)\n\n np.random.seed(42)\n\n # this line downloads the video files of UCF-101 dataset if they are not available in the very_large_data folder\n load_ucf(input_dir_path)\n\n classifier = VGG16LSTMVideoClassifier()\n\n history = classifier.fit(data_dir_path=input_dir_path, model_dir_path=output_dir_path, data_set_name=data_set_name)\n\n plot_and_save_history(history, VGG16LSTMVideoClassifier.model_name,\n report_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-history.png')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.seed" ] ]
pjsier/national-voter-file
[ "f8bae42418c9307150d10c9e71174defaefa4e60" ]
[ "src/python/utils/censusreporter/censusreporter_api.py" ]
[ "import requests\r\nimport pandas as pd\r\nimport csv\r\nimport sys\r\nimport re\r\nimport collections\r\nfrom jsonmerge import merge\r\n\r\nAPI_URL=\"http://api.censusreporter.org/1.0/data/show/{release}?table_ids={table_ids}&geo_ids={geoids}\"\r\n\r\n\r\ndef _clean_list_arg(arg,default):\r\n if arg is None:\r\n arg = default\r\n if isinstance(arg,str):\r\n arg = [arg]\r\n return arg\r\n\r\ndef get_url_response(tables, geoids, release):\r\n url = API_URL.format(table_ids=','.join(tables).upper(),\r\n geoids=','.join(geoids),\r\n release=release)\r\n\r\n response = requests.get(url)\r\n return response.json()\r\n\r\n\r\ndef json_data(tables=None, geoids=None, release='latest'):\r\n \"\"\"Make a basic API request for data for a given table, geoid, and/or release.\r\n tables -- An ACS table ID as a string, or a list of such IDs. Default: 'B01001'\r\n geoids -- A Census geoID as a string, or a list of such IDs. Default: '040|01000US' ('all states in the US')\r\n release -- The ACS release from which to retrieve data. Should be one of:\r\n latest - (default) the ACS release which has data for all of the requested geographies\r\n acs2013_1yr - the 2013 1-year ACS data. Only includes geographies with population >65,000\r\n acs2013_3yr - the 2011-13 3-year ACS data. Only includes geographies with population >20,000\r\n acs2013_5yr - the 2009-13 5-year ACS data. Includes all geographies covered in the ACS.\r\n \"\"\"\r\n geoids = _clean_list_arg(geoids,'040|01000US')\r\n tables = _clean_list_arg(tables,'B01001')\r\n\r\n #If the URL is too big it will fail, estimating the size here and if it is too big we'll break this up\r\n #This should never happen, but we're going to check just to make sure\r\n maxURLSize = 4020\r\n geoSize = len(geoids[0]) + 1\r\n tblSize = len(tables[0]) + 1\r\n urlSize = (len(tables) * tblSize) + (len(geoids) * geoSize)\r\n\r\n if urlSize > maxURLSize:\r\n tableSize = len(tables) * tblSize\r\n maxGeos = int((maxURLSize - tableSize) / geoSize)\r\n print(\"URL maybe too big, breaking up.\")\r\n print((len(tables) * tblSize) + (len(geoids[:maxGeos]) * geoSize))\r\n resp = get_url_response(tables, geoids[:maxGeos], release)\r\n if \"error\" in resp:\r\n raise Exception(resp['error'])\r\n\r\n return merge(resp, json_data(tables, geoids[maxGeos:], release))\r\n\r\n response = get_url_response(tables, geoids, release)\r\n\r\n if \"error\" in response and \"release doesn't include GeoID(s) \" in response['error']:\r\n geoList = re.findall(r'(\\d+US\\w+)\\W/', response['error'])\r\n\r\n geoids = [x for x in geoids if x not in geoList]\r\n if len(geoids) == 0:\r\n return None\r\n\r\n response = get_url_response(tables, geoids, release)\r\n\r\n return response\r\n\r\n\r\ndef get_dataframe(tables=None, geoids=None, release='latest',level=None,place_names=True,column_names=True):\r\n \"\"\"Return a pandas DataFrame object for the given tables and geoids.\r\n Keyword arguments (all optional):\r\n tables -- An ACS table ID as a string, or a list of such IDs. Default: 'B01001'\r\n geoids -- A Census geoID as a string, or a list of such IDs. Default: '040|01000US' ('all states in the US')\r\n release -- The ACS release from which to retrieve data. Should be one of:\r\n latest - (default) the ACS release which has data for all of the requested geographies\r\n acs2013_1yr - the 2013 1-year ACS data. Only includes geographies with population >65,000\r\n acs2013_3yr - the 2011-13 3-year ACS data. Only includes geographies with population >20,000\r\n acs2013_5yr - the 2009-13 5-year ACS data. 
Includes all geographies covered in the ACS.\r\n level -- if provided, should be an integer representing the maximum \"indent level\" of columns to be returned. Generally, '0' is the total column.\r\n place_names -- specify False to omit a 'name' column for each geography row\r\n column_names -- specify False to preserve the coded column names instead of using verbal labels\r\n \"\"\"\r\n\r\n response = json_data(tables, geoids, release)\r\n\r\n if 'error' in response:\r\n raise Exception(response['error'])\r\n\r\n result_list = []\r\n for geoid, tables in response['data'].items():\r\n result = {\r\n 'GEOID': geoid\r\n }\r\n for table, table_data in tables.items():\r\n result.update(table_data['estimate'])\r\n\r\n result_list.append(result)\r\n\r\n df = pd.DataFrame(result_list)\r\n return df\r\n\r\n\r\n# Create string translation tables\r\nallowed = ' _01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\ndelchars = \"\"\r\nfor i in range(255):\r\n\tif chr(i) not in allowed: delchars = delchars + str(chr(i))\r\ndeltable = str.maketrans(' ','_', delchars)\r\n\r\ndef fixColName(col):\r\n # Format column name to remove unwanted chars\r\n col = str.strip(col)\r\n col = col.translate(deltable)\r\n fmtcol = col\r\n fmtcol = col.lower()\r\n\r\n return fmtcol\r\n" ]
[ [ "pandas.DataFrame" ] ]
CarlosMR91/formation-evaluation
[ "185597e8ea8069bbd40aa967ff6667b6e4de328c" ]
[ "well_log_display.py" ]
[ "def well_log_display(df, column_depth, column_list, \n column_semilog=None, min_depth=None, max_depth=None, \n column_min=None, column_max=None, colors=None, \n fm_tops=None, fm_depths=None, \n tight_layout=1, title_size=10):\n \"\"\"\n Display log side-by-side style\n\n Input:\n\n df is your dataframe\n specify min_depth and max_depth as the upper and lower depth limit\n column_depth is the column name of your depth\n column_list is the LIST of column names that you will display\n\n column_semilog is specific for resistivity column; if your resistivity is\n in column 3, specify as: column_semilog=2. Default is None, so if \n you don't specify, the resistivity will be plotted in normal axis instead\n \n column_min is list of minimum values for the x-axes.\n column_max is list of maximum values for the x-axes.\n \n colors is the list of colors specified for each log name. Default is None,\n so if you don't specify, the colors will be Matplotlib default (blue)\n\n fm_tops and fm_depths are the list of formation top names and depths.\n Default is None, so no tops are shown. Specify both lists, if you want\n to show the tops\n \"\"\"\n import numpy as np\n import matplotlib.pyplot as plt\n import pandas as pd\n import random\n\n if column_semilog==None:\n # column semilog not defined, RT will be plotted in normal axis\n logs = column_list\n\n # create the subplots; ncols equals the number of logs\n fig, ax = plt.subplots(nrows=1, ncols=len(logs), figsize=(20,10))\n\n # looping each log to display in the subplots\n if colors==None:\n # color is None (default)\n for i in range(len(logs)):\n # normal axis plot\n ax[i].plot(df[logs[i]], df[column_depth])\n ax[i].set_title(logs[i], size=title_size)\n ax[i].minorticks_on()\n ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')\n ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')\n if column_min!=None and column_max!=None:\n # x-axis limits defined\n ax[i].set_xlim(column_min[i], column_max[i])\n if min_depth!=None and max_depth!=None:\n # y-axis limit defined\n ax[i].set_ylim(min_depth, max_depth) \n ax[i].invert_yaxis() \n\n else:\n # colors are defined (as list)\n for i in range(len(logs)):\n # normal axis plot\n ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])\n ax[i].set_title(logs[i], size=title_size)\n ax[i].minorticks_on()\n ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')\n ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black') \n if column_min!=None and column_max!=None:\n # x-axis limits defined\n ax[i].set_xlim(column_min[i], column_max[i]) \n if min_depth!=None and max_depth!=None:\n # y-axis limit defined\n ax[i].set_ylim(min_depth, max_depth) \n ax[i].invert_yaxis() \n\n\n else:\n # column semilog is defined, RT will be plotted in semilog axis\n logs = column_list\n\n # create the subplots; ncols equals the number of logs\n fig, ax = plt.subplots(nrows=1, ncols=len(logs), figsize=(20,10))\n\n # looping each log to display in the subplots\n if colors==None:\n # color is None (default)\n for i in range(len(logs)):\n if i == column_semilog:\n # for resistivity, semilog plot\n ax[i].semilogx(df[logs[i]], df[column_depth])\n else:\n # for non-resistivity, normal plot\n ax[i].plot(df[logs[i]], df[column_depth])\n \n ax[i].set_title(logs[i], size=title_size)\n ax[i].minorticks_on()\n ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')\n ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black') \n if column_min!=None and column_max!=None:\n # x-axis limits defined\n ax[i].set_xlim(column_min[i], column_max[i]) \n if min_depth!=None and max_depth!=None:\n # y-axis limit defined\n ax[i].set_ylim(min_depth, max_depth) \n ax[i].invert_yaxis() \n\n else:\n # colors are defined (as list)\n for i in range(len(logs)):\n if i == column_semilog:\n # for resistivity, semilog plot\n ax[i].semilogx(df[logs[i]], df[column_depth], color=colors[i]) \n else:\n # for non-resistivity, normal plot\n ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])\n \n ax[i].set_title(logs[i], size=title_size)\n ax[i].minorticks_on()\n ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')\n ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black') \n if column_min!=None and column_max!=None:\n # x-axis limits defined\n ax[i].set_xlim(column_min[i], column_max[i]) \n if min_depth!=None and max_depth!=None:\n # y-axis limit defined\n ax[i].set_ylim(min_depth, max_depth)\n ax[i].invert_yaxis() \n\n if fm_tops!=None and fm_depths!=None:\n # Formation tops and depths are specified, they will be shown\n\n # produce colors\n rgb = []\n for j in range(len(fm_tops)):\n _ = (random.random(), random.random(), random.random())\n rgb.append(_)\n\n for i in range(len(logs)):\n for j in range(len(fm_tops)):\n # rgb = (random.random(), random.random(), random.random())\n ax[i].axhline(y=fm_depths[j], linestyle=\":\", c=rgb[j], label=fm_tops[j]) \n # y = fm_depths[j] / (max_depth - min_depth) \n # ax[i].text(0.5, y, fm_tops[j], fontsize=5, va='center', ha='center', backgroundcolor='w')\n\n # plt.legend()\n # plt.legend(loc='upper center', bbox_to_anchor=(-3, -0.05),\n # fancybox=True, shadow=True, ncol=5) \n \n plt.tight_layout(tight_layout)\n plt.show() \n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show" ] ]
dingdanhao110/Conch
[ "befa022dd08590062213ef2a17d0cf697fa26ec4", "5c209865429cc711a40d6b529c7f3ab26083633b" ]
[ "problem.py", "preprocess/cora.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n problem.py\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn import metrics\n\nimport torch\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\nfrom helpers import load_edge_emb\n\nfrom helpers import read_mpindex_dblp,read_homograph,read_mpindex_yelp,read_mpindex_yago,read_mpindex_cora,read_mpindex_aminer\n\n# --\n# Helper classes\n\nclass ProblemLosses:\n @staticmethod\n def multilabel_classification(preds, targets):\n return F.multilabel_soft_margin_loss(preds, targets)\n \n @staticmethod\n def classification(preds, targets):\n return F.cross_entropy(preds, targets)\n #return F.nll_loss(preds, targets)\n #return F.multi_margin_loss(preds, targets,margin=0.2)\n \n\n \n\n @staticmethod\n def regression_mae(preds, targets):\n return F.l1_loss(preds, targets)\n \n # @staticmethod\n # def regression_mse(preds, targets):\n # return F.mse_loss(preds - targets)\n\n\nclass ProblemMetrics:\n @staticmethod\n def multilabel_classification(y_true, y_pred):\n y_pred = (y_pred > 0.5).astype(int)\n return {\n \"accuracy\": float(metrics.accuracy_score(y_true, y_pred)),\n \"micro\" : float(metrics.f1_score(y_true, y_pred, average=\"micro\")),\n \"macro\" : float(metrics.f1_score(y_true, y_pred, average=\"macro\")),\n }\n \n @staticmethod\n def classification(y_true, y_pred):\n y_pred = np.argmax(y_pred, axis=1)\n #print(np.unique(y_true),np.unique(y_pred))\n return {\n \"accuracy\": float(metrics.accuracy_score(y_true, y_pred)),\n \"micro\" : float(metrics.f1_score(y_true, y_pred, average=\"micro\")),\n \"macro\" : float(metrics.f1_score(y_true, y_pred, average=\"macro\")),\n }\n # return (y_pred == y_true.squeeze()).mean()\n \n @staticmethod\n def regression_mae(y_true, y_pred):\n return float(np.abs(y_true - y_pred).mean())\n\n\n# --\n# Problem definition\n\nread_feat_lookup = {\n \"dblp\":read_mpindex_dblp,\n \"yelp\":read_mpindex_yelp,\n \"yago\":read_mpindex_yago,\n \"cora\":read_mpindex_cora,\n \"aminer\":read_mpindex_aminer,\n}\n\nclass NodeProblem(object):\n def __init__(self, problem_path, problem, schemes, device,train_per, K=10, input_edge_dims = 128,):\n \n # print('NodeProblem: loading started')\n\n features, labels, folds = read_feat_lookup[problem](path=problem_path,train_per=train_per)\n\n # self.edge_neighs = dict()\n # with np.load(\"{}edge_neighs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.edge_neighs[s] = data[s]\n self.node_neighs = dict()\n with np.load(\"{}node_neighs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n for s in schemes:\n self.node_neighs[s] = data[s]\n # self.node2edge_idxs = dict()\n # with np.load(\"{}mp_node2edge_idxs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.node2edge_idxs[s] = data[s]\n # print(data[s].shape)\n # self.edge_embs = dict()\n # with np.load(\"{}mp_edge_embs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.edge_embs[s] = data[s]\n # print(data[s].shape)\n # self.edge2node_idxs = dict()\n # with np.load(\"{}mp_edge2node_idxs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.edge2node_idxs[s] = data[s]\n\n # self.edge_node_adjs = dict()\n # with np.load(\"{}mp_edge_node_adjs_{}_{}.npz\".format(problem_path, K, input_edge_dims)) as data:\n # for s in schemes:\n # self.edge_node_adjs[s] = data[s]\n # 
print(data[s].shape)\n\n self.task = 'classification'\n self.n_classes = int(max(labels)+1) # !!\n\n #input: features, homograph, edge embedding\n if features.shape[1]>1:\n # self.feats = np.pad(features,((0,1),(0,0)),'constant')\n self.feats = features\n pass\n else:\n self.feats = np.eye(features.shape[0])\n\n\n self.schemes=schemes\n\n self.folds = folds\n self.targets = labels\n\n self.feats_dim = self.feats.shape[1] if self.feats is not None else None\n # self.edge_dim = self.edge_embs[schemes[0]].shape[1]\n self.n_nodes = features.shape[0]\n # print(self.n_nodes)\n\n #self.homo_adj, self.homo_feat = read_homograph(path=problem_path,problem=problem)\n\n self.device = device\n self.__to_torch()\n \n self.nodes = {\n \"train\" : self.folds ['train'],\n \"val\" : self.folds ['val'],\n \"test\" : self.folds ['test'],\n }\n \n self.loss_fn = getattr(ProblemLosses, self.task)\n self.metric_fn = getattr(ProblemMetrics, self.task)\n \n # print('NodeProblem: loading finished')\n\n\n def __to_torch(self):\n if self.feats is not None:\n self.feats = torch.FloatTensor(self.feats)\n\n # for i in self.edge_neighs:\n # self.edge_neighs[i] = torch.from_numpy(self.edge_neighs[i]).long()\n for i in self.node_neighs:\n self.node_neighs[i] = torch.from_numpy(self.node_neighs[i]).long()\n # for i in self.node2edge_idxs:\n # self.node2edge_idxs[i] = torch.from_numpy(self.node2edge_idxs[i]).long()\n # for i in self.edge_embs:\n # self.edge_embs[i] = torch.from_numpy(self.edge_embs[i]).float()\n # print(self.edge_embs[i].shape)\n # for i in self.edge2node_idxs:\n # self.edge2node_idxs[i] = torch.from_numpy(self.edge2node_idxs[i]).long()\n\n # for i in self.edge_node_adjs:\n # self.edge_node_adjs[i] = torch.from_numpy(self.edge_node_adjs[i]).long()\n # if not sparse.issparse(self.adj):\n # if self.device!=\"cpu\":\n # for i in self.edge_neighs:\n # self.edge_neighs[i]= self.edge_neighs[i].to(self.device)\n # for i in self.node_neighs:\n # self.node_neighs[i]=self.node_neighs[i].to(self.device)\n # for i in self.node2edge_idxs:\n # self.node2edge_idxs[i]=self.node2edge_idxs[i].to(self.device)\n # for i in self.edge_embs:\n # self.edge_embs[i]=self.edge_embs[i].to(self.device)\n # for i in self.edge2node_idxs:\n # self.edge2node_idxs[i]=self.edge2node_idxs[i].to(self.device).detatch()\n # print('GPU memory allocated: ', torch.cuda.memory_allocated() / 1000 / 1000 / 1000)\n # # #self.homo_adj = self.homo_adj.to(self.device)\n # # #self.homo_feat = self.homo_feat.to(self.device)\n # # for i in self.edge_emb:\n # # if torch.is_tensor(self.edge_emb[i]):\n # # pass\n # # self.edge_emb[i] = self.edge_emb[i].to(self.device)\n # if self.feats is not None:\n # self.feats = self.feats.to(self.device)\n # print('GPU memory allocated: ', torch.cuda.memory_allocated() / 1000 / 1000 / 1000)\n\n def __batch_to_torch(self, mids, targets):\n \"\"\" convert batch to torch \"\"\"\n mids = Variable(torch.LongTensor(mids))\n \n if self.task == 'multilabel_classification':\n targets = Variable(torch.FloatTensor(targets))\n elif self.task == 'classification':\n targets = Variable(torch.LongTensor(targets))\n elif 'regression' in self.task:\n targets = Variable(torch.FloatTensor(targets))\n else:\n raise Exception('NodeDataLoader: unknown task: %s' % self.task)\n \n if self.device!=\"cpu\":\n mids, targets = mids.to(self.device), targets.to(self.device)\n \n return mids, targets\n \n def iterate(self, mode, batch_size=512, shuffle=False):\n nodes = self.nodes[mode]\n \n idx = np.arange(nodes.shape[0])\n if shuffle:\n idx = 
np.random.permutation(idx)\n \n n_chunks = idx.shape[0] // batch_size + 1\n for chunk_id, chunk in enumerate(np.array_split(idx, n_chunks)):\n mids = nodes[chunk]\n targets = self.targets[mids].reshape(-1,1)\n mids, targets = self.__batch_to_torch(mids, targets)\n yield mids, targets, chunk_id / n_chunks\n\n\nclass ReadCosSim(object):\n def __init__(self, problem_path, problem, schemes, device,train_per, K=10, input_edge_dims = 128,):\n # print('ReadCosSim: loading started')\n\n # self.edge_neighs = dict()\n # with np.load(\"{}edge_neighs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.edge_neighs[s] = data[s]\n # self.node_neighs = dict()\n # with np.load(\"{}node_neighs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.node_neighs[s] = data[s]\n self.node2edge_idxs = dict()\n with np.load(\"{}node2edge_idxs_{}_{}_cos.npz\".format(problem_path,K, input_edge_dims)) as data:\n for s in schemes:\n self.node2edge_idxs[s] = data[s]\n self.edge_embs = dict()\n with np.load(\"{}edge_embs_{}_{}_cos.npz\".format(problem_path,K, input_edge_dims)) as data:\n for s in schemes:\n self.edge_embs[s] = data[s]\n # print(data[s].shape)\n # self.edge2node_idxs = dict()\n # with np.load(\"{}edge2node_idxs_{}_{}.npz\".format(problem_path,K, input_edge_dims)) as data:\n # for s in schemes:\n # self.edge2node_idxs[s] = data[s]\n\n self.edge_node_adjs = dict()\n with np.load(\"{}edge_node_adjs_{}_{}_cos.npz\".format(problem_path, K, input_edge_dims)) as data:\n for s in schemes:\n self.edge_node_adjs[s] = data[s]\n\n self.device = device\n self.__to_torch()\n \n # print('ReadCosSim: loading finished')\n \n def __to_torch(self):\n\n # for i in self.edge_neighs:\n # self.edge_neighs[i] = torch.from_numpy(self.edge_neighs[i]).long()\n # for i in self.node_neighs:\n # self.node_neighs[i] = torch.from_numpy(self.node_neighs[i]).long()\n for i in self.node2edge_idxs:\n self.node2edge_idxs[i] = torch.from_numpy(self.node2edge_idxs[i]).long()\n for i in self.edge_embs:\n self.edge_embs[i] = torch.from_numpy(self.edge_embs[i]).float()\n # print(self.edge_embs[i].shape)\n # for i in self.edge2node_idxs:\n # self.edge2node_idxs[i] = torch.from_numpy(self.edge2node_idxs[i]).long()\n\n for i in self.edge_node_adjs:\n self.edge_node_adjs[i] = torch.from_numpy(self.edge_node_adjs[i]).long()", "import numpy as np\nimport scipy.sparse as sp\nimport torch\nimport random\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom tqdm import tqdm\n\ndef gen_homograph():\n path = \"../data/cora/\"\n out_file = \"homograph2\"\n\n label_file = \"paper_label\"\n PA_file = \"PA\"\n PP_file = \"PP\"\n PT_file = \"PT\"\n PAP_file = \"PAP\"\n PPP_file = \"PPP\"\n\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PP = np.genfromtxt(\"{}{}.txt\".format(path, PP_file),\n dtype=np.int32)\n PT = np.genfromtxt(\"{}{}.txt\".format(path, PT_file),\n dtype=np.int32)\n\n PA = PA[:, :2]\n PP = PP[:, :2]\n PT = PT[:, :2]\n\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PP[:, 0] -= 1\n PP[:, 1] -= 1\n PT[:, 0] -= 1\n PT[:, 1] -= 1\n print('paper id range:', min(PA[:, 0]), max(PA[:, 0]))\n print('paper id range:', min(PP[:, 0]), min(PP[:, 1]))\n print('author id range:', min(PA[:, 1]), max(PA[:, 1]))\n print('term id range:', min(PT[:, 1]), max(PT[:, 1]))\n\n paper_max = max(PA[:, 0]) + 1\n author_max = max(PA[:, 1]) + 1\n term_max = max(PT[:, 1]) + 1\n\n PA[:, 1] += paper_max\n # PC[:, 1] += author_max\n PT[:, 1] += author_max + paper_max\n 
print(PA.shape)\n edges = np.concatenate((PA, PP, PA[:,::-1]), axis=0)\n\n np.savetxt(\"{}{}.txt\".format(path, out_file), edges, fmt='%u')\n\n\ndef read_embed(path=\"../../../data/cora/\",\n emb_file=\"AP\", emb_len=16):\n with open(\"{}{}_{}.emb\".format(path, emb_file, emb_len)) as f:\n n_nodes, n_feature = map(int, f.readline().strip().split())\n print(\"number of nodes:{}, embedding size:{}\".format(n_nodes, n_feature))\n\n embedding = np.loadtxt(\"{}{}_{}.emb\".format(path, emb_file, emb_len),\n dtype=np.float32, skiprows=1)\n emb_index = {}\n for i in range(n_nodes):\n emb_index[int(embedding[i, 0])] = i\n\n features = np.asarray([embedding[emb_index[i], 1:] if i in emb_index else embedding[0, 1:] for i in range(n_nodes)])\n\n # assert features.shape[1] == n_feature\n # assert features.shape[0] == n_nodes\n\n return features, n_nodes, n_feature\n\n\ndef dump_edge_emb(path='../data/cora/', emb_len=16):\n # dump APA\n PAP_file = \"PAP\"\n PPP_file = \"PPP\"\n PCP_file = \"PCP\"\n\n PAP_e, n_nodes, n_emb = read_embed(path=path, emb_file='AP', emb_len=emb_len)\n PPP_e, n_nodes, n_emb = read_embed(path=path, emb_file='AP', emb_len=emb_len)\n PCP_e, n_nodes, n_emb = read_embed(path=path, emb_file='AP', emb_len=emb_len)\n\n PA_file = \"PA\"\n PP_file = \"PP\"\n\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PC = np.genfromtxt(\"{}{}.txt\".format(path, PP_file),\n dtype=np.int32)\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PC[:, 0] -= 1\n PC[:, 1] -= 1\n\n PAi = {}\n APi = {}\n PCi = {}\n CPi = {}\n\n for i in range(PA.shape[0]):\n p = PA[i, 0]\n a = PA[i, 1]\n\n if p not in PAi:\n PAi[p] = set()\n if a not in APi:\n APi[a] = set()\n\n PAi[p].add(a)\n APi[a].add(p)\n\n for i in range(PC.shape[0]):\n p = PC[i, 0]\n c = PC[i, 1]\n\n if p not in PCi:\n PCi[p] = set()\n if c not in CPi:\n CPi[c] = set()\n\n PCi[p].add(c)\n CPi[c].add(p)\n\n PAPi = {}\n PPPi = {}\n PCPi={}\n\n for v in PAi:\n for a in PAi[v]:\n if a not in APi:\n continue\n for p in APi[a]:\n if p not in PAPi:\n PAPi[p] = {}\n if v not in PAPi:\n PAPi[v] = {}\n\n if v not in PAPi[p]:\n PAPi[p][v] = set()\n if p not in PAPi[v]:\n PAPi[v][p] = set()\n\n PAPi[p][v].add(a)\n PAPi[v][p].add(a)\n\n for v in PCi:\n for a in PCi[v]:\n if a not in CPi:\n continue\n for p in CPi[a]:\n if p not in PPPi:\n PPPi[p] = {}\n if v not in PPPi:\n PPPi[v] = {}\n\n if v not in PPPi[p]:\n PPPi[p][v] = set()\n if p not in PPPi[v]:\n PPPi[v][p] = set()\n\n PPPi[p][v].add(a)\n PPPi[v][p].add(a)\n\n for v in CPi:\n for a in CPi[v]:\n if a not in PCi:\n continue\n for p in PCi[a]:\n if p not in PCPi:\n PCPi[p] = {}\n if v not in PCPi:\n PCPi[v] = {}\n\n if v not in PCPi[p]:\n PCPi[p][v] = set()\n if p not in PCPi[v]:\n PCPi[v][p] = set()\n\n PCPi[p][v].add(a)\n PCPi[v][p].add(a)\n\n PAP_ps = sp.load_npz(\"{}{}\".format(path, 'PAP_ps.npz')).todense()\n PPP_ps = sp.load_npz(\"{}{}\".format(path, 'PPP_ps.npz')).todense()\n PCP_ps = sp.load_npz(\"{}{}\".format(path, 'PCP_ps.npz')).todense()\n\n # PAP\n APA = PAPi\n APA_emb = []\n for a1 in tqdm(range(19396)):\n if a1 not in APA or len(APA[a1]) == 0:\n APA_emb.append(np.concatenate(([a1, a1], PAP_e[a1], [1], [1])))\n # print('no neighbor')\n continue\n for a2 in APA[a1]:\n tmp = [PAP_e[p] for p in APA[a1][a2]]\n tmp = np.sum(tmp, axis=0) / len(APA[a1][a2])\n tmp += PAP_e[a1] + PAP_e[a2]\n tmp /= 3\n if a1 <= a2:\n APA_emb.append(np.concatenate(([a1, a2], tmp, [PAP_ps[a1, a2]], [len(APA[a1][a2])])))\n PAP_emb = np.asarray(APA_emb)\n print(\"compute edge embeddings {} 
complete\".format(PAP_file))\n\n # PPP\n APA = PPPi\n APA_emb = []\n for a1 in tqdm(range(19396)):\n if a1 not in APA or len(APA[a1]) == 0:\n APA_emb.append(np.concatenate(([a1, a1], PPP_e[a1], [1], [1])))\n # print('no neighbor')\n continue\n for a2 in APA[a1]:\n tmp = [PPP_e[p] for p in APA[a1][a2]]\n tmp = np.sum(tmp, axis=0) / len(APA[a1][a2])\n tmp += PPP_e[a1] + PPP_e[a2]\n tmp /= 3\n if a1 <= a2:\n APA_emb.append(np.concatenate(([a1, a2], tmp, [PPP_ps[a1, a2]], [len(APA[a1][a2])])))\n PPP_emb = np.asarray(APA_emb)\n print(\"compute edge embeddings {} complete\".format(PPP_file))\n\n # PCP\n APA = PCPi\n APA_emb = []\n for a1 in tqdm(range(19396)):\n if a1 not in APA or len(APA[a1]) == 0:\n APA_emb.append(np.concatenate(([a1, a1], PCP_e[a1], [1], [1])))\n # print('no neighbor')\n continue\n for a2 in APA[a1]:\n tmp = [PCP_e[p] for p in APA[a1][a2]]\n tmp = np.sum(tmp, axis=0) / len(APA[a1][a2])\n tmp += PCP_e[a1] + PCP_e[a2]\n tmp /= 3\n if a1 <= a2:\n APA_emb.append(np.concatenate(([a1, a2], tmp, [PCP_ps[a1, a2]], [len(APA[a1][a2])])))\n PCP_emb = np.asarray(APA_emb)\n print(\"compute edge embeddings {} complete\".format(PCP_file))\n\n emb_len = PPP_emb.shape[1] - 2\n np.savez(\"{}edge{}.npz\".format(path, emb_len),\n PAP=PAP_emb, PPP=PPP_emb, PCP=PCP_emb)\n print('dump npz file {}edge{}.npz complete'.format(path, emb_len))\n pass\n\ndef dump_edge_emb_undirected(path='../data/cora/', emb_len=16):\n # dump APA\n PAP_file = \"PAP\"\n PPP_file = \"PPP\"\n\n PAP_e, n_nodes, n_emb = read_embed(path=path, emb_file='AP', emb_len=emb_len)\n PPP_e, n_nodes, n_emb = read_embed(path=path, emb_file='AP', emb_len=emb_len)\n PP_e, n_nodes, n_emb = read_embed(path=path, emb_file='AP', emb_len=emb_len)\n\n PA_file = \"PA\"\n PP_file = \"PP\"\n\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PC = np.genfromtxt(\"{}{}.txt\".format(path, PP_file),\n dtype=np.int32)\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PC[:, 0] -= 1\n PC[:, 1] -= 1\n\n # PC = np.vstack([PC, PC[:,::-1]])\n\n PAi = {}\n APi = {}\n PCi = {}\n CPi = {}\n\n for i in range(PA.shape[0]):\n p = PA[i, 0]\n a = PA[i, 1]\n\n if p not in PAi:\n PAi[p] = set()\n if a not in APi:\n APi[a] = set()\n\n PAi[p].add(a)\n APi[a].add(p)\n\n for i in range(PC.shape[0]):\n p = PC[i, 0]\n c = PC[i, 1]\n\n if p not in PCi:\n PCi[p] = set()\n if c not in PCi:\n PCi[c] = set()\n if c not in CPi:\n CPi[c] = set()\n if p not in CPi:\n CPi[p] = set()\n\n PCi[p].add(c)\n PCi[c].add(p)\n CPi[c].add(p)\n CPi[p].add(c)\n\n PAPi = {}\n PPPi = {}\n PPi=PCi\n\n for v in PAi:\n for a in PAi[v]:\n if a not in APi:\n continue\n for p in APi[a]:\n if p not in PAPi:\n PAPi[p] = {}\n if v not in PAPi:\n PAPi[v] = {}\n\n if v not in PAPi[p]:\n PAPi[p][v] = set()\n if p not in PAPi[v]:\n PAPi[v][p] = set()\n\n PAPi[p][v].add(a)\n PAPi[v][p].add(a)\n\n for v in PCi:\n for a in PCi[v]:\n if a not in CPi:\n continue\n for p in CPi[a]:\n if p not in PPPi:\n PPPi[p] = {}\n if v not in PPPi:\n PPPi[v] = {}\n\n if v not in PPPi[p]:\n PPPi[p][v] = set()\n if p not in PPPi[v]:\n PPPi[v][p] = set()\n\n PPPi[p][v].add(a)\n PPPi[v][p].add(a)\n\n\n\n\n PAP_ps = sp.load_npz(\"{}{}\".format(path, 'PAP_ps.npz')).todense()\n PPP_ps = sp.load_npz(\"{}{}\".format(path, 'PPP_ps.npz')).todense()\n # PP_ps = sp.load_npz(\"{}{}\".format(path, 'PP_ps.npz')).todense()\n\n # PAP\n APA = PAPi\n APA_emb = []\n for a1 in tqdm(range(19396)):\n if a1 not in APA or len(APA[a1]) == 0:\n APA_emb.append(np.concatenate(([a1, a1], PAP_e[a1], [1], [1])))\n # print('no neighbor')\n continue\n 
for a2 in APA[a1]:\n tmp = [PAP_e[p] for p in APA[a1][a2]]\n tmp = np.sum(tmp, axis=0) / len(APA[a1][a2])\n tmp += PAP_e[a1] + PAP_e[a2]\n tmp /= 3\n if a1 <= a2:\n APA_emb.append(np.concatenate(([a1, a2], tmp, [PAP_ps[a1, a2]], [len(APA[a1][a2])])))\n PAP_emb = np.asarray(APA_emb)\n print(\"compute edge embeddings {} complete\".format(PAP_file))\n\n # PPP\n APA = PPPi\n APA_emb = []\n for a1 in tqdm(range(19396)):\n if a1 not in APA or len(APA[a1]) == 0:\n APA_emb.append(np.concatenate(([a1, a1], PPP_e[a1], [1], [1])))\n # print('no neighbor')\n continue\n for a2 in APA[a1]:\n tmp = [PPP_e[p] for p in APA[a1][a2]]\n tmp = np.sum(tmp, axis=0) / len(APA[a1][a2])\n tmp += PPP_e[a1] + PPP_e[a2]\n tmp /= 3\n if a1 <= a2:\n APA_emb.append(np.concatenate(([a1, a2], tmp, [PPP_ps[a1, a2]], [len(APA[a1][a2])])))\n PPP_emb = np.asarray(APA_emb)\n print(\"compute edge embeddings {} complete\".format(PPP_file))\n\n # pp embedding\n PP_emb = []\n for p in tqdm(range(19396)):\n if p not in PPi or len(PPi[p]) == 0:\n PP_emb.append(np.concatenate(([p, p], PP_e[p], [1], [1])))\n print('no neighbor')\n continue\n for p2 in PPi[p]:\n if p <= p2:\n PP_emb.append(np.concatenate(([p, p2], (PP_e[p]+PP_e[p2])/2, [1], [len(PPi[p])])))\n\n\n PP_emb = np.asarray(PP_emb)\n print(PP_emb.shape)\n print(\"compute edge embeddings {} complete\".format(PP_file))\n\n emb_len = PPP_emb.shape[1] - 2\n np.savez(\"{}edge{}.npz\".format(path, emb_len),\n PAP=PAP_emb, PPP=PPP_emb, PP=PP_emb)\n print('dump npz file {}edge{}.npz complete'.format(path, emb_len))\n pass\n\ndef pathsim(A):\n value = []\n x, y = A.nonzero()\n for i, j in zip(x, y):\n value.append(2 * A[i, j] / (A[i, i] + A[j, j]))\n return sp.coo_matrix((value, (x, y)))\n\n\ndef gen_homoadj(path=\"../data/cora/\"):\n PA_file = \"PA\"\n PP_file = \"PP\"\n\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PP = np.genfromtxt(\"{}{}.txt\".format(path, PP_file),\n dtype=np.int32)\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PP[:, 0] -= 1\n PP[:, 1] -= 1\n\n paper_max = max(PA[:, 0]) + 1\n author_max = max(PA[:, 1]) + 1\n\n PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),\n shape=(paper_max, author_max),\n dtype=np.float32)\n PP = sp.coo_matrix((np.ones(PP.shape[0]), (PP[:, 0], PP[:, 1])),\n shape=(paper_max, paper_max),\n dtype=np.float32)\n\n PP = PP + PP.transpose()\n\n PAP = PA * PA.transpose()\n PPP = PP * PP.transpose()\n\n\n PAP = pathsim(PAP)\n PPP = pathsim(PPP)\n # PP = pathsim(PP)\n\n sp.save_npz(\"{}{}\".format(path, 'PAP_ps.npz'), PAP)\n sp.save_npz(\"{}{}\".format(path, 'PPP_ps.npz'), PPP)\n # sp.save_npz(\"{}{}\".format(path, 'PP_ps.npz'), PP)\n\n # APA = np.hstack([APA.nonzero()[0].reshape(-1,1), APA.nonzero()[1].reshape(-1,1)])\n # APAPA = np.hstack([APAPA.nonzero()[0].reshape(-1,1), APAPA.nonzero()[1].reshape(-1,1)])\n # APCPA = np.hstack([APCPA.nonzero()[0].reshape(-1,1), APCPA.nonzero()[1].reshape(-1,1)])\n\n # np.savetxt(\"{}{}.txt\".format(path, 'APA'),APA,fmt='%u')\n # np.savetxt(\"{}{}.txt\".format(path, 'APAPA'),APA,fmt='%u')\n # np.savetxt(\"{}{}.txt\".format(path, 'APCPA'),APA,fmt='%u')\n\n\ndef gen_walk(path='data/dblp2/'):\n APA_file = \"APA\"\n APAPA_file = \"APAPA\"\n APCPA_file = \"APCPA\"\n\n PA_file = \"PA\"\n PC_file = \"PC\"\n\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PC = np.genfromtxt(\"{}{}.txt\".format(path, PC_file),\n dtype=np.int32)\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PC[:, 0] -= 1\n PC[:, 1] -= 1\n\n paper_max = max(PA[:, 0]) + 1\n author_max = max(PA[:, 1]) + 1\n 
conf_max = max(PC[:, 1]) + 1\n\n PA[:, 0] += author_max\n PC[:, 0] += author_max\n PC[:, 1] += author_max + paper_max\n\n PAi = {}\n APi = {}\n PCi = {}\n CPi = {}\n\n for i in range(PA.shape[0]):\n p = PA[i, 0]\n a = PA[i, 1]\n\n if p not in PAi:\n PAi[p] = set()\n if a not in APi:\n APi[a] = set()\n\n PAi[p].add(a)\n APi[a].add(p)\n\n for i in range(PC.shape[0]):\n p = PC[i, 0]\n c = PC[i, 1]\n\n if p not in PCi:\n PCi[p] = set()\n if c not in CPi:\n CPi[c] = set()\n\n PCi[p].add(c)\n CPi[c].add(p)\n\n APAi = {}\n APCi = {}\n CPAi = {}\n\n for v in APi:\n for p in APi[v]:\n if p not in PAi:\n continue\n for a in PAi[p]:\n if a not in APAi:\n APAi[a] = {}\n if v not in APAi:\n APAi[v] = {}\n\n if v not in APAi[a]:\n APAi[a][v] = set()\n if a not in APAi[v]:\n APAi[v][a] = set()\n\n APAi[a][v].add(p)\n APAi[v][a].add(p)\n\n for v in APi:\n for p in APi[v]:\n if p not in PCi:\n continue\n for c in PCi[p]:\n if v not in APCi:\n APCi[v] = {}\n if c not in CPAi:\n CPAi[c] = {}\n\n if c not in APCi[v]:\n APCi[v][c] = set()\n if v not in CPAi[c]:\n CPAi[c][v] = set()\n\n CPAi[c][v].add(p)\n APCi[v][c].add(p)\n\n # Œ(1) number of walks per node w: 1000; TOO many\n # (2) walk length l: 100;\n # (3) Œvector dimension d: 128 (LINE: 128 for each order);\n # (4) Œneighborhood size k: 7; --default is 5\n # (5) Œsize of negative samples: 5\n # mapping of notation: a:author v:paper i:conference\n l = 100\n w = 1000\n\n import random\n # gen random walk for meta-path APCPA\n with open(\"{}{}.walk\".format(path, APCPA_file), mode='w') as f:\n for _ in range(w):\n for a in APi:\n # print(a)\n result = \"a{}\".format(a)\n for _ in range(int(l / 4)):\n p = random.sample(APi[a], 1)[0]\n c = random.sample(PCi[p], 1)[0]\n result += \" v{} i{}\".format(p, c)\n p = random.sample(CPi[c], 1)[0]\n while p not in PAi:\n p = random.sample(CPi[c], 1)[0]\n a = random.sample(PAi[p], 1)[0]\n result += \" v{} a{}\".format(p, a)\n f.write(result + \"\\n\")\n\n # gen random walk for meta-path APA\n with open(\"{}{}.walk\".format(path, APA_file), mode='w') as f:\n for _ in range(w):\n for a in APi:\n result = \"a{}\".format(a)\n for _ in range(int(l / 2)):\n p = random.sample(APi[a], 1)[0]\n a = random.sample(PAi[p], 1)[0]\n result += \" v{} a{}\".format(p, a)\n f.write(result + \"\\n\")\n ##gen random walk for meta-path APAPA\n # with open(\"{}{}.walk\".format(path,APAPA_file),mode='w') as f:\n # for _ in range(w):\n # for a in APi:\n # result=\"a{}\".format(a)\n # for _ in range(int(l/2)):\n # p = random.sample(APi[a],1)[0]\n # a = random.sample(PAi[p],1)[0]\n # result+=\" v{} a{}\".format(p,a)\n # f.write(result+\"\\n\")\n\n pass\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\n\ndef load_edge_emb(path, schemes, n_dim=17, n_author=5000):\n data = np.load(\"{}edge{}.npz\".format(path, n_dim))\n index = {}\n emb = {}\n for scheme in schemes:\n # print('number of authors: {}'.format(n_author))\n ind = sp.coo_matrix((np.arange(1, data[scheme].shape[0] + 1),\n (data[scheme][:, 0], data[scheme][:, 1])),\n shape=(n_author, n_author),\n dtype=np.long)\n # diag = ind.diagonal()\n # ind = ind - diag\n # ind = ind + ind.transpose() + diag\n\n # ind = torch.LongTensor(ind)\n\n ind 
= ind + ind.T.multiply(ind.T > ind)\n ind = sparse_mx_to_torch_sparse_tensor(ind) # .to_dense()\n\n embedding = np.zeros(n_dim, dtype=np.float32)\n embedding = np.vstack((embedding, data[scheme][:, 2:]))\n emb[scheme] = torch.from_numpy(embedding).float()\n\n index[scheme] = ind.long()\n print('loading edge embedding for {} complete, num of embeddings: {}'.format(scheme, embedding.shape[0]))\n\n return index, emb\n\n\ndef gen_edge_adj(path='../data/cora', K=5):\n \"\"\"\n\n Args:\n path:\n K:\n\n Returns:\n node_neigh:\n edge_idx:\n edge_emb:\n edge_neigh:\n\n \"\"\"\n\n PA_file = \"PA\"\n PC_file = \"PP\"\n\n # print(\"{}{}.txt\".format(path, PA_file))\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PC = np.genfromtxt(\"{}{}.txt\".format(path, PC_file),\n dtype=np.int32)\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PC[:, 0] -= 1\n PC[:, 1] -= 1\n\n paper_max = max(PA[:, 0]) + 1\n author_max = max(PA[:, 1]) + 1\n\n PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),\n shape=(paper_max, author_max),\n dtype=np.int32)\n PC = sp.coo_matrix((np.ones(PC.shape[0]), (PC[:, 0], PC[:, 1])),\n shape=(paper_max, paper_max),\n dtype=np.int32)\n PC = PC + PC.transpose()\n\n PAP = (PA * PA.transpose())\n PPP = (PC * PC.transpose())\n PAP = PAP + (sp.eye(paper_max) > PAP).astype(np.int32)\n PPP = PPP + (sp.eye(paper_max) > PPP).astype(np.int32)\n PP = PC #+ (sp.eye(paper_max) > PC).astype(np.int32)\n\n PAP = sparse_mx_to_torch_sparse_tensor(PAP).to_dense().long()\n PPP = sparse_mx_to_torch_sparse_tensor(PPP).to_dense().long()\n PP = sparse_mx_to_torch_sparse_tensor(PP).to_dense().long()\n\n # select top-K path-count neighbors of each A. If number of neighbors>K, trunc; else upsampling\n adj = {'PAP': PAP, 'PPP': PPP, 'PP': PP}\n schemes = [ 'PAP','PPP', 'PP'] #'PAP',\n index, emb = load_edge_emb(path, schemes, n_dim=130, n_author=paper_max)\n\n node_neighs = {}\n edge_neighs = {}\n node2edge_idxs = {}\n edge_embs = {}\n edge2node_idxs = {}\n edge_node_adjs = {}\n for s in schemes:\n print('----{}----'.format(s))\n aa = adj[s]\n\n # count nonzero degree\n degree = aa.shape[1] - (aa == 0).sum(dim=1)\n print(degree[0])\n print('min degree ', torch.min(degree))\n print('max degree ', torch.max(degree))\n print('avg degree ', torch.mean(degree.float()))\n\n for i in range(degree.shape[0]):\n if degree[i]==0:\n aa[i,i]=1\n degree[i]=1\n degree = degree.numpy()\n ind = torch.argsort(aa, dim=1)\n ind = torch.flip(ind, dims=[1])\n\n node_neigh = torch.cat([ind[i, :K].view(1, -1) if degree[i] >= K\n else torch.cat([ind[i, :degree[i]], ind[i, np.random.choice(degree[i], K-degree[i])]]).view(1, -1)\n for i in range(ind.shape[0])]\n , dim=0)\n print(\"node_neigh.shape \", node_neigh.shape)\n\n mp_index = (index[s]).to_dense()\n # print(mp_index)\n mp_edge = emb[s]\n\n edge_idx_old = mp_index[\n torch.arange(node_neigh.shape[0]).repeat_interleave(K).view(-1),\n node_neigh.contiguous().view(-1)]\n print('max called edge embedding: ',torch.max(torch.unique(edge_idx_old,return_counts=True)[1]))\n print(\"edge_idx_old.shape \", edge_idx_old.shape)\n old2new = dict()\n new2old = dict()\n for e in edge_idx_old.numpy():\n if e not in old2new:\n old2new[e] = len(old2new)\n new2old[old2new[e]] = e\n assert len(old2new) == len(new2old)\n print('number of unique edges ', len(old2new))\n new_embs = [new2old[i] for i in range(len(old2new))]\n new_embs = mp_edge[new_embs]\n\n edge_idx = torch.LongTensor([old2new[i] for i in edge_idx_old.numpy()]).view(-1, K)\n edge_emb = new_embs\n\n uq = 
torch.unique(edge_idx.view(-1), return_counts=True)[1]\n print('max number of neighbors ', max(uq))\n\n # edge->node adj\n edge_node_adj = [[] for _ in range(len(old2new))]\n for i in range(edge_idx.shape[0]):\n for j in range(edge_idx.shape[1]):\n edge_node_adj[edge_idx.numpy()[i, j]].append(i)\n edge_node_adj = [np.unique(i) for i in edge_node_adj]\n edge_node_adj = np.array([xi if len(xi) == 2 else [xi[0], xi[0]] for xi in edge_node_adj])\n # print(max(map(len, edge_node_adj)))\n # edge_node_adj = np.array(edge_node_adj)\n print('edge_node_adj.shape ', edge_node_adj.shape)\n # print(edge_node_adj[0])\n # edges of line graph\n line_graph_edges = torch.cat(\n [edge_idx.repeat_interleave(K).reshape(-1, 1), edge_idx.repeat(K, 1).reshape(-1, 1),\n torch.arange(node_neigh.shape[0]).repeat_interleave(K * K).view(-1, 1)], dim=1).numpy()\n assert line_graph_edges.shape[1] == 3\n print(\"line_graph_edges.shape \", line_graph_edges.shape) # [edge1, edge2, node ]\n\n # construct line graph\n import pandas as pd\n df = pd.DataFrame(line_graph_edges)\n edge_neigh = df.groupby(0)[1, 2].apply(pd.Series.tolist) # group by edge1; [ [e2,n], .. ]\n\n max_len = max([len(i) for i in edge_neigh])\n print('max degree of edge: ', max_len)\n print('edge of max degree: ', np.argmax([len(i) for i in edge_neigh]))\n\n edge_neigh_result = []\n edge_idx_result = []\n for e, neigh in enumerate(edge_neigh):\n neigh = np.asarray(neigh)\n idx = np.random.choice(neigh.shape[0], max_len)\n edge_neigh_result.append(neigh[idx, 0])\n edge_idx_result.append(neigh[idx, 1])\n edge_neigh = np.vstack(edge_neigh_result)\n edge2node = np.vstack(edge_idx_result)\n\n print(\"edge_neigh.shape \", edge_neigh.shape)\n print(\"edge2node.shape \", edge2node.shape)\n\n edge_neighs[s] = edge_neigh\n node_neighs[s] = node_neigh\n node2edge_idxs[s] = edge_idx\n edge_embs[s] = edge_emb\n edge2node_idxs[s] = edge2node\n edge_node_adjs[s] = edge_node_adj\n #\n np.savez(\"{}edge_neighs_{}.npz\".format(path, K),\n PAP=edge_neighs['PAP'], PPP=edge_neighs['PPP'], PP=edge_neighs['PP'], )\n print('dump npz file {}edge_neighs.npz complete'.format(path))\n\n np.savez(\"{}node_neighs_{}.npz\".format(path, K),\n PAP=node_neighs['PAP'], PPP=node_neighs['PPP'],PP=node_neighs['PP'])\n print('dump npz file {}node_neighs.npz complete'.format(path))\n\n np.savez(\"{}node2edge_idxs_{}.npz\".format(path, K),\n PAP=node2edge_idxs['PAP'], PPP=node2edge_idxs['PPP'],PP=node2edge_idxs['PP'])\n print('dump npz file {}node2edge_idxs.npz complete'.format(path))\n\n np.savez(\"{}edge_embs_{}.npz\".format(path, K),\n PAP=edge_embs['PAP'], PPP=edge_embs['PPP'], PP=edge_embs['PP'])\n print('dump npz file {}edge_embs.npz complete'.format(path))\n\n np.savez(\"{}edge2node_idxs_{}.npz\".format(path, K),\n PAP=edge2node_idxs['PAP'], PPP=edge2node_idxs['PPP'], PP=edge2node_idxs['PP'])\n print('dump npz file {}edge2node_idxs.npz complete'.format(path))\n\n np.savez(\"{}edge_node_adjs_{}.npz\".format(path, K),\n PAP=edge_node_adjs['PAP'], PPP=edge_node_adjs['PPP'], PP=edge_node_adjs['PP'])\n print('dump npz file {}edge_node_adjs.npz complete'.format(path))\n\n pass\n\n\nif __name__ == '__main__':\n # gen_homograph()\n # gen_homoadj()\n # dump_edge_emb_undirected(emb_len=128)\n gen_edge_adj(path='../data/cora/', K=5)\n" ]
[ [ "torch.LongTensor", "torch.nn.functional.multilabel_soft_margin_loss", "torch.nn.functional.l1_loss", "numpy.abs", "numpy.arange", "numpy.eye", "torch.nn.functional.cross_entropy", "torch.from_numpy", "numpy.argmax", "torch.FloatTensor", "numpy.random.permutation", "sklearn.metrics.f1_score", "numpy.array_split", "sklearn.metrics.accuracy_score" ], [ "torch.max", "numpy.asarray", "pandas.DataFrame", "numpy.concatenate", "torch.unique", "scipy.sparse.coo_matrix", "torch.Size", "numpy.unique", "numpy.arange", "torch.from_numpy", "torch.arange", "torch.argsort", "numpy.zeros", "numpy.random.choice", "torch.min", "torch.flip", "numpy.sum", "scipy.sparse.eye", "numpy.ones", "torch.sparse.FloatTensor", "numpy.vstack" ] ]
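The gen_edge_adj code in the row above reduces each metapath count matrix to a fixed-width table: the top-K neighbors of every node ranked by path count, padded by resampling whenever a node has fewer than K neighbors. Below is a minimal, self-contained sketch of that selection step; the names (topk_neighbors, path_counts) are illustrative, and torch.randint stands in for the row's np.random.choice upsampling.

import torch

def topk_neighbors(path_counts: torch.Tensor, K: int) -> torch.Tensor:
    # path_counts: dense (n, n) metapath count matrix, like adj[s] in the row
    n = path_counts.shape[0]
    degree = (path_counts != 0).sum(dim=1)
    for i in torch.nonzero(degree == 0).view(-1):
        path_counts[i, i] = 1          # isolated nodes get a self-loop, as the row code does
        degree[i] = 1
    order = torch.argsort(path_counts, dim=1, descending=True)
    rows = []
    for i in range(n):
        d = int(degree[i])
        if d >= K:                     # enough neighbors: truncate to the top K
            rows.append(order[i, :K])
        else:                          # too few: pad by resampling the d real neighbors
            pad = order[i, torch.randint(0, d, (K - d,))]
            rows.append(torch.cat([order[i, :d], pad]))
    return torch.stack(rows)           # (n, K) neighbor index table

pc = torch.tensor([[0., 3., 1.], [2., 0., 0.], [0., 0., 0.]])
print(topk_neighbors(pc, K=2))         # each row has exactly K entries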
badeaadi/Computer_Vision
[ "843bdd6b6b4ea5b2332a16962195fee4cf7577f2" ]
[ "build_mosaic.py" ]
[ "\"\"\"\r\n MOSAIC PROJECT\r\n \r\n Badea Adrian Catalin, group 334, 3rd year, FMI\r\n\"\"\"\r\n\r\nimport os\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pdb\r\nimport glob\r\n\r\nfrom add_pieces_mosaic import *\r\nfrom parameters import *\r\n\r\n\r\ndef load_pieces(params: Parameters):\r\n \r\n # read all N pieces used for the mosaic from the corresponding directory\r\n # all N images have the same size H x W x C, where:\r\n # H = height, W = width, C = number of channels (C=1 grayscale, C=3 color)\r\n # the function stores pieseMozaic = an N x H x W x C matrix in params\r\n # pieseMozaic[i, :, :, :] is piece number i\r\n images = [cv.imread(file) for file in glob.glob(params.small_images_dir + \"*\" + params.image_type)]\r\n \r\n \r\n if params.grayscale:\r\n images = [cv.cvtColor(image, cv.COLOR_BGR2GRAY) for image in images]\r\n images = [np.stack((image, image, image), axis=-1) for image in images]\r\n \r\n print(len(images))\r\n images = np.asarray(np.asarray(images)[:, :, :, :3])\r\n \r\n \r\n params.small_images = images\r\n params.small_images_np = np.asarray(images)\r\n \r\n print(params.small_images_np.shape)\r\n\r\n\r\n # read the images from the directory\r\n\r\n\r\n if params.show_small_images:\r\n for i in range(10):\r\n for j in range(10):\r\n plt.subplot(10, 10, i * 10 + j + 1)\r\n # OpenCV reads images in BGR format, matplotlib reads images in RGB format\r\n im = images[i * 10 + j].copy()\r\n # BGR to RGB, swap the channels\r\n im = im[:, :, [2, 1, 0]]\r\n plt.imshow(im)\r\n plt.show()\r\n\r\n\r\n\r\ndef compute_dimensions(params: Parameters):\r\n \r\n # compute the mosaic dimensions\r\n # also build the reference image resized to those dimensions\r\n\r\n height, width = params.small_images_np.shape[1:3]\r\n \r\n # print(height, width)\r\n # 28 40 for the flowers collection\r\n \r\n height_image, width_image = np.asarray(params.image).shape[:2]\r\n \r\n # print(height_image, width_image)\r\n # 183 275 for the ferrari image\r\n \r\n ratio_image = width_image / height_image\r\n params.num_pieces_vertical = int(params.num_pieces_horizontal * width / height / ratio_image)\r\n\r\n \r\n # print(params.num_pieces_vertical)\r\n # 95 for ferrari\r\n \r\n # resize the image\r\n new_height = params.num_pieces_vertical * height\r\n new_width = params.num_pieces_horizontal * width\r\n \r\n print(new_height, new_width)\r\n # 2660 4000\r\n \r\n params.image_resized = cv.resize(params.image, (new_width, new_height))\r\n \r\n params.small_images = np.asarray(params.small_images)\r\n params.image_resized = np.asarray(params.image_resized)\r\n \r\n # auximage = cv.cvtColor(params.image_resized, cv.COLOR_RGB2BGR)\r\n # cv.imwrite('masina.png', auximage)\r\n\r\n\r\ndef build_mosaic(params: Parameters):\r\n # load the images the mosaic will be built from\r\n load_pieces(params)\r\n # compute the mosaic dimensions\r\n compute_dimensions(params)\r\n\r\n\r\n img_mosaic = None\r\n \r\n if params.layout == 'caroiaj':\r\n if params.hexagon is True:\r\n img_mosaic = add_pieces_hexagon(params)\r\n else:\r\n img_mosaic = add_pieces_grid(params)\r\n \r\n elif params.layout == 'aleator':\r\n img_mosaic = add_pieces_random(params)\r\n else:\r\n print('Wrong option!')\r\n exit(-1)\r\n\r\n return img_mosaic\r\n\r\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.asarray", "numpy.stack", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ] ]
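compute_dimensions in the row above keeps the mosaic's aspect ratio equal to the reference image's by deriving the vertical piece count from the horizontal one. The arithmetic below checks against the numbers printed in the row's comments (28x40 pieces, a 183x275 reference image); num_pieces_horizontal = 100 is an assumption, but it is the value that reproduces the printed 95 and 2660x4000.

piece_h, piece_w = 28, 40          # small-image size quoted for the flowers collection
img_h, img_w = 183, 275            # reference ("ferrari") image size quoted in the row
num_pieces_horizontal = 100        # hypothetical, but it reproduces the printed numbers

ratio_image = img_w / img_h
num_pieces_vertical = int(num_pieces_horizontal * piece_w / piece_h / ratio_image)

new_height = num_pieces_vertical * piece_h
new_width = num_pieces_horizontal * piece_w
print(num_pieces_vertical, new_height, new_width)   # 95 2660 4000, matching the row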
roy881020/pytorch-yolo2
[ "6b2538b0ec314486479173920e6cacafbf5cd9e6" ]
[ "dataset.py" ]
[ "#!/usr/bin/python\n# encoding: utf-8\n\nimport os\nimport random\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nfrom utils import read_truths_args, read_truths\nfrom image import *\n\n\nclass listDataset(Dataset):\n\n def __init__(self, root, shape=None, shuffle=True, transform=None, target_transform=None, train=False, seen=0, batch_size=64, num_workers=4):\n with open(root, 'r') as file:\n self.lines = file.readlines()\n\n if shuffle:\n random.shuffle(self.lines)\n\n self.nSamples = len(self.lines)\n self.transform = transform\n self.target_transform = target_transform\n self.train = train\n self.shape = shape\n self.seen = seen\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n imgpath = self.lines[index].rstrip()\n\n if self.train and index % 64== 0:\n if self.seen < 4000*64:\n width = 13*32\n self.shape = (width, width)\n elif self.seen < 8000*64:\n width = (random.randint(0,3) + 13)*32\n self.shape = (width, width)\n elif self.seen < 12000*64:\n width = (random.randint(0,5) + 12)*32\n self.shape = (width, width)\n elif self.seen < 16000*64:\n width = (random.randint(0,7) + 11)*32\n self.shape = (width, width)\n else: # self.seen < 20000*64:\n width = (random.randint(0,9) + 10)*32\n self.shape = (width, width)\n\n if self.train:\n jitter = 0.2\n hue = 0.1\n saturation = 1.5 \n exposure = 1.5\n\n img, label = load_data_detection(imgpath, self.shape, jitter, hue, saturation, exposure)\n label = torch.from_numpy(label)\n else:\n img = Image.open(imgpath).convert('RGB')\n if self.shape:\n img = img.resize(self.shape)\n \n labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')\n label = torch.zeros(50*5)\n #if os.path.getsize(labpath):\n #tmp = torch.from_numpy(np.loadtxt(labpath))\n try:\n tmp = torch.from_numpy(read_truths_args(labpath, 8.0/img.width).astype('float32'))\n except Exception:\n tmp = torch.zeros(1,5)\n #tmp = torch.from_numpy(read_truths(labpath))\n tmp = tmp.view(-1)\n tsz = tmp.numel()\n #print('labpath = %s , tsz = %d' % (labpath, tsz))\n if tsz > 50*5:\n label = tmp[0:50*5]\n elif tsz > 0:\n label[0:tsz] = tmp\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n label = self.target_transform(label)\n\n self.seen = self.seen + self.num_workers\n return (img, label)\n" ]
[ [ "torch.from_numpy", "torch.zeros" ] ]
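The resizing policy in listDataset.__getitem__ above is a staged multi-scale schedule keyed on self.seen (samples processed) in units of 64-image batches, re-drawn every 64th item. Here it is isolated as a pure function so the width ranges are visible at a glance; the function name is illustrative, not part of the row's code.

import random

def multiscale_width(seen: int) -> int:
    # widths are always multiples of 32, matching YOLOv2's stride
    if seen < 4000 * 64:
        return 13 * 32                             # fixed 416 for the first ~4000 batches
    if seen < 8000 * 64:
        return (random.randint(0, 3) + 13) * 32    # 416..512
    if seen < 12000 * 64:
        return (random.randint(0, 5) + 12) * 32    # 384..544
    if seen < 16000 * 64:
        return (random.randint(0, 7) + 11) * 32    # 352..576
    return (random.randint(0, 9) + 10) * 32        # 320..608

print(multiscale_width(0), multiscale_width(20000 * 64))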
fereshteh1992/sciann-applications
[ "8b2c173d226d769d7fb800359723025ec3ba91e6" ]
[ "SciANN-Vibrations/PlateVibration/membrane_inv.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport sciann as sn\nfrom sciann.utils.math import diff, sign, sin\nfrom gen_dataset import gen_grid\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib import gridspec\n\nLx = 1.0\nLy = 1.0\nT_Final = 0.5\n\nNX = 40\nNY = 40\nNT = 20\n\nEPOCHS = 2000\nBATCH = 1000\n\nx_data, y_data, t_data = np.meshgrid(\n np.linspace(0, Lx, NX), \n np.linspace(0, Ly, NY), \n np.linspace(0, T_Final, NT)\n)\nx_data = x_data.reshape(-1, 1)\ny_data = y_data.reshape(-1, 1)\nt_data = t_data.reshape(-1, 1)\n\n\nLambd11 = np.pi * np.sqrt(2)\nu_data = np.sin(np.pi * x_data) * np.sin(np.pi * y_data) * np.cos(Lambd11 * t_data)\n\n\n\n\nx = sn.Variable('x', dtype='float64')\ny = sn.Variable('y', dtype='float64')\nt = sn.Variable('t', dtype='float64')\nu = sn.Functional('u', [x, y, t], 4*[20], 'sin')\n\nc = sn.Parameter(np.random.rand(), inputs=[x,y,t], name='c')\n\n\nL1 = c * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(u, t, order=2)\n\nm = sn.SciModel(\n [x, y, t], \n [sn.PDE(L1), sn.Data(u)],\n # load_weights_from='membrane_inv-weights.hdf5'\n)\n\ninputs = [x_data, y_data, t_data]\ntargets = ['zeros', u_data] \n\nh = m.train(\n inputs, targets, \n batch_size=BATCH, \n learning_rate=0.001,\n reduce_lr_after=50,\n adaptive_weights={'freq':True},\n epochs=EPOCHS,\n log_parameters={'parameters': c, 'freq':1}\n)\n\nm.save_weights('membrane_inv-weights.hdf5')\n\n\nx_test, y_test, t_test = np.meshgrid(\n np.linspace(0, Lx, NX*3), \n np.linspace(0, Ly, NY*3), \n np.linspace(0, T_Final, NT*3)\n)\n\nu_pred = u.eval(m, [x_test, y_test, t_test])\n\nLambd11 = np.pi * np.sqrt(2)\nu_analytic = np.sin(np.pi * x_test) * np.sin(np.pi * y_test) * np.cos(Lambd11 * t_test)\n\n\nfig = plt.figure(figsize=plt.figaspect(0.6))\ngs = gridspec.GridSpec(1, 2)\n\nax = fig.add_subplot(gs[0], projection='3d')\n# ax.plot_wireframe(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,0])\n# ax.plot_wireframe(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,10])\nsurf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,-1], cmap='coolwarm')\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nax.set_zlabel('$u$')\nfig.colorbar(surf, shrink=0.75, orientation='horizontal', label='$u$')\n\nax = fig.add_subplot(gs[1], projection='3d')\nsurf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], np.abs(u_analytic[:,:,-1]-u_pred[:,:,-1]), vmin=0., cmap='hot_r')\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nax.set_zlabel('$|u-u^*|$', labelpad=10)\ncbar = fig.colorbar(surf, shrink=0.75, orientation='horizontal',label='$|u-u^*|$')\ncbar.formatter.set_powerlimits((0, 0))\n# cbar.ax.set_xticklabels(np.linspace(0, 0.0012, 5), rotation=90, )\n\nplt.savefig('membrane_inv-results.pdf', dpi=300)\n\nfig = plt.figure(figsize=(4,3))\nplt.semilogx(h.history['parameter_epochs'], np.concatenate(h.history['parameter_c']).flatten())\nplt.xlabel('epochs')\nplt.ylabel('$c$')\nplt.title('$c^* = 1.0$')\nplt.subplots_adjust(0.2,0.15,0.8,0.85)\nplt.savefig('membrane_inv-resylts2.pdf', dpi=300)\n" ]
[ [ "matplotlib.pyplot.figaspect", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.linspace", "numpy.abs", "matplotlib.pyplot.figure", "numpy.cos", "matplotlib.pyplot.savefig", "numpy.sin", "numpy.concatenate", "matplotlib.gridspec.GridSpec", "numpy.random.rand", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
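membrane_inv.py above fits a Parameter c in the residual L1 = c*(u_xx + u_yy) - u_tt against data sampled from the (1,1) mode of the unit membrane, u = sin(pi x) sin(pi y) cos(sqrt(2) pi t) with Lambd11 = pi*sqrt(2). A quick numpy check, independent of sciann, that this analytic mode satisfies the PDE exactly at c = 1, which is the value the inverse fit should recover:

import numpy as np

rng = np.random.default_rng(0)
x, y, t = rng.random(3)           # an arbitrary point in the unit domain
lam = np.pi * np.sqrt(2.0)        # Lambd11 from the row

u    = np.sin(np.pi * x) * np.sin(np.pi * y) * np.cos(lam * t)
u_xx = -(np.pi ** 2) * u          # d2/dx2 of sin(pi x) brings down -(pi^2)
u_yy = -(np.pi ** 2) * u
u_tt = -(lam ** 2) * u            # = -2 pi^2 u, since lam^2 = 2 pi^2

c = 1.0
residual = c * (u_xx + u_yy) - u_tt
print(abs(residual) < 1e-12)      # True: the residual vanishes at c = 1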
wakananai/MBLLEN
[ "f3ce1663235d060e48e36b5bdf62e793dd75e37d" ]
[ "main/test.py" ]
[ "from glob import glob\r\nimport numpy as np\r\nimport scipy\r\nimport keras\r\nimport os\r\nimport Network\r\nimport utls\r\nimport time\r\nimport cv2\r\nimport argparse\r\nfrom tqdm import tqdm\r\n\r\nfrom keras.backend.tensorflow_backend import set_session\r\nimport tensorflow as tf\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\r\nsess = tf.Session(config=config)\r\nset_session(sess) # set this TensorFlow session as the default session for Keras\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--input\", \"-i\", type=str, default=None, help='test image folder')\r\nparser.add_argument(\"--result\", \"-r\", type=str, default=None, help='result folder')\r\nparser.add_argument(\"--model\", \"-m\", type=str, default='Syn_img_lowlight_withnoise', help='model name')\r\nparser.add_argument(\"--com\", \"-c\", type=int, default=0, help='output with/without origional image and mid result')\r\nparser.add_argument(\"--highpercent\", \"-hp\", type=int, default=95, help='should be in [85,100], linear amplification')\r\nparser.add_argument(\"--lowpercent\", \"-lp\", type=int, default=5, help='should be in [0,15], rescale the range [p%,1] to [0, 1]')\r\nparser.add_argument(\"--gamma\", \"-g\", type=int, default=8, help='should be in [6,10], increase the saturability')\r\nparser.add_argument(\"--maxrange\", \"-mr\", type=int, default=8, help='linear amplification range')\r\nparser.add_argument(\"--filelist\", type=str, default=None, help='file list')\r\narg = parser.parse_args()\r\n\r\nresult_folder = arg.result\r\nif not os.path.isdir(result_folder):\r\n os.makedirs(result_folder)\r\n\r\nif arg.input and arg.filelist:\r\n exit(f'only one arg can be activated, either --input ({arg.input}) or --filelist ({arg.filelist})')\r\n\r\nif arg.input:\r\n input_folder = arg.input\r\n path = sorted(glob(input_folder+'/*.*'))\r\nelif arg.filelist:\r\n with open(arg.filelist, 'r') as f:\r\n path = []\r\n for i in f.readlines():\r\n path.append(i.rstrip())\r\n path = sorted(path)\r\nelse:\r\n exit(f'must provide either --input or --filelist')\r\n\r\n\r\nmodel_name = arg.model\r\nmbllen = Network.build_mbllen((None, None, 3))\r\nmbllen.load_weights('../models/'+model_name+'.h5')\r\nopt = keras.optimizers.Adam(lr=2 * 1e-04, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\r\nmbllen.compile(loss='mse', optimizer=opt)\r\n\r\nflag = arg.com\r\nlowpercent = arg.lowpercent\r\nhighpercent = arg.highpercent\r\nmaxrange = arg.maxrange/10.\r\nhsvgamma = arg.gamma/10.\r\n\r\ncrop_size=512\r\nmin_crop_size = 32\r\n\r\nfor i in tqdm(range(len(path))):\r\n img_A_path = path[i]\r\n img_A = utls.imread_color(img_A_path)\r\n img_h, img_w, _ = img_A.shape\r\n \r\n\r\n starttime = time.clock()\r\n \r\n shift_X = shift_Y = crop_size\r\n out_plane = np.zeros((img_h, img_w, 3), dtype='float32')\r\n \r\n for x in range(0, img_w, shift_X):\r\n for y in range(0, img_h, shift_Y): \r\n X_upper = min(x + shift_X, img_w)\r\n Y_upper = min(y + shift_Y, img_h)\r\n X_lower = max(0, X_upper-shift_X)\r\n Y_lower = max(0, Y_upper-shift_Y) \r\n input_img = np.zeros((crop_size, crop_size,3))\r\n size_Y = Y_upper - Y_lower\r\n size_X = X_upper - X_lower\r\n \r\n input_img[:size_Y,:size_X,:] = img_A[Y_lower:Y_upper, X_lower:X_upper, :]\r\n input_img = input_img[np.newaxis, :]\r\n \r\n out_pred = mbllen.predict(input_img)\r\n\r\n out_plane[Y_lower:Y_upper, X_lower:X_upper, :] = out_pred[0, :size_Y, :size_X, :3]\r\n endtime = time.clock()\r\n # print('The ' + str(i+1)+'th 
image\\'s Time:' +str(endtime-starttime)+'s.')\r\n \r\n \r\n # fake_B = out_pred[0, :, :, :3]\r\n fake_B = out_plane\r\n fake_B_o = fake_B\r\n\r\n gray_fake_B = fake_B[:, :, 0] * 0.299 + fake_B[:, :, 1] * 0.587 + fake_B[:, :, 1] * 0.114\r\n percent_max = sum(sum(gray_fake_B >= maxrange))/sum(sum(gray_fake_B <= 1.0))\r\n # print(percent_max)\r\n max_value = np.percentile(gray_fake_B[:], highpercent)\r\n if percent_max < (100-highpercent)/100.:\r\n scale = maxrange / max_value\r\n fake_B = fake_B * scale\r\n fake_B = np.minimum(fake_B, 1.0)\r\n\r\n gray_fake_B = fake_B[:,:,0]*0.299 + fake_B[:,:,1]*0.587 + fake_B[:,:,1]*0.114\r\n sub_value = np.percentile(gray_fake_B[:], lowpercent)\r\n fake_B = (fake_B - sub_value)*(1./(1-sub_value))\r\n\r\n imgHSV = cv2.cvtColor(fake_B, cv2.COLOR_RGB2HSV)\r\n H, S, V = cv2.split(imgHSV)\r\n S = np.power(S, hsvgamma)\r\n imgHSV = cv2.merge([H, S, V])\r\n fake_B = cv2.cvtColor(imgHSV, cv2.COLOR_HSV2RGB)\r\n fake_B = np.minimum(fake_B, 1.0)\r\n\r\n if flag:\r\n outputs = np.concatenate([img_A[0,:,:,:], fake_B_o, fake_B], axis=1)\r\n else:\r\n outputs = fake_B\r\n\r\n filename = os.path.basename(path[i])\r\n img_name = result_folder+'/' + filename\r\n # scipy.misc.toimage(outputs * 255, high=255, low=0, cmin=0, cmax=255).save(img_name)\r\n outputs = np.minimum(outputs, 1.0)\r\n outputs = np.maximum(outputs, 0.0)\r\n utls.imwrite(img_name, outputs)" ]
[ [ "numpy.minimum", "numpy.maximum", "numpy.power", "numpy.percentile", "tensorflow.ConfigProto", "numpy.concatenate", "tensorflow.Session", "numpy.zeros" ] ]
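test.py in this row handles arbitrarily large inputs by sliding a border-clamped crop_size window over the image, so edge tiles are shifted back inside the frame (overlapping their neighbors) rather than zero-padded, except when the whole image is smaller than one tile. A standalone sketch of that window arithmetic, with a dummy predict standing in for mbllen.predict; names here are illustrative.

import numpy as np

def tiled_apply(img, crop_size, predict):
    h, w, _ = img.shape
    out = np.zeros_like(img, dtype='float32')
    for x in range(0, w, crop_size):
        for y in range(0, h, crop_size):
            x_hi, y_hi = min(x + crop_size, w), min(y + crop_size, h)
            x_lo, y_lo = max(0, x_hi - crop_size), max(0, y_hi - crop_size)  # clamp at the border
            tile = np.zeros((crop_size, crop_size, 3), img.dtype)            # pad only if img < tile
            sy, sx = y_hi - y_lo, x_hi - x_lo
            tile[:sy, :sx] = img[y_lo:y_hi, x_lo:x_hi]
            pred = predict(tile[np.newaxis])[0]
            out[y_lo:y_hi, x_lo:x_hi] = pred[:sy, :sx, :3]
    return out

# identity "model" just to exercise the loop
out = tiled_apply(np.random.rand(700, 900, 3).astype('float32'), 512,
                  lambda batch: batch)
print(out.shape)  # (700, 900, 3): every pixel covered exactly once on write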
rheiland/pc4covid19_v6_test
[ "1f0dc29a787b008436da0299bf93168af9520925" ]
[ "bin/substrates-v4-new.py" ]
[ "# substrates Out:Plots\n\nimport os, math\nfrom pathlib import Path\nfrom ipywidgets import Layout, Label, Text, Checkbox, Button, BoundedIntText, HBox, VBox, Box, \\\n FloatText, Dropdown, SelectMultiple, RadioButtons, interactive\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.patches import Circle, Ellipse, Rectangle\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.colors as mplc\nfrom matplotlib import gridspec\nfrom collections import deque\nfrom pyMCDS import pyMCDS\nimport numpy as np\nimport scipy.io\nimport xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html\nimport glob\nimport platform\nimport zipfile\nfrom debug import debug_view \nimport warnings\nimport traceback\nimport sys\n\nhublib_flag = True\nif platform.system() != 'Windows':\n try:\n# print(\"Trying to import hublib.ui\")\n from hublib.ui import Download\n except:\n hublib_flag = False\nelse:\n hublib_flag = False\n\n#warnings.warn(message, mplDeprecation, stacklevel=1)\nwarnings.filterwarnings(\"ignore\")\n\nclass SubstrateTab(object):\n\n def __init__(self):\n \n self.tb_count = 0\n\n self.output_dir = '.'\n # self.output_dir = 'tmpdir'\n\n # These are recomputed below \n # basic_length = 12.5\n basic_length = 12.0\n self.figsize_width_substrate = 15.0 # allow extra for colormap\n self.figsize_height_substrate = basic_length\n\n self.figsize_width_2Dplot = basic_length\n self.figsize_height_2Dplot = basic_length\n\n # self.width_substrate = basic_length # allow extra for colormap\n # self.height_substrate = basic_length\n\n self.figsize_width_svg = basic_length\n self.figsize_height_svg = basic_length\n # self.width_svg = basic_length\n # self.height_svg = basic_length\n\n self.figsize_width_substrate = 15.0 # allow extra for colormap\n self.figsize_height_substrate = 12.0\n self.figsize_width_svg = 12.0\n self.figsize_height_svg = 12.0\n\n self.axis_label_fontsize = 15\n\n self.ax0 = None\n self.ax1 = None\n\n self.custom_data_plotted = False\n self.custom_data_set1 = False # live, infected, dead\n self.custom_data_set2 = False # mac, neut, cd8\n\n # self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot\n\n self.first_time = True\n self.modulo = 1\n\n self.use_defaults = True\n\n self.svg_delta_t = 1\n self.substrate_delta_t = 1\n self.svg_frame = 1\n self.substrate_frame = 1\n\n self.customized_output_freq = False\n self.therapy_activation_time = 1000000\n self.max_svg_frame_pre_therapy = 1000000\n self.max_substrate_frame_pre_therapy = 1000000\n\n self.svg_xmin = 0\n\n # Probably don't want to hardwire these if we allow changing the domain size\n # self.svg_xrange = 2000\n # self.xmin = -1000.\n # self.xmax = 1000.\n # self.ymin = -1000.\n # self.ymax = 1000.\n # self.x_range = 2000.\n # self.y_range = 2000.\n\n self.show_nucleus = False\n self.show_edge = True\n self.alpha = 0.7\n\n substrates_default_disabled_flag = True # True = disable them by default; False=enable them\n\n # initial value\n self.field_index = 4\n # self.field_index = self.substrate_choice.value + 4\n\n self.skip_cb = True\n\n # define dummy size of mesh (set in the tool's primary module)\n self.numx = 0\n self.numy = 0\n\n # ------- custom data for cells ----------\n self.xval = np.empty([1])\n # print('sub, init: len(self.xval) = ',len(self.xval))\n self.yval1 = np.empty([1])\n self.yval2 = 
np.empty([1])\n self.yval3 = np.empty([1])\n self.yval4 = np.empty([1])\n self.yval5 = np.empty([1])\n self.yval6 = np.empty([1])\n self.tname = \"time\"\n self.yname = 'Y'\n # self.num_2D_plots = 1\n\n\n self.title_str = ''\n\n tab_height = '600px'\n tab_height = '500px'\n constWidth = '180px'\n constWidth2 = '150px'\n tab_layout = Layout(width='900px', # border='2px solid black',\n height=tab_height, ) #overflow_y='scroll')\n\n max_frames = 1 \n # NOTE: The \"interactive\" widget contains the plot(s). Whenever any plot needs to be updated,\n # its \"update\" method needs to be invoked. So, if you notice multiple, flashing \n # plot updates occuring, you can search for all instances of \"self.i_plot.update()\" and do\n # a debug print to see where they occur.\n\n # self.mcds_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False) \n # self.i_plot = interactive(self.plot_plots, frame=(0, max_frames), continuous_update=False) \n self.i_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False) \n\n # \"plot_size\" controls the size of the tab height, not the plot (rf. figsize for that)\n # NOTE: the Substrates Plot tab has an extra row of widgets at the top of it (cf. Cell Plots tab)\n svg_plot_size = '700px'\n svg_plot_size = '900px'\n plot_area_width = '1500px'\n plot_area_height = '900px'\n self.i_plot.layout.width = plot_area_width \n self.i_plot.layout.height = plot_area_height \n\n self.fontsize = 20\n # self.fontsize = 30\n\n\n #============ new GUI =================\n self.max_frames = BoundedIntText(value=0,description='# cell frames',min=0,max=999999,layout=Layout(width='160px')) # border='1px solid black',\n self.cells_toggle = Checkbox(description='Cells',value=True, style = {'description_width': 'initial'}, layout=Layout(width='110px', )) #border='1px solid black'))\n self.cell_edges_toggle = Checkbox(description='edge',value=self.show_edge, style = {'description_width': 'initial'}, layout=Layout(width='110px',)) # align_items='stretch',\n \n\n layout1 = Layout(display='flex',\n flex_flow='row',\n align_items='center',\n width='25%', ) #border='1px solid black')\n hb1=HBox([self.cells_toggle,self.cell_edges_toggle ]) # layout=layout1)\n # cells_vbox=VBox([self.max_frames, hb1], layout=Layout(width='350px',border='1px solid black',))\n cells_vbox=VBox([self.max_frames, hb1], layout=Layout(width='320px'))\n #--------------------------\n self.substrates_toggle = Checkbox(description='Substrates', style = {'description_width': 'initial'})\n\n # self.field_min_max = {'assembled_virion':[0.,1.,False] }\n # hacky I know, but make a dict that's got (key,value) reversed from the dict in the Dropdown below\n\n # ipywidgets 8 docs: Selection widgets no longer accept a dictionary of options. 
Pass a list of key-value pairs instead.\n self.field_dict = {0:'director signal', 1:'cargo signal'}\n\n # self.substrate_choice = Dropdown(options={'assembled_virion': 0},layout=Layout(width='150px'))\n # options will be replaced below, based on initial.xml\n self.substrate_choice = Dropdown(\n options={'director signal': 0, 'cargo signal':1},\n value=0,\n disabled = substrates_default_disabled_flag,\n # description='Field',\n layout=Layout(width='150px')\n )\n self.colormap_dd = Dropdown(options=['viridis', 'jet', 'YlOrRd'],value='YlOrRd',layout=Layout(width='200px'))\n hb2 = HBox([self.substrates_toggle,self.substrate_choice,self.colormap_dd], layout=Layout(width='350px', )) # border='1px solid black',))\n\n self.colormap_fixed_toggle = Checkbox(description='Fix',style = {'description_width': 'initial'}, layout=Layout(width='60px'))\n constWidth2 = '160px'\n self.colormap_min = FloatText(\n description='Min',\n value=0,\n step = 0.1, \n layout=Layout(width=constWidth2),)\n self.colormap_max = FloatText(\n description='Max',\n value=38,\n step = 0.1,\n layout=Layout(width=constWidth2),)\n # hb3=HBox([colormap_fixed_toggle,colormap_min,colormap_max], layout=Layout(width='500px',justify_content='flex-start'))\n hb3=HBox([self.colormap_fixed_toggle,self.colormap_min,self.colormap_max], layout=Layout(justify_content='flex-start')) # border='1px solid black',\n\n substrate_vbox=VBox([hb2, hb3], layout=Layout(width='500px'))\n\n #--------------------------\n self.custom_data_toggle = Checkbox(\n description='2D plot',\n disabled=True,\n value=False,\n style = {'description_width': 'initial'},\n layout=Layout(width='130px', ) # border='1px solid black',)\n # layout=Layout(width=constWidth2),\n )\n\n #--------------\n # self.custom_data_update_button= Button(\n # description='Update', \n # disabled=True,)\n # # layout=Layout(width='120px',) ,style = {'description_width': 'initial'})\n # self.custom_data_update_button.style.button_color = 'lightgreen'\n\n # custom_data_vbox1 = VBox([self.custom_data_toggle, self.custom_data_update_button], layout=Layout(justify_content='center',border='1px solid black',)) # width='330px',\n custom_data_vbox1 = VBox([self.custom_data_toggle, ], layout=Layout(justify_content='center')) # ,border='1px solid black', width='330px',\n\n # self.custom_data_choice = SelectMultiple(\n # # options=['assembled_virion','susceptible','infected', 'dead'],\n # options=['live','infected', 'dead'],\n # disabled=True,\n # value=['live'], \n # rows=3, \n # layout=Layout(width='160px', ) )\n\n\n self.custom_data_choice = RadioButtons(\n options=['live,infected,dead', 'mac,neut,cd8'],\n value='live,infected,dead', \n# layout={'width': 'max-content'}, # If the items' names are long\n disabled=True\n )\n\n def custom_data_choice_cb(b):\n self.update_custom_data()\n self.i_plot.update()\n\n self.custom_data_choice.observe(custom_data_choice_cb)\n\n\n self.custom_data_wait = Label('')\n # self.custom_data_wait = Label('Wait for 1st time processing...') \n\n custom_data_hbox = HBox([custom_data_vbox1, VBox([self.custom_data_choice, self.custom_data_wait])])\n\n\n def custom_data_toggle_cb(b):\n # self.update()\n if (self.custom_data_toggle.value): \n # self.custom_data_wait.value = 'Wait for 1st time processing...'\n self.update_custom_data()\n self.custom_data_choice.disabled = False\n else:\n self.custom_data_wait.value = ''\n self.custom_data_choice.disabled = True\n self.i_plot.update()\n self.custom_data_wait.value = ''\n\n self.custom_data_toggle.observe(custom_data_toggle_cb)\n\n\n 
#gui=HBox([cells_vbox, substrate_vbox, custom_data_hbox], justify_content='center') # vs. 'flex-start , layout=Layout(width='900px'))\n\n #==========================================================================\n\n # ------- \"observe\" functionality (callbacks)\n self.max_frames.observe(self.update_max_frames)\n\n # self.field_min_max = {'dummy': [0., 1., False]}\n # NOTE: manually setting these for now (vs. parsing them out of data/initial.xml)\n\n # print(\"substrate __init__: self.substrate_choice.value=\",self.substrate_choice.value)\n# self.substrate_choice.observe(self.mcds_field_cb)\n # self.substrate_choice.observe(self.mcds_field_changed_cb)\n self.substrate_choice.observe(self.substrate_field_changed_cb)\n\n self.field_colormap = Dropdown(\n options=['viridis', 'jet', 'YlOrRd'],\n value='YlOrRd',\n # description='Field',\n layout=Layout(width=constWidth)\n )\n\n # rwh2\n# self.field_cmap.observe(self.plot_substrate)\n # self.field_colormap.observe(self.substrate_field_cb)\n\n # self.colormap_min.observe(self.substrate_field_cb)\n # self.colormap_max.observe(self.substrate_field_cb)\n\n# self.cmap_fixed_toggle = Checkbox(\n# description='Fix',\n# disabled=False,\n# # layout=Layout(width=constWidth2),\n# )\n # self.colormap_fixed_toggle.observe(self.mcds_field_cb)\n\n # self.cmap_min = FloatText(\n # description='Min',\n # value=0,\n # step = 0.1,\n # disabled=True,\n # layout=Layout(width=constWidth2),\n # )\n\n # self.cmap_max = FloatText(\n # description='Max',\n # value=38,\n # step = 0.1,\n # disabled=True,\n # layout=Layout(width=constWidth2),\n # )\n\n def colormap_fixed_toggle_cb(b):\n field_name = self.field_dict[self.substrate_choice.value]\n # print(self.cmap_fixed_toggle.value)\n if (self.colormap_fixed_toggle.value): # toggle on fixed range\n self.colormap_min.disabled = False\n self.colormap_max.disabled = False\n self.field_min_max[field_name][0] = self.colormap_min.value\n self.field_min_max[field_name][1] = self.colormap_max.value\n self.field_min_max[field_name][2] = True\n # self.save_min_max.disabled = False\n else: # toggle off fixed range\n self.colormap_min.disabled = True\n self.colormap_max.disabled = True\n self.field_min_max[field_name][2] = False\n # self.save_min_max.disabled = True\n# self.mcds_field_cb()\n\n if not self.skip_cb:\n # print(\"colormap_fixed_toggle_cb(): i_plot.update\")\n self.i_plot.update()\n\n # self.colormap_fixed_toggle.observe(colormap_fixed_toggle_cb)\n self.colormap_fixed_toggle.observe(self.substrate_field_cb)\n\n def cell_edges_toggle_cb(b):\n # self.update()\n if (self.cell_edges_toggle.value): \n self.show_edge = True\n else:\n self.show_edge = False\n # print(\"cell_edges_toggle_cb(): i_plot.update\")\n self.i_plot.update()\n\n self.cell_edges_toggle.observe(cell_edges_toggle_cb)\n\n def cells_toggle_cb(b):\n # self.update()\n self.skip_cb = True\n if self.cells_toggle.value:\n self.cell_edges_toggle.disabled = False\n # self.cell_nucleus_toggle.disabled = False\n else:\n self.cell_edges_toggle.disabled = True\n # self.cell_nucleus_toggle.disabled = True\n self.skip_cb = False\n\n # print(\"cells_toggle_cb(): i_plot.update\")\n self.i_plot.update()\n\n self.cells_toggle.observe(cells_toggle_cb)\n\n def substrates_toggle_cb(b):\n self.skip_cb = True\n if self.substrates_toggle.value: # seems bass-ackwards, but makes sense\n self.colormap_fixed_toggle.disabled = False\n self.colormap_min.disabled = False\n self.colormap_max.disabled = False\n self.substrate_choice.disabled = False\n self.field_colormap.disabled = False\n 
else:\n self.colormap_fixed_toggle.disabled = True\n self.colormap_min.disabled = True\n self.colormap_max.disabled = True\n self.substrate_choice.disabled = True\n self.field_colormap.disabled = True\n self.skip_cb = False\n\n # print(\"substrates_toggle_cb: i_plot.update\")\n self.i_plot.update()\n\n self.substrates_toggle.observe(substrates_toggle_cb)\n\n #---------------------\n # def custom_data_toggle_cb(b):\n # # print(\"custom_data_toggle_cb()\")\n # self.skip_cb = True\n # if (self.custom_data_toggle.value): # seems bass-ackwards\n # self.custom_data_choice.disabled = False\n # self.custom_data_update_button.disabled = False\n # else:\n # self.custom_data_choice.disabled = True\n # self.custom_data_update_button.disabled = True\n # self.skip_cb = False\n\n # # print(\"custom_data_toggle_cb(): i_plot.update\")\n # self.i_plot.update()\n\n # self.custom_data_toggle.observe(custom_data_toggle_cb)\n # self.custom_data_update_button.on_click(self.update_custom_data)\n\n #---------------------\n help_label = Label('select slider: drag or left/right arrows')\n\n controls_box = HBox([cells_vbox, substrate_vbox, custom_data_hbox], justify_content='center') # vs. 'flex-start , layout=Layout(width='900px'))\n\n if (hublib_flag):\n self.download_button = Download('mcds.zip', style='warning', icon='cloud-download', \n tooltip='Download data', cb=self.download_cb)\n\n self.download_svg_button = Download('svg.zip', style='warning', icon='cloud-download', \n tooltip='You need to allow pop-ups in your browser', cb=self.download_svg_cb)\n download_row = HBox([self.download_button.w, self.download_svg_button.w, Label(\"Download all cell plots (browser must allow pop-ups).\")])\n\n # box_layout = Layout(border='0px solid')\n # controls_box = VBox([row1, row2]) # ,width='50%', layout=box_layout)\n # controls_box = HBox([cells_vbox, substrate_vbox, custom_data_hbox], justify_content='center') # vs. 
'flex-start , layout=Layout(width='900px'))\n\n self.tab = VBox([controls_box, self.i_plot, download_row, debug_view])\n else:\n # self.tab = VBox([row1, row2])\n # self.tab = VBox([row1, row2, self.i_plot])\n self.tab = VBox([controls_box, self.i_plot])\n\n #---------------------------------------------------\n def disable_2D_plotting(self, bool_val):\n self.custom_data_toggle.disabled = bool_val\n if (bool_val == True):\n self.custom_data_toggle.value = False\n self.yval1 = np.empty([1])\n self.yval2 = np.empty([1])\n self.yval3 = np.empty([1])\n self.yval4 = np.empty([1])\n self.yval5 = np.empty([1])\n self.yval6 = np.empty([1])\n self.custom_data_set1 = False # live, infected, dead\n self.custom_data_set2 = False # mac, neut, cd8\n # self.custom_data_toggle.value = False\n # self.custom_data_choice.disabled = bool_val\n # self.custom_data_update_button.disabled = bool_val\n\n #---------------------------------------------------\n def update_dropdown_fields(self, data_dir):\n # print('update_dropdown_fields called --------')\n self.output_dir = data_dir\n tree = None\n try:\n fname = os.path.join(self.output_dir, \"initial.xml\")\n tree = ET.parse(fname)\n xml_root = tree.getroot()\n except:\n print(\"Cannot open \",fname,\" to read info, e.g., names of substrate fields.\")\n return\n\n self.custom_data_set1 = False # live, infected, dead\n self.custom_data_set2 = False # mac, neut, cd8\n\n xml_root = tree.getroot()\n self.field_min_max = {}\n self.field_dict = {}\n dropdown_options = {}\n uep = xml_root.find('.//variables')\n comment_str = \"\"\n field_idx = 0\n if (uep):\n for elm in uep.findall('variable'):\n # print(\"-----> \",elm.attrib['name'])\n field_name = elm.attrib['name']\n self.field_min_max[field_name] = [0., 1., False]\n self.field_dict[field_idx] = field_name\n dropdown_options[field_name] = field_idx\n\n self.field_min_max[field_name][0] = 0 \n self.field_min_max[field_name][1] = 1\n\n # self.field_min_max[field_name][0] = field_idx #rwh: helps debug\n # self.field_min_max[field_name][1] = field_idx+1 \n self.field_min_max[field_name][2] = False\n field_idx += 1\n\n# constWidth = '180px'\n # print('options=',dropdown_options)\n # print(self.field_min_max) # debug\n self.substrate_choice.value = 0\n self.substrate_choice.options = dropdown_options\n\n # print(\"----- update_dropdown_fields(): self.field_dict= \", self.field_dict) \n# self.mcds_field = Dropdown(\n# # options={'oxygen': 0, 'glucose': 1},\n# options=dropdown_options,\n# value=0,\n# # description='Field',\n# layout=Layout(width=constWidth)\n# )\n\n # def update_max_frames_expected(self, value): # called when beginning an interactive Run\n # self.max_frames.value = value # assumes naming scheme: \"snapshot%08d.svg\"\n # self.mcds_plot.children[0].max = self.max_frames.value\n\n#------------------------------------------------------------------------------\n # called from pc4covid19 module when user selects new cache dir in 'Load Config'\n def update_params(self, config_tab, user_params_tab):\n # xml_root.find(\".//x_min\").text = str(self.xmin.value)\n # xml_root.find(\".//x_max\").text = str(self.xmax.value)\n # xml_root.find(\".//dx\").text = str(self.xdelta.value)\n # xml_root.find(\".//y_min\").text = str(self.ymin.value)\n # xml_root.find(\".//y_max\").text = str(self.ymax.value)\n # xml_root.find(\".//dy\").text = str(self.ydelta.value)\n # xml_root.find(\".//z_min\").text = str(self.zmin.value)\n # xml_root.find(\".//z_max\").text = str(self.zmax.value)\n # xml_root.find(\".//dz\").text = 
str(self.zdelta.value)\n\n self.xmin = config_tab.xmin.value \n self.xmax = config_tab.xmax.value \n self.x_range = self.xmax - self.xmin\n self.svg_xrange = self.xmax - self.xmin\n self.ymin = config_tab.ymin.value\n self.ymax = config_tab.ymax.value \n self.y_range = self.ymax - self.ymin\n\n self.numx = math.ceil( (self.xmax - self.xmin) / config_tab.xdelta.value)\n self.numy = math.ceil( (self.ymax - self.ymin) / config_tab.ydelta.value)\n\n # if (self.x_range > self.y_range): \n # ratio = self.y_range / self.x_range\n # self.figsize_width_substrate = self.width_substrate # allow extra for colormap\n # self.figsize_height_substrate = self.height_substrate * ratio\n # self.figsize_width_svg = self.width_svg\n # self.figsize_height_svg = self.height_svg * ratio\n # else: # x < y\n # ratio = self.x_range / self.y_range\n # self.figsize_width_substrate = self.width_substrate * ratio \n # self.figsize_height_substrate = self.height_substrate\n # self.figsize_width_svg = self.width_svg * ratio\n # self.figsize_height_svg = self.height_svg\n\n if (self.x_range > self.y_range): \n ratio = self.y_range / self.x_range\n self.figsize_width_substrate = 15.0 # allow extra for colormap\n self.figsize_height_substrate = 12.0 * ratio\n self.figsize_width_svg = 12.0\n self.figsize_height_svg = 12.0 * ratio\n else: # x < y\n ratio = self.x_range / self.y_range\n self.figsize_width_substrate = 15.0 * ratio \n self.figsize_height_substrate = 12.0\n self.figsize_width_svg = 12.0 * ratio\n self.figsize_height_svg = 12.0 \n # print('update_params(): sub w,h= ',self.figsize_width_substrate,self.figsize_height_substrate,' , svg w,h= ',self.figsize_width_svg,self.figsize_height_svg)\n\n self.svg_flag = config_tab.toggle_svg.value\n self.substrates_flag = config_tab.toggle_mcds.value\n # print(\"substrates: update_params(): svg_flag, toggle=\",self.svg_flag,config_tab.toggle_svg.value) \n # print(\"substrates: update_params(): self.substrates_flag = \",self.substrates_flag)\n self.svg_delta_t = config_tab.svg_interval.value\n self.substrate_delta_t = config_tab.mcds_interval.value\n self.modulo = int(self.substrate_delta_t / self.svg_delta_t)\n # print(\"substrates: update_params(): modulo=\",self.modulo) \n\n if self.customized_output_freq:\n# self.therapy_activation_time = user_params_tab.therapy_activation_time.value # NOTE: edit for user param name\n # print(\"substrates: update_params(): therapy_activation_time=\",self.therapy_activation_time)\n self.max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)\n self.max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)\n\n#------------------------------------------------------------------------------\n# def update(self, rdir):\n# Called from driver module (e.g., pc4*.py) (among other places?)\n def update(self, rdir=''):\n # with debug_view:\n # print(\"substrates: update rdir=\", rdir) \n # print(\"substrates: update rdir=\", rdir) \n\n if rdir:\n self.output_dir = rdir\n\n # print('update(): self.output_dir = ', self.output_dir)\n\n if self.first_time:\n # if True:\n self.first_time = False\n full_xml_filename = Path(os.path.join(self.output_dir, 'config.xml'))\n # print(\"substrates: update(), config.xml = \",full_xml_filename) \n # self.num_svgs = len(glob.glob(os.path.join(self.output_dir, 'snap*.svg')))\n # self.num_substrates = len(glob.glob(os.path.join(self.output_dir, 'output*.xml')))\n # print(\"substrates: num_svgs,num_substrates =\",self.num_svgs,self.num_substrates) \n # argh - no! 
If no files created, then denom = -1\n # self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))\n # print(\"substrates: update(): modulo=\",self.modulo) \n if full_xml_filename.is_file():\n tree = ET.parse(full_xml_filename) # this file cannot be overwritten; part of tool distro\n xml_root = tree.getroot()\n self.svg_delta_t = int(xml_root.find(\".//SVG//interval\").text)\n self.substrate_delta_t = int(xml_root.find(\".//full_data//interval\").text)\n # print(\"substrates: svg,substrate delta_t values=\",self.svg_delta_t,self.substrate_delta_t) \n self.modulo = int(self.substrate_delta_t / self.svg_delta_t)\n # print(\"substrates-2: update(): modulo=\",self.modulo) \n\n\n # all_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml'))) # if the substrates/MCDS\n\n all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snap*.svg'))) # if .svg\n if len(all_files) > 0:\n last_file = all_files[-1]\n # print(\"substrates.py/update(): len(snap*.svg) = \",len(all_files),\" , last_file=\",last_file)\n self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: \"snapshot%08d.svg\"\n else:\n substrate_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml')))\n if len(substrate_files) > 0:\n last_file = substrate_files[-1]\n self.max_frames.value = int(last_file[-12:-4])\n\n def download_svg_cb(self):\n file_str = os.path.join(self.output_dir, '*.svg')\n # print('zip up all ',file_str)\n with zipfile.ZipFile('svg.zip', 'w') as myzip:\n for f in glob.glob(file_str):\n myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive\n\n def download_cb(self):\n file_xml = os.path.join(self.output_dir, '*.xml')\n file_mat = os.path.join(self.output_dir, '*.mat')\n # print('zip up all ',file_str)\n with zipfile.ZipFile('mcds.zip', 'w') as myzip:\n for f in glob.glob(file_xml):\n myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive\n for f in glob.glob(file_mat):\n myzip.write(f, os.path.basename(f))\n\n def update_max_frames(self,_b):\n self.i_plot.children[0].max = self.max_frames.value\n\n # called if user selected different substrate in dropdown\n # @debug_view.capture(clear_output=True)\n def substrate_field_changed_cb(self, b):\n if (self.substrate_choice.value == None):\n return\n\n self.tb_count += 1\n # print('substrate_field_changed_cb(): tb_count=',self.tb_count,', options= ',self.substrate_choice.options)\n\n if self.tb_count == 25: # originally checked == 5 (I don't remember why I did this)\n # foo = 1/0 # force an exception for a traceback\n try:\n raise NameError('HiThere')\n except:\n with debug_view:\n # print(\"substrates: update rdir=\", rdir) \n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=None, file=sys.stdout)\n # sys.exit(-1)\n\n # print('substrate_field_changed_cb: self.field_index=', self.field_index)\n # print('substrate_field_changed_cb: self.substrate_choice.value=', self.substrate_choice.value)\n # if (self.field_index == self.substrate_choice.value + 4):\n # return\n\n self.field_index = self.substrate_choice.value + 4\n\n field_name = self.field_dict[self.substrate_choice.value]\n # print('substrate_field_changed_cb: field_name='+ field_name)\n # print(self.field_min_max[field_name])\n\n # BEWARE of these triggering the substrate_field_cb() callback! 
Hence, the \"skip_cb\"\n self.skip_cb = True\n self.colormap_min.value = self.field_min_max[field_name][0]\n self.colormap_max.value = self.field_min_max[field_name][1]\n self.colormap_fixed_toggle.value = bool(self.field_min_max[field_name][2])\n self.skip_cb = False\n\n # if not self.skip_cb:\n # print(\"substrate_field_changed_cb: i_plot.update\")\n self.i_plot.update()\n\n # called if user provided different min/max values for colormap, or a different colormap\n def substrate_field_cb(self, b):\n # # if self.skip_cb:\n # return\n\n self.field_index = self.substrate_choice.value + 4\n\n field_name = self.field_dict[self.substrate_choice.value]\n # print('substrate_field_cb: field_name='+ field_name)\n\n # print('substrate_field_cb: '+ field_name)\n self.field_min_max[field_name][0] = self.colormap_min.value \n self.field_min_max[field_name][1] = self.colormap_max.value\n self.field_min_max[field_name][2] = self.colormap_fixed_toggle.value\n\n# self.field_index = self.substrate_choice.value + 4\n# print('field_index=',self.field_index)\n if not self.skip_cb:\n # print(\"substrate_field_cb: i_plot.update, field_index=\",self.field_index)\n self.i_plot.update()\n\n #------------------------------------------------------------\n # def update_custom_data(self,b):\n def update_custom_data(self):\n # print('----- update_custom_data')\n # print('update_custom_data(): self.output_dir = ', self.output_dir)\n if ('live' in self.custom_data_choice.value) and self.custom_data_set1:\n return\n elif ('mac' in self.custom_data_choice.value) and self.custom_data_set2:\n return\n\n self.custom_data_wait.value = 'Wait for 1st time processing...'\n cwd = os.getcwd()\n print(\"----- cwd(1)=\",cwd)\n data_dir = cwd\n print(\"----- data_dir(1)=\",cwd)\n\n if 'cache' in self.output_dir:\n data_dir = self.output_dir\n print(\"----- data_dir(2)=\",data_dir, \" --> chdir to there\")\n os.chdir(data_dir)\n else:\n # print('update_custom_data: cwd=',cwd)\n if not 'tmpdir' in cwd:\n data_dir = os.path.abspath('tmpdir')\n print(\"----- data_dir(3)=\",cwd)\n\n\n xml_files = glob.glob('output*.xml')\n xml_files = glob.glob(os.path.join('tmpdir', 'output*.xml'))\n xml_files.sort()\n print(\"----- chdir back to cwd=\",cwd)\n os.chdir(cwd)\n\n ds_count = len(xml_files)\n # print(\"----- ds_count = \",ds_count)\n mcds = [pyMCDS(xml_files[i], '.') for i in range(ds_count)]\n # mcds = [pyMCDS(xml_files[i], 'tmpdir') for i in range(ds_count)]\n # mcds = [pyMCDS(xml_files[i], data_dir) for i in range(ds_count)]\n print(\"----- mcds = \",mcds)\n # print(mcds[0].data['discrete_cells'].keys())\n# dict_keys(['ID', 'position_x', 'position_y', 'position_z', 'total_volume', 'cell_type', 'cycle_model', 'current_phase', 'elapsed_time_in_phase', 'nuclear_volume', 'cytoplasmic_volume', 'fluid_fraction', 'calcified_fraction', 'orientation_x', 'orientation_y', 'orientation_z', 'polarity', 'migration_speed', 'motility_vector_x', 'motility_vector_y', 'motility_vector_z', 'migration_bias', 'motility_bias_direction_x', 'motility_bias_direction_y', 'motility_bias_direction_z', 'persistence_time', 'motility_reserved', 'unbound_external_ACE2', 'bound_external_ACE2', 'unbound_internal_ACE2', 'bound_internal_ACE2', 'ACE2_binding_rate', 'ACE2_endocytosis_rate', 'ACE2_cargo_release_rate', 'ACE2_recycling_rate', 'virion', 'uncoated_virion', 'viral_RNA', 'viral_protein', 'assembled_virion', 'virion_uncoating_rate', 'uncoated_to_RNA_rate', 'protein_synthesis_rate', 'virion_assembly_rate', 'virion_export_rate', 'max_infected_apoptosis_rate', 
'max_apoptosis_half_max', 'apoptosis_hill_power'])\n\n # def cell_data_plot(xname, yname_list, t):\n # discrete_cells_names = ['virion', 'assembled_virion'] # not used now\n tval = np.linspace(0, mcds[-1].get_time(), len(xml_files))\n # return\n\n # if all selected: ('assembled_virion', 'susceptible', 'infected', 'dead')\n # print('custom_data_choice = ', self.custom_data_choice.value) # get RadioButton selection\n # self.num_2D_plots = len(self.custom_data_choice.value)\n # print('num_2D_plots = ', self.num_2D_plots)\n\n xname = 'time'\n if xname == self.tname:\n self.xval = tval\n # elif xname in discrete_cells_names:\n # self.xval = np.array([mcds[i].data['discrete_cells'][xname].sum() for i in range(ds_count)])\n # else:\n # if xname == 'susceptible_cells':\n # self.xval = np.array([(mcds[i].data['discrete_cells']['assembled_virion'] <= 1).sum() for i in range(ds_count)])\n # + np.array([(mcds[i].data['discrete_cells']['cycle_model'] < 6).sum() for i in range(ds_count)])\n # elif xname == 'infected_cells':\n # self.xval = np.array([(mcds[i].data['discrete_cells']['assembled_virion'] > 1).sum() for i in range(ds_count)]) \\\n # + np.array([(mcds[i].data['discrete_cells']['cycle_model'] < 6).sum() for i in range(ds_count)])\n # elif xname == 'dead_cells':\n # self.xval = np.array([len(mcds[0].data['discrete_cells']['ID']) - len(mcds[i].data['discrete_cells']['ID']) for i in range(ds_count)]) \\\n # + np.array([(mcds[i].data['discrete_cells']['cycle_model'] >= 6).sum() for i in range(ds_count)])\n\n\n # print('custom_data_choice = ',self.custom_data_choice.value)\n\n if 'live' in self.custom_data_choice.value: # live,infected,dead\n if (self.custom_data_set1 == False):\n # count # infected\n self.yval2 = np.array( [len(np.where(mcds[idx].data['discrete_cells']['virion'] > 1)[0]) for idx in range(ds_count)] )\n # print('self.yval2=',self.yval2)\n\n # count # dead\n self.yval3 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cycle_model'] > 6) == True)) for idx in range(ds_count)] )\n # print('self.yval3=',self.yval3)\n\n # count # live (all epi cells - dead)\n self.yval1 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 1) == True)) for idx in range(ds_count)] )\n\n self.custom_data_set1 = True \n\n elif 'mac' in self.custom_data_choice.value: # mac,neut,cd8\n if (self.custom_data_set2 == False):\n # count macs\n self.yval4 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 4) == True)) for idx in range(ds_count)] )\n\n # count neuts\n self.yval5 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 5) == True)) for idx in range(ds_count)] )\n\n # count cd8\n self.yval6 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 3) == True)) for idx in range(ds_count)] )\n\n self.custom_data_set2 = True \n\n self.custom_data_wait.value = ''\n self.i_plot.update()\n\n #------------------------------------------------------------\n # def plot_cell_custom_data_dummy(self):\n # print('----- plot_cell_custom_data()')\n # x = np.linspace(0, 2*np.pi, 400)\n # y = np.sin(x**2)\n # # self.i_plot.update()\n # self.ax1.plot(x, y)\n\n #------------------------------------------------------------\n def plot_empty_custom_data(self):\n self.ax1.plot([0.], [0.], color='white',marker='.') # hack empty\n # self.ax1.clf()\n self.ax1.get_xaxis().set_visible(False)\n self.ax1.get_yaxis().set_visible(False)\n self.ax1.axis('off')\n\n 
#------------------------------------------------------------\n # def plot2D_custom_data(self, frame):\n def plot_cell_custom_data(self, xname, yname_list, t):\n # global current_idx, axes_max\n global current_frame\n # current_frame = frame\n # fname = \"snapshot%08d.svg\" % frame\n # full_fname = os.path.join(self.output_dir, fname)\n # print('plot_cell_custom_data: self.output_dir=',self.output_dir)\n\n if 'live' in self.custom_data_choice.value: \n p1 = self.ax1.plot(self.xval, self.yval1, label='live', linewidth=3)\n p2 = self.ax1.plot(self.xval, self.yval2, label='infected', linewidth=3)\n p3 = self.ax1.plot(self.xval, self.yval3, label='dead', linewidth=3)\n elif 'mac' in self.custom_data_choice.value: # mac,neut,cd8\n p1 = self.ax1.plot(self.xval, self.yval4, label='mac', linewidth=3, color='lime')\n p2 = self.ax1.plot(self.xval, self.yval5, label='neut', linewidth=3, color='cyan')\n p3 = self.ax1.plot(self.xval, self.yval6, label='cd8', linewidth=3, color='red')\n\n # print('xval=',xval) # [ 0. 60. 120. ...\n # print('yval=',yval) # [2793 2793 2793 ...\n # print('t=',t)\n\n # if (t >= 0):\n xoff= self.xval.max() * .01 # should be a % of axes range\n fsize=12\n if (t >= 0 and len(self.xval) > 1):\n # print('self.xval=',self.xval) # [ 0. 60. 120. ...\n # array = np.asarray(self.xval)\n # idx = (np.abs(array - t)).argmin()\n # print('closest value idx =',idx)\n # self.ax1.plot(self.xval[t], self.yval[t], p[-1].get_color(), marker='o')\n if 'live' in self.custom_data_choice.value: \n self.ax1.plot(self.xval[self.substrate_frame], self.yval1[self.substrate_frame], p1[-1].get_color(), marker='o', markersize=12)\n self.ax1.plot(self.xval[self.substrate_frame], self.yval2[self.substrate_frame], p2[-1].get_color(), marker='o', markersize=12)\n self.ax1.plot(self.xval[self.substrate_frame], self.yval3[self.substrate_frame], p3[-1].get_color(), marker='o', markersize=12)\n\n # label = \"{:d}\".format(self.yval1[self.substrate_frame]), \n # self.ax1.annotate(str(self.yval1[self.substrate_frame]), (self.xval[self.substrate_frame]+xoff,self.yval1[self.substrate_frame]+yoff) )\n ymax= max(int(self.yval1.max()),int(self.yval2.max()),int(self.yval3.max())) # should be a % of axes range\n yoff= ymax * .01 # should be a % of axes range\n\n self.ax1.text( self.xval[self.substrate_frame]+xoff, self.yval1[self.substrate_frame]+yoff, str(self.yval1[self.substrate_frame]), fontsize=fsize)\n self.ax1.text( self.xval[self.substrate_frame]+xoff, self.yval2[self.substrate_frame]+yoff, str(self.yval2[self.substrate_frame]), fontsize=fsize)\n self.ax1.text( self.xval[self.substrate_frame]+xoff, self.yval3[self.substrate_frame]+yoff, str(self.yval3[self.substrate_frame]), fontsize=fsize)\n \n elif 'mac' in self.custom_data_choice.value: # mac,neut,cd8\n self.ax1.plot(self.xval[self.substrate_frame], self.yval4[self.substrate_frame], p1[-1].get_color(), marker='o', markersize=12)\n self.ax1.plot(self.xval[self.substrate_frame], self.yval5[self.substrate_frame], p2[-1].get_color(), marker='o', markersize=12)\n self.ax1.plot(self.xval[self.substrate_frame], self.yval6[self.substrate_frame], p3[-1].get_color(), marker='o', markersize=12)\n # self.ax1.gca().spines['top'].set_visible(False)\n # self.ax1.gca().spines['right'].set_visible(False)\n # self.ax1.margins(0)\n\n # label markers\n ymax= max(int(self.yval4.max()),int(self.yval5.max()),int(self.yval6.max())) # should be a % of axes range\n yoff= ymax * .01 # should be a % of axes range\n self.ax1.text( self.xval[self.substrate_frame]+xoff, 
self.yval4[self.substrate_frame]+yoff, str(self.yval4[self.substrate_frame]), fontsize=fsize)\n self.ax1.text( self.xval[self.substrate_frame]+xoff, self.yval5[self.substrate_frame]+yoff, str(self.yval5[self.substrate_frame]), fontsize=fsize)\n self.ax1.text( self.xval[self.substrate_frame]+xoff, self.yval6[self.substrate_frame]+yoff, str(self.yval6[self.substrate_frame]), fontsize=fsize)\n\n\n\n self.ax1.legend(loc='center left', prop={'size': 15})\n if xname == self.tname:\n self.ax1.set_xlabel('time (min)', fontsize=self.axis_label_fontsize)\n else:\n self.ax1.set_xlabel('total ' * (xname != self.tname) + xname)\n# self.ax1.set_ylabel('total ' + (yname_list[0] if len(yname_list) == 1 else ', '.join(yname_list)))\n self.ax1.set_ylabel('number of cells', fontsize=self.axis_label_fontsize)\n\n # p = self.ax1.plot(xval, yval, label=yname)\n # self.ax1.set_legend()\n # self.ax1.tight_layout()\n # self.ax1.show()\n\n\n #---------------------------------------------------------------------------\n def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):\n \"\"\"\n See https://gist.github.com/syrte/592a062c562cd2a98a83 \n\n Make a scatter plot of circles. \n Similar to plt.scatter, but the size of circles are in data scale.\n Parameters\n ----------\n x, y : scalar or array_like, shape (n, )\n Input data\n s : scalar or array_like, shape (n, ) \n Radius of circles.\n c : color or sequence of color, optional, default : 'b'\n `c` can be a single color format string, or a sequence of color\n specifications of length `N`, or a sequence of `N` numbers to be\n mapped to colors using the `cmap` and `norm` specified via kwargs.\n Note that `c` should not be a single numeric RGB or RGBA sequence \n because that is indistinguishable from an array of values\n to be colormapped. (If you insist, use `color` instead.) \n `c` can be a 2-D array in which the rows are RGB or RGBA, however. \n vmin, vmax : scalar, optional, default: None\n `vmin` and `vmax` are used in conjunction with `norm` to normalize\n luminance data. If either are `None`, the min and max of the\n color array is used.\n kwargs : `~matplotlib.collections.Collection` properties\n Eg. 
alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), \n norm, cmap, transform, etc.\n Returns\n -------\n paths : `~matplotlib.collections.PathCollection`\n Examples\n --------\n a = np.arange(11)\n circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')\n plt.colorbar()\n License\n --------\n This code is under [The BSD 3-Clause License]\n (http://opensource.org/licenses/BSD-3-Clause)\n \"\"\"\n\n if np.isscalar(c):\n kwargs.setdefault('color', c)\n c = None\n\n if 'fc' in kwargs:\n kwargs.setdefault('facecolor', kwargs.pop('fc'))\n if 'ec' in kwargs:\n kwargs.setdefault('edgecolor', kwargs.pop('ec'))\n if 'ls' in kwargs:\n kwargs.setdefault('linestyle', kwargs.pop('ls'))\n if 'lw' in kwargs:\n kwargs.setdefault('linewidth', kwargs.pop('lw'))\n # You can set `facecolor` with an array for each patch,\n # while you can only set `facecolors` with a value for all.\n\n zipped = np.broadcast(x, y, s)\n patches = [Circle((x_, y_), s_)\n for x_, y_, s_ in zipped]\n collection = PatchCollection(patches, **kwargs)\n if c is not None:\n c = np.broadcast_to(c, zipped.shape).ravel()\n collection.set_array(c)\n collection.set_clim(vmin, vmax)\n\n # ax = plt.gca()\n # ax.add_collection(collection)\n # ax.autoscale_view()\n self.ax0.add_collection(collection)\n self.ax0.autoscale_view()\n # plt.draw_if_interactive()\n if c is not None:\n # plt.sci(collection)\n self.ax0.sci(collection)\n # return collection\n\n #------------------------------------------------------------\n # def plot_svg(self, frame, rdel=''):\n def plot_svg(self, frame):\n # global current_idx, axes_max\n global current_frame\n current_frame = frame\n fname = \"snapshot%08d.svg\" % frame\n full_fname = os.path.join(self.output_dir, fname)\n # with debug_view:\n # print(\"plot_svg:\", full_fname) \n # print(\"-- plot_svg:\", full_fname) \n if not os.path.isfile(full_fname):\n print(\"Once output files are generated, click the slider.\") \n return\n\n xlist = deque()\n ylist = deque()\n rlist = deque()\n rgb_list = deque()\n\n # print('\\n---- ' + fname + ':')\n# tree = ET.parse(fname)\n tree = ET.parse(full_fname)\n root = tree.getroot()\n # print('--- root.tag ---')\n # print(root.tag)\n # print('--- root.attrib ---')\n # print(root.attrib)\n # print('--- child.tag, child.attrib ---')\n numChildren = 0\n for child in root:\n # print(child.tag, child.attrib)\n # print(\"keys=\",child.attrib.keys())\n if self.use_defaults and ('width' in child.attrib.keys()):\n self.axes_max = float(child.attrib['width'])\n # print(\"debug> found width --> axes_max =\", axes_max)\n if child.text and \"Current time\" in child.text:\n svals = child.text.split()\n # remove the \".00\" on minutes\n self.title_str += \" cells: \" + svals[2] + \"d, \" + svals[4] + \"h, \" + svals[7][:-3] + \"m\"\n\n # self.cell_time_mins = int(svals[2])*1440 + int(svals[4])*60 + int(svals[7][:-3])\n # self.title_str += \" cells: \" + str(self.cell_time_mins) + \"m\" # rwh\n\n # print(\"width \",child.attrib['width'])\n # print('attrib=',child.attrib)\n # if (child.attrib['id'] == 'tissue'):\n if ('id' in child.attrib.keys()):\n # print('-------- found tissue!!')\n tissue_parent = child\n break\n\n # print('------ search tissue')\n cells_parent = None\n\n for child in tissue_parent:\n # print('attrib=',child.attrib)\n if (child.attrib['id'] == 'cells'):\n # print('-------- found cells, setting cells_parent')\n cells_parent = child\n break\n numChildren += 1\n\n num_cells = 0\n # print('------ search cells')\n for child in cells_parent:\n # print(child.tag, 
child.attrib)\n # print('attrib=',child.attrib)\n for circle in child: # two circles in each child: outer + nucleus\n # circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'}\n # print(' --- cx,cy=',circle.attrib['cx'],circle.attrib['cy'])\n xval = float(circle.attrib['cx'])\n\n # map SVG coords into comp domain\n # xval = (xval-self.svg_xmin)/self.svg_xrange * self.x_range + self.xmin\n xval = xval/self.x_range * self.x_range + self.xmin\n\n s = circle.attrib['fill']\n # print(\"s=\",s)\n # print(\"type(s)=\",type(s))\n if (s[0:3] == \"rgb\"): # if an rgb string, e.g. \"rgb(175,175,80)\" \n rgb = list(map(int, s[4:-1].split(\",\"))) \n rgb[:] = [x / 255. for x in rgb]\n else: # otherwise, must be a color name\n rgb_tuple = mplc.to_rgb(mplc.cnames[s]) # a tuple\n rgb = [x for x in rgb_tuple]\n\n # test for bogus x,y locations (rwh TODO: use max of domain?)\n too_large_val = 10000.\n if (np.fabs(xval) > too_large_val):\n print(\"bogus xval=\", xval)\n break\n yval = float(circle.attrib['cy'])\n # yval = (yval - self.svg_xmin)/self.svg_xrange * self.y_range + self.ymin\n yval = yval/self.y_range * self.y_range + self.ymin\n if (np.fabs(yval) > too_large_val):\n print(\"bogus xval=\", xval)\n break\n\n rval = float(circle.attrib['r'])\n # if (rgb[0] > rgb[1]):\n # print(num_cells,rgb, rval)\n xlist.append(xval)\n ylist.append(yval)\n rlist.append(rval)\n rgb_list.append(rgb)\n\n # For .svg files with cells that *have* a nucleus, there will be a 2nd\n if (not self.show_nucleus):\n #if (not self.show_nucleus):\n break\n\n num_cells += 1\n\n # if num_cells > 3: # for debugging\n # print(fname,': num_cells= ',num_cells,\" --- debug exit.\")\n # sys.exit(1)\n # break\n\n # print(fname,': num_cells= ',num_cells)\n\n xvals = np.array(xlist)\n yvals = np.array(ylist)\n rvals = np.array(rlist)\n rgbs = np.array(rgb_list)\n # print(\"xvals[0:5]=\",xvals[0:5])\n # print(\"rvals[0:5]=\",rvals[0:5])\n # print(\"rvals.min, max=\",rvals.min(),rvals.max())\n\n # rwh - is this where I change size of render window?? (YES - yipeee!)\n # plt.figure(figsize=(6, 6))\n # plt.cla()\n # if (self.substrates_toggle.value):\n self.title_str += \" (\" + str(num_cells) + \" agents)\"\n # title_str = \" (\" + str(num_cells) + \" agents)\"\n # else:\n # mins= round(int(float(root.find(\".//current_time\").text))) # TODO: check units = mins\n # hrs = int(mins/60)\n # days = int(hrs/24)\n # title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))\n # plt.title(self.title_str)\n self.ax0.set_title(self.title_str)\n\n # plt.xlim(self.xmin, self.xmax)\n # plt.ylim(self.ymin, self.ymax)\n self.ax0.set_xlim(self.xmin, self.xmax)\n self.ax0.set_ylim(self.ymin, self.ymax)\n\n # self.ax0.colorbar(collection)\n\n # plt.xlim(axes_min,axes_max)\n # plt.ylim(axes_min,axes_max)\n # plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs)\n\n # TODO: make figsize a function of plot_size? 
What about non-square plots?\n # self.fig = plt.figure(figsize=(9, 9))\n\n# axx = plt.axes([0, 0.05, 0.9, 0.9]) # left, bottom, width, height\n# axx = fig.gca()\n# print('fig.dpi=',fig.dpi) # = 72\n\n # im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])\n # ax.xlim(axes_min,axes_max)\n # ax.ylim(axes_min,axes_max)\n\n # convert radii to radii in pixels\n # ax1 = self.fig.gca()\n # N = len(xvals)\n # rr_pix = (ax1.transData.transform(np.vstack([rvals, rvals]).T) -\n # ax1.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T))\n # rpix, _ = rr_pix.T\n\n # markers_size = (144. * rpix / self.fig.dpi)**2 # = (2*rpix / fig.dpi * 72)**2\n # markers_size = markers_size/4000000.\n # print('max=',markers_size.max())\n\n #rwh - temp fix - Ah, error only occurs when \"edges\" is toggled on\n if (self.show_edge):\n try:\n # plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5)\n self.circles(xvals,yvals, s=rvals, color=rgbs, alpha=self.alpha, edgecolor='black', linewidth=0.5)\n # cell_circles = self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)\n # plt.sci(cell_circles)\n except (ValueError):\n pass\n else:\n # plt.scatter(xvals,yvals, s=markers_size, c=rgbs)\n self.circles(xvals,yvals, s=rvals, color=rgbs, alpha=self.alpha)\n\n # im = ax.imshow(np.arange(100).reshape((10, 10))) # rwh: dummy, for future testing\n # cbar = self.fig.colorbar(substrate_plot, ax=self.ax0)\n # plt.colorbar(im, cax=cax)\n\n # x = np.linspace(0, 2*np.pi, 100)\n # y = np.sin(x**2)\n # self.i_plot.update()\n # self.ax1.plot(x, y)\n # self.plot_cell_custom_data_0(\"time\", [\"assembled_virion\"], 20)\n\n # if (self.show_tracks):\n # for key in self.trackd.keys():\n # xtracks = self.trackd[key][:,0]\n # ytracks = self.trackd[key][:,1]\n # plt.plot(xtracks[0:frame],ytracks[0:frame], linewidth=5)\n\n\n #---------------------------------------------------------------------------\n # assume \"frame\" is cell frame #, unless Cells is togggled off, then it's the substrate frame #\n # def plot_substrate(self, frame, grid):\n def plot_substrate(self, frame):\n\n # print(\"plot_substrate(): frame*self.substrate_delta_t = \",frame*self.substrate_delta_t)\n # print(\"plot_substrate(): frame*self.svg_delta_t = \",frame*self.svg_delta_t)\n # print(\"plot_substrate(): fig width: SVG+2D = \",self.figsize_width_svg + self.figsize_width_2Dplot) # 24\n # print(\"plot_substrate(): fig width: substrate+2D = \",self.figsize_width_substrate + self.figsize_width_2Dplot) # 27\n\n self.title_str = ''\n\n # Recall:\n # self.svg_delta_t = config_tab.svg_interval.value\n # self.substrate_delta_t = config_tab.mcds_interval.value\n # self.modulo = int(self.substrate_delta_t / self.svg_delta_t)\n # self.therapy_activation_time = user_params_tab.therapy_activation_time.value\n\n # print(\"plot_substrate(): pre_therapy: max svg, substrate frames = \",max_svg_frame_pre_therapy, max_substrate_frame_pre_therapy)\n\n # Assume: # .svg files >= # substrate files\n# if (self.cells_toggle.value):\n\n if self.substrates_toggle.value:\n # maybe only show 2nd plot if self.custom_data_toggle is True\n # if self.custom_data_toggle.value: # substrates and 2D plots \n if True: # substrates and 2D plots \n # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(30, 12))\n # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(self.figsize_width_substrate + self.figsize_width_2Dplot, self.figsize_height_substrate))\n # self.fig, (self.ax0, self.ax1) = 
plt.subplots(1, 2, figsize=(31, self.figsize_height_substrate), gridspec_kw={'width_ratios': [1.35, 1]})\n # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(31, 13), gridspec_kw={'width_ratios': [1.35, 1]})\n self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(34, 15), gridspec_kw={'width_ratios': [1.5, 1]})\n # else: # substrates plot, but no 2D plot\n # print('plot sub: sub w,h= ',self.figsize_width_substrate,self.figsize_height_substrate)\n # self.fig, (self.ax0) = plt.subplots(1, 1, figsize=(self.figsize_width_substrate, self.figsize_height_substrate))\n # # self.fig, (self.ax0) = plt.subplots(1, 1, figsize=(12, 12))\n\n\n if (self.customized_output_freq and (frame > self.max_svg_frame_pre_therapy)):\n self.substrate_frame = self.max_substrate_frame_pre_therapy + (frame - self.max_svg_frame_pre_therapy)\n else:\n self.substrate_frame = int(frame / self.modulo)\n\n fname = \"output%08d_microenvironment0.mat\" % self.substrate_frame\n xml_fname = \"output%08d.xml\" % self.substrate_frame\n # fullname = output_dir_str + fname\n\n # fullname = fname\n full_fname = os.path.join(self.output_dir, fname)\n # print(\"--- plot_substrate(): full_fname=\",full_fname)\n full_xml_fname = os.path.join(self.output_dir, xml_fname)\n # self.output_dir = '.'\n\n # if not os.path.isfile(fullname):\n if not os.path.isfile(full_fname):\n print(\"Once output files are generated, click the slider.\") # No: output00000000_microenvironment0.mat\n return\n\n # tree = ET.parse(xml_fname)\n tree = ET.parse(full_xml_fname)\n xml_root = tree.getroot()\n mins = round(int(float(xml_root.find(\".//current_time\").text))) # TODO: check units = mins\n self.substrate_mins= round(int(float(xml_root.find(\".//current_time\").text))) # TODO: check units = mins\n\n hrs = int(mins/60)\n days = int(hrs/24)\n self.title_str = 'substrate: %dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))\n # self.title_str = 'substrate: %dm' % (mins ) # rwh\n\n info_dict = {}\n scipy.io.loadmat(full_fname, info_dict)\n M = info_dict['multiscale_microenvironment']\n f = M[self.field_index, :] # 4=tumor cells field, 5=blood vessel density, 6=growth substrate\n\n try:\n xgrid = M[0, :].reshape(self.numy, self.numx)\n ygrid = M[1, :].reshape(self.numy, self.numx)\n except:\n print(\"substrates.py: mismatched mesh size for reshape: numx,numy=\",self.numx, self.numy)\n pass\n# xgrid = M[0, :].reshape(self.numy, self.numx)\n# ygrid = M[1, :].reshape(self.numy, self.numx)\n\n num_contours = 15\n levels = MaxNLocator(nbins=num_contours).tick_values(self.colormap_min.value, self.colormap_max.value)\n contour_ok = True\n if (self.colormap_fixed_toggle.value):\n try:\n substrate_plot = self.ax0.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_colormap.value, fontsize=self.fontsize)\n except:\n contour_ok = False\n # print('got error on contourf 1.')\n else: \n try:\n substrate_plot = self.ax0.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_colormap.value)\n except:\n contour_ok = False\n # print('got error on contourf 2.')\n\n if (contour_ok):\n self.ax0.set_title(self.title_str, fontsize=self.fontsize)\n cbar = self.fig.colorbar(substrate_plot, ax=self.ax0)\n cbar.ax.tick_params(labelsize=self.fontsize)\n\n self.ax0.set_xlim(self.xmin, self.xmax)\n self.ax0.set_ylim(self.ymin, self.ymax)\n\n # Now plot the cells (possibly on top of the substrate)\n if self.cells_toggle.value:\n if not 
self.substrates_toggle.value:\n # maybe only show 2nd plot if self.custom_data_toggle is True\n # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(self.figsize_width_svg*2, self.figsize_height_svg))\n # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(24, 12))\n # if self.custom_data_toggle.value: # cells (SVG) and 2D plot (no substrate)\n # if False: # cells (SVG) and 2D plot (no substrate)\n # # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(20, 10))\n # self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(self.figsize_width_svg + self.figsize_width_2Dplot, self.figsize_height_svg))\n if True: # cells (SVG), but no 2D plot (and no substrate)\n # else: # cells (SVG), but no 2D plot (and no substrate)\n # print('plot svg: svg w,h= ',self.figsize_width_svg,self.figsize_height_svg)\n # self.fig, (self.ax0) = plt.subplots(1, 1, figsize=(self.figsize_width_svg, self.figsize_height_svg))\n self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(25, self.figsize_height_substrate), gridspec_kw={'width_ratios': [1.1, 1]})\n\n self.svg_frame = frame\n # print('plot_svg with frame=',self.svg_frame)\n self.plot_svg(self.svg_frame)\n # cbar = self.fig.colorbar(substrate_plot, ax=self.ax0)\n\n if (self.custom_data_toggle.value):\n # print('custom_data_toggle.value =',self.custom_data_toggle.value )\n # self.plot_cell_custom_data(\"time\", [\"assembled_virion\"], -1)\n # print('self.substrate_frame = ',self.substrate_frame)\n self.substrate_frame = int(frame / self.modulo)\n self.plot_cell_custom_data(\"time\", [\"assembled_virion\"], self.substrate_frame)\n else:\n self.plot_empty_custom_data()\n" ]
[ [ "matplotlib.collections.PatchCollection", "matplotlib.colors.to_rgb", "numpy.fabs", "matplotlib.patches.Circle", "matplotlib.pyplot.subplots", "numpy.broadcast", "numpy.broadcast_to", "numpy.isscalar", "matplotlib.ticker.MaxNLocator", "numpy.count_nonzero", "numpy.array", "numpy.where", "numpy.empty" ] ]
zhijieW94/ETNet
[ "dd6676f38ec446703c3b71289457a53188701a43" ]
[ "data_processing/data_processing.py" ]
[ "import tensorflow as tf\nfrom scipy import misc\nimport numpy as np\nimport random\n\nclass ImageData:\n def __init__(self, img_h, img_w, channels):\n self.img_h = img_h\n self.img_w = img_w\n self.channels = channels\n\n def image_processing(self, filename):\n x = tf.read_file(filename)\n x_decode = tf.image.decode_jpeg(x, channels=self.channels)\n\n resize_h = tf.cond(tf.less(tf.shape(x_decode)[0], self.img_h), lambda: self.img_h, lambda: tf.shape(x_decode)[0])\n resize_w = tf.cond(tf.less(tf.shape(x_decode)[1], self.img_w), lambda: self.img_w, lambda: tf.shape(x_decode)[1])\n resize_shape = (resize_h, resize_w)\n\n img = tf.image.resize_images(x_decode,resize_shape)\n seed = random.randint(0, 2 ** 31 - 1)\n img = tf.random_crop(img, [self.img_h, self.img_w,self.channels], seed=seed)\n img = tf.cast(img, tf.float32) / 127.5 - 1\n return img, filename\n\n\n#save image\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0] // (size[1]), w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[h*j:h*(j+1), w*i:w*(i+1), :] = image\n return img\n\ndef save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef inverse_transform(images):\n return (images+1.) / 2\n\ndef imsave(images, size, path):\n images = np.uint8(np.clip(images, 0, 1) * 255)\n images = np.clip(images, 0, 255).astype(np.uint8)\n return misc.imsave(path, merge(images, size))\n\ndef load_data_testing(image_path):\n img = misc.imread(image_path, mode='RGB')\n w, h, c = img.shape\n w = w - w%32\n h = h - h%32\n\n img = img[0:w, 0:h] #misc.imresize(img,(w,h))\n\n img = np.expand_dims(img, axis=0)\n img = img / 127.5 - 1\n return img\n\n" ]
[ [ "numpy.expand_dims", "tensorflow.read_file", "numpy.clip", "tensorflow.image.resize_images", "tensorflow.shape", "tensorflow.cast", "tensorflow.random_crop", "scipy.misc.imread", "numpy.zeros", "tensorflow.image.decode_jpeg" ] ]
vksysd/P4_INT
[ "ddf51610cbdcc1e79f493df42f87a37f320360b6" ]
[ "INT_headerstack/results/graphs.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nx = pd.read_csv('s3_qlength.txt',index_col=None,sep=' ')\n\nprint(x.columns)\nx.columns = ['Time','Queue Length']\n# x.columns = ['Time','Switch Latency']\nx.plot(x='Time',y='Queue Length',kind='line')\n# x.plot(x='Time',y='Switch Latency',kind='line')\nplt.ylabel('Queue Length')\n# plt.ylabel('Switch Latency( in microseconds)')\nplt.xlabel('Time(in seconds)')\nplt.show()" ]
[ [ "matplotlib.pyplot.xlabel", "pandas.read_csv", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
jklymak/yt
[ "c1e23d74f5288846bd14c6aea3e2b78c70f7af62" ]
[ "yt/visualization/volume_rendering/render_source.py" ]
[ "import abc\nfrom functools import wraps\n\nimport numpy as np\n\nfrom yt.config import ytcfg\nfrom yt.data_objects.image_array import ImageArray\nfrom yt.funcs import ensure_numpy_array, is_sequence, mylog\nfrom yt.geometry.grid_geometry_handler import GridIndex\nfrom yt.geometry.oct_geometry_handler import OctreeIndex\nfrom yt.utilities.amr_kdtree.api import AMRKDTree\nfrom yt.utilities.lib.bounding_volume_hierarchy import BVH\nfrom yt.utilities.lib.misc_utilities import zlines, zpoints\nfrom yt.utilities.lib.octree_raytracing import OctreeRayTracing\nfrom yt.utilities.lib.partitioned_grid import PartitionedGrid\nfrom yt.utilities.on_demand_imports import NotAModule\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import (\n ParallelAnalysisInterface,\n)\nfrom yt.visualization.image_writer import apply_colormap\n\nfrom .transfer_function_helper import TransferFunctionHelper\nfrom .transfer_functions import (\n ColorTransferFunction,\n ProjectionTransferFunction,\n TransferFunction,\n)\nfrom .utils import (\n data_source_or_all,\n get_corners,\n new_interpolated_projection_sampler,\n new_mesh_sampler,\n new_projection_sampler,\n new_volume_render_sampler,\n)\nfrom .zbuffer_array import ZBuffer\n\ntry:\n from yt.utilities.lib.embree_mesh import mesh_traversal\n# Catch ValueError in case size of objects in Cython change\nexcept (ImportError, ValueError):\n mesh_traversal = NotAModule(\"pyembree\")\n ytcfg[\"yt\", \"ray_tracing_engine\"] = \"yt\"\ntry:\n from yt.utilities.lib.embree_mesh import mesh_construction\n# Catch ValueError in case size of objects in Cython change\nexcept (ImportError, ValueError):\n mesh_construction = NotAModule(\"pyembree\")\n ytcfg[\"yt\", \"ray_tracing_engine\"] = \"yt\"\n\n\ndef invalidate_volume(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n ret = f(*args, **kwargs)\n obj = args[0]\n if isinstance(obj.transfer_function, ProjectionTransferFunction):\n obj.sampler_type = \"projection\"\n obj._log_field = False\n obj._use_ghost_zones = False\n del obj.volume\n obj._volume_valid = False\n return ret\n\n return wrapper\n\n\ndef validate_volume(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n obj = args[0]\n fields = [obj.field]\n log_fields = [obj.log_field]\n if obj.weight_field is not None:\n fields.append(obj.weight_field)\n log_fields.append(obj.log_field)\n if not obj._volume_valid:\n obj.volume.set_fields(\n fields, log_fields, no_ghost=(not obj.use_ghost_zones)\n )\n obj._volume_valid = True\n return f(*args, **kwargs)\n\n return wrapper\n\n\nclass RenderSource(ParallelAnalysisInterface):\n\n \"\"\"Base Class for Render Sources.\n\n Will be inherited for volumes, streamlines, etc.\n\n \"\"\"\n\n volume_method = None\n\n def __init__(self):\n super().__init__()\n self.opaque = False\n self.zbuffer = None\n\n @abc.abstractmethod\n def render(self, camera, zbuffer=None):\n pass\n\n @abc.abstractmethod\n def _validate(self):\n pass\n\n\nclass OpaqueSource(RenderSource):\n \"\"\"A base class for opaque render sources.\n\n Will be inherited from for LineSources, BoxSources, etc.\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.opaque = True\n\n def set_zbuffer(self, zbuffer):\n self.zbuffer = zbuffer\n\n\ndef create_volume_source(data_source, field):\n data_source = data_source_or_all(data_source)\n ds = data_source.ds\n index_class = ds.index.__class__\n if issubclass(index_class, GridIndex):\n return KDTreeVolumeSource(data_source, field)\n elif issubclass(index_class, OctreeIndex):\n return OctreeVolumeSource(data_source, 
field)\n else:\n raise NotImplementedError\n\n\nclass VolumeSource(RenderSource, abc.ABC):\n \"\"\"A class for rendering data from a volumetric data source\n\n Examples of such sources include a sphere, cylinder, or the\n entire computational domain.\n\n A :class:`VolumeSource` provides the framework to decompose an arbitrary\n yt data source into bricks that can be traversed and volume rendered.\n\n Parameters\n ----------\n data_source: :class:`AMR3DData` or :class:`Dataset`, optional\n This is the source to be rendered, which can be any arbitrary yt\n data object or dataset.\n field : string\n The name of the field to be rendered.\n\n Examples\n --------\n\n The easiest way to make a VolumeSource is to use the volume_render\n function, so that the VolumeSource gets created automatically. This\n example shows how to do this and then access the resulting source:\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> im, sc = yt.volume_render(ds)\n >>> volume_source = sc.get_source(0)\n\n You can also create VolumeSource instances by hand and add them to Scenes.\n This example manually creates a VolumeSource, adds it to a scene, sets the\n camera, and renders an image.\n\n >>> import yt\n >>> from yt.visualization.volume_rendering.api import\\\n ... Scene, create_volume_source, Camera\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> sc = Scene()\n >>> source = create_volume_source(ds.all_data(), 'density')\n >>> sc.add_source(source)\n >>> sc.add_camera()\n >>> im = sc.render()\n\n \"\"\"\n\n _image = None\n data_source = None\n volume_method = None\n\n def __init__(self, data_source, field):\n r\"\"\"Initialize a new volumetric source for rendering.\"\"\"\n super().__init__()\n self.data_source = data_source_or_all(data_source)\n field = self.data_source._determine_fields(field)[0]\n self.current_image = None\n self.check_nans = False\n self.num_threads = 0\n self.num_samples = 10\n self.sampler_type = \"volume-render\"\n\n self._volume_valid = False\n\n # these are caches for properties, defined below\n self._volume = None\n self._transfer_function = None\n self._field = field\n self._log_field = self.data_source.ds.field_info[field].take_log\n self._use_ghost_zones = False\n self._weight_field = None\n\n self.tfh = TransferFunctionHelper(self.data_source.pf)\n self.tfh.set_field(self.field)\n\n @property\n def transfer_function(self):\n \"\"\"The transfer function associated with this VolumeSource\"\"\"\n if self._transfer_function is not None:\n return self._transfer_function\n\n if self.tfh.tf is not None:\n self._transfer_function = self.tfh.tf\n return self._transfer_function\n\n mylog.info(\"Creating transfer function\")\n self.tfh.set_field(self.field)\n self.tfh.set_log(self.log_field)\n self.tfh.build_transfer_function()\n self.tfh.setup_default()\n self._transfer_function = self.tfh.tf\n\n return self._transfer_function\n\n @transfer_function.setter\n def transfer_function(self, value):\n self.tfh.tf = None\n valid_types = (\n TransferFunction,\n ColorTransferFunction,\n ProjectionTransferFunction,\n type(None),\n )\n if not isinstance(value, valid_types):\n raise RuntimeError(\n \"transfer_function not a valid type, \"\n \"received object of type %s\" % type(value)\n )\n if isinstance(value, ProjectionTransferFunction):\n self.sampler_type = \"projection\"\n if self._volume is not None:\n fields = [self.field]\n if self.weight_field is not None:\n fields.append(self.weight_field)\n self._volume_valid = False\n self._transfer_function 
= value\n\n @property\n def volume(self):\n \"\"\"The abstract volume associated with this VolumeSource\n\n This object does the heavy lifting to access data in an efficient manner\n using a KDTree\n \"\"\"\n return self._get_volume()\n\n @volume.setter\n def volume(self, value):\n assert isinstance(value, AMRKDTree)\n del self._volume\n self._field = value.fields\n self._log_field = value.log_fields\n self._volume = value\n assert self._volume_valid\n\n @volume.deleter\n def volume(self):\n del self._volume\n self._volume = None\n\n @property\n def field(self):\n \"\"\"The field to be rendered\"\"\"\n return self._field\n\n @field.setter\n @invalidate_volume\n def field(self, value):\n field = self.data_source._determine_fields(value)\n if len(field) > 1:\n raise RuntimeError(\n \"VolumeSource.field can only be a single field but received \"\n \"multiple fields: %s\"\n ) % field\n field = field[0]\n if self._field != field:\n log_field = self.data_source.ds.field_info[field].take_log\n self.tfh.bounds = None\n else:\n log_field = self._log_field\n self._log_field = log_field\n self._field = value\n self.transfer_function = None\n self.tfh.set_field(value)\n self.tfh.set_log(log_field)\n\n @property\n def log_field(self):\n \"\"\"Whether or not the field rendering is computed in log space\"\"\"\n return self._log_field\n\n @log_field.setter\n @invalidate_volume\n def log_field(self, value):\n self.transfer_function = None\n self.tfh.set_log(value)\n self._log_field = value\n\n @property\n def use_ghost_zones(self):\n \"\"\"Whether or not ghost zones are used to estimate vertex-centered data\n values at grid boundaries\"\"\"\n return self._use_ghost_zones\n\n @use_ghost_zones.setter\n @invalidate_volume\n def use_ghost_zones(self, value):\n self._use_ghost_zones = value\n\n @property\n def weight_field(self):\n \"\"\"The weight field for the rendering\n\n Currently this is only used for off-axis projections.\n \"\"\"\n return self._weight_field\n\n @weight_field.setter\n @invalidate_volume\n def weight_field(self, value):\n self._weight_field = value\n\n def set_transfer_function(self, transfer_function):\n \"\"\"Set transfer function for this source\"\"\"\n self.transfer_function = transfer_function\n return self\n\n def _validate(self):\n \"\"\"Make sure that all dependencies have been met\"\"\"\n if self.data_source is None:\n raise RuntimeError(\"Data source not initialized\")\n\n def set_volume(self, volume):\n \"\"\"Associates an AMRKDTree with the VolumeSource\"\"\"\n self.volume = volume\n return self\n\n def set_field(self, field):\n \"\"\"Set the source's field to render\n\n Parameters\n ----------\n\n field: field name\n The field to render\n \"\"\"\n self.field = field\n return self\n\n def set_log(self, log_field):\n \"\"\"Set whether the rendering of the source's field is done in log space\n\n Generally volume renderings of data whose values span a large dynamic\n range should be done on log space and volume renderings of data with\n small dynamic range should be done in linear space.\n\n Parameters\n ----------\n\n log_field: boolean\n If True, the volume rendering will be done in log space, and if False\n will be done in linear space.\n \"\"\"\n self.log_field = log_field\n return self\n\n def set_weight_field(self, weight_field):\n \"\"\"Set the source's weight field\n\n .. 
note::\n\n This is currently only used for renderings using the\n ProjectionTransferFunction\n\n Parameters\n ----------\n\n weight_field: field name\n The weight field to use in the rendering\n \"\"\"\n self.weight_field = weight_field\n return self\n\n def set_use_ghost_zones(self, use_ghost_zones):\n \"\"\"Set whether or not interpolation at grid edges uses ghost zones\n\n Parameters\n ----------\n\n use_ghost_zones: boolean\n If True, the AMRKDTree estimates vertex centered data using ghost\n zones, which can eliminate seams in the resulting volume rendering.\n Defaults to False for performance reasons.\n\n \"\"\"\n self.use_ghost_zones = use_ghost_zones\n return self\n\n def set_sampler(self, camera, interpolated=True):\n \"\"\"Sets a volume render sampler\n\n The type of sampler is determined based on the ``sampler_type`` attribute\n of the VolumeSource. Currently the ``volume_render`` and ``projection``\n sampler types are supported.\n\n The 'interpolated' argument is only meaningful for projections. If True,\n the data is first interpolated to the cell vertices, and then\n tri-linearly interpolated to the ray sampling positions. If False, then\n the cell-centered data is simply accumulated along the\n ray. Interpolation is always performed for volume renderings.\n\n \"\"\"\n if self.sampler_type == \"volume-render\":\n sampler = new_volume_render_sampler(camera, self)\n elif self.sampler_type == \"projection\" and interpolated:\n sampler = new_interpolated_projection_sampler(camera, self)\n elif self.sampler_type == \"projection\":\n sampler = new_projection_sampler(camera, self)\n else:\n NotImplementedError(f\"{self.sampler_type} not implemented yet\")\n self.sampler = sampler\n assert self.sampler is not None\n\n @abc.abstractmethod\n def _get_volume(self):\n \"\"\"The abstract volume associated with this VolumeSource\n\n This object does the heavy lifting to access data in an efficient manner\n using a KDTree\n \"\"\"\n pass\n\n @abc.abstractmethod\n @validate_volume\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance # noqa: E501\n A zbuffer array. This is used for opaque sources to determine the\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. 
Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` instance containing\n the rendered image.\n\n \"\"\"\n pass\n\n def finalize_image(self, camera, image):\n \"\"\"Parallel reduce the image.\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance\n The camera used to produce the volume rendering image.\n image: :class:`yt.data_objects.image_array.ImageArray` instance\n A reference to an image to fill\n \"\"\"\n image.shape = camera.resolution[0], camera.resolution[1], 4\n # If the call is from VR, the image is rotated by 180 to get correct\n # up direction\n if not self.transfer_function.grey_opacity:\n image[:, :, 3] = 1\n return image\n\n def __repr__(self):\n disp = f\"<Volume Source>:{str(self.data_source)} \"\n disp += f\"transfer_function:{str(self._transfer_function)}\"\n return disp\n\n\nclass KDTreeVolumeSource(VolumeSource):\n volume_method = \"KDTree\"\n\n def _get_volume(self):\n \"\"\"The abstract volume associated with this VolumeSource\n\n This object does the heavy lifting to access data in an efficient manner\n using a KDTree\n \"\"\"\n\n if self._volume is None:\n mylog.info(\"Creating volume\")\n volume = AMRKDTree(self.data_source.ds, data_source=self.data_source)\n self._volume = volume\n\n return self._volume\n\n @validate_volume\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera`\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`\n A zbuffer array. This is used for opaque sources to determine the\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. 
Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` containing\n the rendered image.\n\n \"\"\"\n self.zbuffer = zbuffer\n self.set_sampler(camera)\n assert self.sampler is not None\n\n mylog.debug(\"Casting rays\")\n total_cells = 0\n if self.check_nans:\n for brick in self.volume.bricks:\n for data in brick.my_data:\n if np.any(np.isnan(data)):\n raise RuntimeError\n\n for brick in self.volume.traverse(camera.lens.viewpoint):\n mylog.debug(\"Using sampler %s\", self.sampler)\n self.sampler(brick, num_threads=self.num_threads)\n total_cells += np.prod(brick.my_data[0].shape)\n mylog.debug(\"Done casting rays\")\n self.current_image = self.finalize_image(camera, self.sampler.aimage)\n\n if zbuffer is None:\n self.zbuffer = ZBuffer(\n self.current_image, np.full(self.current_image.shape[:2], np.inf)\n )\n\n return self.current_image\n\n def finalize_image(self, camera, image):\n if self._volume is not None:\n image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)\n\n return super().finalize_image(camera, image)\n\n\nclass OctreeVolumeSource(VolumeSource):\n volume_method = \"Octree\"\n\n def __init__(self, *args, **kwa):\n super().__init__(*args, **kwa)\n self.set_use_ghost_zones(True)\n\n def _get_volume(self):\n \"\"\"The abstract volume associated with this VolumeSource\n\n This object does the heavy lifting to access data in an efficient manner\n using an octree.\n \"\"\"\n\n if self._volume is None:\n mylog.info(\"Creating volume\")\n volume = OctreeRayTracing(self.data_source)\n self._volume = volume\n\n return self._volume\n\n @validate_volume\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance # noqa: E501\n A zbuffer array. This is used for opaque sources to determine the\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` instance containing\n the rendered image.\n\n \"\"\"\n self.zbuffer = zbuffer\n self.set_sampler(camera)\n if self.sampler is None:\n raise RuntimeError(\n \"No sampler set. 
This is likely a bug as it should never happen.\"\n )\n\n data = self.data_source\n\n dx = data[\"dx\"].to(\"unitary\").value[:, None]\n xyz = np.stack([data[_].to(\"unitary\").value for _ in \"x y z\".split()], axis=-1)\n LE = xyz - dx / 2\n RE = xyz + dx / 2\n\n mylog.debug(\"Gathering data\")\n dt = np.stack(list(self.volume.data) + [*LE.T, *RE.T], axis=-1).reshape(\n 1, len(dx), 14, 1\n )\n mask = np.full(dt.shape[1:], 1, dtype=np.uint8)\n dims = np.array([1, 1, 1], dtype=\"int64\")\n pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims, n_fields=1)\n\n mylog.debug(\"Casting rays\")\n self.sampler(pg, oct=self.volume.octree)\n mylog.debug(\"Done casting rays\")\n\n self.current_image = self.finalize_image(camera, self.sampler.aimage)\n\n if zbuffer is None:\n self.zbuffer = ZBuffer(\n self.current_image, np.full(self.current_image.shape[:2], np.inf)\n )\n\n return self.current_image\n\n\nclass MeshSource(OpaqueSource):\n \"\"\"A source for unstructured mesh data.\n\n This functionality requires the embree ray-tracing engine and the\n associated pyembree python bindings to be installed in order to\n function.\n\n A :class:`MeshSource` provides the framework to volume render\n unstructured mesh data.\n\n Parameters\n ----------\n data_source: :class:`AMR3DData` or :class:`Dataset`, optional\n This is the source to be rendered, which can be any arbitrary yt\n data object or dataset.\n field : string\n The name of the field to be rendered.\n\n Examples\n --------\n >>> source = MeshSource(ds, ('connect1', 'convected'))\n \"\"\"\n\n _image = None\n data_source = None\n\n def __init__(self, data_source, field):\n r\"\"\"Initialize a new unstructured mesh source for rendering.\"\"\"\n super().__init__()\n self.data_source = data_source_or_all(data_source)\n field = self.data_source._determine_fields(field)[0]\n self.field = field\n self.volume = None\n self.current_image = None\n self.engine = ytcfg.get(\"yt\", \"ray_tracing_engine\")\n\n # default color map\n self._cmap = ytcfg.get(\"yt\", \"default_colormap\")\n self._color_bounds = None\n\n # default mesh annotation options\n self._annotate_mesh = False\n self._mesh_line_color = None\n self._mesh_line_alpha = 1.0\n\n # Error checking\n assert self.field is not None\n assert self.data_source is not None\n if self.field[0] == \"all\":\n raise NotImplementedError(\n \"Mesh unions are not implemented \" \"for 3D rendering\"\n )\n\n if self.engine == \"embree\":\n self.volume = mesh_traversal.YTEmbreeScene()\n self.build_volume_embree()\n elif self.engine == \"yt\":\n self.build_volume_bvh()\n else:\n raise NotImplementedError(\n \"Invalid ray-tracing engine selected. \" \"Choices are 'embree' and 'yt'.\"\n )\n\n def cmap():\n \"\"\"\n This is the name of the colormap that will be used when rendering\n this MeshSource object. Should be a string, like 'arbre', or 'dusk'.\n\n \"\"\"\n\n def fget(self):\n return self._cmap\n\n def fset(self, cmap_name):\n self._cmap = cmap_name\n if hasattr(self, \"data\"):\n self.current_image = self.apply_colormap()\n\n return locals()\n\n cmap = property(**cmap())\n\n def color_bounds():\n \"\"\"\n These are the bounds that will be used with the colormap to the display\n the rendered image. Should be a (vmin, vmax) tuple, like (0.0, 2.0). 
If\n None, the bounds will be automatically inferred from the max and min of\n the rendered data.\n\n \"\"\"\n\n def fget(self):\n return self._color_bounds\n\n def fset(self, bounds):\n self._color_bounds = bounds\n if hasattr(self, \"data\"):\n self.current_image = self.apply_colormap()\n\n return locals()\n\n color_bounds = property(**color_bounds())\n\n def _validate(self):\n \"\"\"Make sure that all dependencies have been met\"\"\"\n if self.data_source is None:\n raise RuntimeError(\"Data source not initialized.\")\n\n if self.volume is None:\n raise RuntimeError(\"Volume not initialized.\")\n\n def build_volume_embree(self):\n \"\"\"\n\n This constructs the mesh that will be ray-traced by pyembree.\n\n \"\"\"\n ftype, fname = self.field\n mesh_id = int(ftype[-1]) - 1\n index = self.data_source.ds.index\n offset = index.meshes[mesh_id]._index_offset\n field_data = self.data_source[self.field].d # strip units\n\n vertices = index.meshes[mesh_id].connectivity_coords\n indices = index.meshes[mesh_id].connectivity_indices - offset\n\n # if this is an element field, promote to 2D here\n if len(field_data.shape) == 1:\n field_data = np.expand_dims(field_data, 1)\n\n # Here, we decide whether to render based on high-order or\n # low-order geometry. Right now, high-order geometry is only\n # implemented for 20-point hexes.\n if indices.shape[1] == 20 or indices.shape[1] == 10:\n self.mesh = mesh_construction.QuadraticElementMesh(\n self.volume, vertices, indices, field_data\n )\n else:\n # if this is another type of higher-order element, we demote\n # to 1st order here, for now.\n if indices.shape[1] == 27:\n # hexahedral\n mylog.warning(\"27-node hexes not yet supported, dropping to 1st order.\")\n field_data = field_data[:, 0:8]\n indices = indices[:, 0:8]\n\n self.mesh = mesh_construction.LinearElementMesh(\n self.volume, vertices, indices, field_data\n )\n\n def build_volume_bvh(self):\n \"\"\"\n\n This constructs the mesh that will be ray-traced.\n\n \"\"\"\n ftype, fname = self.field\n mesh_id = int(ftype[-1]) - 1\n index = self.data_source.ds.index\n offset = index.meshes[mesh_id]._index_offset\n field_data = self.data_source[self.field].d # strip units\n\n vertices = index.meshes[mesh_id].connectivity_coords\n indices = index.meshes[mesh_id].connectivity_indices - offset\n\n # if this is an element field, promote to 2D here\n if len(field_data.shape) == 1:\n field_data = np.expand_dims(field_data, 1)\n\n # Here, we decide whether to render based on high-order or\n # low-order geometry.\n if indices.shape[1] == 27:\n # hexahedral\n mylog.warning(\"27-node hexes not yet supported, dropping to 1st order.\")\n field_data = field_data[:, 0:8]\n indices = indices[:, 0:8]\n\n self.volume = BVH(vertices, indices, field_data)\n\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera`\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`\n A zbuffer array. This is used for opaque sources to determine the\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. 
Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` containing\n the rendered image.\n\n \"\"\"\n\n shape = (camera.resolution[0], camera.resolution[1], 4)\n if zbuffer is None:\n empty = np.empty(shape, dtype=\"float64\")\n z = np.empty(empty.shape[:2], dtype=\"float64\")\n empty[:] = 0.0\n z[:] = np.inf\n zbuffer = ZBuffer(empty, z)\n elif zbuffer.rgba.shape != shape:\n zbuffer = ZBuffer(zbuffer.rgba.reshape(shape), zbuffer.z.reshape(shape[:2]))\n self.zbuffer = zbuffer\n\n self.sampler = new_mesh_sampler(camera, self, engine=self.engine)\n\n mylog.debug(\"Casting rays\")\n self.sampler(self.volume)\n mylog.debug(\"Done casting rays\")\n\n self.finalize_image(camera)\n self.current_image = self.apply_colormap()\n\n zbuffer += ZBuffer(self.current_image.astype(\"float64\"), self.sampler.azbuffer)\n zbuffer.rgba = ImageArray(zbuffer.rgba)\n self.zbuffer = zbuffer\n self.current_image = self.zbuffer.rgba\n\n if self._annotate_mesh:\n self.current_image = self.annotate_mesh_lines(\n self._mesh_line_color, self._mesh_line_alpha\n )\n\n return self.current_image\n\n def finalize_image(self, camera):\n sam = self.sampler\n\n # reshape data\n Nx = camera.resolution[0]\n Ny = camera.resolution[1]\n self.data = sam.aimage[:, :, 0].reshape(Nx, Ny)\n\n def annotate_mesh_lines(self, color=None, alpha=1.0):\n r\"\"\"\n\n Modifies this MeshSource by drawing the mesh lines.\n This modifies the current image by drawing the element\n boundaries and returns the modified image.\n\n Parameters\n ----------\n color: array_like of shape (4,), optional\n The RGBA value to use to draw the mesh lines.\n Default is black.\n alpha : float, optional\n The opacity of the mesh lines. Default is 255 (solid).\n\n \"\"\"\n\n self.annotate_mesh = True\n self._mesh_line_color = color\n self._mesh_line_alpha = alpha\n\n if color is None:\n color = np.array([0, 0, 0, alpha])\n\n locs = [self.sampler.amesh_lines == 1]\n\n self.current_image[:, :, 0][locs] = color[0]\n self.current_image[:, :, 1][locs] = color[1]\n self.current_image[:, :, 2][locs] = color[2]\n self.current_image[:, :, 3][locs] = color[3]\n\n return self.current_image\n\n def apply_colormap(self):\n \"\"\"\n\n Applies a colormap to the current image without re-rendering.\n\n Parameters\n ----------\n cmap_name : string, optional\n An acceptable colormap. See either yt.visualization.color_maps or\n https://scipy-cookbook.readthedocs.io/items/Matplotlib_Show_colormaps.html .\n color_bounds : tuple of floats, optional\n The min and max to scale between. Outlying values will be clipped.\n\n Returns\n -------\n current_image : A new image with the specified color scale applied to\n the underlying data.\n\n\n \"\"\"\n\n image = (\n apply_colormap(\n self.data, color_bounds=self._color_bounds, cmap_name=self._cmap\n )\n / 255.0\n )\n alpha = image[:, :, 3]\n alpha[self.sampler.aimage_used == -1] = 0.0\n image[:, :, 3] = alpha\n return image\n\n def __repr__(self):\n disp = f\"<Mesh Source>:{str(self.data_source)} \"\n return disp\n\n\nclass PointSource(OpaqueSource):\n r\"\"\"A rendering source of opaque points in the scene.\n\n This class provides a mechanism for adding points to a scene; these\n points will be opaque, and can also be colored.\n\n Parameters\n ----------\n positions: array_like of shape (N, 3)\n The positions of points to be added to the scene. 
If specified with no\n units, the positions will be assumed to be in code units.\n colors : array_like of shape (N, 4), optional\n The colors of the points, including an alpha channel, in floating\n point running from 0..1.\n color_stride : int, optional\n The stride with which to access the colors when putting them on the\n scene.\n radii : array_like of shape (N), optional\n The radii of the points in the final image, in pixels (int)\n\n Examples\n --------\n\n This example creates a volume rendering and adds 1000 random points to\n the image:\n\n >>> import yt\n >>> import numpy as np\n >>> from yt.visualization.volume_rendering.api import PointSource\n >>> from yt.units import kpc\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n\n >>> im, sc = yt.volume_render(ds)\n\n >>> npoints = 1000\n >>> vertices = np.random.random([npoints, 3]) * 1000 * kpc\n >>> colors = np.random.random([npoints, 4])\n >>> colors[:,3] = 1.0\n\n >>> points = PointSource(vertices, colors=colors)\n >>> sc.add_source(points)\n\n >>> im = sc.render()\n\n \"\"\"\n\n _image = None\n data_source = None\n\n def __init__(self, positions, colors=None, color_stride=1, radii=None):\n assert positions.ndim == 2 and positions.shape[1] == 3\n if colors is not None:\n assert colors.ndim == 2 and colors.shape[1] == 4\n assert colors.shape[0] == positions.shape[0]\n if not is_sequence(radii):\n if radii is not None: # broadcast the value\n radii = radii * np.ones(positions.shape[0], dtype=\"int64\")\n else: # default radii to 0 pixels (i.e. point is 1 pixel wide)\n radii = np.zeros(positions.shape[0], dtype=\"int64\")\n else:\n assert radii.ndim == 1\n assert radii.shape[0] == positions.shape[0]\n self.positions = positions\n # If colors aren't individually set, make black with full opacity\n if colors is None:\n colors = np.ones((len(positions), 4))\n self.colors = colors\n self.color_stride = color_stride\n self.radii = radii\n\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera`\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`\n A zbuffer array. This is used for opaque sources to determine the\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` containing\n the rendered image.\n\n \"\"\"\n vertices = self.positions\n if zbuffer is None:\n empty = camera.lens.new_image(camera)\n z = np.empty(empty.shape[:2], dtype=\"float64\")\n empty[:] = 0.0\n z[:] = np.inf\n zbuffer = ZBuffer(empty, z)\n else:\n empty = zbuffer.rgba\n z = zbuffer.z\n\n # DRAW SOME POINTS\n camera.lens.setup_box_properties(camera)\n px, py, dz = camera.lens.project_to_plane(camera, vertices)\n\n zpoints(empty, z, px, py, dz, self.colors, self.radii, self.color_stride)\n\n self.zbuffer = zbuffer\n return zbuffer\n\n def __repr__(self):\n disp = \"<Point Source>\"\n return disp\n\n\nclass LineSource(OpaqueSource):\n r\"\"\"A render source for a sequence of opaque line segments.\n\n This class provides a mechanism for adding lines to a scene; these\n points will be opaque, and can also be colored.\n\n .. 
note::\n\n If adding a LineSource to your rendering causes the image to appear\n blank or fades a VolumeSource, try lowering the values specified in\n the alpha channel of the ``colors`` array.\n\n Parameters\n ----------\n positions: array_like of shape (N, 2, 3)\n The positions of the starting and stopping points for each line.\n For example,positions[0][0] and positions[0][1] would give the (x, y, z)\n coordinates of the beginning and end points of the first line,\n respectively. If specified with no units, assumed to be in code units.\n colors : array_like of shape (N, 4), optional\n The colors of the points, including an alpha channel, in floating\n point running from 0..1. The four channels correspond to r, g, b, and\n alpha values. Note that they correspond to the line segment succeeding\n each point; this means that strictly speaking they need only be (N-1)\n in length.\n color_stride : int, optional\n The stride with which to access the colors when putting them on the\n scene.\n\n Examples\n --------\n\n This example creates a volume rendering and then adds some random lines\n to the image:\n\n >>> import yt\n >>> import numpy as np\n >>> from yt.visualization.volume_rendering.api import LineSource\n >>> from yt.units import kpc\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n\n >>> im, sc = yt.volume_render(ds)\n\n >>> nlines = 4\n >>> vertices = np.random.random([nlines, 2, 3]) * 600 * kpc\n >>> colors = np.random.random([nlines, 4])\n >>> colors[:,3] = 1.0\n\n >>> lines = LineSource(vertices, colors)\n >>> sc.add_source(lines)\n\n >>> im = sc.render()\n\n \"\"\"\n\n _image = None\n data_source = None\n\n def __init__(self, positions, colors=None, color_stride=1):\n super().__init__()\n\n assert positions.ndim == 3\n assert positions.shape[1] == 2\n assert positions.shape[2] == 3\n if colors is not None:\n assert colors.ndim == 2\n assert colors.shape[1] == 4\n\n # convert the positions to the shape expected by zlines, below\n N = positions.shape[0]\n self.positions = positions.reshape((2 * N, 3))\n\n # If colors aren't individually set, make black with full opacity\n if colors is None:\n colors = np.ones((len(positions), 4))\n self.colors = colors\n self.color_stride = color_stride\n\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera`\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. 
Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` containing\n the rendered image.\n\n \"\"\"\n vertices = self.positions\n if zbuffer is None:\n empty = camera.lens.new_image(camera)\n z = np.empty(empty.shape[:2], dtype=\"float64\")\n empty[:] = 0.0\n z[:] = np.inf\n zbuffer = ZBuffer(empty, z)\n else:\n empty = zbuffer.rgba\n z = zbuffer.z\n\n # DRAW SOME LINES\n camera.lens.setup_box_properties(camera)\n px, py, dz = camera.lens.project_to_plane(camera, vertices)\n\n px = px.astype(\"int64\")\n py = py.astype(\"int64\")\n\n if len(px.shape) == 1:\n zlines(\n empty, z, px, py, dz, self.colors.astype(\"float64\"), self.color_stride\n )\n else:\n # For stereo-lens, two sets of pos for each eye are contained\n # in px...pz\n zlines(\n empty,\n z,\n px[0, :],\n py[0, :],\n dz[0, :],\n self.colors.astype(\"float64\"),\n self.color_stride,\n )\n zlines(\n empty,\n z,\n px[1, :],\n py[1, :],\n dz[1, :],\n self.colors.astype(\"float64\"),\n self.color_stride,\n )\n\n self.zbuffer = zbuffer\n return zbuffer\n\n def __repr__(self):\n disp = \"<Line Source>\"\n return disp\n\n\nclass BoxSource(LineSource):\n r\"\"\"A render source for a box drawn with line segments.\n This render source will draw a box, with transparent faces, in data\n space coordinates. This is useful for annotations.\n\n Parameters\n ----------\n left_edge: array-like of shape (3,), float\n The left edge coordinates of the box.\n right_edge : array-like of shape (3,), float\n The right edge coordinates of the box.\n color : array-like of shape (4,), float, optional\n The colors (including alpha) to use for the lines.\n Default is black with an alpha of 1.0.\n\n Examples\n --------\n\n This example shows how to use BoxSource to add an outline of the\n domain boundaries to a volume rendering.\n\n >>> import yt\n >>> from yt.visualization.volume_rendering.api import BoxSource\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>>\n >>> im, sc = yt.volume_render(ds)\n >>>\n >>> box_source = BoxSource(ds.domain_left_edge,\n ... ds.domain_right_edge,\n ... 
[1.0, 1.0, 1.0, 1.0])\n >>> sc.add_source(box_source)\n >>>\n >>> im = sc.render()\n\n \"\"\"\n\n def __init__(self, left_edge, right_edge, color=None):\n\n assert left_edge.shape == (3,)\n assert right_edge.shape == (3,)\n\n if color is None:\n color = np.array([1.0, 1.0, 1.0, 1.0])\n\n color = ensure_numpy_array(color)\n color.shape = (1, 4)\n corners = get_corners(left_edge.copy(), right_edge.copy())\n order = [0, 1, 1, 2, 2, 3, 3, 0]\n order += [4, 5, 5, 6, 6, 7, 7, 4]\n order += [0, 4, 1, 5, 2, 6, 3, 7]\n vertices = np.empty([24, 3])\n for i in range(3):\n vertices[:, i] = corners[order, i, ...].ravel(order=\"F\")\n vertices = vertices.reshape((12, 2, 3))\n\n super().__init__(vertices, color, color_stride=24)\n\n\nclass GridSource(LineSource):\n r\"\"\"A render source for drawing grids in a scene.\n\n This render source will draw blocks that are within a given data\n source, by default coloring them by their level of resolution.\n\n Parameters\n ----------\n data_source: :class:`~yt.data_objects.api.DataContainer`\n The data container that will be used to identify grids to draw.\n alpha : float\n The opacity of the grids to draw.\n cmap : color map name\n The color map to use to map resolution levels to color.\n min_level : int, optional\n Minimum level to draw\n max_level : int, optional\n Maximum level to draw\n\n Examples\n --------\n\n This example makes a volume rendering and adds outlines of all the\n AMR grids in the simulation:\n\n >>> import yt\n >>> from yt.visualization.volume_rendering.api import GridSource\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>>\n >>> im, sc = yt.volume_render(ds)\n >>>\n >>> grid_source = GridSource(ds.all_data(), alpha=1.0)\n >>>\n >>> sc.add_source(grid_source)\n >>>\n >>> im = sc.render()\n\n This example does the same thing, except it only draws the grids\n that are inside a sphere of radius (0.1, \"unitary\") located at the\n domain center:\n\n >>> import yt\n >>> from yt.visualization.volume_rendering.api import GridSource\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>>\n >>> im, sc = yt.volume_render(ds)\n >>>\n >>> dd = ds.sphere(\"c\", (0.1, \"unitary\"))\n >>> grid_source = GridSource(dd, alpha=1.0)\n >>>\n >>> sc.add_source(grid_source)\n >>>\n >>> im = sc.render()\n\n \"\"\"\n\n def __init__(\n self, data_source, alpha=0.3, cmap=None, min_level=None, max_level=None\n ):\n self.data_source = data_source_or_all(data_source)\n corners = []\n levels = []\n for block, _mask in self.data_source.blocks:\n block_corners = np.array(\n [\n [block.LeftEdge[0], block.LeftEdge[1], block.LeftEdge[2]],\n [block.RightEdge[0], block.LeftEdge[1], block.LeftEdge[2]],\n [block.RightEdge[0], block.RightEdge[1], block.LeftEdge[2]],\n [block.LeftEdge[0], block.RightEdge[1], block.LeftEdge[2]],\n [block.LeftEdge[0], block.LeftEdge[1], block.RightEdge[2]],\n [block.RightEdge[0], block.LeftEdge[1], block.RightEdge[2]],\n [block.RightEdge[0], block.RightEdge[1], block.RightEdge[2]],\n [block.LeftEdge[0], block.RightEdge[1], block.RightEdge[2]],\n ],\n dtype=\"float64\",\n )\n corners.append(block_corners)\n levels.append(block.Level)\n corners = np.dstack(corners)\n levels = np.array(levels)\n if cmap is None:\n cmap = ytcfg.get(\"yt\", \"default_colormap\")\n\n if max_level is not None:\n subset = levels <= max_level\n levels = levels[subset]\n corners = corners[:, :, subset]\n if min_level is not None:\n subset = levels >= min_level\n levels = levels[subset]\n corners = corners[:, :, subset]\n\n colors = (\n apply_colormap(\n 
levels * 1.0,\n color_bounds=[0, self.data_source.ds.index.max_level],\n cmap_name=cmap,\n )[0, :, :]\n / 255.0\n )\n colors[:, 3] = alpha\n\n order = [0, 1, 1, 2, 2, 3, 3, 0]\n order += [4, 5, 5, 6, 6, 7, 7, 4]\n order += [0, 4, 1, 5, 2, 6, 3, 7]\n\n vertices = np.empty([corners.shape[2] * 2 * 12, 3])\n for i in range(3):\n vertices[:, i] = corners[order, i, ...].ravel(order=\"F\")\n vertices = vertices.reshape((corners.shape[2] * 12, 2, 3))\n\n super().__init__(vertices, colors, color_stride=24)\n\n\nclass CoordinateVectorSource(OpaqueSource):\n r\"\"\"Draw coordinate vectors on the scene.\n\n This will draw a set of coordinate vectors on the camera image. They\n will appear in the lower right of the image.\n\n Parameters\n ----------\n colors: array-like of shape (3,4), optional\n The RGBA values to use to draw the x, y, and z vectors. The default is\n [[1, 0, 0, alpha], [0, 1, 0, alpha], [0, 0, 1, alpha]] where ``alpha``\n is set by the parameter below. If ``colors`` is set then ``alpha`` is\n ignored.\n alpha : float, optional\n The opacity of the vectors.\n\n Examples\n --------\n\n >>> import yt\n >>> from yt.visualization.volume_rendering.api import CoordinateVectorSource\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>>\n >>> im, sc = yt.volume_render(ds)\n >>>\n >>> coord_source = CoordinateVectorSource()\n >>>\n >>> sc.add_source(coord_source)\n >>>\n >>> im = sc.render()\n\n \"\"\"\n\n def __init__(self, colors=None, alpha=1.0):\n super().__init__()\n # If colors aren't individually set, make black with full opacity\n if colors is None:\n colors = np.zeros((3, 4))\n colors[0, 0] = 1.0 # x is red\n colors[1, 1] = 1.0 # y is green\n colors[2, 2] = 1.0 # z is blue\n colors[:, 3] = alpha\n self.colors = colors\n\n def render(self, camera, zbuffer=None):\n \"\"\"Renders an image using the provided camera\n\n Parameters\n ----------\n camera: :class:`yt.visualization.volume_rendering.camera.Camera`\n A volume rendering camera. Can be any type of camera.\n zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`\n A zbuffer array. This is used for opaque sources to determine the\n z position of the source relative to other sources. Only useful if\n you are manually calling render on multiple sources. 
Scene.render\n uses this internally.\n\n Returns\n -------\n A :class:`yt.data_objects.image_array.ImageArray` containing\n the rendered image.\n\n \"\"\"\n camera.lens.setup_box_properties(camera)\n center = camera.focus\n # Get positions at the focus\n positions = np.zeros([6, 3])\n positions[:] = center\n\n # Create vectors in the x,y,z directions\n for i in range(3):\n positions[2 * i + 1, i] += camera.width.in_units(\"code_length\").d[i] / 16.0\n\n # Project to the image plane\n px, py, dz = camera.lens.project_to_plane(camera, positions)\n\n if len(px.shape) == 1:\n dpx = px[1::2] - px[::2]\n dpy = py[1::2] - py[::2]\n\n # Set the center of the coordinates to be in the lower left of the image\n lpx = camera.resolution[0] / 8\n lpy = camera.resolution[1] - camera.resolution[1] / 8 # Upside-downsies\n\n # Offset the pixels according to the projections above\n px[::2] = lpx\n px[1::2] = lpx + dpx\n py[::2] = lpy\n py[1::2] = lpy + dpy\n dz[:] = 0.0\n else:\n # For stereo-lens, two sets of pos for each eye are contained in px...pz\n dpx = px[:, 1::2] - px[:, ::2]\n dpy = py[:, 1::2] - py[:, ::2]\n\n lpx = camera.resolution[0] / 16\n lpy = camera.resolution[1] - camera.resolution[1] / 8 # Upside-downsies\n\n # Offset the pixels according to the projections above\n px[:, ::2] = lpx\n px[:, 1::2] = lpx + dpx\n px[1, :] += camera.resolution[0] / 2\n py[:, ::2] = lpy\n py[:, 1::2] = lpy + dpy\n dz[:, :] = 0.0\n\n # Create a zbuffer if needed\n if zbuffer is None:\n empty = camera.lens.new_image(camera)\n z = np.empty(empty.shape[:2], dtype=\"float64\")\n empty[:] = 0.0\n z[:] = np.inf\n zbuffer = ZBuffer(empty, z)\n else:\n empty = zbuffer.rgba\n z = zbuffer.z\n\n # Draw the vectors\n\n px = px.astype(\"int64\")\n py = py.astype(\"int64\")\n\n if len(px.shape) == 1:\n zlines(empty, z, px, py, dz, self.colors.astype(\"float64\"))\n else:\n # For stereo-lens, two sets of pos for each eye are contained\n # in px...pz\n zlines(\n empty, z, px[0, :], py[0, :], dz[0, :], self.colors.astype(\"float64\")\n )\n zlines(\n empty, z, px[1, :], py[1, :], dz[1, :], self.colors.astype(\"float64\")\n )\n\n # Set the new zbuffer\n self.zbuffer = zbuffer\n return zbuffer\n\n def __repr__(self):\n disp = \"<Coordinates Source>\"\n return disp\n" ]
[ [ "numpy.expand_dims", "numpy.isnan", "numpy.dstack", "numpy.full", "numpy.ones", "numpy.prod", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
LCAV/lippmann-photography
[ "d2f1c0bb2817d762f813ca5b4de12a4f02ee3069", "d2f1c0bb2817d762f813ca5b4de12a4f02ee3069" ]
[ "test_cosine_transforms.py", "test_perspective.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 20 15:18:17 2018\n\n@author: gbaechle\n\"\"\"\nimport numpy as np\nimport scipy as sp\n\nimport matplotlib.pyplot as plt\n\nc = 299792458\n#c = 2\n\ndef cosine_transform(x, y, w, inv=False, theta=0):\n \n cosines = np.cos(x[None, :] * w[:, None]/c + theta)\n if inv:\n return 2/np.pi*np.trapz(cosines * y[None, :], x, axis=1)\n else:\n return np.trapz(cosines * y[None, :], x, axis=1)\n \ndef inverse_cosine(x, y, w, theta=0):\n \n nu = 2*np.mod(-theta, 2*np.pi)/np.pi\n integrand = (w[:,None]*x[None,:]/c)**nu * \\\n (sp.special.hyp1f1(1,1+nu, 1j*w[:,None]*x[None,:]/c) + \\\n sp.special.hyp1f1(1,1+nu, -1j*w[:,None]*x[None,:]/c))\n \n return 1/(c*np.pi*sp.special.gamma(nu+1)) * np.trapz(integrand * y[:, None], w, axis=0)\n \n \ndef fourier_transform(x, y, w, inv=False, theta=0):\n\n if inv:\n exps = np.exp(1j*( x[None, :] * w[:, None] + theta ))\n return 2/np.pi*np.trapz(exps * y[None, :], x, axis=1)\n else:\n exps = np.exp(-1j*( x[None, :] * w[:, None] + theta ))\n return np.trapz(exps * y[None, :], x, axis=1)\n \n \ndef sine_transform(x, y, w, inv=False, theta=0):\n \n sines = np.sin(x[None, :] * w[:, None] + theta)\n if inv:\n return 2/np.pi*np.trapz(sines * y[None, :], x, axis=1)\n else:\n return np.trapz(sines * y[None, :], x, axis=1)\n \n\n\ndef lippmann_transform(x, y, w, theta=0):\n\n cosines = np.cos(x[None, :] * w[:, None] - theta)\n return np.trapz(cosines * y[None, :], w, axis=1)\n \n\ndef plt_complex(x,y,ax=None):\n \n if ax is None:\n plt.figure()\n ax = plt.gca()\n \n ax.plot(x,np.real(y))\n ax.plot(x,np.imag(y), ':')\n\n\nif __name__ == '__main__':\n \n plt.close('all')\n \n N = 1000\n theta = np.pi/3\n \n x = np.linspace(0,N,N)\n w = np.linspace(0,c/10,N)\n x_sym = np.linspace(-N,N,2*N-1)\n w_sym = np.linspace(-1/10, 1/10, 2*N-1)\n \n y = sp.stats.norm(loc=N/3, scale=50).pdf(x)\n y_e = sp.stats.norm(loc=N/3, scale=50).pdf(x_sym) + sp.stats.norm(loc=-N/3, scale=50).pdf(x_sym)\n y_o = sp.stats.norm(loc=N/3, scale=50).pdf(x_sym) - sp.stats.norm(loc=-N/3, scale=50).pdf(x_sym)\n \n y_f = np.cos(theta)*y_e + np.sin(theta)/1j*y_o\n \n z = cosine_transform(x, y, w, theta=theta)\n z_s = np.gradient(z, w[1])\n# z_s = sp.misc.derivative(z, w[1])\n z_f = z -1j*z_s\n# z_f[w==0] = z[0] # correct for w = 0\n \n \n y_magic = inverse_cosine(x, z, w, theta=theta)\n plt.figure(); plt.plot(x,y); plt.plot(x, y_magic, 'r:')\n \n plt.figure(); plt.plot(w,z); plt.plot(w,sine_transform(x, y, w, theta=theta), 'r:')\n plt.figure(); plt.plot(w,np.real(z_f)); plt.plot(w,np.imag(z_f), 'r:')\n z_f *= np.exp(-1j*theta)\n\n\n z_sym = np.r_[np.conj(z[:0:-1]), z]\n y2 = cosine_transform(w, z, x, inv=True, theta=theta)\n \n zf = fourier_transform(x_sym, y_sym, w_sym, theta=theta)\n y3 = fourier_transform(w_sym, z_sym, x_sym, inv=True, theta=0)\n z2 = np.cos(theta)*cosine_transform(x, y, w) + np.sin(theta)/1j*sine_transform(x, y, w)\n z3 = fourier_transform(x_sym, y_f, w_sym)\n \n plt.figure(); plt.plot(w,np.real(z_f)); plt.plot(w,np.imag(z_f), 'r:')\n plt.figure(); plt.plot(w_sym,zf); plt.plot(w_sym,np.imag(zf), 'r:')\n \n plt.figure(); plt.plot(x,y); plt.plot(x,y2, 'r:');# plt.plot(x_sym,y3/(np.cos(theta)+1/1j*np.sin(theta)), 'g--')\n plt.figure(); plt.plot(x,y); plt.plot(x_sym, y_f, 'r:'); plt.plot(x_sym, np.imag(y_f), 'g--')\n \n plt.figure(); plt.plot(w_sym,z_sym); plt.plot(w_sym,zf/2, 'r:'); plt.plot(w_sym, z3/2, 'g--')\n \n plt_complex(w_sym, z3/2)\n plt_complex(w_sym, zf/2)\n \n \n \n \n ", "# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Tue Jul 19 16:26:21 2016\n\n@author: gbaechle\n\"\"\"\n\nimport numpy as np\nimport seaborn as sns\nfrom scipy import misc, io\nimport matplotlib.pyplot as plt\nimport copy\nimport color_tools as ct\n\nsns.set_palette(\"Blues_r\")\nsns.set_style(\"whitegrid\")\n\nsns.set_context(\"notebook\", font_scale=1.0, rc={\"lines.linewidth\": 3.0})\n\nfrom tools import *\nfrom gui_manager import GuiManager\n\nplt.close(\"all\")\n\n\npath_CAVE = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/CAVE - multispectral image database/fake_and_real_strawberries_ms/fake_and_real_strawberries_ms'\npath_Suwannee = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Multispectral image databases/Gulf_Wetlands_Sample_Rad/Suwannee_0609-1331_rad_small.mat'\npath_PURDUE = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Purdue - DC'\n\npath = 'images/final_small.jpg'\npath = 'images/final_raw_small.jpg'\nsave_images_path = 'frames_DC/'\nn_frames = 150\nn_samples = 34\n\nDIFFUSE = False\nalpha = 10 #angle of the prism (in DEGREES)\nn1 = 1.45 #refraction index of glass\nn2 = 1.0002 #refraction index of air\n\n\n \n#lippmann_plate = create_multispectral_image_discrete(path, n_samples)\n\n#lippmann_plate = load_multispectral_image_CAVE(path_CAVE)\n#lippmann_plate = load_multispectral_image_Suwannee(path_Suwannee)\nlippmann_plate = load_multispectral_image_PURDUE(path_PURDUE)\n\nlippmann_plate = lippmann_plate.to_uniform_freq(n_samples)\n\nlippmann_plate.compute_new_spectrum()\n\n#im = misc.imread(path).astype(float)/255.0\n\n#angles = np.linspace(0, theta, n_frames)\n\n\nr = 10 \nz_max = 10-7.07\nz_max = r\nz = r - np.arange(n_frames)/(n_frames-1)*z_max\nxplusy = np.sqrt( (r**2 - z**2) )\nangles = np.pi/2.-np.arctan(z/xplusy)\n\ntheta_i = from_viewing_angle_to_theta_i(-angles, np.deg2rad(alpha), n1, n2, deg=False)\n\nshape = lippmann_plate.spectrums.intensities.shape\nimages = np.zeros((shape[0], shape[1], 3, n_frames))\n\n\n\nfor idx, angle in enumerate( theta_i ):\n \n print(idx)\n \n #create a copy of the plate\n lippmann_copy = copy.deepcopy(lippmann_plate)\n \n #shift of the spectrum towards the blues\n #lippmann_copy.spectrums.blue_shift(angle)\n if not DIFFUSE:\n lippmann_copy.spectrums.blue_shift(1./np.cos(angle) )\n else:\n lippmann_copy.spectrums.blue_shift(0.5 + 0.5/np.cos(angle) )\n\n lippmann_copy.spectrums.rgb_colors = None\n lippmann_copy.spectrums.xyz_colors = None\n im2 = lippmann_copy.spectrums.compute_rgb(sqrt=False)\n\n# im2 = image_perspective_transform(im2, angle=theta_i)\n# images[:,:,:,idx] = im2*np.cos(angle)\n images[:,:,:,idx] = im2\n \n #gamma correction\n# im2 = im2**2.2\n\n plt.imsave(save_images_path + '%.3d' %idx + '.png', im2)\n\n\nimage_diffuse = np.mean(images, axis=3)\nplt.figure()\nplt.imshow(image_diffuse)\n\nlippmann_plate.spectrums.compute_rgb()\n\nwavelengths = lippmann_plate.spectrums.wave_lengths\nnew_spectrums = ct.reconstruct_spectrum_from_rgb_shifts(images, theta_i, wavelengths)\nlippmann_plate.new_spectrums = new_spectrums\n\nlippmann_plate.new_spectrums.compute_rgb(sqrt=False)\n\n#show both spectrums\ngui_manager = GuiManager(lippmann_plate, normalize_spectrums=False, gamma_correct=False)\ngui_manager.show()\n\n\nif DIFFUSE:\n plt.imsave(save_images_path + 'diffuse.png', image_diffuse**2.2)\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.gca", "scipy.special.hyp1f1", "numpy.imag", "scipy.special.gamma", "numpy.conj", "numpy.linspace", "numpy.gradient", "numpy.cos", "numpy.sin", "matplotlib.pyplot.plot", "scipy.stats.norm", "numpy.real", "matplotlib.pyplot.close", "numpy.mod", "numpy.exp", "numpy.trapz", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.imsave", "matplotlib.pyplot.imshow", "numpy.sqrt", "numpy.arctan", "numpy.arange", "numpy.cos", "numpy.deg2rad", "numpy.mean", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
luizgfalqueto/algNum1
[ "e8c28656bf2abdd18b4abe42a6668dbf188f3f7d" ]
[ "ajusteCurvas.py" ]
[ "from sympy import Symbol\nimport matplotlib.pyplot as plt\n\n\ndef geraFuncao(a, b):\n func = 0\n var = Symbol('x') # Simbolo para montar o polinomio interpolador\n\n return a + b * var\n\n\ndef calculaReta(a, b, x):\n Y = a + b * x\n\n return Y\n\n\ndef tabelaQuadradosMinimos(x, y):\n x2 = 0\n xy = 0\n xi = 0\n yi = 0\n for i in range(len(x)):\n x2 += x[i] ** 2\n xy += x[i] * y[i]\n xi += x[i]\n yi += y[i]\n\n b = ((xi * yi) - (len(x) * xy)) / (xi ** 2 - (len(x) * x2))\n a = (yi - (b * xi)) / len(x)\n\n return xi, yi, xy, x2, a, b\n\n\ndef calculaDesvio(x, y, a, b):\n D = 0\n for i in range(len(x)):\n D += (y[i] - (a + b * x[i])) ** 2\n\n return D\n\n\ndef calculaAjuste(x, y, a, b):\n num = 0\n den1 = 0\n den2 = 0\n for i in range(len(x)):\n num += (y[i] - a - b * x[i]) ** 2\n den1 += y[i] ** 2\n den2 += y[i]\n\n return 1 - num / (den1 - (1 / len(x)) * den2 ** 2)\n\n\ndef mostraGrafico(x, y, a, b, funcao, val):\n vetorXRetaAjuste = [61, 81]\n vetorYRetaAjuste = [calculaReta(a, b, 61), calculaReta(a, b, 81)]\n\n if val != 'null':\n pontoY = round(a + b * val, 2)\n plt.scatter(val, pontoY, color='yellow')\n plt.text(val, pontoY, f\"P({val},{pontoY})\", fontsize=8, horizontalalignment='right')\n\n plt.scatter(x[0], y[0], color='yellow')\n plt.scatter(x[1], y[1], color='yellow')\n plt.scatter(x[2], y[2], color='yellow')\n plt.scatter(x[3], y[3], color='yellow')\n plt.scatter(x[4], y[4], color='yellow')\n # plt.plot(x, y, marker='o', label='Curva real')\n plt.plot(vetorXRetaAjuste, vetorYRetaAjuste, label=f'f(x) = {funcao}')\n plt.title('Ajuste de Curvas')\n plt.xlabel('eixo X')\n plt.ylabel('eixo Y')\n plt.grid(True)\n plt.legend(loc=0)\n\n plt.show()\n\n\ndef main():\n # x = [0.3, 2.7, 4.5, 5.9, 7.8]\n # y = [1.8, 1.9, 3.1, 3.9, 3.3]\n\n x = [0.5, 1.2, 2.1, 3.5, 5.4]\n y = [5.1, 3.2, 2.8, 1.0, 0.4]\n\n xi, yi, xy, x2, a, b = tabelaQuadradosMinimos(x, y)\n\n funcao = geraFuncao(round(a, 4), round(b, 4))\n\n resp = float(input('Deseja avaliar um ponto da reta? (1 para SIM) ou (0 para NÃO): '))\n\n if resp == 1:\n val = float(input('Informe o valor que deseja calcular: '))\n else:\n val = 'null'\n\n mostraGrafico(x, y, a, b, funcao, val)\n\n a = 4.3271\n b = -0.7272\n\n D = calculaDesvio(x, y, a, b)\n\n r2 = calculaAjuste(x, y, a, b)\n\n print(f'a = {a}')\n print(f'b = {b}')\n print(f'\\nCurva: {funcao}')\n print(f'Valor do desvio = {D}')\n print(f'Qualidade do ajuste = {r2}')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.text", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
Alanthink/banditpylib
[ "d455424ed74be1850ee3969b7b31f08d49339005", "d455424ed74be1850ee3969b7b31f08d49339005" ]
[ "banditpylib/learners/mab_learner/softmax.py", "banditpylib/learners/mab_learner/ucb_test.py" ]
[ "from typing import Optional\n\nimport math\n\nimport numpy as np\n\nfrom banditpylib.arms import PseudoArm\nfrom banditpylib.data_pb2 import Context, Actions, Feedback\nfrom .utils import MABLearner\n\n\nclass Softmax(MABLearner):\n r\"\"\"Softmax policy\n\n At time :math:`t`, sample arm :math:`i` to play with sampling weight\n\n .. math::\n \\exp\\left( \\bar{\\mu}_i(t) / \\gamma \\right)\n\n where :math:`\\gamma` is a parameter to control how much exploration we want.\n\n :param int arm_num: number of arms\n :param float gamma: gamma\n :param Optional[str] name: alias name\n\n .. note::\n When :math:`\\gamma` approaches 0, the learner will have an increasing\n probability to select the arm with the maximum empirical mean rewards. When\n :math:`\\gamma` approaches to infinity, the policy of the learner tends to\n become uniform sampling.\n \"\"\"\n def __init__(self,\n arm_num: int,\n gamma: float = 1.0,\n name: Optional[str] = None):\n super().__init__(arm_num=arm_num, name=name)\n if gamma <= 0:\n raise ValueError('Gamma is expected greater than 0. Got %.2f.' % gamma)\n self.__gamma = gamma\n\n def _name(self) -> str:\n return 'softmax'\n\n def reset(self):\n self.__pseudo_arms = [PseudoArm() for arm_id in range(self.arm_num)]\n # Current time step\n self.__time = 1\n\n def actions(self, context: Context) -> Actions:\n del context\n\n actions = Actions()\n arm_pull = actions.arm_pulls.add()\n\n if self.__time <= self.arm_num:\n arm_pull.arm.id = self.__time - 1\n else:\n weights = np.array([\n math.exp(self.__pseudo_arms[arm_id].em_mean / self.__gamma)\n for arm_id in range(self.arm_num)\n ])\n arm_pull.arm.id = np.random.choice(\n self.arm_num, 1, p=[weight / sum(weights) for weight in weights])[0]\n\n arm_pull.times = 1\n return actions\n\n def update(self, feedback: Feedback):\n arm_feedback = feedback.arm_feedbacks[0]\n self.__pseudo_arms[arm_feedback.arm.id].update(\n np.array(arm_feedback.rewards))\n self.__time += 1\n", "from unittest.mock import MagicMock\n\nimport google.protobuf.text_format as text_format\n\nimport numpy as np\n\nfrom banditpylib.data_pb2 import Context, Actions, Feedback\nfrom .ucb import UCB\n\n\nclass TestUCB:\n \"\"\"Test UCB policy\"\"\"\n def test_simple_run(self):\n arm_num = 5\n horizon = 10\n learner = UCB(arm_num=arm_num)\n learner.reset()\n mock_ucb = np.array([1.2, 1, 1, 1, 1])\n # pylint: disable=protected-access\n learner._UCB__UCB = MagicMock(return_value=mock_ucb)\n\n # During the initial time steps, each arm is pulled once\n for time in range(1, arm_num + 1):\n assert learner.actions(\n Context()).SerializeToString() == text_format.Parse(\n \"\"\"\n arm_pulls <\n arm <\n id: {arm_id}\n >\n times: 1\n >\n \"\"\".format(arm_id=time - 1), Actions()).SerializeToString()\n learner.update(\n text_format.Parse(\n \"\"\"\n arm_feedbacks <\n arm <\n id: {arm_id}\n >\n rewards: 0\n >\n \"\"\".format(arm_id=time - 1), Feedback()))\n # For the left time steps, arm 0 is always the choice\n for _ in range(arm_num + 1, horizon + 1):\n assert learner.actions(\n Context()).SerializeToString() == text_format.Parse(\n \"\"\"\n arm_pulls <\n arm <\n id: 0\n >\n times: 1\n >\n \"\"\", Actions()).SerializeToString()\n learner.update(\n text_format.Parse(\n \"\"\"\n arm_feedbacks <\n arm <\n id: 0\n >\n rewards: 0\n >\n \"\"\", Feedback()))\n" ]
[ [ "numpy.array" ], [ "numpy.array" ] ]
nespinoza/mirage
[ "b5ab8f8c6a1e02bb8402aff6f4aedc62f1dabbbc", "b5ab8f8c6a1e02bb8402aff6f4aedc62f1dabbbc" ]
[ "mirage/catalogs/utils.py", "mirage/utils/siaf_interface.py" ]
[ "#! /usr/bin/env python\n\n\"\"\"This module contains utility functions related to source catalogs\n\"\"\"\nimport os\n\nfrom astropy.io import ascii\nimport numpy as np\nfrom mirage.utils.constants import IMAGING_ALLOWED_CATALOGS, WFSS_ALLOWED_CATALOGS, \\\n TS_IMAGING_ALLOWED_CATALOGS, TS_GRISM_ALLOWED_CATALOGS\n\n\ndef catalog_index_check(catalogs):\n \"\"\"Check to see if there are any overlaps in the index\n values of the provided catalogs. Search only by looking\n at the minimum and maximum index values, rather than\n searching through all the individual values.\n\n Parameters\n ----------\n catalogs : list\n List of catalog filenames\n\n Returns\n -------\n overlaps : bool\n True if there are overlapping index values\n \"\"\"\n min_indexes = []\n max_indexes = []\n for catalog in catalogs:\n cat = ascii.read(catalog)\n if 'index' in cat.colnames:\n # Collect the min and max index values from each catalog\n min_val = np.min(cat['index'])\n min_indexes.append(min_val)\n max_indexes.append(np.max(cat['index']))\n if min_val == 0:\n raise ValueError((\"{} has a source with an index value of 0. This is not allowed. \"\n \"Zero is reserved for pixels in the segmentation map with no source flux.\".format(catalog)))\n if min_val < 0:\n raise ValueError(\"{} has sources with negative index numbers. This is not allowed.\".format(catalog))\n\n else:\n raise ValueError(\"{} does not have an 'index' column. Cannot compare to other catalogs.\".format(catalog))\n\n # Reorder the min_indexes to be monotonically increasing.\n # Apply the same sorting to the max_indexes and the catalog list\n min_indexes = np.array(min_indexes)\n max_indexes = np.array(max_indexes)\n sorter = np.argsort(min_indexes)\n min_indexes = min_indexes[sorter]\n max_indexes = max_indexes[sorter]\n\n # Create an array of min and max indexes and make sure the list is\n # monotonically increasing\n indexes = [[mn, mx] for mn, mx in zip(min_indexes, max_indexes)]\n indexes = np.array([item for sublist in indexes for item in sublist])\n\n # Check that each element of the array is less than the preceding element\n overlaps = ~np.all(indexes[1:] >= indexes[:-1])\n return overlaps\n\n\ndef determine_used_cats(obs_mode, cat_dict):\n \"\"\"Return a list of the source catalogs that will be used by Mirage,\n based on the observation mode\n\n Parameters\n ----------\n obs_mode : str\n e.g. 'imaging', 'wfss'\n\n cat_dict : dict\n Dictionary containing catalog names. 
Keys should match those\n in the catalog entries of the Mirage yaml input file\n\n Returns\n -------\n cats : list\n List of catalogs that will be used for the given observing mode\n \"\"\"\n if obs_mode == 'imaging':\n possible_cats = [cat_dict[entry] for entry in IMAGING_ALLOWED_CATALOGS]\n elif obs_mode == 'wfss':\n possible_cats = [cat_dict[entry] for entry in WFSS_ALLOWED_CATALOGS]\n elif obs_mode == 'ts_imaging':\n possible_cats = [cat_dict[entry] for entry in TS_IMAGING_ALLOWED_CATALOGS]\n elif obs_mode == 'ts_grism':\n possible_cats = [cat_dict[entry] for entry in TS_GRISM_ALLOWED_CATALOGS]\n\n # Remove any catalogs that are set to None\n cats = [ele for ele in possible_cats if str(ele).lower() != 'none']\n return cats\n\n\ndef get_nonsidereal_catalog_name(cat_dict, target_name, instrument_name):\n \"\"\"Given a dictionary or nested dictionary of source catalogs,\n return the name of the non-sidereal catalog for the given target\n name.\n\n Parameters\n ----------\n cat_dict : dict\n Dictionary of source catalogs as input by the user\n\n target_name : str\n Target name to search for\n\n instrument_name : str\n Name of instrument used to observe ``target_name``\n\n Returns\n -------\n cat_file : str\n Name of source catalog\n \"\"\"\n target_cat = cat_dict[target_name]\n if 'moving_target_to_track' in target_cat.keys():\n if not os.path.isfile(target_cat['moving_target_to_track']):\n raise ValueError((\"{} is listed as the non-sidereal target catalog for \"\n \"{}, but it appears this file does not exist.\".format(target_cat['moving_target_to_track'],\n target_name)))\n else:\n cat_file = target_cat['moving_target_to_track']\n else:\n if instrument_name not in target_cat.keys():\n raise ValueError((\"Catalog dictionary does not contain a 'moving_target_to_track' catalog \"\n \"for {}. Unable to proceed.\".format(target_name)))\n else:\n if 'moving_target_to_track' in target_cat[instrument_name].keys():\n if not os.path.isfile(target_cat[instrument_name]['moving_target_to_track']):\n raise ValueError((\"{} is listed as the non-sidereal target catalog for \"\n \"{}, but it appears this file does not exist.\"\n .format(target_cat['moving_target_to_track'], target_name)))\n else:\n cat_file = target_cat[instrument_name]['moving_target_to_track']\n else:\n raise ValueError((\"Catalog dictionary does not contain a 'moving_target_to_track' catalog \"\n \"for {}. 
Unable to proceed.\".format(target_name)))\n return cat_file\n\n\ndef read_nonsidereal_catalog(filename):\n \"\"\"Read in a Mirage formatted non-sidereal source catalog\n\n Paramters\n ---------\n filename : str\n Name of ascii catalog\n\n Returns\n -------\n catalog_table : astropy.table.Table\n Catalog contents\n\n pixelflag : bool\n True if the source position is in units of detector (x, y)\n False if RA, Dec\n\n pixelvelflag : bool\n True if the source velocity is given in pixels/hour.\n False if arcsec/hour\n \"\"\"\n catalog_table = ascii.read(filename, comment='#')\n\n # Check to see whether the position is in x,y or ra,dec\n pixelflag = False\n try:\n if 'position_pixels' in catalog_table.meta['comments'][0:4]:\n pixelflag = True\n except:\n pass\n\n # If present, check whether the velocity entries are pix/sec\n # or arcsec/sec.\n pixelvelflag = False\n try:\n if 'velocity_pixels' in catalog_table.meta['comments'][0:4]:\n pixelvelflag = True\n except:\n pass\n return catalog_table, pixelflag, pixelvelflag\n", "\"\"\"SIAF interface module to support accessing SIAF information.\n\nThis module provides ``mirage`` with functions to interface SIAF content via the pysiaf module.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n - Bryan Hilbert\n\nUse\n---\n\n This module can be imported and used with\n\n ::\n\n from mirage.utils import siaf_interface\n\n\"\"\"\nimport os\nimport logging\nimport numpy as np\n\nimport pysiaf\nfrom pysiaf import iando\n\nfrom mirage.logging import logging_functions\nfrom ..utils import rotations\nfrom ..utils import set_telescope_pointing_separated as set_telescope_pointing\nfrom mirage.utils.constants import LOG_CONFIG_FILENAME, STANDARD_LOGFILE_NAME\n\n\nclassdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))\nlog_config_file = os.path.join(classdir, 'logging', LOG_CONFIG_FILENAME)\nlogging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME)\n\n\ndef aperture_ra_dec(siaf_instance, aperture_name, ra, dec, telescope_roll, output_apertures):\n \"\"\"For a given aperture with a known RA, Dec, and telescope roll angle,\n calculate the RA, Dec values at the reference location in a list of\n other apertures.\n\n Parameters\n ----------\n siaf_instance : pysiaf.Siaf\n Instance of SIAF for a single instrument\n\n aperture_name : str\n Aperture name (e.g. \"NRCA1_FULL\")\n\n ra : float\n RA value of pointing in degrees\n\n dec : float\n Dec value of pointing in degrees\n\n telescope_roll : float\n PA_V3, Position angle of the telescope in degrees\n\n output_apertures : list\n List of aperture names to calculate RA, Dec for\n\n Returns\n -------\n aperture_pointing : dict\n Dictionary with output_apertures as keys. Values are (ra, dec)\n tuples\n \"\"\"\n local_roll, att_matrix, fullframesize, subarray_boundaries = get_siaf_information(siaf_instance,\n aperture_name,\n ra, dec,\n telescope_roll)\n aperture_pointing = {}\n for aperture in output_apertures:\n siaf_out = siaf_instance[aperture]\n out_ra, out_dec = pysiaf.rotations.pointing(att_matrix, siaf_out.V2Ref, siaf_out.V3Ref)\n aperture_pointing[aperture] = (out_ra, out_dec)\n return aperture_pointing\n\n\ndef aperture_xy_to_radec(x, y, instrument, aperture, fiducial_ra, fiducial_dec, pav3):\n \"\"\"For a given aperture and roll angle, translate a given detector\n (x, y) location to RA, Dec\n\n Parameters\n ----------\n x : float\n X-coordinate within ```aperture```\n\n y : float\n Y-coordinate within ```aperture```\n\n instrument : str\n Name of JWST instrument (e.g. 
'nircam')\n\n aperture : str\n Name of aperture (e.g. 'NRCA1_FULL')\n\n fiducial_ra : float\n Right ascention value at the reference location of the aperture,\n in decimal degrees\n\n fiducial_dec : float\n Declination value at the reference location of the aperture,\n in decimal degrees\n\n pav3 : float\n Telescope roll angle, in degrees\n\n Returns\n -------\n ra : float\n RA corresponding to (x, y)\n\n dec : float\n Dec corresponding to (x, y)\n \"\"\"\n instrument_siaf = siaf_interface.get_instance(instrument)\n siaf = instrument_siaf[aperture]\n local_roll, attitude_matrix, ffsize, \\\n subarray_bounds = get_siaf_information(instrument, aperture, fiducial_ra,\n fiducial_dec, pav3)\n loc_v2, loc_v3 = siaf.sci_to_tel(x + 1, y + 1)\n ra, dec = pysiaf.utils.rotations.pointing(attitude_matrix, loc_v2, loc_v3)\n return ra, dec\n\n\ndef get_instance(instrument):\n \"\"\"Return an instance of a pysiaf.Siaf object for the given instrument\n\n Parameters\n ----------\n instrument : str\n Name of instrument\n\n Returns\n -------\n siaf : pysiaf.Siaf\n Siaf object for the requested instrument\n \"\"\"\n siaf = pysiaf.Siaf(instrument)\n return siaf\n\n\ndef get_siaf_information(siaf_instance, aperture_name, ra, dec, telescope_roll, v2_arcsec=None,\n v3_arcsec=None, verbose=False):\n \"\"\"Use pysiaf to get aperture information.\n\n Parameters\n ----------\n siaf_instance : pysiaf.Siaf\n Instance of SIAF for a single instrument\n\n aperture_name : str\n Aperture name (e.g. \"NRCA1_FULL\")\n\n ra : float\n RA value of pointing in degrees\n\n dec : float\n Dec value of pointing in degrees\n\n telescope_roll : float\n PA_V3, Position angle of the telescope in degrees\n\n v2_arcsec : float\n The V2 value in arcseconds of the reference location for the\n instrument aperture\n\n v3_arcsecc : float\n The V3 value in arcseconds of the reference location for the\n instrument aperture\n\n verbose : bool\n Print extra information to the screen\n\n Returns\n -------\n local_roll : float\n Local roll angle at the reference location of the aperture\n\n att_matrix : matrix\n Attitude matrix used to relate RA, Dec, local roll angle to V2, V3\n\n fullframesize : int\n Number of columns in the given aperture\n\n subarray_boundaries : list\n List of full-frame coordinates corresponding to the minimum and maximum\n values of x and y in the given aperture\n \"\"\"\n logger = logging.getLogger('mirage.utils.siaf_interface.get_siaf_information')\n\n # Select the correct aperture\n siaf = siaf_instance[aperture_name]\n\n if v2_arcsec is None:\n v2_arcsec = siaf.V2Ref\n if v3_arcsec is None:\n v3_arcsec = siaf.V3Ref\n\n local_roll = set_telescope_pointing.compute_local_roll(telescope_roll,\n ra, dec, v2_arcsec, v3_arcsec)\n\n # Create attitude_matrix\n att_matrix = rotations.attitude(v2_arcsec, v3_arcsec, ra, dec, local_roll)\n\n # Get full frame size\n fullframesize = siaf.XDetSize\n\n # Subarray boundaries in full frame coordinates\n try:\n xcorner, ycorner = sci_subarray_corners(siaf_instance.instrument, aperture_name, siaf=siaf_instance)\n subarray_boundaries = [xcorner[0], ycorner[0], xcorner[1], ycorner[1]]\n except (RuntimeError, TypeError) as e: # e.g. 
NIRSpec NRS_FULL_MSA aperture\n if verbose:\n logger.info('get_siaf_information raised error:\\n{}\\nIgnoring it.'.format(e))\n subarray_boundaries = [0, 0, 0, 0]\n return local_roll, att_matrix, fullframesize, subarray_boundaries\n\n\ndef sci_subarray_corners(instrument, aperture_name, siaf=None, verbose=False):\n \"\"\"Return the two opposing aperture corners in the SIAF Science frame of the full-frame SCA.\n\n This function serves as interface between the SIAF information accessible via the pysiaf package\n and the subarray information formatted for use by mirage.\n\n Parameters\n ----------\n instrument : str\n JWST instrument name with correct capitalization\n aperture_name : str\n SIAF aperture name\n siaf : pysiaf.Siaf\n SIAF instance for a single instrument\n verbose : bool\n Verbose output on/off\n\n Returns\n -------\n x_sci, y_sci : tuple of numpy arrays\n Subarray corner coordinates\n\n \"\"\"\n logger = logging.getLogger('mirage.utils.get_siaf_information.sci_subarray_corners')\n\n # get SIAF\n if siaf is None:\n siaf = get_instance(instrument)\n\n # get master aperture names\n siaf_detector_layout = iando.read.read_siaf_detector_layout()\n master_aperture_names = siaf_detector_layout['AperName'].data\n\n # read pysiaf aperture definition file containing DetRef and SciRef values\n siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument)\n\n # aperture object\n aperture = siaf[aperture_name]\n\n # aperture corners in SIAF detector coordinates\n x_det, y_det = aperture.corners('det', rederive=True)\n\n # determine parent aperture, i.e. the underlying full frame SCA aperture\n index = siaf_aperture_definitions['AperName'].tolist().index(aperture_name)\n aperture._parent_apertures = siaf_aperture_definitions['parent_apertures'][index]\n\n # If multiuple apertures are listed as parents keep only the first\n if ';' in aperture._parent_apertures:\n logger.info('Multiple parent apertures: {}'.format(aperture._parent_apertures))\n aperture._parent_apertures = aperture._parent_apertures.split(';')[0]\n\n if aperture_name in master_aperture_names:\n # if master aperture, use it directly to transform to science frame\n x_sci, y_sci = aperture.det_to_sci(x_det, y_det)\n elif aperture._parent_apertures is not None:\n # use parent aperture for transformation\n if verbose:\n logger.info('Using parent {} for {}'.format(aperture._parent_apertures, aperture_name))\n x_sci, y_sci = siaf[aperture._parent_apertures].det_to_sci(x_det, y_det)\n aperture = siaf[aperture._parent_apertures]\n\n if instrument.lower() == 'nircam':\n if aperture.DetSciParity == 1:\n corner_index = np.array([1, 3])\n elif aperture.DetSciParity == -1:\n # NIRCam will always fall in here, except in case of non-dms orientation\n corner_index = np.array([0, 2])\n x_corner = x_sci[corner_index]\n y_corner = y_sci[corner_index]\n elif instrument.lower() == 'niriss':\n x_corner_index = np.array([0, 2])\n y_corner_index = np.array([0, 2])\n if aperture_name == 'NIS_CEN_OSS':\n x_corner_index = np.array([1, 3])\n y_corner_index = np.array([3, 1])\n x_corner = x_sci[x_corner_index]\n y_corner = y_sci[y_corner_index]\n if aperture_name in ['NIS_SUBSTRIP96', 'NIS_SUBSTRIP256']:\n x_corner = [1, 2048]\n y_corner = [1, 2048]\n elif instrument.lower() == 'fgs':\n x_corner_index = np.array([0, 2])\n y_corner_index = np.array([0, 2])\n if aperture_name == 'FGS1_FULL_OSS':\n x_corner_index = np.array([1, 3])\n y_corner_index = np.array([3, 1])\n if aperture_name == 'FGS2_FULL_OSS':\n x_corner_index = np.array([1, 
3])\n y_corner_index = np.array([1, 3])\n x_corner = x_sci[x_corner_index]\n y_corner = y_sci[y_corner_index]\n else:\n raise NotImplementedError((\"Instrument {} not supported for SIAF subarray corners\"\n .format(instrument)))\n\n # account for mirage conventions (e.g. 0-based indexing)\n # we also want integer values as these will be indexes\n x_corner = np.array([np.ceil(x_corner[0]) - 1, np.floor(x_corner[1]) - 1])\n y_corner = np.array([np.ceil(y_corner[0]) - 1, np.floor(y_corner[1]) - 1])\n return x_corner.astype(np.int), y_corner.astype(np.int)\n" ]
[ [ "numpy.min", "numpy.all", "numpy.max", "numpy.argsort", "numpy.array" ], [ "numpy.ceil", "numpy.array", "numpy.floor" ] ]
iesl/s-diora
[ "e93b4d3b0a8f52b629161769622bbcbae4d35d87" ]
[ "eval_parsing.py" ]
[ "import collections\nimport json\nimport os\n\nimport nltk\nfrom nltk.treeprettyprinter import TreePrettyPrinter\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom cky import ParsePredictor as CKY\nfrom experiment_logger import get_logger\nfrom evaluation_utils import BaseEvalFunc\n\n\ndef convert_to_nltk(tr, label='|'):\n def helper(tr):\n if not isinstance(tr, (list, tuple)):\n return '({} {})'.format(label, tr)\n nodes = []\n for x in tr:\n nodes.append(helper(x))\n return '({} {})'.format(label, ' '.join(nodes))\n return helper(tr)\n\n\ndef example_f1(gt, pred):\n correct = len(gt.intersection(pred))\n if correct == 0:\n return 0., 0., 0.\n gt_total = len(gt)\n pred_total = len(pred)\n prec = float(correct) / pred_total\n recall = float(correct) / gt_total\n f1 = 2 * (prec * recall) / (prec + recall)\n return f1, prec, recall\n\n\ndef tree_to_spans(tree):\n spans = []\n\n def helper(tr, pos):\n if not isinstance(tr, (list, tuple)):\n size = 1\n return size\n size = 0\n for x in tr:\n xpos = pos + size\n xsize = helper(x, xpos)\n size += xsize\n spans.append((pos, size))\n return size\n\n helper(tree, 0)\n\n return spans\n\n\ndef spans_to_tree(spans, tokens):\n length = len(tokens)\n\n # Add missing spans.\n span_set = set(spans)\n for pos in range(length):\n if pos not in span_set:\n spans.append((pos, 1))\n\n spans = sorted(spans, key=lambda x: (x[1], x[0]))\n\n pos_to_node = {}\n root_node = None\n\n for i, span in enumerate(spans):\n\n pos, size = span\n\n if i < length:\n assert i == pos\n node = (pos, size, tokens[i])\n pos_to_node[pos] = node\n continue\n\n node = (pos, size, [])\n\n for i_pos in range(pos, pos+size):\n child = pos_to_node[i_pos]\n c_pos, c_size = child[0], child[1]\n\n if i_pos == c_pos:\n node[2].append(child)\n pos_to_node[i_pos] = node\n\n def helper(node):\n pos, size, x = node\n if isinstance(x, str):\n return x\n return tuple([helper(xx) for xx in x])\n\n root_node = pos_to_node[0]\n tree = helper(root_node)\n\n return tree\n\n\nclass TreesFromDiora(object):\n def __init__(self, diora, word2idx, outside, oracle):\n self.diora = diora\n self.word2idx = word2idx\n self.idx2word = {idx: w for w, idx in word2idx.items()}\n self.outside = outside\n self.oracle = oracle\n\n def to_spans(self, lst):\n return [(pos, level + 1) for level, pos in lst]\n\n def predict(self, batch_map):\n batch_size, length = batch_map['sentences'].shape\n example_ids = batch_map['example_ids']\n tscores = [0.0] * batch_size\n K = self.diora.K\n\n for i_b in range(batch_size):\n tokens = batch_map['ground_truth'][i_b]['tokens']\n root_level, root_pos = length - 1, 0\n spans = self.to_spans(self.diora.cache['inside_tree'][(i_b, 0)][(root_level, root_pos)])\n binary_tree = spans_to_tree(spans, tokens)\n other_trees = []\n\n yield dict(example_id=example_ids[i_b], binary_tree=binary_tree, binary_tree_score=tscores[i_b], other_trees=other_trees)\n\n\nclass ParsingComponent(BaseEvalFunc):\n\n def init_defaults(self):\n self.agg_mode = 'sum'\n self.cky_mode = 'sum'\n self.ground_truth = None\n self.inside_pool = 'sum'\n self.oracle = {'use': False}\n self.outside = True\n self.seed = 121\n self.semi_supervised = False\n self.K = None\n self.choose_tree = 'local'\n\n def compare(self, prev_best, results):\n out = []\n key, val, is_best = 'placeholder', None, True\n out.append((key, val, is_best))\n return out\n\n def parse(self, trainer, info):\n logger = self.logger\n\n multilayer = False\n diora = trainer.get_single_net(trainer.net).diora\n if hasattr(diora, 'layers'):\n 
multilayer = True\n pred_lst = []\n for i, layer in enumerate(diora.layers):\n logger.info(f'Diora Layer {i}:')\n pred = self.single_layer_parser(trainer, layer, info)\n pred_lst.append(pred)\n else:\n pred_lst = self.single_layer_parser(trainer, diora, info)\n return pred_lst, multilayer\n\n def single_layer_parser(self, trainer, diora, info):\n logger = self.logger\n epoch = info.get('epoch', 0)\n\n original_K = diora.K\n if self.K is not None:\n diora.safe_set_K(self.K)\n\n # set choose_tree\n if hasattr(diora, 'choose_tree'):\n original_choose_tree = diora.choose_tree\n diora.choose_tree = self.choose_tree\n\n word2idx = self.dataset['word2idx']\n if self.cky_mode == 'cky':\n parse_predictor = CKY(net=diora, word2idx=word2idx,\n add_bos_token=trainer.net.add_bos_token, add_eos_token=trainer.net.add_eos_token)\n elif self.cky_mode == 'diora':\n parse_predictor = TreesFromDiora(diora=diora, word2idx=word2idx, outside=self.outside, oracle=self.oracle)\n\n batches = self.batch_iterator.get_iterator(random_seed=self.seed, epoch=epoch)\n\n logger.info('Parsing.')\n\n pred_lst = []\n counter = 0\n eval_cache = {}\n\n if self.ground_truth is not None:\n self.ground_truth = os.path.expanduser(self.ground_truth)\n ground_truth_data = {}\n with open(self.ground_truth) as f:\n for line in f:\n ex = json.loads(line)\n ground_truth_data[ex['example_id']] = ex\n\n # Eval loop.\n with torch.no_grad():\n for i, batch_map in enumerate(batches):\n batch_size, length = batch_map['sentences'].shape\n\n if length <= 2:\n continue\n\n example_ids = batch_map['example_ids']\n if self.ground_truth is not None:\n batch_ground_truth = [ground_truth_data[x] for x in example_ids]\n batch_map['ground_truth'] = batch_ground_truth\n\n _ = trainer.step(batch_map, train=False, compute_loss=False, info={ 'inside_pool': self.inside_pool, 'outside': self.outside })\n\n for j, x in enumerate(parse_predictor.predict(batch_map)):\n\n pred_lst.append(x)\n\n self.eval_loop_hook(trainer, diora, info, eval_cache, batch_map)\n\n self.post_eval_hook(trainer, diora, info, eval_cache)\n\n diora.safe_set_K(original_K)\n\n # set choose_tree\n if hasattr(diora, 'choose_tree'):\n diora.choose_tree = original_choose_tree\n\n return pred_lst\n\n def eval_loop_hook(self, trainer, diora, info, eval_cache, batch_map):\n pass\n\n def post_eval_hook(self, trainer, diora, info, eval_cache):\n pass\n\n def run(self, trainer, info):\n logger = self.logger\n outfile = info.get('outfile', None)\n pred_lst, multilayer = self.parse(trainer, info)\n\n if self.write:\n corpus = collections.OrderedDict()\n\n # Read the ground truth.\n with open(self.ground_truth) as f:\n for line in f:\n ex = json.loads(line)\n corpus[ex['example_id']] = ex\n\n def to_raw_parse(tr):\n def helper(tr):\n if isinstance(tr, (str, int)):\n return '(DT {})'.format(tr)\n nodes = []\n for x in tr:\n nodes.append(helper(x))\n return '(S {})'.format(' '.join(nodes))\n return '(ROOT {})'.format(helper(tr))\n\n # Write more general format.\n path = outfile + '.pred'\n logger.info('writing parse tree output -> {}'.format(path))\n with open(path, 'w') as f:\n for x in pred_lst:\n pred_binary_tree = x['binary_tree']\n f.write(to_raw_parse(pred_binary_tree) + '\\n')\n\n path = outfile + '.gold'\n logger.info('writing parse tree output -> {}'.format(path))\n with open(path, 'w') as f:\n for x in pred_lst:\n example_id = x['example_id']\n gt = corpus[example_id]\n gt_binary_tree = gt['binary_tree']\n f.write(to_raw_parse(gt_binary_tree) + '\\n')\n\n path = outfile + '.diora'\n 
logger.info('writing parse tree output -> {}'.format(path))\n with open(path, 'w') as f:\n for x in pred_lst:\n example_id = x['example_id']\n gt = corpus[example_id]\n o = collections.OrderedDict()\n o['example_id'] = example_id\n o['binary_tree'] = x['binary_tree']\n o['raw_parse'] = to_raw_parse(x['binary_tree'])\n o['tokens'] = gt['tokens']\n f.write(json.dumps(o) + '\\n')\n\n eval_result = dict()\n eval_result['name'] = self.name\n eval_result['meta'] = dict()\n\n return eval_result\n" ]
[ [ "torch.no_grad" ] ]
JoeyOhman/alps
[ "77945f414304ec749110bd4dd2b8236d9a4868b3" ]
[ "src/train.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, Subset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n get_linear_schedule_with_warmup,\n)\n\nimport src.setup\nfrom src.data import (\n convert_examples_to_features,\n compute_metrics,\n processors,\n output_modes\n)\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # 
multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0],\n )\n src.setup.set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / args.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n print(json.dumps({**logs, **{\"step\": global_step}}))\n\n if args.local_rank in 
[-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + \"-MM\") if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n logger.info(f\"eval_loss = {eval_loss}\")\n if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n np.seterr(divide='ignore', invalid='ignore')\n result = 
compute_metrics(eval_task, preds, out_label_ids)\n np.seterr(divide='warn', invalid='warn')\n results.update(result)\n\n eval_prefix_dir = os.path.join(eval_output_dir, prefix)\n if not os.path.exists(eval_prefix_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_prefix_dir)\n output_eval_file = os.path.join(eval_prefix_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return results\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n processor = processors[task]()\n output_mode = output_modes[task]\n # Load data features from cache or dataset file\n if test:\n data_split = \"test\"\n elif evaluate:\n data_split = \"dev\"\n else:\n data_split = \"train\"\n cached_features_file = os.path.join(\n args.data_dir,\n \"cached_{}_{}_{}_{}\".format(\n data_split,\n list(filter(None, args.base_model.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n if os.path.exists(cached_features_file) and not args.overwrite_cache and False:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n if test:\n examples = processor.get_test_examples(args.data_dir)\n elif evaluate:\n examples = processor.get_dev_examples(args.data_dir)\n else:\n examples = processor.get_train_examples(args.data_dir)\n features = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n )\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n def pad(value, max_seq_len, tokenizer, key):\n padded_value = value\n # verify that the tokenizer has a pad_token_id\n if tokenizer._pad_token is not None:\n # Padding handle\n padded_value = [\n v + [tokenizer.pad_token_id if key == \"input_ids\" else 1] * (max_seq_len - len(v))\n for v in padded_value\n ]\n\n return padded_value\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor(pad([f.input_ids for f in features], args.max_seq_length, tokenizer, 'input_ids'),\n dtype=torch.long)\n all_attention_mask = torch.tensor(\n pad([f.attention_mask for f in features], args.max_seq_length, tokenizer, 'attention_mask'), dtype=torch.long)\n all_token_type_ids = torch.tensor(\n pad([f.token_type_ids for f in features], args.max_seq_length, tokenizer, 'token_type_ids'), dtype=torch.long)\n # all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n # 
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n # all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n\n print(\"Dataset size (probably):\", len(all_input_ids))\n\n return dataset\n\n\ndef main():\n args = src.setup.get_args()\n\n if (\n os.path.isfile(os.path.join(args.output_dir, 'eval_results.txt'))\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n args.device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n src.setup.set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.head = \"sc\"\n model, tokenizer, model_class, tokenizer_class = src.setup.load_model(args)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n # Train only on sampled examples\n sampled_file = os.path.join(args.output_dir, 'sampled.pt')\n if os.path.isfile(sampled_file):\n sampled = torch.load(sampled_file)\n sampled_dataset = Subset(train_dataset, sampled)\n else:\n sampled_dataset = train_dataset\n\n global_step, tr_loss = train(args, sampled_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if 
hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, tokenizer, prefix=prefix)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.utils.data.TensorDataset", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "torch.distributed.barrier", "torch.tensor", "numpy.squeeze", "numpy.seterr", "numpy.argmax", "torch.no_grad", "torch.utils.data.Subset", "torch.distributed.get_rank", "torch.nn.DataParallel", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "torch.save" ] ]
hsnemlekar/irl-maxent
[ "6ff99dae5571bb17e771bc2f38d7b47ffb107fa0" ]
[ "src/experiments.py" ]
[ "# import python libraries\nimport numpy as np\nfrom copy import deepcopy\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# import functions\nimport optimizer as O # stochastic gradient descent optimizer\nfrom vi import value_iteration\nfrom maxent_irl import *\nfrom assembly_tasks import *\nfrom visualize import *\n\n# -------------------------------------------------- Load data ------------------------------------------------------ #\n# paths\nroot_path = \"data/\"\ncanonical_path = root_path + \"canonical_demos.csv\"\ncomplex_path = root_path + \"complex_demos.csv\"\nfeature_path = root_path + \"survey_data.csv\"\n\n# load user demonstrations\ncanonical_df = pd.read_csv(canonical_path, header=None)\ncomplex_df = pd.read_csv(complex_path, header=None)\ncanonical_demos = canonical_df.to_numpy().T\ncomplex_demos = complex_df.to_numpy().T\n\n# load user responses\nratings_df = pd.read_csv(feature_path)\n\n\n# pre-process feature value\ndef process_val(x):\n if x == \"1 (No effort at all)\":\n x = 1.1\n elif x == \"7 (A lot of effort)\":\n x = 6.9\n else:\n x = float(x)\n\n return x\n\n\n# load user ratings\ndef load_features(data, feature_idx, action_idx):\n user_features = []\n for i in range(2, len(ratings_df)):\n fea_mat = []\n for j in action_idx:\n fea_vec = []\n for k in feature_idx:\n fea_col = k + str(j)\n fea_val = process_val(ratings_df[fea_col][i])\n fea_vec.append(fea_val)\n fea_mat.append(fea_vec)\n user_features.append(fea_mat.copy())\n return user_features\n\n\n# user ratings for features\ncanonical_q, complex_q = [\"Q7_\", \"Q8_\"], [\"Q14_\", \"Q15_\"] # [\"Q6_\", \"Q7_\", \"Q8_\"], [\"Q13_\", \"Q14_\", \"Q15_\"]\ncanonical_features = load_features(ratings_df, canonical_q, [1, 3, 5, 2, 4, 6])\ncomplex_features = load_features(ratings_df, complex_q, [1, 3, 7, 8, 2, 4, 5, 6])\n\n# ------------------------------------------------- Optimization ---------------------------------------------------- #\n\n# choose our parameter initialization strategy:\n# initialize parameters with constant\ninit = O.Constant(1.0)\n\n# choose our optimization strategy:\n# we select exponentiated stochastic gradient descent with linear learning-rate decay\noptim = O.ExpSga(lr=O.linear_decay(lr0=0.6))\n\n# ------------------------------------------- Training: Learn weights ----------------------------------------------- #\n\nrank_features = False\nscale_weights = False\n\nvisualize = True\n\nrun_proposed = True\nrun_random_baseline = False\n\nmatch_scores, predict_scores, random_scores = [], [], []\n\n# loop over all users\nfor i in range(11):\n\n print(\"=======================\")\n print(\"User:\", i)\n\n # ---------------------------------------- Training: Learn weights ---------------------------------------------- #\n\n # initialize canonical task\n C = CanonicalTask(canonical_features[i])\n C.set_end_state(canonical_demos[i])\n C.enumerate_states()\n C.set_terminal_idx()\n if rank_features:\n C.convert_to_rankings()\n\n # demonstrations\n canonical_user_demo = [list(canonical_demos[i])]\n canonical_trajectories = get_trajectories(C.states, canonical_user_demo, C.transition)\n\n # visualize_rel_actions(C, canonical_user_demo[0], i, \"canonical\")\n\n if run_proposed:\n print(\"Training ...\")\n\n # using abstract features\n abstract_features = np.array([C.get_features(state) for state in C.states])\n norm_abstract_features = abstract_features / np.linalg.norm(abstract_features, axis=0)\n canonical_rewards_abstract, canonical_weights_abstract = maxent_irl(C, 
norm_abstract_features,\n canonical_trajectories,\n optim, init)\n\n print(\"Weights have been learned for the canonical task! Hopefully.\")\n print(\"Weights -\", canonical_weights_abstract)\n\n # scale weights\n if scale_weights:\n canonical_weights_abstract /= max(canonical_weights_abstract)\n\n # --------------------------------------- Verifying: Reproduce demo --------------------------------------------- #\n\n # canonical_rewards_true = norm_abstract_features.dot(np.array([1., 0., 0., 0., 1., 0.]))\n # qf_true, _, _ = value_iteration(C.states, C.actions, C.transition, canonical_rewards_true, C.terminal_idx)\n # generated_sequence_true = rollout_trajectory(qf_true, C.states, canonical_user_demo, C.transition)\n #\n # qf_abstract, _, _ = value_iteration(C.states, C.actions, C.transition, canonical_rewards_abstract, C.terminal_idx)\n # predict_sequence_canonical, _ = predict_trajectory(qf_abstract, C.states, canonical_user_demo, C.transition)\n #\n # print(\"\\n\")\n # print(\"Canonical task:\")\n # print(\" demonstration -\", canonical_user_demo)\n # print(\" generated (true) -\", generated_sequence_true)\n # print(\"predict (abstract) -\", predict_sequence_canonical)\n\n # ----------------------------------------- Testing: Predict complex -------------------------------------------- #\n\n # initialize complex task\n X = ComplexTask(complex_features[i])\n X.set_end_state(complex_demos[i])\n X.enumerate_states()\n X.set_terminal_idx()\n if rank_features:\n X.convert_to_rankings()\n\n # demonstrations\n complex_user_demo = [list(complex_demos[i])]\n complex_trajectories = get_trajectories(X.states, complex_user_demo, X.transition)\n\n # using abstract features\n complex_abstract_features = np.array([X.get_features(state) for state in X.states])\n complex_abstract_features /= np.linalg.norm(complex_abstract_features, axis=0)\n\n if run_proposed:\n # transfer rewards to complex task\n transfer_rewards_abstract = complex_abstract_features.dot(canonical_weights_abstract)\n\n # score for predicting the action based on transferred rewards based on abstract features\n qf_transfer, _, _ = value_iteration(X.states, X.actions, X.transition, transfer_rewards_abstract, X.terminal_idx)\n predict_sequence, predict_score = predict_trajectory(qf_transfer, X.states, complex_user_demo, X.transition,\n sensitivity=0.0, consider_options=False)\n predict_scores.append(predict_score)\n\n if visualize:\n visualize_rel_actions(X, complex_user_demo[0], i, \"actual\", predict_sequence)\n\n # -------------------------------- Training: Learn weights from complex demo ------------------------------------ #\n\n # using true features\n # complex_state_features = np.array(X.states) / np.linalg.norm(X.states, axis=0)\n # complex_rewards_true, complex_weights_true = maxent_irl(X, complex_state_features, complex_trajectories,\n # optim, init, eps=1e-2)\n\n # using abstract features\n # complex_rewards_abstract, complex_weights_abstract = maxent_irl(X, complex_abstract_features,\n # complex_trajectories,\n # optim, init, eps=1e-2)\n\n # ----------------------------------------- Testing: Random baselines ------------------------------------------- #\n if run_random_baseline:\n print(\"Assuming random weights ...\")\n random_score = []\n for _ in range(100):\n # # score for selecting actions based on random weights\n # random_weights = np.random.rand(6) # np.random.shuffle(canonical_weights_abstract)\n # random_rewards_abstract = complex_abstract_features.dot(random_weights)\n # qf_random, _, _ = value_iteration(X.states, 
X.actions, X.transition, random_rewards_abstract, X.terminal_idx)\n # predict_sequence, r_score = predict_trajectory(qf_random, X.states, complex_user_demo, X.transition,\n # sensitivity=0.0, consider_options=False)\n\n # score for randomly selecting an action\n predict_sequence, r_score = random_trajectory(X.states, complex_user_demo, X.transition)\n\n random_score.append(r_score)\n\n random_score = np.mean(random_score, axis=0)\n random_scores.append(random_score)\n\n print(\"\\n\")\n print(\"Complex task:\")\n print(\" demonstration -\", complex_user_demo)\n print(\" predictions -\", predict_sequence)\n\n# -------------------------------------------------- Save results --------------------------------------------------- #\nif run_proposed:\n np.savetxt(\"results_final/predict11_normalized_features.csv\", predict_scores)\n\nif run_random_baseline:\n np.savetxt(\"results_final/random11_normalized_features_random_actions_new.csv\", random_scores)\n\n" ]
[ [ "numpy.savetxt", "pandas.read_csv", "numpy.mean", "numpy.linalg.norm" ] ]
cristicmf/tpu
[ "05f7b15cdf0ae36bac84beb4aef0a09983ce8f66" ]
[ "models/experimental/detection/export_saved_model.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=line-too-long\nr\"\"\"A binary to export the Cloud TPU detection model.\n\nTo export to the SavedModel, one needs to specify at least the export directory\nand a given model checkpoint.\n\"\"\"\n# pylint: enable=line-too-long\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nfrom absl import flags\nimport tensorflow as tf\n\nfrom config import retinanet_config\nfrom modeling import serving\nfrom hyperparameters import params_dict\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\nFLAGS = flags.FLAGS\n\n# pylint: disable=line-too-long\nflags.DEFINE_string('export_dir', None, 'The export directory.')\nflags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')\nflags.DEFINE_boolean('use_tpu', False, 'Whether or not use TPU.')\nflags.DEFINE_string('params_overrides', '', 'The model parameters to override.')\nflags.DEFINE_integer('batch_size', 1, 'The batch size.')\nflags.DEFINE_string('input_type', 'image_bytes', 'One of `image_tensor`, `image_bytes` and `tf_example`')\nflags.DEFINE_string('input_name', 'input', 'The name of the input node.')\nflags.DEFINE_string('input_image_size', '640,640', 'The comma-separated string of two integers, representing the (height, width) of the input to the model.')\nflags.DEFINE_boolean('output_image_info', True, 'Whether or not output image_info node.')\nflags.DEFINE_boolean('output_normalized_coordinates', False, 'Whether or not output boxes in normalized coordinates.')\nflags.DEFINE_boolean('cast_num_detections_to_float', False, 'Whether or not cast the number of detections to float type.')\n# pylint: enable=line-too-long\n\nflags.mark_flag_as_required('export_dir')\nflags.mark_flag_as_required('checkpoint_path')\n\n\ndef main(argv):\n del argv # Unused.\n\n params = params_dict.ParamsDict(\n retinanet_config.RETINANET_CFG, retinanet_config.RETINANET_RESTRICTIONS)\n params = params_dict.override_params_dict(\n params, FLAGS.params_overrides, is_strict=True)\n params.validate()\n params.lock()\n\n model_params = dict(\n params.as_dict(),\n use_tpu=FLAGS.use_tpu,\n mode=tf.estimator.ModeKeys.PREDICT,\n transpose_input=False)\n\n print(' - Setting up TPUEstimator...')\n estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=serving.serving_model_fn_builder(\n FLAGS.use_tpu,\n FLAGS.output_image_info,\n FLAGS.output_normalized_coordinates,\n FLAGS.cast_num_detections_to_float),\n model_dir=None,\n config=tpu_config.RunConfig(\n tpu_config=tpu_config.TPUConfig(iterations_per_loop=1),\n master='local',\n evaluation_master='local'),\n params=model_params,\n use_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.batch_size,\n predict_batch_size=FLAGS.batch_size,\n export_to_tpu=FLAGS.use_tpu,\n export_to_cpu=True)\n\n print(' - Exporting the model...')\n 
input_type = FLAGS.input_type\n image_size = [int(x) for x in FLAGS.input_image_size.split(',')]\n export_path = estimator.export_saved_model(\n export_dir_base=FLAGS.export_dir,\n serving_input_receiver_fn=functools.partial(\n serving.serving_input_fn,\n batch_size=FLAGS.batch_size,\n desired_image_size=image_size,\n stride=(2 ** params.anchor.max_level),\n input_type=input_type,\n input_name=FLAGS.input_name),\n checkpoint_path=FLAGS.checkpoint_path)\n\n print(' - Done! path: %s' % export_path)\n\n\nif __name__ == '__main__':\n tf.app.run(main)\n" ]
[ [ "tensorflow.contrib.tpu.python.tpu.tpu_config.TPUConfig", "tensorflow.app.run" ] ]
dHannasch/datashader
[ "207e13dc372e03967aaee71ffb21bf6fc9a59fc4" ]
[ "datashader/reductions.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom datashape import dshape, isnumeric, Record, Option\nfrom datashape import coretypes as ct\nfrom toolz import concat, unique\nimport xarray as xr\n\nfrom datashader.glyphs.glyph import isnull\nfrom numba import cuda as nb_cuda\n\ntry:\n import cudf\nexcept Exception:\n cudf = None\n\nfrom .utils import Expr, ngjit, nansum_missing\n\n\nclass Preprocess(Expr):\n \"\"\"Base clase for preprocessing steps.\"\"\"\n def __init__(self, column):\n self.column = column\n\n @property\n def inputs(self):\n return (self.column,)\n\n\nclass extract(Preprocess):\n \"\"\"Extract a column from a dataframe as a numpy array of values.\"\"\"\n def apply(self, df):\n if cudf and isinstance(df, cudf.DataFrame):\n import cupy\n if df[self.column].dtype.kind == 'f':\n nullval = np.nan\n else:\n nullval = 0\n return cupy.array(df[self.column].to_gpu_array(fillna=nullval))\n elif isinstance(df, xr.Dataset):\n # DataArray could be backed by numpy or cupy array\n return df[self.column].data\n else:\n return df[self.column].values\n\n\nclass category_codes(Preprocess):\n \"\"\"Extract just the category codes from a categorical column.\"\"\"\n def apply(self, df):\n if cudf and isinstance(df, cudf.DataFrame):\n return df[self.column].cat.codes.to_gpu_array()\n else:\n return df[self.column].cat.codes.values\n\nclass category_values(Preprocess):\n \"\"\"Extract multiple columns from a dataframe as a numpy array of values.\"\"\"\n def __init__(self, columns):\n self.columns = list(columns)\n\n @property\n def inputs(self):\n return self.columns\n\n def apply(self, df):\n if cudf and isinstance(df, cudf.DataFrame):\n import cupy\n if df[self.columns[1]].dtype.kind == 'f':\n nullval = np.nan\n else:\n nullval = 0\n a = cupy.asarray(df[self.columns[0]].cat.codes.to_gpu_array())\n b = cupy.asarray(df[self.columns[1]].to_gpu_array(fillna=nullval))\n return cupy.stack((a, b), axis=-1)\n else:\n a = df[self.columns[0]].cat.codes.values\n b = df[self.columns[1]].values\n return np.stack((a, b), axis=-1)\n\nclass Reduction(Expr):\n \"\"\"Base class for per-bin reductions.\"\"\"\n def __init__(self, column=None):\n self.column = column\n\n def validate(self, in_dshape):\n if not self.column in in_dshape.dict:\n raise ValueError(\"specified column not found\")\n if not isnumeric(in_dshape.measure[self.column]):\n raise ValueError(\"input must be numeric\")\n\n def out_dshape(self, in_dshape):\n return self._dshape\n\n @property\n def inputs(self):\n return (extract(self.column),)\n\n def _build_bases(self, cuda=False):\n return (self,)\n\n def _build_temps(self, cuda=False):\n return ()\n\n def _build_create(self, dshape):\n return self._create\n\n def _build_append(self, dshape, schema, cuda=False):\n if cuda:\n if self.column is None:\n return self._append_no_field_cuda\n else:\n return self._append_cuda\n else:\n if self.column is None:\n return self._append_no_field\n else:\n return self._append\n\n def _build_combine(self, dshape):\n return self._combine\n\n def _build_finalize(self, dshape):\n return self._finalize\n\n\nclass OptionalFieldReduction(Reduction):\n \"\"\"Base class for things like ``count`` or ``any`` for which the field is optional\"\"\"\n def __init__(self, column=None):\n self.column = column\n\n @property\n def inputs(self):\n return (extract(self.column),) if self.column is not None else ()\n\n def validate(self, in_dshape):\n pass\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n return 
xr.DataArray(bases[0], **kwargs)\n\nclass by(Reduction):\n \"\"\"Apply the provided reduction separately per categorical ``column`` value.\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be\n categorical. Resulting aggregate has an outer dimension axis along the\n categories present.\n reduction : Reduction\n Per-category reduction function.\n \"\"\"\n def __init__(self, cat_column, reduction):\n self.columns = (cat_column, getattr(reduction, 'column', None))\n self.reduction = reduction\n self.column = cat_column # for backwards compatibility with count_cat\n\n def __hash__(self):\n return hash((type(self), self._hashable_inputs(), self.reduction))\n\n def _build_temps(self, cuda=False):\n return tuple(by(self.cat_column, tmp) for tmp in self.reduction._build_temps(cuda))\n\n @property\n def cat_column(self):\n return self.columns[0]\n\n @property\n def val_column(self):\n return self.columns[1]\n\n def validate(self, in_dshape):\n if not self.cat_column in in_dshape.dict:\n raise ValueError(\"specified column not found\")\n if not isinstance(in_dshape.measure[self.cat_column], ct.Categorical):\n raise ValueError(\"input must be categorical\")\n\n self.reduction.validate(in_dshape)\n\n def out_dshape(self, input_dshape):\n cats = input_dshape.measure[self.cat_column].categories\n red_shape = self.reduction.out_dshape(input_dshape)\n return dshape(Record([(c, red_shape) for c in cats]))\n\n @property\n def inputs(self):\n if self.val_column is not None:\n return (category_values(self.columns),)\n else:\n return (category_codes(self.columns[0]),)\n\n def _build_create(self, out_dshape):\n n_cats = len(out_dshape.measure.fields)\n return lambda shape, array_module: self.reduction._build_create(\n out_dshape)(shape + (n_cats,), array_module)\n\n def _build_bases(self, cuda=False):\n bases = self.reduction._build_bases(cuda)\n if len(bases) == 1 and bases[0] is self:\n return bases\n return tuple(by(self.cat_column, base) for base in bases)\n\n def _build_append(self, dshape, schema, cuda=False):\n return self.reduction._build_append(dshape, schema, cuda)\n\n def _build_combine(self, dshape):\n return self.reduction._combine\n\n def _build_finalize(self, dshape):\n cats = list(dshape[self.cat_column].categories)\n\n def finalize(bases, cuda=False, **kwargs):\n kwargs['dims'] += [self.cat_column]\n kwargs['coords'][self.cat_column] = cats\n return self.reduction._finalize(bases, cuda=cuda, **kwargs)\n\n return finalize\n\nclass count(OptionalFieldReduction):\n \"\"\"Count elements in each bin, returning the result as a uint32.\n\n Parameters\n ----------\n column : str, optional\n If provided, only counts elements in ``column`` that are not ``NaN``.\n Otherwise, counts every element.\n \"\"\"\n _dshape = dshape(ct.uint32)\n\n # CPU append functions\n @staticmethod\n @ngjit\n def _append_no_field(x, y, agg):\n agg[y, x] += 1\n\n\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n if not isnull(field):\n agg[y, x] += 1\n\n # GPU append functions\n @staticmethod\n @nb_cuda.jit(device=True)\n def _append_no_field_cuda(x, y, agg):\n nb_cuda.atomic.add(agg, (y, x), 1)\n\n @staticmethod\n @nb_cuda.jit(device=True)\n def _append_cuda(x, y, agg, field):\n if not isnull(field):\n nb_cuda.atomic.add(agg, (y, x), 1)\n\n @staticmethod\n def _create(shape, array_module):\n return array_module.zeros(shape, dtype='u4')\n\n @staticmethod\n def _combine(aggs):\n return aggs.sum(axis=0, dtype='u4')\n\n\nclass any(OptionalFieldReduction):\n 
\"\"\"Whether any elements in ``column`` map to each bin.\n\n Parameters\n ----------\n column : str, optional\n If provided, only elements in ``column`` that are ``NaN`` are skipped.\n \"\"\"\n _dshape = dshape(ct.bool_)\n\n @staticmethod\n @ngjit\n def _append_no_field(x, y, agg):\n agg[y, x] = True\n _append_no_field_cuda = _append_no_field\n\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n if not isnull(field):\n agg[y, x] = True\n _append_cuda =_append\n\n @staticmethod\n def _create(shape, array_module):\n return array_module.zeros(shape, dtype='bool')\n\n @staticmethod\n def _combine(aggs):\n return aggs.sum(axis=0, dtype='bool')\n\n\nclass _upsample(Reduction):\n \"\"\"\"Special internal class used for upsampling\"\"\"\n _dshape = dshape(Option(ct.float64))\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n return xr.DataArray(bases[0], **kwargs)\n\n @property\n def inputs(self):\n return (extract(self.column),)\n\n @staticmethod\n def _create(shape, array_module):\n # Use uninitialized memory, the upsample function must explicitly set unused\n # values to nan\n return array_module.empty(shape, dtype='f8')\n\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n # not called, the upsample function must set agg directly\n pass\n\n @staticmethod\n @ngjit\n def _append_cuda(x, y, agg, field):\n # not called, the upsample function must set agg directly\n pass\n\n @staticmethod\n def _combine(aggs):\n return np.nanmax(aggs, axis=0)\n\n\nclass FloatingReduction(Reduction):\n \"\"\"Base classes for reductions that always have floating-point dtype.\"\"\"\n _dshape = dshape(Option(ct.float64))\n\n @staticmethod\n def _create(shape, array_module):\n return array_module.full(shape, np.nan, dtype='f8')\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n return xr.DataArray(bases[0], **kwargs)\n\n\nclass _sum_zero(FloatingReduction):\n \"\"\"Sum of all elements in ``column``.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n\n @staticmethod\n def _create(shape, array_module):\n return array_module.zeros(shape, dtype='f8')\n\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n if not isnull(field):\n agg[y, x] += field\n\n @staticmethod\n @ngjit\n def _append_cuda(x, y, agg, field):\n if not isnull(field):\n nb_cuda.atomic.add(agg, (y, x), field)\n\n @staticmethod\n def _combine(aggs):\n return aggs.sum(axis=0, dtype='f8')\n\nclass sum(FloatingReduction):\n \"\"\"Sum of all elements in ``column``.\n\n Elements of resulting aggregate are nan if they are not updated.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. 
Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n # Cuda implementation\n def _build_bases(self, cuda=False):\n if cuda:\n return (_sum_zero(self.column), any(self.column))\n else:\n return (self,)\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n if cuda:\n sums, anys = bases\n x = np.where(anys, sums, np.nan)\n return xr.DataArray(x, **kwargs)\n else:\n return xr.DataArray(bases[0], **kwargs)\n\n # Single pass CPU implementation\n # These methods will only be called if _build_bases returned (self,)\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n if not isnull(field):\n if isnull(agg[y, x]):\n agg[y, x] = field\n else:\n agg[y, x] += field\n\n @staticmethod\n def _combine(aggs):\n return nansum_missing(aggs, axis=0)\n\n\nclass m2(FloatingReduction):\n \"\"\"Sum of square differences from the mean of all elements in ``column``.\n\n Intermediate value for computing ``var`` and ``std``, not intended to be\n used on its own.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n\n @staticmethod\n def _create(shape, array_module):\n return array_module.full(shape, 0.0, dtype='f8')\n\n def _build_temps(self, cuda=False):\n return (_sum_zero(self.column), count(self.column))\n\n def _build_append(self, dshape, schema, cuda=False):\n if cuda:\n raise ValueError(\"\"\"\\\nThe 'std' and 'var' reduction operations are not yet supported on the GPU\"\"\")\n return super(m2, self)._build_append(dshape, schema, cuda)\n\n @staticmethod\n @ngjit\n def _append(x, y, m2, field, sum, count):\n # sum & count are the results of sum[y, x], count[y, x] before being\n # updated by field\n if not isnull(field):\n if count > 0:\n u1 = np.float64(sum) / count\n u = np.float64(sum + field) / (count + 1)\n m2[y, x] += (field - u1) * (field - u)\n\n @staticmethod\n def _combine(Ms, sums, ns):\n with np.errstate(divide='ignore', invalid='ignore'):\n mu = np.nansum(sums, axis=0) / ns.sum(axis=0)\n return np.nansum(Ms + ns*(sums/ns - mu)**2, axis=0)\n\n\nclass min(FloatingReduction):\n \"\"\"Minimum value of all elements in ``column``.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n if isnull(agg[y, x]):\n agg[y, x] = field\n elif agg[y, x] > field:\n agg[y, x] = field\n\n @staticmethod\n @ngjit\n def _append_cuda(x, y, agg, field):\n nb_cuda.atomic.min(agg, (y, x), field)\n\n @staticmethod\n def _combine(aggs):\n return np.nanmin(aggs, axis=0)\n\n\nclass max(FloatingReduction):\n \"\"\"Maximum value of all elements in ``column``.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. 
Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n @staticmethod\n @ngjit\n def _append(x, y, agg, field):\n if isnull(agg[y, x]):\n agg[y, x] = field\n elif agg[y, x] < field:\n agg[y, x] = field\n\n @staticmethod\n @ngjit\n def _append_cuda(x, y, agg, field):\n nb_cuda.atomic.max(agg, (y, x), field)\n\n @staticmethod\n def _combine(aggs):\n return np.nanmax(aggs, axis=0)\n\n\nclass count_cat(by):\n \"\"\"Count of all elements in ``column``, grouped by category.\n Alias for `by(...,count())`, for backwards compatibility.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be\n categorical. Resulting aggregate has a outer dimension axis along the\n categories present.\n \"\"\"\n def __init__(self, column):\n super(count_cat, self).__init__(column, count())\n\n\nclass mean(Reduction):\n \"\"\"Mean of all elements in ``column``.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n def _build_bases(self, cuda=False):\n return (_sum_zero(self.column), count(self.column))\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n sums, counts = bases\n with np.errstate(divide='ignore', invalid='ignore'):\n x = np.where(counts > 0, sums/counts, np.nan)\n return xr.DataArray(x, **kwargs)\n\n\nclass var(Reduction):\n \"\"\"Variance of all elements in ``column``.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n def _build_bases(self, cuda=False):\n return (_sum_zero(self.column), count(self.column), m2(self.column))\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n sums, counts, m2s = bases\n with np.errstate(divide='ignore', invalid='ignore'):\n x = np.where(counts > 0, m2s / counts, np.nan)\n return xr.DataArray(x, **kwargs)\n\n\nclass std(Reduction):\n \"\"\"Standard Deviation of all elements in ``column``.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. Column data type must be numeric.\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n def _build_bases(self, cuda=False):\n return (_sum_zero(self.column), count(self.column), m2(self.column))\n\n @staticmethod\n def _finalize(bases, cuda=False, **kwargs):\n sums, counts, m2s = bases\n with np.errstate(divide='ignore', invalid='ignore'):\n x = np.where(counts > 0, np.sqrt(m2s / counts), np.nan)\n return xr.DataArray(x, **kwargs)\n\n\nclass first(Reduction):\n \"\"\"First value encountered in ``column``.\n\n Useful for categorical data where an actual value must always be returned,\n not an average or other numerical calculation.\n\n Currently only supported for rasters, externally to this class.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. 
If the data type is floating point,\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n @staticmethod\n def _append(x, y, agg):\n raise NotImplementedError(\"first is currently implemented only for rasters\")\n\n @staticmethod\n def _create(shape, array_module):\n raise NotImplementedError(\"first is currently implemented only for rasters\")\n\n @staticmethod\n def _combine(aggs):\n raise NotImplementedError(\"first is currently implemented only for rasters\")\n\n @staticmethod\n def _finalize(bases, **kwargs):\n raise NotImplementedError(\"first is currently implemented only for rasters\")\n\n\n\nclass last(Reduction):\n \"\"\"Last value encountered in ``column``.\n\n Useful for categorical data where an actual value must always be returned,\n not an average or other numerical calculation.\n\n Currently only supported for rasters, externally to this class.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. If the data type is floating point,\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n @staticmethod\n def _append(x, y, agg):\n raise NotImplementedError(\"last is currently implemented only for rasters\")\n\n @staticmethod\n def _create(shape, array_module):\n raise NotImplementedError(\"last is currently implemented only for rasters\")\n\n @staticmethod\n def _combine(aggs):\n raise NotImplementedError(\"last is currently implemented only for rasters\")\n\n @staticmethod\n def _finalize(bases, **kwargs):\n raise NotImplementedError(\"last is currently implemented only for rasters\")\n\n\n\nclass mode(Reduction):\n \"\"\"Mode (most common value) of all the values encountered in ``column``.\n\n Useful for categorical data where an actual value must always be returned,\n not an average or other numerical calculation.\n\n Currently only supported for rasters, externally to this class.\n Implementing it for other glyph types would be difficult due to potentially\n unbounded data storage requirements to store indefinite point or line\n data per pixel.\n\n Parameters\n ----------\n column : str\n Name of the column to aggregate over. 
If the data type is floating point,\n ``NaN`` values in the column are skipped.\n \"\"\"\n _dshape = dshape(Option(ct.float64))\n\n @staticmethod\n def _append(x, y, agg):\n raise NotImplementedError(\"mode is currently implemented only for rasters\")\n\n @staticmethod\n def _create(shape, array_module):\n raise NotImplementedError(\"mode is currently implemented only for rasters\")\n\n @staticmethod\n def _combine(aggs):\n raise NotImplementedError(\"mode is currently implemented only for rasters\")\n\n @staticmethod\n def _finalize(bases, **kwargs):\n raise NotImplementedError(\"mode is currently implemented only for rasters\")\n\n\n\nclass summary(Expr):\n \"\"\"A collection of named reductions.\n\n Computes all aggregates simultaneously, output is stored as a\n ``xarray.Dataset``.\n\n Examples\n --------\n A reduction for computing the mean of column \"a\", and the sum of column \"b\"\n for each bin, all in a single pass.\n\n >>> import datashader as ds\n >>> red = ds.summary(mean_a=ds.mean('a'), sum_b=ds.sum('b'))\n \"\"\"\n def __init__(self, **kwargs):\n ks, vs = zip(*sorted(kwargs.items()))\n self.keys = ks\n self.values = vs\n\n def __hash__(self):\n return hash((type(self), tuple(self.keys), tuple(self.values)))\n\n def validate(self, input_dshape):\n for v in self.values:\n v.validate(input_dshape)\n\n def out_dshape(self, in_dshape):\n return dshape(Record([(k, v.out_dshape(in_dshape)) for (k, v)\n in zip(self.keys, self.values)]))\n\n @property\n def inputs(self):\n return tuple(unique(concat(v.inputs for v in self.values)))\n\n\n\n__all__ = list(set([_k for _k,_v in locals().items()\n if isinstance(_v,type) and (issubclass(_v,Reduction) or _v is summary)\n and _v not in [Reduction, OptionalFieldReduction,\n FloatingReduction, m2]]))\n" ]
[ [ "numpy.nanmax", "numpy.sqrt", "numpy.nanmin", "numpy.stack", "numpy.nansum", "numpy.float64", "numpy.errstate", "numpy.where" ] ]
bcrafton/speed_read
[ "3e9c0c873e49e4948a216aae14ec0d4654d1a62c" ]
[ "src/resnet.py" ]
[ "\nimport numpy as np\nimport tensorflow as tf\n\nfrom layers import *\nfrom conv import *\nfrom block import *\nfrom model import *\n\n################\n\ndef quantize_np(x):\n scale = 127 / np.max(np.absolute(x))\n x = x * scale\n x = np.round(x)\n x = np.clip(x, -127, 127)\n return x, scale\n\ndef load_inputs(num_example):\n dataset = np.load('../imagenet.npy', allow_pickle=True).item()\n xs, ys = dataset['x'], dataset['y']\n assert (np.shape(xs) == (10, 224, 224, 3))\n\n # TODO: make sure we are using the right input images and weights\n # xs = xs / 255. \n # xs = xs - np.array([0.485, 0.456, 0.406])\n # xs = xs / np.array([0.229, 0.224, 0.225])\n # xs, scale = quantize_np(xs)\n \n xs = xs[0:num_example]\n ys = ys[0:num_example]\n return xs, ys\n\n################\n\ndef create_model(array_params):\n weights = np.load('../resnet18_quant_weights.npy', allow_pickle=True).item()\n\n layers=[\n Conv(input_size=(224, 224, 3), filter_size=(7,7,3,64), pool=1, stride=2, pad1=3, pad2=3, params=array_params, weights=weights),\n \n MaxPool(input_size=(112, 112, 64), kernel_size=3, stride=2, params=array_params, weights=weights),\n \n Block1(input_size=(56, 56, 64), filter_size=(64, 64), stride=1, params=array_params, weights=weights),\n Block1(input_size=(56, 56, 64), filter_size=(64, 64), stride=1, params=array_params, weights=weights),\n \n Block2(input_size=(56, 56, 64), filter_size=(64, 128), stride=2, params=array_params, weights=weights),\n Block1(input_size=(28, 28, 128), filter_size=(128, 128), stride=1, params=array_params, weights=weights),\n \n Block2(input_size=(28, 28, 128), filter_size=(128, 256), stride=2, params=array_params, weights=weights),\n Block1(input_size=(14, 14, 256), filter_size=(256, 256), stride=1, params=array_params, weights=weights),\n \n Block2(input_size=(14, 14, 256), filter_size=(256, 512), stride=2, params=array_params, weights=weights),\n Block1(input_size=( 7, 7, 512), filter_size=(512, 512), stride=1, params=array_params, weights=weights),\n ]\n\n model = Model(layers=layers, array_params=array_params)\n return model\n\n################\n\ndef load_resnet(num_example, array_params):\n model = create_model(array_params)\n x, y = load_inputs(num_example)\n return model, x, y\n\n################\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.absolute", "numpy.clip", "numpy.round", "numpy.shape", "numpy.load" ] ]
lazyKindMan/BioBERT-MCNN
[ "d971e25fd1fe7b3ecbf46e1225b07eb15a759d78" ]
[ "ner_util/create_ner_data_bake.py" ]
[ "import codecs\nimport collections\nimport json\nimport os\nimport pickle\nfrom functools import partial\n\nimport tensorflow as tf\n\nfrom bert_base import tokenization\n\nfrom ner_util.logutil import set_logger\nfrom prepro_utils import preprocess_text, encode_ids\nimport sentencepiece as spm\n\nlogger = set_logger('NER Training')\n\nSEG_ID_A = 0\nSEG_ID_B = 1\nSEG_ID_CLS = 2\nSEG_ID_SEP = 3\nSEG_ID_PAD = 4\n\nspecial_symbols = {\n \"[unk]\": 0,\n \"[s]\": 1,\n \"[/s]\": 2,\n \"[cls]\": 3,\n \"[sep]\": 4,\n \"[pad]\": 5,\n \"[mask]\": 6,\n \"[eod]\": 7,\n \"[eop]\": 8,\n}\n\nCLS_ID = special_symbols[\"[cls]\"]\nSEP_ID = special_symbols[\"[sep]\"]\nMASK_ID = special_symbols[\"[mask]\"]\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid=None, text=None, label=None, is_start_token=None, pieces=None):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label\n self.pieces = pieces\n self.is_start_token = is_start_token\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data for ner task.\"\"\"\n \"\"\"Constructor for InputFeatures.\n\n Args:\n input_ids: int32 Tensor of shape [seq_length]. Already been converted into WordPiece token ids\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n segment_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n label_id: int32 Tensor of shape [batch_size, seq_length]. 
for recording the tag corresponding to token \n \"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_ids, is_start_label, token_weight, piece_list=None):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_ids = label_ids\n self.is_start_label = is_start_label\n self.piece_list = piece_list\n self.token_weight = token_weight\n\n\ndef write_tokens(tokens, output_dir, mode):\n \"\"\"\n 将序列解析结果写入到文件中\n 只在mode=test的时候启用\n :param tokens:\n :param mode:\n :return:\n \"\"\"\n if mode == \"test\":\n path = os.path.join(output_dir, \"token_\" + mode + \".txt\")\n wf = codecs.open(path, 'a', encoding='utf-8')\n for token in tokens:\n if token != \"**NULL**\":\n wf.write(token + '\\n')\n wf.close()\n\n\ndef convert_sentences_to_features(sentences, data_dir, tokenizer, max_seq_length, label_map=None, lower=True):\n \"\"\"\n convert sentences to InputExample class\n :param sentences: all sequence input to the model\n :param data_dir: directory path for saving data\n :param sp sentencepiece model\n :param max_seq_length the max sequence for bert model\n :param lower: if ignore the case\n :param label_map: label_map2id if empty it will read label2id.pkl file to load which is in the data_dir\n :return: for list ([tokens] [labels] [is_start_token] [pieces_list])\n \"\"\"\n # compatible for windows\n if label_map is None:\n label_map = {}\n sep = os.sep if os.sep in data_dir else \"/\"\n # three are remained for the mark [CLS] and two [SEP] in the end\n max_seq_length = max_seq_length - 3\n # read label mapping to id\n label_map_path = sep.join([data_dir, \"label2id.pkl\"])\n if not label_map and os.path.exists(label_map_path):\n with open(label_map_path, \"r\") as f:\n for line in f.readlines():\n if line:\n lines = line.strip().split(\" \")\n if len(lines) != 2:\n continue\n label_map[lines[0]] = int(lines[1].replace(\"\\n\", \"\"))\n elif not os.path.exists(label_map_path):\n print(\"no such label2id.pkl in {} or the param label_map is empty\".format(data_dir))\n return None\n tokens = []\n label = []\n is_start_token = []\n pieces_list = []\n # weight according length\n token_weight = []\n for sentence in sentences:\n if not sentence:\n continue\n words, labels = zip(*sentence)\n t, l, ist, pieces, t_weight = process_seq(words, labels, tokenizer, lower)\n\n if len(t) > max_seq_length:\n yield tokens, label, is_start_token, pieces_list,token_weight, label_map\n tokens = []\n label = []\n is_start_token = []\n pieces_list = []\n # this operation will combine multiple sentences into one sequence\n t = [t[i:i + max_seq_length] for i in range(0, len(t), max_seq_length)]\n l = [l[i:i + max_seq_length] for i in range(0, len(l), max_seq_length)]\n ist = [ist[i:i + max_seq_length] for i in range(0, len(ist), max_seq_length)]\n pieces = [pieces[i:i + max_seq_length] for i in range(0, len(pieces), max_seq_length)]\n token_weight = [t_weight[i:i + max_seq_length] for i in range(0, len(t_weight), max_seq_length)]\n z = zip(t, l, ist, pieces)\n for i in z:\n yield i\n continue\n if len(t) + len(tokens) > max_seq_length:\n yield tokens, label, is_start_token, pieces_list, token_weight, label_map\n tokens = t\n label = l\n is_start_token = ist\n pieces_list = pieces\n token_weight = t_weight\n else:\n tokens.extend(t)\n label.extend(l)\n is_start_token.extend(ist)\n pieces_list.extend(pieces)\n token_weight.extend(t_weight)\n if tokens:\n yield tokens, label, is_start_token, pieces_list, token_weight, label_map\n\n\ndef process_seq(words, labels, 
tokenizer, lower=True):\n assert len(words) == len(labels)\n tokens = []\n label = []\n is_start_token = []\n pieces_list = []\n token_weight = []\n for i in range(len(words)):\n word = tokenization.convert_to_unicode(words[i])\n pieces = tokenizer.tokenize(word)\n t = tokenizer.convert_tokens_to_ids(pieces)\n tokens.extend(t)\n pieces_list.extend(pieces)\n label.extend([int(labels[i])] * len(t))\n is_start_token.append(1)\n for _ in range(len(t) - 1):\n is_start_token.append(0)\n all_word_length = float(len(words[i]))\n for piece in pieces:\n token_weight.append(float(len(piece.replace(\"##\", \"\"))) / all_word_length)\n return tokens, label, is_start_token, pieces_list, token_weight\n\n\ndef single_example(ex_index, tokens, labels, is_start_token, pieces_list, token_weight, label_map, max_seq_length, mode, data_dir):\n tokens_length = len(tokens)\n input_ids = []\n input_mask = []\n segment_ids = []\n label_ids = []\n is_start_label = []\n pieces = []\n t_weight = []\n # add [CLS] to the start position\n input_ids.append(CLS_ID)\n label_ids.append(label_map[\"O\"])\n is_start_label.append(1)\n pieces.append(\"[CLS]\")\n t_weight.append(1.0)\n\n input_ids.extend(tokens)\n input_mask.extend([1] * (tokens_length + 3))\n segment_ids.extend([SEG_ID_A] * (tokens_length + 3))\n label_ids.extend(labels)\n is_start_label.extend(is_start_token)\n t_weight.extend(token_weight)\n # for show res\n pieces.extend(pieces_list)\n\n # add [SEP] to the end position\n input_ids.extend([SEP_ID, SEP_ID])\n label_ids.extend([label_map[\"O\"]] * 2)\n is_start_label.extend([1, 1])\n pieces.extend([\"[SEP]\", \"[SEP]\"])\n t_weight.extend([1.0, 1.0])\n\n for _ in range(max_seq_length - tokens_length - 3):\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(SEG_ID_PAD)\n label_ids.append(label_map[\"O\"])\n is_start_label.append(0)\n pieces.append(\"*\")\n t_weight.append(1.0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(is_start_label) == max_seq_length\n assert len(t_weight) == max_seq_length\n # print some example\n if ex_index < 1:\n logger.info(\"*** Example {}***\".format(mode))\n logger.info(\"pieces: %s\" % \" \".join(piece for piece in pieces))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label_ids: %s\" % \" \".join([str(x) for x in label_ids]))\n logger.info(\"is_start_label %s\" % \" \".join([str(x) for x in is_start_label]))\n logger.info(\"token_weight %s\" % \" \".join([str(x) for x in t_weight]))\n example = {\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids,\n \"label_ids\": label_ids,\n \"is_start_label\": is_start_label,\n \"token_weight\": t_weight,\n \"pieces\": pieces\n }\n if not os.path.exists(\"/\".join([data_dir, \"{}\".format(\"tfrecord_data\")])):\n os.makedirs(\"/\".join([data_dir, \"{}\".format(\"tfrecord_data\")]))\n with open(\"/\".join([data_dir, \"{}/{}_example.json\".format(\"tfrecord_data\", mode)]), \"w\") as f:\n f.write(json.dumps(example, indent=4))\n # save token original sample in test data\n if mode == \"test\":\n return InputFeatures(input_ids, input_mask, segment_ids, label_ids, is_start_label, t_weight, pieces)\n return InputFeatures(input_ids, input_mask, segment_ids, 
label_ids, is_start_label, t_weight)\n\n\ndef file_based_convert_examples_to_features(examples, output_file, task):\n tf.logging.info(\"Start writing tfrecord %s.\", output_file)\n writer = tf.python_io.TFRecordWriter(output_file)\n tf.logging.info(\"total %d examples\", len(examples))\n for ex_index, example in enumerate(examples):\n if ex_index % 100 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n def create_float_feature(values):\n f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return f\n\n def create_string_feature(values):\n byte_values = [bytes(s, encoding='utf-8') for s in values]\n f = tf.train.Feature(bytes_list=tf.train.BytesList(value=list(byte_values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(example.input_ids)\n features[\"input_mask\"] = create_int_feature(example.input_mask)\n features[\"segment_ids\"] = create_int_feature(example.segment_ids)\n features[\"label_ids\"] = create_int_feature(example.label_ids)\n features[\"is_start_label\"] = create_int_feature(example.is_start_label)\n features[\"token_weight\"] = create_float_feature(example.token_weight)\n if task == \"test\":\n features[\"piece_list\"] = create_string_feature(example.piece_list)\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"write finish!\")\n writer.close()\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n @classmethod\n def _read_data(cls, input_file, split_char=\"\\t\", label_map=None, base_dir=None):\n \"\"\"Reads a BIO data. convert to [original token1,label_id1 ...] 
\"\"\"\n if not label_map:\n label_map = {}\n with open(input_file, 'r') as f:\n lines = f.readlines()\n sentences = []\n for line in lines:\n line = line.strip()\n if line:\n line = line.split(split_char)\n if line[1] in label_map.keys():\n line[1] = label_map[line[1]]\n else:\n if len(label_map.keys()) == 0:\n label_map[line[1]] = 0\n line[1] = 0\n else:\n label_map[line[1]] = max(label_map.values()) + 1\n line[1] = label_map[line[1]]\n if sentences:\n sentences[-1].append([line[0], line[1]])\n else:\n sentences.append([[line[0], line[1]]])\n else:\n sentences.append([])\n # create label set Vocabulary\n if base_dir is None:\n base_dir = \"\"\n path_sep = os.sep if os.sep in input_file else \"/\"\n print(input_file.split(path_sep))\n for p in input_file.split(path_sep)[:-1]:\n base_dir = os.path.join(base_dir, p)\n # 1表示从1开始对label进行index化\n # 保存label->index 的map\n if not os.path.exists(os.path.join(base_dir, 'label2id.pkl')):\n label_map[\"X\"] = max(label_map.values()) + 1\n with open(os.path.join(base_dir, 'label2id.pkl'), 'w') as w:\n for key, val in label_map.items():\n w.write(\"{} {}\\n\".format(key, val))\n return sentences\n\n\nclass NerProcessor(DataProcessor):\n def __init__(self, vocab, data_dir, max_seq_length, processor_data_name=\"\", lower=True):\n self.labels = set()\n self.vocab = vocab\n self.lower = lower\n self.data_dir = data_dir\n self.max_seq_length = max_seq_length\n self.processor_data_name = processor_data_name\n self.sep = os.sep if os.sep in self.data_dir else \"/\"\n self.label_map = {}\n\n\n self.get_train_examples()\n if os.path.exists(self.sep.join([data_dir, 'label2id.pkl'])):\n with open(os.path.join(data_dir, 'label2id.pkl'), 'r') as f:\n for line in f.readlines():\n if line:\n lines = line.strip().split(\" \")\n if len(lines) != 2:\n continue\n self.label_map[lines[0]] = int(lines[1].replace(\"\\n\", \"\"))\n self.get_dev_examples()\n self.get_test_examples()\n self.classes = len(self.label_map) - 1\n\n def get_train_examples(self):\n if not tf.io.gfile.exists(self.get_record_path(\"train\")):\n self._create_tfrecord(\n self._read_data(self.sep.join([self.data_dir, \"train.tsv\"]), label_map=self.label_map), \"train\")\n\n def get_dev_examples(self):\n if not tf.io.gfile.exists(self.get_record_path(\"devel\")):\n return self._create_tfrecord(\n self._read_data(self.sep.join([self.data_dir, \"devel.tsv\"]), label_map=self.label_map), \"eval\")\n\n def get_test_examples(self):\n if not tf.io.gfile.exists(self.get_record_path(\"test\")):\n return self._create_tfrecord(\n self._read_data(self.sep.join([self.data_dir, \"test.tsv\"]), label_map=self.label_map), \"test\")\n\n def get_record_path(self, mode):\n return self.sep.join([self.data_dir, \"tfrecord_data\",\n \"{}_{}_{}.tfrecord\".format(self.processor_data_name, mode, self.max_seq_length)])\n\n def get_path(self, mode):\n return self.sep.join([self.data_dir, \"tfrecord_data\", \"{}_{}_{}.tfrecord\".format(self.processor_data_name, mode,self.max_seq_length)])\n\n def get_train_data(self):\n return self.get_path(\"train\")\n\n def get_dev_data(self):\n return self.get_path(\"devel\")\n\n def get_test_data(self):\n return self.get_path(\"test\")\n\n\n def _create_tfrecord(self, sentences, task):\n tf.logging.set_verbosity(tf.logging.INFO)\n examples = []\n i = 0\n tokenizer = tokenization.FullTokenizer(\n vocab_file=self.vocab, do_lower_case=self.lower)\n for data in convert_sentences_to_features(sentences, self.data_dir, tokenizer, self.max_seq_length, self.label_map):\n 
examples.append(single_example(i, *data, max_seq_length=self.max_seq_length, mode=task, data_dir = self.data_dir))\n i += 1\n sep = os.sep if os.sep in self.data_dir else \"/\"\n if not os.path.exists(sep.join([self.data_dir, \"tfrecord_data\"])):\n os.mkdir(sep.join([self.data_dir, \"tfrecord_data\"]))\n output_file = sep.join(\n [self.data_dir, \"tfrecord_data\", \"{}_{}_{}.tfrecord\".format(self.processor_data_name, task, self.max_seq_length)])\n file_based_convert_examples_to_features(examples, output_file, task)\n return len(examples)\n\n def get_train_step(self):\n c = 0\n for record in tf.python_io.tf_record_iterator(self.get_path(\"train\")):\n c += 1\n print(\"record_num {}\".format(str(c)))\n return ((c * 7) // 300 + 1) * 300 \n" ]
[ [ "tensorflow.logging.info", "tensorflow.logging.set_verbosity", "tensorflow.train.Features", "tensorflow.python_io.TFRecordWriter" ] ]
coder-cell/Color-Detection
[ "4aa8e5ade20d1ef5143edd2ffef599ef4198278c" ]
[ "main.py" ]
[ "import cv2\nimport pandas as pd\n\n# #Creating argument parser to take image path from command line\n# ap = argparse.ArgumentParser()\n# ap.add_argument('-i', '--image', required=True, help=\"Image Path\")\n# args = vars(ap.parse_args())\n# img_path = args['image']\n\n# Reading the image with opencv\nimg = cv2.imread(\"colorpic.jpg\")\n\n# Declaring global variables (are used later on)\nclicked = False\nr = g = b = xpos = ypos = 0\n\n# Reading csv file with pandas and giving names to each column\nindex = [\"color\", \"color_name\", \"hex\", \"R\", \"G\", \"B\"]\ncsv = pd.read_csv('colors.csv', names=index, header=None)\n\n\n# Function to calculate minimum distance from all colors and get the most matching color\ndef getColorName(R, G, B):\n minimum = 10000\n cname = (0, 0, 0)\n for i in range(len(csv)):\n d = abs(R - int(csv.loc[i, \"R\"])) + abs(G - int(csv.loc[i, \"G\"])) + abs(B - int(csv.loc[i, \"B\"]))\n if d <= minimum:\n minimum = d\n cname = csv.loc[i, \"color_name\"]\n return cname\n\n\n# function to get x,y coordinates of mouse double click\ndef draw_function(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDBLCLK:\n global b, g, r, xpos, ypos, clicked\n clicked = True\n xpos = x\n ypos = y\n b, g, r = img[y, x]\n b = int(b)\n g = int(g)\n r = int(r)\n\n\ncv2.namedWindow('image')\ncv2.setMouseCallback('image', draw_function)\n\nwhile (1):\n\n cv2.imshow(\"image\", img)\n if clicked:\n\n # cv2.rectangle(image, startpoint, endpoint, color, thickness)-1 fills entire rectangle\n cv2.rectangle(img, (20, 20), (750, 60), (b, g, r), -1)\n\n # Creating text string to display( Color name and RGB values )\n text = getColorName(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)\n\n # cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType )\n cv2.putText(img, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA)\n\n # For very light colours we will display text in black colour\n if r + g + b >= 600:\n cv2.putText(img, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA)\n\n clicked = False\n\n # Break the loop when user hits 'esc' key\n if cv2.waitKey(20) & 0xFF == 27:\n break\n\ncv2.destroyAllWindows()\n" ]
[ [ "pandas.read_csv" ] ]
akhilpm/gradCAM
[ "1a3b62cdfa81bb6584a0aee7fdda88608663a12b" ]
[ "script/hyp_test.py" ]
[ "import os\nimport sys\nimport time\nimport cv2 as cv\nimport pickle\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nimport dataset.dataset_factory as dataset_factory\nfrom colorama import Back, Fore\nfrom config import cfg, update_config_from_file\nfrom torch.utils.data import DataLoader\nfrom dataset.collate import collate_test\nfrom torchvision.ops import nms\n#from torchvision.ops import box_convert\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport matplotlib.cm as cm\n#from utils.bbox_transform import bbox_overlaps_batch\n\nwatch_list = ['000012', '000017', '000019', '000021', '000026', '000036', '000089', '000102', '000121', '000130', '000198']\n\ndef find_iou(bb, BBGT):\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n # union\n area_bb = (bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.)\n area_BBGT = (BBGT[:, 2] - BBGT[:, 0] + 1.) * (BBGT[:, 3] - BBGT[:, 1] + 1.)\n uni = ( area_bb + area_BBGT - inters)\n overlaps = inters / uni\n ratio = area_bb/area_BBGT\n return np.max(ratio), np.max(overlaps)\n\n\ndef plot_detecton_boxes(image, cls_scores, classes, dets, im_labels, real_gt_boxes=None, sampled_boxes=None):\n obj_classes = []\n for label in im_labels:\n obj_classes.append(classes[label][:3])\n text_width, text_height = cv.getTextSize(' '.join(obj_classes), cv.FONT_HERSHEY_PLAIN, fontScale=1, thickness=1)[0]\n box_coords = ((1, 1 + 10), (1 + text_width + 2, 1 + 10 - text_height - 2))\n cv.rectangle(image, box_coords[0], box_coords[1], (255, 255, 255), cv.FILLED)\n cv.putText(image, ' '.join(obj_classes), (1, 1 + 10), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), thickness=1)\n\n if real_gt_boxes is not None:\n for i in range(real_gt_boxes.shape[0]):\n bbox = tuple(int(np.round(x)) for x in real_gt_boxes[i, :4])\n cv.rectangle(image, bbox[0:2], bbox[2:4], (255, 255, 255), 6)\n\n if sampled_boxes is not None:\n for j in range(len(sampled_boxes)):\n bbox = tuple(int(np.round(x)) for x in sampled_boxes[j, :4])\n cv.rectangle(image, bbox[0:2], bbox[2:4], (0, 0, 0), 3)\n\n sel_indices = np.argsort(-cls_scores)[:20][::-1]\n cls_scores = cls_scores[sel_indices]\n dets = dets[sel_indices]\n cNorm = colors.Normalize(vmin=min(cls_scores), vmax=max(cls_scores))\n jet = plt.get_cmap('jet')\n scalarMap = cm.ScalarMappable(norm=cNorm, cmap=jet)\n\n for i in range(dets.shape[0]):\n #bbox = tuple(int(np.round(x)) for x in dets[i, 1:5])\n scores = np.around(cls_scores[i], 2)\n bbox = tuple(int(np.round(x)) for x in dets[i, :4])\n cmap = scalarMap.to_rgba(cls_scores[i])\n cmap = tuple((np.array(cmap[0:-1]) * 255).astype(np.int32))[::-1]\n cv.rectangle(image, bbox[0:2], bbox[2:4], (int(cmap[0]), int(cmap[1]), int(cmap[2])), 2)\n #cv.rectangle(image, bbox[0:2], bbox[2:4], (0, 255, 0), 2)\n text_width, text_height = cv.getTextSize(str(scores), cv.FONT_HERSHEY_PLAIN, fontScale=1, thickness=1)[0]\n box_coords = ((bbox[0], bbox[1] + 15), (bbox[0] + text_width + 2, bbox[1] + 15 - text_height - 2))\n cv.rectangle(image, box_coords[0], box_coords[1], (255, 255, 255), cv.FILLED)\n cv.putText(image, str(scores), (bbox[0], bbox[1] + 15), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), thickness=1)\n return image\n\ndef clip_boxes(rois, height, width):\n rois[:, 1].clamp_(0, width-1)\n rois[:, 2].clamp_(0, height-1)\n rois[:, 3].clamp_(0, width-1)\n rois[:, 
4].clamp_(0, height-1)\n return rois\n\ndef hyp_test(dataset, net, class_agnostic, load_dir, session, epoch, log, add_params):\n log.info(\"============== Testing EPOCH {} =============\".format(epoch))\n device = torch.device('cuda:0') if cfg.CUDA else torch.device('cpu')\n print(Back.CYAN + Fore.BLACK + 'Current device: %s' % (str(device).upper()))\n\n if 'cfg_file' in add_params:\n update_config_from_file(add_params['cfg_file'])\n\n log.info(Back.WHITE + Fore.BLACK + 'Using config:')\n log.info('GENERAL:')\n log.info(cfg.GENERAL)\n log.info('TEST:')\n log.info(cfg.TEST)\n log.info('RPN:')\n log.info(cfg.RPN)\n\n # TODO: add competition mode\n dataset, ds_name = dataset_factory.get_dataset(dataset, add_params, mode='test')\n loader = DataLoader(dataset, batch_size=1, shuffle=False,\n collate_fn=collate_test)\n\n if 'data_path' in add_params: cfg.DATA_DIR = add_params['data_path']\n output_dir = os.path.join(cfg.DATA_DIR, 'output', net, ds_name)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n log.info(Back.CYAN + Fore.BLACK + 'Output directory: %s' % (output_dir))\n\n if net == 'vgg16':\n faster_rcnn = VGG16(dataset.num_classes, class_agnostic=class_agnostic)\n elif net.startswith('resnet'):\n num_layers = net[6:]\n faster_rcnn = Resnet(num_layers, dataset.num_classes, class_agnostic=class_agnostic)\n else:\n raise ValueError(Back.RED + 'Network \"{}\" is not defined!'.format(net))\n\n faster_rcnn.init()\n faster_rcnn.to(device)\n\n #model_path = os.path.join(cfg.DATA_DIR, load_dir, net, ds_name, 'frcnn_{}_{}.pth'.format(session, epoch))\n #log.info(Back.WHITE + Fore.BLACK + 'Loading model from %s' % (model_path))\n #checkpoint = torch.load(model_path, map_location=device)\n #faster_rcnn.load_state_dict(checkpoint['model'])\n #log.info('Done.')\n\n start = time.time()\n\n faster_rcnn.eval()\n debug_dir = os.path.join(cfg.DATA_DIR, 'debug', 'session_' + str(session))\n\n # load the scores of this epoch\n if dataset._image_set == 'trainval':\n save_dir = os.path.join(debug_dir, 'ss_box_scores')\n save_path = os.path.join(save_dir, 'score_epoch_' + str(epoch) + '.pt')\n pseudo_GT_scores = torch.load(save_path)\n save_dir = os.path.join(debug_dir, 'sampled_boxes')\n save_path = os.path.join(save_dir, 'sampled_boxes_' + str(epoch) + '.pt')\n sampled_boxes = torch.load(save_path)\n #save_dir = os.path.join(debug_dir, 'iou_nis')\n #save_path = os.path.join(save_dir, 'iou_nis_' + str(epoch) + '.pt')\n #iou_nis = torch.load(save_path)\n\n save_det_dir = os.path.join(debug_dir, 'plot_boxes')\n if not os.path.exists(save_det_dir):\n os.makedirs(save_det_dir)\n\n all_ious = []\n all_criterias = []\n for i, data in enumerate(loader):\n #image_data = data[0].to(device)\n image_info = data[1]#.to(device)\n gt_boxes = data[2].to(device)\n image_labels = data[3]\n image_ids = data[4]\n real_gt_boxes = data[5].to(device)\n num_gt_boxes = int(image_info[0, 3])\n\n #base_feature = faster_rcnn.RCNN_base(image_data)\n #rois = torch.zeros(num_gt_boxes, 5).to(device)\n #rois[:, 1:5] = gt_boxes[0, :num_gt_boxes, :4]\n #pooled_feat = faster_rcnn.RCNN_roi_layer(base_feature, rois)\n #pooled_feat = faster_rcnn._feed_pooled_feature_to_top(pooled_feat)\n #cls_score = faster_rcnn.RCNN_cls_score(pooled_feat).detach()\n\n\n if len(real_gt_boxes)>0:\n sampled_boxes_this_image = sampled_boxes[image_ids[0]]\n for j, cls in enumerate(image_labels[0]):\n #if cls==5:\n actual_gt_boxes = real_gt_boxes[0][real_gt_boxes[0, :, 4] == cls, :4]\n sampled_boxes_this_class = 
sampled_boxes_this_image[sampled_boxes_this_image[:, 4]==cls, :4]\n max_box = torch.argmax(pseudo_GT_scores[image_ids[0]][:num_gt_boxes, cls])\n iou, corloc = find_iou(gt_boxes[0, max_box, :4].cpu().numpy(), actual_gt_boxes.cpu().numpy())\n all_ious.append([iou, corloc])\n\n overlaps = bbox_overlaps_batch(actual_gt_boxes, sampled_boxes_this_class).squeeze(0)\n #overlaps = bbox_overlaps_batch(actual_gt_boxes, gt_boxes[0, :num_gt_boxes, :4]).squeeze(0)\n criteria = (overlaps >= 0.5).sum(dim=1).cpu().numpy()\n criteria = (criteria >=1)\n all_criterias.append(criteria)\n\n\n if i % 200 == 0:\n print(\"Plotting for {} th image\".format(i))\n watch_list.append(image_ids[0])\n\n gt_boxes /= image_info[0][2].item()\n gt_boxes = gt_boxes[0, :num_gt_boxes].cpu().numpy()\n cls_score = pseudo_GT_scores[image_ids[0]][:num_gt_boxes]\n num_real_gt_boxes = int(image_info[0, 4])\n real_gt_boxes[0, :, :4] /= image_info[0][2].item()\n real_gt_boxes = real_gt_boxes[0, :num_real_gt_boxes]\n sampled_boxes_this_image = sampled_boxes[image_ids[0]]\n\n for j, cls in enumerate(image_labels[0]):\n image = cv.imread(dataset.image_path_at(data[4][0]))\n sampled_boxes_this_class = sampled_boxes_this_image[sampled_boxes_this_image[:, 4]==cls, :4].cpu().numpy()\n sampled_boxes_this_class /= image_info[0][2].item()\n real_gt_boxes_this_class = real_gt_boxes[real_gt_boxes[:, 4] ==cls, :4].cpu().numpy()\n image = plot_detecton_boxes(image, cls_score[:, cls].cpu().numpy(), dataset.classes, gt_boxes, image_labels[0].cpu().numpy(), real_gt_boxes_this_class)\n class_name = dataset.classes[cls]\n save_det_path = os.path.join(save_det_dir, image_ids[0] + '_' + class_name + '_epoch_' + str(epoch) + '_det.jpg')\n cv.imwrite(save_det_path, image)\n\n\n\n all_ious = np.array(all_ious)\n no_overlap = np.sum(all_ious[:, 1]==0)\n greater = np.sum(all_ious[:, 0]>1.0)\n smaller = len(all_ious) - no_overlap - greater\n corloc = np.sum(all_ious[:, 1]>=0.5)\n log.info(\"CorLoc: {:.3f} {}/{}\".format(float(corloc)/len(all_ious), corloc, len(all_ious)))\n log.info(\"No overlap: {:.3f} {}/{}\".format(float(no_overlap)/len(all_ious), no_overlap, len(all_ious)))\n log.info(\"No of small proposals: {:.3f} {}/{}\".format(float(smaller) / len(all_ious), smaller, len(all_ious)))\n log.info(\"No of large proposals: {:.3f} {}/{}\".format(float(greater) / len(all_ious), greater, len(all_ious)))\n\n all_criterias = np.hstack(all_criterias)\n log.info(\"Covered: {} {}/{}\".format(float(all_criterias.sum())/len(all_criterias), all_criterias.sum(), len(all_criterias)))\n\n end = time.time()\n log.info(Back.GREEN + Fore.BLACK + 'Plot time: %.4fs.' % (end - start))\n" ]
[ [ "numpy.hstack", "numpy.maximum", "numpy.minimum", "torch.load", "numpy.around", "torch.utils.data.DataLoader", "matplotlib.pyplot.get_cmap", "numpy.round", "numpy.max", "matplotlib.cm.ScalarMappable", "torch.device", "numpy.argsort", "numpy.array", "numpy.sum", "torch.argmax" ] ]
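The hyp_test entry above scores each image by the best IoU between its top-scoring pseudo ground-truth box and the real annotations, and reports CorLoc as the fraction of images where that IoU reaches 0.5. Below is a minimal sketch of the one-vs-many IoU it relies on, assuming the same [x1, y1, x2, y2] corner layout and +1 pixel convention as the entry's find_iou; the driver boxes are made up.

import numpy as np

def iou_one_vs_many(bb, gt):
    # intersection of one box against every ground-truth box
    ixmin = np.maximum(gt[:, 0], bb[0])
    iymin = np.maximum(gt[:, 1], bb[1])
    ixmax = np.minimum(gt[:, 2], bb[2])
    iymax = np.minimum(gt[:, 3], bb[3])
    iw = np.maximum(ixmax - ixmin + 1.0, 0.0)  # clamped: disjoint boxes -> 0
    ih = np.maximum(iymax - iymin + 1.0, 0.0)
    inter = iw * ih
    area_bb = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
    area_gt = (gt[:, 2] - gt[:, 0] + 1.0) * (gt[:, 3] - gt[:, 1] + 1.0)
    return inter / (area_bb + area_gt - inter)

bb = np.array([10.0, 10.0, 50.0, 50.0])
gt = np.array([[12.0, 8.0, 48.0, 52.0], [60.0, 60.0, 90.0, 90.0]])
print(iou_one_vs_many(bb, gt).max() >= 0.5)  # True: counts as a CorLoc hit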
junj2ejj/ptranking.github.io
[ "06fa9751dd2eca89749ba4bb9641e4272cfc30a1" ]
[ "ptranking/metric/metric_utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"Description\n\n\"\"\"\n\nimport torch\n\nfrom ptranking.metric.adhoc_metric import torch_ideal_dcg\nfrom ptranking.ltr_global import global_gpu as gpu, tensor\n\n#######\n# For Delta Metrics\n#######\n\ndef get_delta_ndcg(batch_stds, batch_stds_sorted_via_preds, multi_level_rele=True):\n    '''\n    Delta-nDCG w.r.t. pairwise swapping of the currently predicted ltr_adhoc\n    :param batch_stds: the standard labels sorted in a descending order\n    :param batch_stds_sorted_via_preds: the standard labels sorted based on the corresponding predictions\n    :return:\n    '''\n    # ideal discount cumulative gains\n    batch_idcgs = torch_ideal_dcg(batch_sorted_labels=batch_stds, gpu=gpu, multi_level_rele=multi_level_rele)\n\n    if multi_level_rele:\n        batch_gains = torch.pow(2.0, batch_stds_sorted_via_preds) - 1.0\n    else:\n        batch_gains = batch_stds_sorted_via_preds\n\n    batch_n_gains = batch_gains / batch_idcgs  # normalised gains\n    batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)\n\n    batch_std_ranks = torch.arange(batch_stds_sorted_via_preds.size(1)).type(tensor)\n    batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)  # discount co-efficients\n    batch_dists = torch.unsqueeze(batch_dists, dim=0)\n    batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)\n    batch_delta_ndcg = torch.abs(batch_ng_diffs) * torch.abs(batch_dists_diffs)  # absolute changes w.r.t. pairwise swapping\n\n    return batch_delta_ndcg\n\n\ndef get_sharp_swap_deltas(batch_stds, batch_stds_sorted_via_preds, pos_swap_const=1., neg_swap_const=1.):\n    '''\n    pure changes w.r.t. pairwise swapping of the currently predicted ltr_adhoc\n    pure changes w.r.t. pairwise swapping is given that: (1) (1/D_i - 1/D_j)(G_j - G_i) (2)(G_i - G_j)(1/D_j - 1/D_i)\n\n    :param batch_stds: the standard labels sorted in a descending order\n    :param batch_stds_sorted_via_preds: the standard labels sorted based on the corresponding predictions\n    :return:\n    '''\n    batch_idcgs = torch_ideal_dcg(batch_sorted_labels=batch_stds, gpu=gpu)  # ideal discount cumulative gains\n\n    batch_gains = torch.pow(2.0, batch_stds_sorted_via_preds) - 1.0\n    batch_n_gains = batch_gains / batch_idcgs  # normalised gains\n    batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)\n\n    batch_std_ranks = torch.arange(batch_stds_sorted_via_preds.size(1)).type(tensor)\n    batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)  # discount co-efficients\n    batch_dists = torch.unsqueeze(batch_dists, dim=0)\n    batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)\n    t_batch_dists_diffs = torch.transpose(batch_dists_diffs, dim0=1, dim1=2)\n\n    batch_swap_ndcg = batch_ng_diffs * t_batch_dists_diffs  # pure changes\n\n    batch_pos_swap_ones = (batch_swap_ndcg > 0).type(tensor)  # s_ij is one for positive swap, otherwise 0\n    batch_pos_swap_cofs = batch_pos_swap_ones * pos_swap_const\n\n    batch_neg_swap_ones = (batch_swap_ndcg < 0).type(tensor)  # negative swap means that the current pairwise order is consistent with the standard order\n    batch_neg_swap_cofs = batch_neg_swap_ones * neg_swap_const\n\n    batch_all_cofs = batch_pos_swap_cofs + batch_neg_swap_cofs\n\n    #1 what is the meaning?\n    #batch_swap_ndcg = torch.clamp(batch_swap_ndcg, min=0.0, max=100000.)  # keeping positive swapping\n    #batch_swap_streths = batch_swap_ndcg + batch_neg_swap_cofs\n\n    #2\n    #batch_delta_ndcg = torch.abs(batch_swap_ndcg)\n    #batch_swap_streths = batch_all_cofs * batch_delta_ndcg\n\n    #3 all constant\n    batch_swap_streths = torch.ones_like(batch_swap_ndcg)\n\n    return batch_swap_streths\n\n\ndef metric_results_to_string(list_scores=None, list_cutoffs=None, split_str=', '):\n    \"\"\"\n    Convert metric results to a string representation\n    :param list_scores:\n    :param list_cutoffs:\n    :param split_str:\n    :return:\n    \"\"\"\n    list_str = []\n    for i in range(len(list_scores)):\n        list_str.append('nDCG@{}:{:.4f}'.format(list_cutoffs[i], list_scores[i]))\n    return split_str.join(list_str)\n" ]
[ [ "torch.abs", "torch.transpose", "torch.unsqueeze", "torch.log2", "torch.pow", "torch.ones_like" ] ]
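get_delta_ndcg in the ptranking entry uses the closed form for how much nDCG changes under a single pairwise swap: with gains g = 2^label - 1 and discounts 1/log2(rank + 2), swapping ranks i and j changes nDCG by |g_i - g_j| * |1/log2(i+2) - 1/log2(j+2)| / IDCG. A single-query sketch of that matrix, with the batch dimensions dropped and made-up labels:

import torch

labels_by_pred = torch.tensor([1.0, 3.0, 0.0, 2.0])  # labels in predicted order
ideal = torch.sort(labels_by_pred, descending=True).values
discounts = 1.0 / torch.log2(torch.arange(4, dtype=torch.float) + 2.0)
idcg = torch.sum((2.0 ** ideal - 1.0) * discounts)

n_gains = (2.0 ** labels_by_pred - 1.0) / idcg  # IDCG-normalised gains
delta_ndcg = (torch.abs(n_gains.unsqueeze(1) - n_gains.unsqueeze(0))
              * torch.abs(discounts.unsqueeze(1) - discounts.unsqueeze(0)))
print(delta_ndcg)  # delta_ndcg[i, j] = |nDCG change| if ranks i, j were swapped

The matrix is symmetric with a zero diagonal, which is what lets LambdaRank-style losses use it directly as a pairwise weight.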
marcevrard/pyannote-audio
[ "1744715041c7393ddc8739698839abc6141fecaf" ]
[ "pyannote/audio/applications/speaker_embedding.py" ]
[ "#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2017-2020 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\nimport torch\nimport numpy as np\nfrom typing import Optional\n\nfrom .base import Application\n\nfrom pyannote.core import Segment, Timeline, Annotation\n\nfrom pyannote.database import get_protocol\nfrom pyannote.database import get_annotated\nfrom pyannote.database import get_unique_identifier\nfrom pyannote.database import FileFinder\nfrom pyannote.database.protocol import SpeakerDiarizationProtocol\nfrom pyannote.database.protocol import SpeakerVerificationProtocol\n\nimport scipy.optimize\nfrom scipy.cluster.hierarchy import fcluster\nfrom pyannote.core.utils.hierarchy import linkage\n\nfrom pyannote.core.utils.distance import pdist\nfrom pyannote.core.utils.distance import cdist\nfrom pyannote.audio.features.precomputed import Precomputed\n\nfrom pyannote.metrics.binary_classification import det_curve\nfrom pyannote.metrics.diarization import DiarizationPurityCoverageFMeasure\n\nfrom pyannote.audio.features import Pretrained\nfrom pyannote.audio.features.utils import get_audio_duration\n\n\nclass SpeakerEmbedding(Application):\n @property\n def config_default_module(self):\n return \"pyannote.audio.embedding.approaches\"\n\n def validation_criterion(self, protocol_name, **kwargs):\n protocol = get_protocol(protocol_name)\n if isinstance(protocol, SpeakerVerificationProtocol):\n return \"equal_error_rate\"\n elif isinstance(protocol, SpeakerDiarizationProtocol):\n return \"diarization_fscore\"\n\n def validate_init(self, protocol_name, subset=\"development\"):\n\n protocol = get_protocol(protocol_name)\n\n if isinstance(\n protocol, (SpeakerVerificationProtocol, SpeakerDiarizationProtocol)\n ):\n return\n\n msg = (\n \"Only SpeakerVerification or SpeakerDiarization tasks are\"\n 'supported in \"validation\" mode.'\n )\n raise ValueError(msg)\n\n def validate_epoch(self, epoch, validation_data, protocol=None, **kwargs):\n\n _protocol = get_protocol(protocol)\n\n if isinstance(_protocol, SpeakerVerificationProtocol):\n return self._validate_epoch_verification(\n epoch, validation_data, protocol=protocol, **kwargs\n )\n\n elif isinstance(_protocol, SpeakerDiarizationProtocol):\n return self._validate_epoch_diarization(\n epoch, validation_data, protocol=protocol, **kwargs\n )\n\n else:\n msg = (\n \"Only SpeakerVerification or SpeakerDiarization tasks are\"\n 'supported in 
\"validation\" mode.'\n )\n raise ValueError(msg)\n\n @staticmethod\n def get_hash(file):\n hashable = []\n for f in file.files():\n hashable.append((f[\"uri\"], tuple(f[\"try_with\"])))\n return hash(tuple(sorted(hashable)))\n\n @staticmethod\n def get_embedding(file, pretrained):\n emb = []\n for f in file.files():\n if isinstance(f[\"try_with\"], Segment):\n segments = [f[\"try_with\"]]\n else:\n segments = f[\"try_with\"]\n for segment in segments:\n emb.append(pretrained.crop(f, segment, mode=\"center\"))\n\n return np.mean(np.vstack(emb), axis=0, keepdims=True)\n\n def _validate_epoch_verification(\n self,\n epoch,\n validation_data,\n protocol=None,\n subset=\"development\",\n device: Optional[torch.device] = None,\n batch_size: int = 32,\n n_jobs: int = 1,\n duration: float = None,\n step: float = 0.25,\n metric: str = None,\n **kwargs,\n ):\n\n # initialize embedding extraction\n pretrained = Pretrained(\n validate_dir=self.validate_dir_,\n epoch=epoch,\n duration=duration,\n step=step,\n batch_size=batch_size,\n device=device,\n )\n\n preprocessors = self.preprocessors_\n if \"audio\" not in preprocessors:\n preprocessors[\"audio\"] = FileFinder()\n if \"duration\" not in preprocessors:\n preprocessors[\"duration\"] = get_audio_duration\n _protocol = get_protocol(protocol, progress=False, preprocessors=preprocessors)\n\n y_true, y_pred, cache = [], [], {}\n\n for trial in getattr(_protocol, \"{0}_trial\".format(subset))():\n\n # compute embedding for file1\n file1 = trial[\"file1\"]\n hash1 = self.get_hash(file1)\n if hash1 in cache:\n emb1 = cache[hash1]\n else:\n emb1 = self.get_embedding(file1, pretrained)\n cache[hash1] = emb1\n\n # compute embedding for file2\n file2 = trial[\"file2\"]\n hash2 = self.get_hash(file2)\n if hash2 in cache:\n emb2 = cache[hash2]\n else:\n emb2 = self.get_embedding(file2, pretrained)\n cache[hash2] = emb2\n\n # compare embeddings\n distance = cdist(emb1, emb2, metric=metric)[0, 0]\n y_pred.append(distance)\n\n y_true.append(trial[\"reference\"])\n\n _, _, _, eer = det_curve(np.array(y_true), np.array(y_pred), distances=True)\n\n return {\"metric\": \"equal_error_rate\", \"minimize\": True, \"value\": float(eer)}\n\n def _validate_epoch_diarization(\n self,\n epoch,\n validation_data,\n protocol=None,\n subset=\"development\",\n device: Optional[torch.device] = None,\n batch_size: int = 32,\n n_jobs: int = 1,\n duration: float = None,\n step: float = 0.25,\n metric: str = None,\n **kwargs,\n ):\n\n # initialize embedding extraction\n pretrained = Pretrained(\n validate_dir=self.validate_dir_,\n epoch=epoch,\n duration=duration,\n step=step,\n batch_size=batch_size,\n device=device,\n )\n\n preprocessors = self.preprocessors_\n if \"audio\" not in preprocessors:\n preprocessors[\"audio\"] = FileFinder()\n if \"duration\" not in preprocessors:\n preprocessors[\"duration\"] = get_audio_duration\n _protocol = get_protocol(protocol, progress=False, preprocessors=preprocessors)\n\n Z, t = dict(), dict()\n min_d, max_d = np.inf, -np.inf\n\n for current_file in getattr(_protocol, subset)():\n\n uri = get_unique_identifier(current_file)\n uem = get_annotated(current_file)\n reference = current_file[\"annotation\"]\n\n X_, t_ = [], []\n embedding = pretrained(current_file)\n for i, (turn, _) in enumerate(reference.itertracks()):\n\n # extract embedding for current speech turn\n x_ = embedding.crop(turn, mode=\"center\")\n if len(x_) < 1:\n x_ = embedding.crop(turn, mode=\"loose\")\n if len(x_) < 1:\n msg = f\"No embedding for {turn} in {uri:s}.\"\n raise 
ValueError(msg)\n\n # each speech turn is represented by its average embedding\n X_.append(np.mean(x_, axis=0))\n t_.append(turn)\n\n X_ = np.array(X_)\n # apply hierarchical agglomerative clustering\n # all the way up to just one cluster (ie complete dendrogram)\n D = pdist(X_, metric=metric)\n min_d = min(np.min(D), min_d)\n max_d = max(np.max(D), max_d)\n\n Z[uri] = linkage(X_, method=\"pool\", metric=metric)\n t[uri] = np.array(t_)\n\n def fun(threshold):\n\n _metric = DiarizationPurityCoverageFMeasure(weighted=False)\n\n for current_file in getattr(_protocol, subset)():\n\n uri = get_unique_identifier(current_file)\n uem = get_annotated(current_file)\n reference = current_file[\"annotation\"]\n\n clusters = fcluster(Z[uri], threshold, criterion=\"distance\")\n\n hypothesis = Annotation(uri=uri)\n for (start_time, end_time), cluster in zip(t[uri], clusters):\n hypothesis[Segment(start_time, end_time)] = cluster\n\n _ = _metric(reference, hypothesis, uem=uem)\n\n return 1.0 - abs(_metric)\n\n res = scipy.optimize.minimize_scalar(\n fun, bounds=(0.0, 1.0), method=\"bounded\", options={\"maxiter\": 10}\n )\n\n threshold = res.x.item()\n\n return {\n \"metric\": \"diarization_fscore\",\n \"minimize\": False,\n \"value\": float(1.0 - res.fun),\n }\n" ]
[ [ "numpy.min", "numpy.vstack", "numpy.max", "numpy.mean", "numpy.array", "scipy.cluster.hierarchy.fcluster" ] ]
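The diarization branch of the pyannote entry precomputes one dendrogram per file and then lets scipy's bounded scalar optimizer choose the flat-clustering threshold that maximises the F-measure. A toy sketch of that tune-the-cut pattern, assuming 2-D embeddings and scipy's standard "average" linkage in place of pyannote's "pool" method, with a crude pairwise-agreement score standing in for DiarizationPurityCoverageFMeasure:

import numpy as np
import scipy.optimize
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 0.1, (5, 2)), rng.normal(1, 0.1, (5, 2))])
truth = np.array([0] * 5 + [1] * 5)
Z = linkage(X, method="average", metric="euclidean")  # built once, cut many times

def cost(threshold):
    clusters = fcluster(Z, threshold, criterion="distance")
    same_pred = clusters[:, None] == clusters[None, :]
    same_true = truth[:, None] == truth[None, :]
    return 1.0 - (same_pred == same_true).mean()  # 0 when clustering is perfect

res = scipy.optimize.minimize_scalar(cost, bounds=(0.0, 2.0),
                                     method="bounded", options={"maxiter": 10})
print(res.x, 1.0 - res.fun)  # chosen cut distance and its agreement score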
mathialm/GPUTaskScheduler
[ "c65fb95950239cac792dd28a5807e239defb6727" ]
[ "gpu_task_scheduler/start_gpu_task.py" ]
[ "try:\n    import matplotlib\nexcept ImportError:\n    pass\nelse:\n    matplotlib.use(\"Agg\")\n\nimport sys\nimport imp\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import _pickle as pickle\n\n\ndef main():\n    cwd = sys.argv[4]\n    sys.path.append(cwd)\n    pkl_file = sys.argv[1]\n    imp.load_source(sys.argv[2], sys.argv[3])\n    with open(pkl_file, \"rb\") as f:\n        worker = pickle.load(f)\n    worker.main()\n\n\nif __name__ == \"__main__\":\n    main()\n" ]
[ [ "matplotlib.use" ] ]
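The start_gpu_task entry reanimates a scheduler worker by unpickling it inside a fresh process, after imp.load_source has re-registered the module that defines the worker's class (unpickling fails if that class cannot be resolved). A single-process sketch of the round trip, with a hypothetical Worker class standing in for the scheduler's real one:

import pickle

class Worker:
    def __init__(self, config):
        self.config = config
    def main(self):
        print("running with", self.config)

with open("worker.pkl", "wb") as f:   # the scheduler side
    pickle.dump(Worker({"gpu": 0}), f)

with open("worker.pkl", "rb") as f:   # the spawned-process side
    worker = pickle.load(f)
worker.main()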
makdaddy604/Tensorflow
[ "61793e67bd0dc3aedd5d395c281993261b597af8" ]
[ "official/resnet/resnet_run_loop.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains utility and supporting functions for ResNet.\n\n This module contains ResNet code which does not directly build layers. This\nincludes dataset management, hyperparameter and optimizer code, and argument\nparsing. Code for defining the ResNet layers can be found in resnet_model.py.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport math\nimport multiprocessing\nimport os\n\n# pylint: disable=g-bad-import-order\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.resnet import resnet_model\nfrom official.utils.flags import core as flags_core\nfrom official.utils.export import export\nfrom official.utils.logs import hooks_helper\nfrom official.utils.logs import logger\nfrom official.resnet import imagenet_preprocessing\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import model_helpers\n\n\n################################################################################\n# Functions for input processing.\n################################################################################\ndef process_record_dataset(dataset,\n is_training,\n batch_size,\n shuffle_buffer,\n parse_record_fn,\n num_epochs=1,\n dtype=tf.float32,\n datasets_num_private_threads=None,\n num_parallel_batches=1,\n drop_remainder=False):\n \"\"\"Given a Dataset with raw records, return an iterator over the records.\n\n Args:\n dataset: A Dataset representing raw records\n is_training: A boolean denoting whether the input is for training.\n batch_size: The number of samples per batch.\n shuffle_buffer: The buffer size to use when shuffling records. A larger\n value results in better randomness, but smaller values reduce startup\n time and use less memory.\n parse_record_fn: A function that takes a raw record and returns the\n corresponding (image, label) pair.\n num_epochs: The number of epochs to repeat the dataset.\n dtype: Data type to use for images/features.\n datasets_num_private_threads: Number of threads for a private\n threadpool created for all datasets computation.\n num_parallel_batches: Number of parallel batches for tf.data.\n drop_remainder: A boolean indicates whether to drop the remainder of the\n batches. 
If True, the batch dimension will be static.\n\n Returns:\n Dataset of (image, label) pairs ready for iteration.\n \"\"\"\n # Defines a specific size thread pool for tf.data operations.\n if datasets_num_private_threads:\n options = tf.data.Options()\n options.experimental_threading.private_threadpool_size = (\n datasets_num_private_threads)\n dataset = dataset.with_options(options)\n tf.compat.v1.logging.info('datasets_num_private_threads: %s',\n datasets_num_private_threads)\n\n # Disable intra-op parallelism to optimize for throughput instead of latency.\n options = tf.data.Options()\n options.experimental_threading.max_intra_op_parallelism = 1\n dataset = dataset.with_options(options)\n\n # Prefetches a batch at a time to smooth out the time taken to load input\n # files for shuffling and processing.\n dataset = dataset.prefetch(buffer_size=batch_size)\n if is_training:\n # Shuffles records before repeating to respect epoch boundaries.\n dataset = dataset.shuffle(buffer_size=shuffle_buffer)\n\n # Repeats the dataset for the number of epochs to train.\n dataset = dataset.repeat(num_epochs)\n\n # Parses the raw records into images and labels.\n dataset = dataset.map(\n lambda value: parse_record_fn(value, is_training, dtype),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)\n\n # Operations between the final prefetch and the get_next call to the iterator\n # will happen synchronously during run time. We prefetch here again to\n # background all of the above processing work and keep it out of the\n # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE\n # allows DistributionStrategies to adjust how many batches to fetch based\n # on how many devices are present.\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n return dataset\n\n\ndef get_synth_input_fn(height, width, num_channels, num_classes,\n dtype=tf.float32):\n \"\"\"Returns an input function that returns a dataset with random data.\n\n This input_fn returns a data set that iterates over a set of random data and\n bypasses all preprocessing, e.g. jpeg decode and copy. The host to device\n copy is still included. 
This used to find the upper throughput bound when\n tunning the full input pipeline.\n\n Args:\n height: Integer height that will be used to create a fake image tensor.\n width: Integer width that will be used to create a fake image tensor.\n num_channels: Integer depth that will be used to create a fake image tensor.\n num_classes: Number of classes that should be represented in the fake labels\n tensor\n dtype: Data type for features/images.\n\n Returns:\n An input_fn that can be used in place of a real one to return a dataset\n that can be used for iteration.\n \"\"\"\n # pylint: disable=unused-argument\n def input_fn(is_training, data_dir, batch_size, *args, **kwargs):\n \"\"\"Returns dataset filled with random data.\"\"\"\n # Synthetic input should be within [0, 255].\n inputs = tf.random.truncated_normal(\n [batch_size] + [height, width, num_channels],\n dtype=dtype,\n mean=127,\n stddev=60,\n name='synthetic_inputs')\n\n labels = tf.random.uniform(\n [batch_size],\n minval=0,\n maxval=num_classes - 1,\n dtype=tf.int32,\n name='synthetic_labels')\n data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()\n data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return data\n\n return input_fn\n\n\ndef image_bytes_serving_input_fn(image_shape, dtype=tf.float32):\n \"\"\"Serving input fn for raw jpeg images.\"\"\"\n\n def _preprocess_image(image_bytes):\n \"\"\"Preprocess a single raw image.\"\"\"\n # Bounding box around the whole image.\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])\n height, width, num_channels = image_shape\n image = imagenet_preprocessing.preprocess_image(\n image_bytes, bbox, height, width, num_channels, is_training=False)\n return image\n\n image_bytes_list = tf.compat.v1.placeholder(\n shape=[None], dtype=tf.string, name='input_tensor')\n images = tf.map_fn(\n _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)\n return tf.estimator.export.TensorServingInputReceiver(\n images, {'image_bytes': image_bytes_list})\n\n\ndef override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):\n \"\"\"Override flags and set env_vars for performance.\n\n These settings exist to test the difference between using stock settings\n and manual tuning. It also shows some of the ENV_VARS that can be tweaked to\n squeeze a few extra examples per second. These settings are defaulted to the\n current platform of interest, which changes over time.\n\n On systems with small numbers of cpu cores, e.g. 
under 8 logical cores,\n setting up a gpu thread pool with `tf_gpu_thread_mode=gpu_private` may perform\n poorly.\n\n Args:\n flags_obj: Current flags, which will be adjusted possibly overriding\n what has been set by the user on the command-line.\n \"\"\"\n cpu_count = multiprocessing.cpu_count()\n tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)\n\n # Sets up thread pool for each GPU for op scheduling.\n per_gpu_thread_count = 1\n total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus\n os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode\n os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)\n tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',\n os.environ['TF_GPU_THREAD_COUNT'])\n tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',\n os.environ['TF_GPU_THREAD_MODE'])\n\n # Reduces general thread pool by number of threads used for GPU pool.\n main_thread_count = cpu_count - total_gpu_thread_count\n flags_obj.inter_op_parallelism_threads = main_thread_count\n\n # Sets thread count for tf.data. Logical cores minus threads assign to the\n # private GPU pool along with 2 thread per GPU for event monitoring and\n # sending / receiving tensors.\n num_monitoring_threads = 2 * flags_obj.num_gpus\n flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count\n - num_monitoring_threads)\n\n\n################################################################################\n# Functions for running training/eval/validation loops for the model.\n################################################################################\ndef learning_rate_with_decay(\n batch_size, batch_denom, num_images, boundary_epochs, decay_rates,\n base_lr=0.1, warmup=False):\n \"\"\"Get a learning rate that decays step-wise as training progresses.\n\n Args:\n batch_size: the number of examples processed in each training batch.\n batch_denom: this value will be used to scale the base learning rate.\n `0.1 * batch size` is divided by this number, such that when\n batch_denom == batch_size, the initial learning rate will be 0.1.\n num_images: total number of images that will be used for training.\n boundary_epochs: list of ints representing the epochs at which we\n decay the learning rate.\n decay_rates: list of floats representing the decay rates to be used\n for scaling the learning rate. 
It should have one more element\n than `boundary_epochs`, and all elements should have the same type.\n base_lr: Initial learning rate scaled based on batch_denom.\n warmup: Run a 5 epoch warmup to the initial lr.\n Returns:\n Returns a function that takes a single argument - the number of batches\n trained so far (global_step)- and returns the learning rate to be used\n for training the next batch.\n \"\"\"\n initial_learning_rate = base_lr * batch_size / batch_denom\n batches_per_epoch = num_images / batch_size\n\n # Reduce the learning rate at certain epochs.\n # CIFAR-10: divide by 10 at epoch 100, 150, and 200\n # ImageNet: divide by 10 at epoch 30, 60, 80, and 90\n boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]\n vals = [initial_learning_rate * decay for decay in decay_rates]\n\n def learning_rate_fn(global_step):\n \"\"\"Builds scaled learning rate function with 5 epoch warm up.\"\"\"\n lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)\n if warmup:\n warmup_steps = int(batches_per_epoch * 5)\n warmup_lr = (\n initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(\n warmup_steps, tf.float32))\n return tf.cond(pred=global_step < warmup_steps,\n true_fn=lambda: warmup_lr,\n false_fn=lambda: lr)\n return lr\n\n def poly_rate_fn(global_step):\n \"\"\"Handles linear scaling rule, gradual warmup, and LR decay.\n\n The learning rate starts at 0, then it increases linearly per step. After\n FLAGS.poly_warmup_epochs, we reach the base learning rate (scaled to account\n for batch size). The learning rate is then decayed using a polynomial rate\n decay schedule with power 2.0.\n\n Args:\n global_step: the current global_step\n\n Returns:\n returns the current learning rate\n \"\"\"\n\n # Learning rate schedule for LARS polynomial schedule\n if flags.FLAGS.batch_size < 8192:\n plr = 5.0\n w_epochs = 5\n elif flags.FLAGS.batch_size < 16384:\n plr = 10.0\n w_epochs = 5\n elif flags.FLAGS.batch_size < 32768:\n plr = 25.0\n w_epochs = 5\n else:\n plr = 32.0\n w_epochs = 14\n\n w_steps = int(w_epochs * batches_per_epoch)\n wrate = (plr * tf.cast(global_step, tf.float32) / tf.cast(\n w_steps, tf.float32))\n\n # TODO(pkanwar): use a flag to help calc num_epochs.\n num_epochs = 90\n train_steps = batches_per_epoch * num_epochs\n\n min_step = tf.constant(1, dtype=tf.int64)\n decay_steps = tf.maximum(min_step, tf.subtract(global_step, w_steps))\n poly_rate = tf.train.polynomial_decay(\n plr,\n decay_steps,\n train_steps - w_steps + 1,\n power=2.0)\n return tf.where(global_step <= w_steps, wrate, poly_rate)\n\n # For LARS we have a new learning rate schedule\n if flags.FLAGS.enable_lars:\n return poly_rate_fn\n\n return learning_rate_fn\n\n\ndef resnet_model_fn(features, labels, mode, model_class,\n resnet_size, weight_decay, learning_rate_fn, momentum,\n data_format, resnet_version, loss_scale,\n loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,\n fine_tune=False, label_smoothing=0.0):\n \"\"\"Shared functionality for different resnet model_fns.\n\n Initializes the ResnetModel representing the model layers\n and uses that model to build the necessary EstimatorSpecs for\n the `mode` in question. 
For training, this means building losses,\n the optimizer, and the train op that get passed into the EstimatorSpec.\n For evaluation and prediction, the EstimatorSpec is returned without\n a train op, but with the necessary parameters for the given mode.\n\n Args:\n features: tensor representing input images\n labels: tensor representing class labels for all input images\n mode: current estimator mode; should be one of\n `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`\n model_class: a class representing a TensorFlow model that has a __call__\n function. We assume here that this is a subclass of ResnetModel.\n resnet_size: A single integer for the size of the ResNet model.\n weight_decay: weight decay loss rate used to regularize learned variables.\n learning_rate_fn: function that returns the current learning rate given\n the current global_step\n momentum: momentum term used for optimization\n data_format: Input format ('channels_last', 'channels_first', or None).\n If set to None, the format is dependent on whether a GPU is available.\n resnet_version: Integer representing which version of the ResNet network to\n use. See README for details. Valid values: [1, 2]\n loss_scale: The factor to scale the loss for numerical stability. A detailed\n summary is present in the arg parser help text.\n loss_filter_fn: function that takes a string variable name and returns\n True if the var should be included in loss calculation, and False\n otherwise. If None, batch_normalization variables will be excluded\n from the loss.\n dtype: the TensorFlow dtype to use for calculations.\n fine_tune: If True only train the dense layers(final layers).\n label_smoothing: If greater than 0 then smooth the labels.\n\n Returns:\n EstimatorSpec parameterized according to the input params and the\n current mode.\n \"\"\"\n\n # Generate a summary node for the images\n tf.compat.v1.summary.image('images', features, max_outputs=6)\n # Checks that features/images have same data type being used for calculations.\n assert features.dtype == dtype\n\n model = model_class(resnet_size, data_format, resnet_version=resnet_version,\n dtype=dtype)\n\n logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)\n\n # This acts as a no-op if the logits are already in fp32 (provided logits are\n # not a SparseTensor). 
If dtype is is low precision, logits must be cast to\n # fp32 for numerical stability.\n logits = tf.cast(logits, tf.float32)\n\n predictions = {\n 'classes': tf.argmax(input=logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n # Return the predictions and the specification for serving a SavedModel\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs={\n 'predict': tf.estimator.export.PredictOutput(predictions)\n })\n\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n if label_smoothing != 0.0:\n one_hot_labels = tf.one_hot(labels, 1001)\n cross_entropy = tf.losses.softmax_cross_entropy(\n logits=logits, onehot_labels=one_hot_labels,\n label_smoothing=label_smoothing)\n else:\n cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(\n logits=logits, labels=labels)\n\n # Create a tensor named cross_entropy for logging purposes.\n tf.identity(cross_entropy, name='cross_entropy')\n tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)\n\n # If no loss_filter_fn is passed, assume we want the default behavior,\n # which is that batch_normalization variables are excluded from loss.\n def exclude_batch_norm(name):\n return 'batch_normalization' not in name\n loss_filter_fn = loss_filter_fn or exclude_batch_norm\n\n # Add weight decay to the loss.\n l2_loss = weight_decay * tf.add_n(\n # loss is computed using fp32 for numerical stability.\n [\n tf.nn.l2_loss(tf.cast(v, tf.float32))\n for v in tf.compat.v1.trainable_variables()\n if loss_filter_fn(v.name)\n ])\n tf.compat.v1.summary.scalar('l2_loss', l2_loss)\n loss = cross_entropy + l2_loss\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.compat.v1.train.get_or_create_global_step()\n\n learning_rate = learning_rate_fn(global_step)\n\n # Create a tensor named learning_rate for logging purposes\n tf.identity(learning_rate, name='learning_rate')\n tf.compat.v1.summary.scalar('learning_rate', learning_rate)\n\n if flags.FLAGS.enable_lars:\n optimizer = tf.contrib.opt.LARSOptimizer(\n learning_rate,\n momentum=momentum,\n weight_decay=weight_decay,\n skip_list=['batch_normalization', 'bias'])\n else:\n optimizer = tf.compat.v1.train.MomentumOptimizer(\n learning_rate=learning_rate,\n momentum=momentum\n )\n\n def _dense_grad_filter(gvs):\n \"\"\"Only apply gradient updates to the final layer.\n\n This function is used for fine tuning.\n\n Args:\n gvs: list of tuples with gradients and variable info\n Returns:\n filtered gradients so that only the dense layer remains\n \"\"\"\n return [(g, v) for g, v in gvs if 'dense' in v.name]\n\n if loss_scale != 1:\n # When computing fp16 gradients, often intermediate tensor values are\n # so small, they underflow to 0. 
To avoid this, we multiply the loss by\n # loss_scale to make these tensor values loss_scale times bigger.\n scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)\n\n if fine_tune:\n scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)\n\n # Once the gradient computation is complete we can scale the gradients\n # back to the correct scale before passing them to the optimizer.\n unscaled_grad_vars = [(grad / loss_scale, var)\n for grad, var in scaled_grad_vars]\n minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)\n else:\n grad_vars = optimizer.compute_gradients(loss)\n if fine_tune:\n grad_vars = _dense_grad_filter(grad_vars)\n minimize_op = optimizer.apply_gradients(grad_vars, global_step)\n\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n train_op = tf.group(minimize_op, update_ops)\n else:\n train_op = None\n\n accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])\n accuracy_top_5 = tf.compat.v1.metrics.mean(\n tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))\n metrics = {'accuracy': accuracy,\n 'accuracy_top_5': accuracy_top_5}\n\n # Create a tensor named train_accuracy for logging purposes\n tf.identity(accuracy[1], name='train_accuracy')\n tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')\n tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])\n tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=metrics)\n\n\ndef resnet_main(\n flags_obj, model_function, input_function, dataset_name, shape=None):\n \"\"\"Shared main loop for ResNet Models.\n\n Args:\n flags_obj: An object containing parsed flags. See define_resnet_flags()\n for details.\n model_function: the function that instantiates the Model and builds the\n ops for train/eval. This will be passed directly into the estimator.\n input_function: the function that processes the dataset and returns a\n dataset that the estimator can train on. This will be wrapped with\n all the relevant flags for running and passed to estimator.\n dataset_name: the name of the dataset for training and evaluation. This is\n used for logging purpose.\n shape: list of ints representing the shape of the images used for training.\n This is only used if flags_obj.export_dir is passed.\n\n Returns:\n Dict of results of the run.\n \"\"\"\n\n model_helpers.apply_clean(flags.FLAGS)\n\n # Ensures flag override logic is only executed if explicitly triggered.\n if flags_obj.tf_gpu_thread_mode:\n override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)\n\n # Configures cluster spec for distribution strategy.\n num_workers = distribution_utils.configure_cluster(flags_obj.worker_hosts,\n flags_obj.task_index)\n\n # Creates session config. 
allow_soft_placement = True, is required for\n # multi-GPU and is not harmful for other modes.\n session_config = tf.compat.v1.ConfigProto(\n inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,\n intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,\n allow_soft_placement=True)\n\n distribution_strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy=flags_obj.distribution_strategy,\n num_gpus=flags_core.get_num_gpus(flags_obj),\n num_workers=num_workers,\n all_reduce_alg=flags_obj.all_reduce_alg)\n\n # Creates a `RunConfig` that checkpoints every 24 hours which essentially\n # results in checkpoints determined only by `epochs_between_evals`.\n run_config = tf.estimator.RunConfig(\n train_distribute=distribution_strategy,\n session_config=session_config,\n save_checkpoints_secs=60*60*24,\n save_checkpoints_steps=None)\n\n # Initializes model with all but the dense layer from pretrained ResNet.\n if flags_obj.pretrained_model_checkpoint_path is not None:\n warm_start_settings = tf.estimator.WarmStartSettings(\n flags_obj.pretrained_model_checkpoint_path,\n vars_to_warm_start='^(?!.*dense)')\n else:\n warm_start_settings = None\n\n classifier = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,\n warm_start_from=warm_start_settings, params={\n 'resnet_size': int(flags_obj.resnet_size),\n 'data_format': flags_obj.data_format,\n 'batch_size': flags_obj.batch_size,\n 'resnet_version': int(flags_obj.resnet_version),\n 'loss_scale': flags_core.get_loss_scale(flags_obj),\n 'dtype': flags_core.get_tf_dtype(flags_obj),\n 'fine_tune': flags_obj.fine_tune,\n 'num_workers': num_workers,\n })\n\n run_params = {\n 'batch_size': flags_obj.batch_size,\n 'dtype': flags_core.get_tf_dtype(flags_obj),\n 'resnet_size': flags_obj.resnet_size,\n 'resnet_version': flags_obj.resnet_version,\n 'synthetic_data': flags_obj.use_synthetic_data,\n 'train_epochs': flags_obj.train_epochs,\n 'num_workers': num_workers,\n }\n if flags_obj.use_synthetic_data:\n dataset_name = dataset_name + '-synthetic'\n\n benchmark_logger = logger.get_benchmark_logger()\n benchmark_logger.log_run_info('resnet', dataset_name, run_params,\n test_id=flags_obj.benchmark_test_id)\n\n train_hooks = hooks_helper.get_train_hooks(\n flags_obj.hooks,\n model_dir=flags_obj.model_dir,\n batch_size=flags_obj.batch_size)\n\n def input_fn_train(num_epochs, input_context=None):\n return input_function(\n is_training=True,\n data_dir=flags_obj.data_dir,\n batch_size=distribution_utils.per_device_batch_size(\n flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),\n num_epochs=num_epochs,\n dtype=flags_core.get_tf_dtype(flags_obj),\n datasets_num_private_threads=flags_obj.datasets_num_private_threads,\n num_parallel_batches=flags_obj.datasets_num_parallel_batches,\n input_context=input_context)\n\n def input_fn_eval():\n return input_function(\n is_training=False,\n data_dir=flags_obj.data_dir,\n batch_size=distribution_utils.per_device_batch_size(\n flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),\n num_epochs=1,\n dtype=flags_core.get_tf_dtype(flags_obj))\n\n train_epochs = (0 if flags_obj.eval_only or not flags_obj.train_epochs else\n flags_obj.train_epochs)\n\n use_train_and_evaluate = flags_obj.use_train_and_evaluate or num_workers > 1\n if use_train_and_evaluate:\n train_spec = tf.estimator.TrainSpec(\n input_fn=lambda input_context=None: input_fn_train(\n train_epochs, input_context=input_context),\n hooks=train_hooks,\n 
max_steps=flags_obj.max_train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_eval)\n tf.compat.v1.logging.info('Starting to train and evaluate.')\n tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)\n # tf.estimator.train_and_evalute doesn't return anything in multi-worker\n # case.\n return {}\n else:\n if train_epochs == 0:\n # If --eval_only is set, perform a single loop with zero train epochs.\n schedule, n_loops = [0], 1\n else:\n # Compute the number of times to loop while training. All but the last\n # pass will train for `epochs_between_evals` epochs, while the last will\n # train for the number needed to reach `training_epochs`. For instance if\n # train_epochs = 25 and epochs_between_evals = 10\n # schedule will be set to [10, 10, 5]. That is to say, the loop will:\n # Train for 10 epochs and then evaluate.\n # Train for another 10 epochs and then evaluate.\n # Train for a final 5 epochs (to reach 25 epochs) and then evaluate.\n n_loops = math.ceil(train_epochs / flags_obj.epochs_between_evals)\n schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]\n schedule[-1] = train_epochs - sum(schedule[:-1]) # over counting.\n\n for cycle_index, num_train_epochs in enumerate(schedule):\n tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,\n int(n_loops))\n\n if num_train_epochs:\n # Since we are calling classifier.train immediately in each loop, the\n # value of num_train_epochs in the lambda function will not be changed\n # before it is used. So it is safe to ignore the pylint error here\n # pylint: disable=cell-var-from-loop\n classifier.train(\n input_fn=lambda input_context=None: input_fn_train(\n num_train_epochs, input_context=input_context),\n hooks=train_hooks,\n max_steps=flags_obj.max_train_steps)\n\n # flags_obj.max_train_steps is generally associated with testing and\n # profiling. As a result it is frequently called with synthetic data,\n # which will iterate forever. Passing steps=flags_obj.max_train_steps\n # allows the eval (which is generally unimportant in those circumstances)\n # to terminate. 
Note that eval will run for max_train_steps each loop,\n # regardless of the global_step count.\n tf.compat.v1.logging.info('Starting to evaluate.')\n eval_results = classifier.evaluate(input_fn=input_fn_eval,\n steps=flags_obj.max_train_steps)\n\n benchmark_logger.log_evaluation_result(eval_results)\n\n if model_helpers.past_stop_threshold(\n flags_obj.stop_threshold, eval_results['accuracy']):\n break\n\n if flags_obj.export_dir is not None:\n # Exports a saved model for the given classifier.\n export_dtype = flags_core.get_tf_dtype(flags_obj)\n if flags_obj.image_bytes_as_serving_input:\n input_receiver_fn = functools.partial(\n image_bytes_serving_input_fn, shape, dtype=export_dtype)\n else:\n input_receiver_fn = export.build_tensor_serving_input_receiver_fn(\n shape, batch_size=flags_obj.batch_size, dtype=export_dtype)\n classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,\n strip_default_attrs=True)\n\n stats = {}\n stats['eval_results'] = eval_results\n stats['train_hooks'] = train_hooks\n\n return stats\n\n\ndef define_resnet_flags(resnet_size_choices=None, dynamic_loss_scale=False):\n \"\"\"Add flags and validators for ResNet.\"\"\"\n flags_core.define_base()\n flags_core.define_performance(num_parallel_calls=False,\n tf_gpu_thread_mode=True,\n datasets_num_private_threads=True,\n datasets_num_parallel_batches=True,\n dynamic_loss_scale=dynamic_loss_scale)\n flags_core.define_image()\n flags_core.define_benchmark()\n flags.adopt_module_key_flags(flags_core)\n\n flags.DEFINE_enum(\n name='resnet_version', short_name='rv', default='1',\n enum_values=['1', '2'],\n help=flags_core.help_wrap(\n 'Version of ResNet. (1 or 2) See README.md for details.'))\n flags.DEFINE_bool(\n name='fine_tune', short_name='ft', default=False,\n help=flags_core.help_wrap(\n 'If True do not train any parameters except for the final layer.'))\n flags.DEFINE_string(\n name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,\n help=flags_core.help_wrap(\n 'If not None initialize all the network except the final layer with '\n 'these values'))\n flags.DEFINE_boolean(\n name='eval_only', default=False,\n help=flags_core.help_wrap('Skip training and only perform evaluation on '\n 'the latest checkpoint.'))\n flags.DEFINE_boolean(\n name='image_bytes_as_serving_input', default=False,\n help=flags_core.help_wrap(\n 'If True exports savedmodel with serving signature that accepts '\n 'JPEG image bytes instead of a fixed size [HxWxC] tensor that '\n 'represents the image. The former is easier to use for serving at '\n 'the expense of image resize/cropping being done as part of model '\n 'inference. Note, this flag only applies to ImageNet and cannot '\n 'be used for CIFAR.'))\n flags.DEFINE_boolean(\n name='use_train_and_evaluate', default=False,\n help=flags_core.help_wrap(\n 'If True, uses `tf.estimator.train_and_evaluate` for the training '\n 'and evaluation loop, instead of separate calls to `classifier.train '\n 'and `classifier.evaluate`, which is the default behavior.'))\n flags.DEFINE_string(\n name='worker_hosts', default=None,\n help=flags_core.help_wrap(\n 'Comma-separated list of worker ip:port pairs for running '\n 'multi-worker models with DistributionStrategy. 
The user would '\n 'start the program on each host with identical value for this flag.'))\n flags.DEFINE_integer(\n name='task_index', default=-1,\n help=flags_core.help_wrap('If multi-worker training, the task_index of '\n 'this worker.'))\n flags.DEFINE_bool(\n name='enable_lars', default=False,\n help=flags_core.help_wrap(\n 'Enable LARS optimizer for large batch training.'))\n flags.DEFINE_float(\n name='label_smoothing', default=0.0,\n help=flags_core.help_wrap(\n 'Label smoothing parameter used in the softmax_cross_entropy'))\n flags.DEFINE_float(\n name='weight_decay', default=1e-4,\n help=flags_core.help_wrap(\n 'Weight decay coefficiant for l2 regularization.'))\n\n choice_kwargs = dict(\n name='resnet_size', short_name='rs', default='50',\n help=flags_core.help_wrap('The size of the ResNet model to use.'))\n\n if resnet_size_choices is None:\n flags.DEFINE_string(**choice_kwargs)\n else:\n flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)\n" ]
[ [ "tensorflow.cond", "tensorflow.cast", "tensorflow.map_fn", "tensorflow.where", "tensorflow.estimator.RunConfig", "tensorflow.group", "tensorflow.estimator.train_and_evaluate", "tensorflow.estimator.export.PredictOutput", "tensorflow.contrib.opt.LARSOptimizer", "tensorflow.compat.v1.summary.image", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.train.piecewise_constant", "tensorflow.subtract", "tensorflow.losses.softmax_cross_entropy", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.estimator.EvalSpec", "tensorflow.nn.in_top_k", "tensorflow.argmax", "tensorflow.data.Options", "tensorflow.estimator.WarmStartSettings", "tensorflow.data.Dataset.from_tensors", "tensorflow.compat.v1.metrics.accuracy", "tensorflow.random.truncated_normal", "tensorflow.random.uniform", "tensorflow.identity", "tensorflow.compat.v1.get_collection", "tensorflow.one_hot", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.losses.sparse_softmax_cross_entropy", "tensorflow.train.polynomial_decay", "tensorflow.compat.v1.ConfigProto", "tensorflow.constant", "tensorflow.nn.softmax", "tensorflow.estimator.export.TensorServingInputReceiver", "tensorflow.compat.v1.train.MomentumOptimizer", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.logging.info", "tensorflow.estimator.EstimatorSpec" ] ]
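learning_rate_with_decay in the resnet_run_loop entry composes an optional 5-epoch linear warmup with a piecewise-constant decay keyed to epoch boundaries. A plain-Python sketch of the resulting schedule, using made-up CIFAR-style numbers and simplifying the behaviour exactly at a boundary step:

batch_size, batch_denom, num_images = 128, 128, 50000
base_lr, warmup_epochs = 0.1, 5
boundary_epochs, decay_rates = [100, 150, 200], [1, 0.1, 0.01, 0.001]

init_lr = base_lr * batch_size / batch_denom
batches_per_epoch = num_images / batch_size
boundaries = [int(batches_per_epoch * e) for e in boundary_epochs]
vals = [init_lr * d for d in decay_rates]

def learning_rate(global_step):
    warmup_steps = int(batches_per_epoch * warmup_epochs)
    if global_step < warmup_steps:
        return init_lr * global_step / warmup_steps  # linear ramp from 0
    for boundary, val in zip(boundaries, vals):
        if global_step < boundary:
            return val
    return vals[-1]

for step in (0, 1000, 2000, 50000, 70000):
    print(step, learning_rate(step))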
SiavashMT/OCT-MPS
[ "1fe8a8c25063ef3cee8b96128f20d040ac613ba7" ]
[ "util/visualization_BScan.py" ]
[ "import matplotlib\nmatplotlib.use('Qt5Agg')\nfrom matplotlib import pyplot as plt\nfrom src.python.octmps_output import parse_OCTMPS_output_file\n\nfont = {'family': 'serif',\n 'weight': 'normal',\n 'size': 18}\n\n\ndef force_aspect(ax, aspect=1):\n im = ax.get_images()\n extent = im[0].get_extent()\n ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)\n\n\ndef visualize(file_name, label=None):\n\n reflectance_grid, reflectances, x_positions, z_positions = parse_OCTMPS_output_file(file_name=file_name)\n\n fig, axes = plt.subplots(nrows=1, ncols=2)\n if label:\n fig.suptitle(label, **font)\n\n im = axes[1].imshow(reflectance_grid.transpose(),\n extent=(x_positions.min(), x_positions.max(), z_positions.max(), z_positions.min()), cmap='jet',\n interpolation='none')\n axes[1].set_xlabel('Distance X [cm]', **font)\n axes[1].set_ylabel('Depth Z [cm]', **font)\n axes[1].set_xticks([x_positions.min(), x_positions.max()])\n\n axes[0].imshow(reflectance_grid[:, :150].transpose(),\n extent=(x_positions.min(), x_positions.max(), z_positions.max(), z_positions.min()), cmap='jet',\n interpolation='none')\n axes[0].set_xlabel('Distance X [cm]', **font)\n axes[0].set_ylabel('Depth Z [cm]', **font)\n axes[0].set_aspect('auto')\n\n colorbar = fig.colorbar(im)\n colorbar.set_label(label='Reflectance', **font)\n fig.tight_layout()\n plt.subplots_adjust(top=0.9)\n plt.show()\n\n\nif __name__ == '__main__':\n \n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--OCTMPS_output_file\", type=str, required=True, help=\"OCTMPS output file\")\n parser.add_argument(\"--Label\", type=str, required=False, help=\"Label for the figures\")\n args = parser.parse_args()\n\n visualize(args.OCTMPS_output_file, args.Label)\n\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust" ] ]
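The B-scan visualization entry maps the reflectance grid onto physical coordinates entirely through imshow's extent argument, with depth increasing downward because z_max is passed as the bottom edge. A minimal headless sketch of that mapping, with random data standing in for a parsed OCTMPS grid:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless, as in a batch run
from matplotlib import pyplot as plt

x = np.linspace(0.0, 0.4, 80)           # lateral position [cm]
z = np.linspace(0.0, 0.2, 120)          # depth [cm]
grid = np.random.rand(len(x), len(z))   # stand-in reflectance, indexed (x, z)

fig, ax = plt.subplots()
im = ax.imshow(grid.T, extent=(x.min(), x.max(), z.max(), z.min()),
               cmap="jet", interpolation="none")
ax.set_xlabel("Distance X [cm]")
ax.set_ylabel("Depth Z [cm]")
fig.colorbar(im, label="Reflectance")
fig.savefig("bscan_demo.png")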
RyanWangZf/NRE-IF
[ "738126d3ea06b396c67417e684400f510405f319" ]
[ "main_findif.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom config import opt\nimport models\nimport dataset\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom utils import save_pr, now, eval_metric\nfrom torch.autograd import grad\n\nimport pdb\n\ndef collate_fn(batch):\n data, label = zip(*batch)\n return data, label\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\ndef train(**kwargs):\n\n setup_seed(opt.seed)\n\n # kwargs.update({'model': 'PCNN_ONE'})\n kwargs.update({'model': 'PCNN_IF'}) \n opt.parse(kwargs)\n\n if opt.use_gpu:\n torch.cuda.set_device(opt.gpu_id)\n\n # torch.manual_seed(opt.seed)\n model = getattr(models, 'PCNN_IF')(opt)\n if opt.use_gpu:\n # torch.cuda.manual_seed_all(opt.seed)\n model.cuda()\n # parallel\n # model = nn.DataParallel(model)\n\n # loading data\n DataModel = getattr(dataset, opt.data + 'Data')\n train_data = DataModel(opt.data_root, \"train\")\n train_data_loader = DataLoader(train_data, opt.batch_size, shuffle=True, num_workers=opt.num_workers, collate_fn=collate_fn)\n\n val_data = DataModel(opt.data_root, \"val\")\n val_data_loader = DataLoader(val_data, opt.batch_size, shuffle=False, num_workers=opt.num_workers, collate_fn=collate_fn)\n\n test_data = DataModel(opt.data_root, \"test\")\n test_data_loader = DataLoader(test_data, batch_size=opt.batch_size, shuffle=False, num_workers=opt.num_workers, collate_fn=collate_fn)\n print('train data: {}; test data: {}'.format(len(train_data), len(test_data)))\n\n params = get_model_param_dict(model)\n theta = [params[\"linear.weight\"], params[\"linear.bias\"]]\n\n print(\"Start Training.\")\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, model.parameters()), rho=0.95, eps=1e-6, weight_decay=opt.weight_decay)\n\n max_pre = -1.0\n max_rec = -1.0\n for epoch in range(opt.num_epochs):\n total_loss = 0\n for idx, (data, label_set) in enumerate(train_data_loader):\n label = [l[0] for l in label_set]\n\n if opt.use_gpu:\n label = torch.LongTensor(label).cuda()\n else:\n label = torch.LongTensor(label)\n\n if epoch == 0:\n # first epoch, we use PCNN-ONE strategy\n sub_data = select_instance_ONE(model, data, label)\n else:\n sub_data, sub_label = select_influential_instance(model, data, label, s_test, opt.sample_ratio)\n\n model.batch_size = opt.batch_size\n\n if epoch == 0:\n out = model(sub_data, train=True)\n loss = criterion(out, label)\n else:\n loss = model.loss(sub_data, sub_label)\n \n optimizer.zero_grad() \n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n\n if epoch > 0:\n # evaluate\n true_y, pred_y, pred_p = predict(model, test_data_loader)\n all_pre, all_rec, fp_res = eval_metric(true_y, pred_y, pred_p)\n\n last_pre, last_rec = all_pre[-1], all_rec[-1]\n if last_pre > 0.1 and last_rec > 0.1:\n save_pr(opt.result_dir, model.model_name, epoch, all_pre, all_rec, fp_res, opt=opt.print_opt)\n print('{} Epoch {} save pr'.format(now(), epoch + 1))\n if last_pre > max_pre and last_rec > max_rec:\n print(\"save model\")\n max_pre = last_pre\n max_rec = last_rec\n model.save(opt.print_opt)\n\n print('{} Epoch {}/{}: train loss: {}; test precision: {}, test recall {}'.format(now(), epoch + 1, opt.num_epochs, total_loss, last_pre, last_rec))\n\n # in each epoch, update s_test\n s_test = cal_inverse_hvp_lissa(model, val_data_loader, train_data_loader, theta, 
tol=1e-3, verbose=True)\n\n\n\ndef predict(model, test_data_loader):\n\n model.eval()\n\n pred_y = []\n true_y = []\n pred_p = []\n for idx, (data, labels) in enumerate(test_data_loader):\n true_y.extend(labels)\n\n for bag in data:\n insNum = bag[1]\n model.batch_size = insNum\n if opt.use_gpu:\n data = map(lambda x: torch.LongTensor(x).cuda(), bag)\n else:\n data = map(lambda x: torch.LongTensor(x), bag)\n\n out = model(data)\n out = F.softmax(out, 1)\n max_ins_prob, max_ins_label = map(lambda x: x.data.cpu().numpy(), torch.max(out, 1))\n tmp_prob = -1.0\n tmp_NA_prob = -1.0\n pred_label = 0\n pos_flag = False\n\n for i in range(insNum):\n if pos_flag and max_ins_label[i] < 1:\n continue\n else:\n if max_ins_label[i] > 0:\n pos_flag = True\n if max_ins_prob[i] > tmp_prob:\n pred_label = max_ins_label[i]\n tmp_prob = max_ins_prob[i]\n else:\n if max_ins_prob[i] > tmp_NA_prob:\n tmp_NA_prob = max_ins_prob[i]\n\n if pos_flag:\n pred_p.append(tmp_prob)\n else:\n pred_p.append(tmp_NA_prob)\n\n pred_y.append(pred_label)\n\n size = len(test_data_loader.dataset)\n assert len(pred_y) == size and len(true_y) == size\n\n model.train()\n return true_y, pred_y, pred_p\n\ndef select_influential_instance(model, batch_data, labels, s_test, sample_ratio=0.1, max_size=5):\n model.eval()\n s_test = [s_.view(-1,1) for s_ in s_test]\n\n sub_bag_list = []\n sub_label_list = []\n\n for idx, bag in enumerate(batch_data):\n select_ent = []\n select_num = []\n select_sen = []\n select_pf = []\n select_pool = []\n select_mask = []\n\n insNum = bag[1]\n label = labels[idx]\n selected_ins_id = [0]\n\n if insNum > 1:\n model.batch_size = insNum\n if model.opt.use_gpu:\n data = map(lambda x: torch.LongTensor(x).cuda(), bag)\n else:\n data = map(lambda x: torch.LongTensor(x), bag)\n\n batch_label = label.repeat(insNum)\n\n if model.opt.use_gpu:\n batch_label = batch_label.cuda()\n\n \n phi = cal_influence_function(s_test, model, data, batch_label)\n \n if insNum <= 3:\n # only select one\n selected_ins_id = [np.argmin(phi)]\n else:\n # select multiple samples via subsampling\n prob_pi = cal_sampling_prob(phi, sigmoid_k=1)\n selected_ins_id = sampling(prob_pi, sample_ratio, max_size)\n\n\n # collect all together\n for j in selected_ins_id:\n # entity word's id\n select_ent = bag[0]\n # instance number in this bag\n select_num = len(selected_ins_id)\n # sentence\n select_sen.append(bag[2][j])\n # position feature\n select_pf.append(bag[3][j])\n # pool\n select_pool.append(bag[4][j])\n # piece-wise mask\n select_mask.append(bag[5][j])\n\n sub_label_list.append(label.repeat(select_num))\n\n sub_bag_list.append([select_ent, select_num, select_sen, select_pf, select_pool, select_mask])\n\n data = sub_bag_list\n\n sub_labels = torch.cat(sub_label_list)\n\n if model.opt.use_gpu:\n sub_labels = sub_labels.cuda()\n\n # if model.opt.use_gpu:\n # data = map(lambda x: torch.LongTensor(x).cuda(), [select_ent, select_num, select_sen, select_pf, select_pool, select_mask])\n # else:\n # data = map(lambda x: torch.LongTensor(x), [select_ent, select_num, select_sen, select_pf, select_pool, select_mask])\n\n model.train()\n return data, sub_labels\n\ndef cal_sampling_prob(phi, sigmoid_k=1):\n if phi.std() == 0:\n # all same\n prob_pi = np.array([0.5]*len(phi))\n #print(\"Phi All SAME in this bag!\",len(phi))\n return prob_pi\n else:\n # standardize\n # remove outliers\n upper_bound = phi.mean() + 3 * phi.std()\n sub_phi = phi[phi < upper_bound]\n if len(sub_phi) == 0:\n prob_pi = np.array([0.5]*len(phi))\n #print(\"Phi All SAME in this 
bag!\",len(phi))\n\n return prob_pi\n else:\n phi_std = phi - sub_phi.mean()\n a_param = sigmoid_k / (1e-10 + sub_phi.max() - sub_phi.min())\n prob_pi = 1 / (1 + np.exp(a_param*phi_std))\n prob_pi[phi >= upper_bound] = 0\n return prob_pi\n\ndef sampling(prob_pi, ratio, max_size=10):\n num_sample = prob_pi.shape[0]\n all_idx = np.arange(num_sample)\n obj_sample_size = int(np.ceil(ratio * num_sample))\n\n if obj_sample_size > max_size:\n # do not involve too many samples\n obj_sample_size = max_size\n\n sb_idx = None\n iteration = 0\n while True:\n rand_prob = np.random.rand(num_sample)\n iter_idx = all_idx[rand_prob < prob_pi]\n if sb_idx is None:\n sb_idx = iter_idx\n else:\n new_idx = np.setdiff1d(iter_idx, sb_idx)\n diff_size = obj_sample_size - sb_idx.shape[0]\n if new_idx.shape[0] < diff_size:\n sb_idx = np.union1d(iter_idx, sb_idx)\n else:\n new_idx = np.random.choice(new_idx, diff_size, replace=False)\n sb_idx = np.union1d(sb_idx, new_idx)\n iteration += 1\n if sb_idx.shape[0] >= obj_sample_size:\n sb_idx = np.random.choice(sb_idx,obj_sample_size,replace=False)\n return sb_idx\n\n if iteration > 100:\n diff_size = obj_sample_size - sb_idx.shape[0]\n leave_idx = np.setdiff1d(all_idx, sb_idx)\n # left samples are sorted by their IF\n # leave_idx = leave_idx[np.argsort(prob_pi[leave_idx])[-diff_size:]]\n leave_idx = np.random.choice(leave_idx,diff_size,replace=False)\n sb_idx = np.union1d(sb_idx, leave_idx)\n return sb_idx\n \ndef get_model_param_dict(model):\n params = {}\n for name,param in model.named_parameters():\n params[name] = param\n\n return params\n\ndef hvp(y, w, v):\n \"\"\"Multiply the Hessians of y and w by v.\n Uses a backprop-like approach to compute the product between the Hessian\n and another vector efficiently, which even works for large Hessians.\n Example: if: y = 0.5 * w^T A x then hvp(y, w, v) returns and expression\n which evaluates to the same values as (A + A.t) v.\n\n Arguments:\n y: scalar/tensor, for example the output of the loss function\n w: list of torch tensors, tensors over which the Hessian\n should be constructed\n v: list of torch tensors, same shape as w,\n will be multiplied with the Hessian\n\n Returns:\n return_grads: list of torch tensors, contains product of Hessian and v.\n\n Raises:\n ValueError: `y` and `w` have a different length.\"\"\"\n if len(w) != len(v):\n raise(ValueError(\"w and v must have the same length.\"))\n\n # First backprop\n first_grads = grad(y, w, retain_graph=True, create_graph=True)\n\n # Elementwise products\n elemwise_products = 0\n for grad_elem, v_elem in zip(first_grads, v):\n elemwise_products += torch.sum(grad_elem * v_elem.detach())\n\n # Second backprop\n return_grads = grad(elemwise_products, w, create_graph=True)\n\n return return_grads\n\ndef cal_inverse_hvp_lissa(model, \n val_data_loader,\n train_data_loader,\n theta,\n damp=0.01,\n scale=25.0,\n tol=1e-3,\n recursion_depth=1000,\n verbose=False,\n ):\n\n def _compute_diff(h0, h1):\n assert len(h0) == len(h1)\n diff_ratio = [1e8] * len(h0)\n for i in range(len(h0)):\n h0_ = h0[i].detach().cpu().numpy()\n h1_ = h1[i].detach().cpu().numpy()\n norm_0 = np.linalg.norm(h0_) \n norm_1 = np.linalg.norm(h1_)\n abs_diff = abs(norm_0 - norm_1)\n diff_ratio[i] = abs_diff / norm_0\n\n return max(diff_ratio)\n\n model.eval()\n\n # get grad theta on val data\n fp_loss_list = []\n for idx,(data, label_set) in enumerate(val_data_loader):\n # label_set: 0: NA relation, 1: has relation\n labels = torch.LongTensor(np.concatenate(label_set,0))\n pred = 
model.inference(data)\n\n pred_prob = F.softmax(pred, 1)\n _, max_ins_label = map(lambda x: x.data.cpu().numpy(), torch.max(pred_prob, 1))\n pred_label = torch.zeros(len(labels))\n pred_label[max_ins_label != 0] = 1\n mask = pred_label.long() ^ labels\n mask = mask.float()\n\n if model.opt.use_gpu:\n mask = mask.cuda()\n\n pred_max_ins_prob = torch.max(pred_prob, 1)[0]\n loss = - mask * torch.log(1 - pred_max_ins_prob)\n\n fp_loss_list.append(loss)\n\n fp_loss_list = torch.cat(fp_loss_list)\n fp_loss_avg = torch.mean(fp_loss_list)\n\n grads_val = list(grad(fp_loss_avg, theta, create_graph=True))\n \n grads_val = [g.detach() for g in grads_val]\n\n # start recurssively update the estimate\n h_estimate = grads_val.copy()\n xent_loss_func = nn.CrossEntropyLoss()\n\n for i in range(recursion_depth):\n h_estimate_last = h_estimate\n # randomly select a batch from train data\n for data, label_set in train_data_loader:\n label = []\n for j in range(len(data)):\n insNum = data[j][1]\n label.append([label_set[j][0]]*insNum)\n\n label = torch.LongTensor(np.concatenate(label))\n if model.opt.use_gpu:\n label = torch.LongTensor(label).cuda()\n else:\n label = torch.LongTensor(label)\n\n pred = model.inference(data)\n loss = xent_loss_func(pred, label)\n\n hv = hvp(loss, theta, h_estimate)\n h_estimate = [ _v + (1 - damp) * _h_e - _hv.detach() / scale for _v, _h_e, _hv in zip(grads_val, h_estimate, hv)]\n diff_ratio = _compute_diff(h_estimate, h_estimate_last)\n break\n\n if i % 10 == 0:\n if verbose:\n print(\"[LISSA]: epoch: {}, diff: {:.4f}\".format(i, diff_ratio))\n\n # average diff to stop iteration\n if diff_ratio <= tol:\n print(\"[LISSA]: Reach tolerance in epoch {}.\".format(int(i)))\n break\n\n return h_estimate\n\ndef one_hot_transform(y, num_class=10):\n one_hot_y = nn.functional.one_hot(y, num_classes=num_class)\n return one_hot_y.float()\n\ndef cal_influence_function(s_test, model, x, y):\n # do forward and get num class\n model.eval()\n pred, x_h = model(x, hidden=True)\n pred = nn.functional.softmax(pred, 1)\n num_class = pred.shape[1]\n y_oh = one_hot_transform(y, num_class)\n\n # get grad theta\n diff_pred = pred - y_oh\n x_h = torch.unsqueeze(x_h, 1)\n partial_J_theta = x_h * torch.unsqueeze(diff_pred, 2)\n partial_J_theta = partial_J_theta.view(-1, partial_J_theta.shape[1] * partial_J_theta.shape[2]).detach()\n\n # get grad bias\n if model.opt.use_gpu:\n partial_J_b = torch.mm(diff_pred, torch.eye(num_class).cuda())\n else:\n partial_J_b = torch.mm(diff_pred, torch.eye(num_class))\n\n # get the IF\n predicted_loss_diff = -torch.mm(partial_J_theta, s_test[0]) \\\n -torch.mm(partial_J_b, s_test[1])\n\n predicted_loss_diff = predicted_loss_diff.view(-1).detach().cpu().numpy()\n\n return predicted_loss_diff\n\ndef select_instance_ONE(model, batch_data, labels):\n model.eval()\n select_ent = []\n select_num = []\n select_sen = []\n select_pf = []\n select_pool = []\n select_mask = []\n for idx, bag in enumerate(batch_data):\n insNum = bag[1] # num of instances in this bag\n label = labels[idx] # labels are a batch of bags' labels, batch_size is 128\n max_ins_id = 0\n if insNum > 1:\n model.batch_size = insNum\n if opt.use_gpu:\n data = map(lambda x: torch.LongTensor(x).cuda(), bag)\n else:\n data = map(lambda x: torch.LongTensor(x), bag)\n\n out = model(data) # ?, 27\n\n # max_ins_id = torch.max(torch.max(out, 1)[0], 0)[1]\n max_ins_id = torch.max(out[:, label], 0)[1] # select index of the largest instance\n\n if opt.use_gpu:\n # max_ins_id = max_ins_id.data.cpu().numpy()[0]\n max_ins_id 
= max_ins_id.item()\n else:\n max_ins_id = max_ins_id.data.numpy()\n pass \n\n max_sen = bag[2][max_ins_id] # sentence word\n max_pf = bag[3][max_ins_id] # position features\n max_pool = bag[4][max_ins_id] # entity's postion in this sentence\n max_mask = bag[5][max_ins_id] # mask for piece pooling in [1,2,3]\n\n select_ent.append(bag[0]) # entity word's id\n select_num.append(bag[1]) # instance number in this bag\n select_sen.append(max_sen)\n select_pf.append(max_pf)\n select_pool.append(max_pool)\n select_mask.append(max_mask)\n\n\n if opt.use_gpu:\n data = map(lambda x: torch.LongTensor(x).cuda(), [select_ent, select_num, select_sen, select_pf, select_pool, select_mask])\n else:\n data = map(lambda x: torch.LongTensor(x), [select_ent, select_num, select_sen, select_pf, select_pool, select_mask])\n\n model.train()\n return data\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire()\n\n" ]
[ [ "torch.mean", "torch.nn.functional.softmax", "torch.max", "torch.cat", "torch.utils.data.DataLoader", "numpy.concatenate", "numpy.argmin", "torch.cuda.manual_seed_all", "numpy.exp", "torch.nn.CrossEntropyLoss", "torch.mm", "numpy.arange", "torch.eye", "numpy.ceil", "torch.autograd.grad", "torch.LongTensor", "numpy.random.choice", "numpy.union1d", "torch.unsqueeze", "torch.log", "numpy.random.rand", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "numpy.linalg.norm", "numpy.setdiff1d", "torch.nn.functional.one_hot" ] ]
popsa-hq/keras-vggface
[ "ed40b687c1669a2aed2f216dd5670520c0a9a08f" ]
[ "keras_vggface/android_model_creation.py" ]
[ "import time\n\nimport tensorflow as tf\nfrom tflite_support import flatbuffers\nfrom tflite_support import metadata as _metadata\nfrom tflite_support import metadata_schema_py_generated as _metadata_fb\n\nfrom keras_vggface import VGGFace\nfrom keras_vggface.preprocessing import create_preprocessing_model\nfrom keras_vggface.strings_model_metadata import VggFaceMetadata\n\nTFLITE_FILE_FORMAT = \".tflite\"\n\n\ndef create_tflite_model_file(keras_model, filename):\n \"\"\"Converts keras model into TensorFlow lite model, and\n saves it as `filename.tflite` file in working directory\n # Arguments\n keras_model: a Keras model to be converted\n filename: Filename (with or without `.tflite` extension)\n \"\"\"\n if TFLITE_FILE_FORMAT not in filename:\n filename += TFLITE_FILE_FORMAT\n\n start = time.time()\n\n converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)\n tflite_model = converter.convert()\n with open(filename, 'wb') as f:\n f.write(tflite_model)\n\n end = time.time()\n print(f\"{filename} took {end - start} seconds to create.\")\n\n\ndef write_metadata(model_filename):\n metadata = _metadata_fb.ModelMetadataT()\n metadata.description = VggFaceMetadata.SHORT_DESCRIPTION\n metadata.name = VggFaceMetadata.NAME\n metadata.author = VggFaceMetadata.AUTHOR\n metadata.license = VggFaceMetadata.LICENSE\n metadata.version = VggFaceMetadata.VERSION\n\n subgraph = _metadata_fb.SubGraphMetadataT()\n subgraph.inputTensorMetadata = [create_input_metadata()]\n subgraph.outputTensorMetadata = [create_output_metadata()]\n metadata.subgraphMetadata = [subgraph]\n\n save_metadata_to_model_file(metadata, model_filename)\n\n\ndef create_input_metadata():\n input_metadata = _metadata_fb.TensorMetadataT()\n input_metadata.name = \"image\"\n input_metadata.description = VggFaceMetadata.Layers.ANDROID_INPUT\n input_metadata.content = _metadata_fb.ContentT()\n input_metadata.content.contentProperties = _metadata_fb.ImagePropertiesT()\n input_metadata.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB\n input_metadata.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties\n\n # Normalization\n input_normalization = _metadata_fb.ProcessUnitT()\n input_normalization.optionsType = _metadata_fb.ProcessUnitOptions.NormalizationOptions\n input_normalization.options = _metadata_fb.NormalizationOptionsT()\n input_normalization.options.mean = [91.4953,\n 103.8827,\n 131.0912]\n input_normalization.options.std = [1, 1, 1]\n input_metadata.processUnits = [input_normalization]\n\n # Input stats\n input_stats = _metadata_fb.StatsT()\n input_metadata.stats = input_stats\n return input_metadata\n\n\ndef create_output_metadata():\n output_metadata = _metadata_fb.TensorMetadataT()\n output_metadata.name = \"face_embeddings\"\n output_metadata.description = \"Embedding vector with 2048 values per face.\"\n output_metadata.content = _metadata_fb.ContentT()\n output_metadata.content.content_properties = _metadata_fb.FeaturePropertiesT()\n output_metadata.content.contentPropertiesType = (\n _metadata_fb.ContentProperties.FeatureProperties)\n output_stats = _metadata_fb.StatsT()\n # output_stats.max = [1.0]\n # output_stats.min = [0.0]\n output_metadata.stats = output_stats\n return output_metadata\n\n\ndef save_metadata_to_model_file(metadata, model_filename):\n b = flatbuffers.Builder(0)\n b.Finish(metadata.Pack(b),\n _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n metadata_buffer = b.Output()\n populator = 
_metadata.MetadataPopulator.with_model_file(model_filename)\n populator.load_metadata_buffer(metadata_buffer)\n # populator.load_associated_files([\"your_path_to_label_file\"]) # No associated files for this (e.g. No labels files)\n populator.populate()\n\n\n# Examples\ndef tensorflow_lite_example():\n \"\"\"Example usage to get face embeddings from cropped image of human face\"\"\"\n import numpy as np\n from tensorflow.keras.preprocessing import image\n\n img = image.load_img('../image/ajb.jpg', target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n interpreter = tf.lite.Interpreter(model_path=\"FaceEmbeddings.tflite\")\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # An option to compare the 2. They produce the same results.\n USE_TENSORFLOW_PREPROCESSOR = True\n if USE_TENSORFLOW_PREPROCESSOR:\n preprocessed = use_tensorflow_preprocessor(x)\n else:\n preprocessed = use_tensorflow_lite_preprocessor(x)\n\n interpreter.set_tensor(input_details[0]['index'], preprocessed)\n interpreter.invoke()\n tflite_interpreter_output = interpreter.get_tensor(output_details[0]['index'])\n embeddings = tflite_interpreter_output[0]\n print(\"TensorFlow Lite embeddings: \", embeddings)\n\n\ndef use_tensorflow_preprocessor(x):\n image_preprocessor = create_preprocessing_model()\n return image_preprocessor.predict(x)\n\n\ndef use_tensorflow_lite_preprocessor(x):\n image_preprocessor_interpreter = tf.lite.Interpreter(model_path=\"Face-preprocessing.tflite\")\n image_preprocessor_interpreter.resize_tensor_input(0, [1, 224, 224, 3])\n image_preprocessor_interpreter.allocate_tensors()\n preprocessor_input_details = image_preprocessor_interpreter.get_input_details()\n preprocessor_output_details = image_preprocessor_interpreter.get_output_details()\n image_preprocessor_interpreter.set_tensor(preprocessor_input_details[0]['index'], x)\n image_preprocessor_interpreter.invoke()\n preprocessor_interpreter_output = image_preprocessor_interpreter.get_tensor(preprocessor_output_details[0]['index'])\n preprocessed = preprocessor_interpreter_output[0]\n preprocessed = np.expand_dims(preprocessed, axis=0)\n return preprocessed\n\n\ndef tensorflow_custom_preprocessing_example():\n \"\"\"Example usage to get face embeddings from cropped image of human face\"\"\"\n import numpy as np\n from tensorflow.keras.preprocessing import image\n\n image_preprocessor = create_preprocessing_model()\n embeddings_model = VGGFace(model=\"senet50\", pooling=\"avg\", include_top=False, input_shape=(224, 224, 3))\n\n img = image.load_img('../image/ajb.jpg', target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n preprocessed = image_preprocessor.predict(x)\n embeddings = embeddings_model.predict(preprocessed)\n print(\"TensorFlow embeddings: \", embeddings)\n\n\ndef get_predictions_from_png_image_example():\n \"\"\"Example usage to get predictions (human identity) from image\"\"\"\n from tensorflow.keras.preprocessing import image\n import numpy as np\n import keras_vggface.utils as libutils\n\n image_preprocessor = create_preprocessing_model()\n model = VGGFace(model='senet50')\n img = image.load_img('image/ajb-resized.jpg', target_size=(224, 224), interpolation=\"bilinear\")\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n preprocessed = image_preprocessor.predict(x)\n predictions = model.predict(preprocessed)\n print('Predicted:', 
libutils.decode_predictions(predictions))\n # Output of normal: [[[\"b' A._J._Buckley'\", 0.91385096], [\"b' Guy_Garvey'\", 0.009176245], [\"b' Jeff_Corwin'\", 0.008781389], [\"b' Michael_Voltaggio'\", 0.0073467665], [\"b' Nick_Frost'\", 0.0065856054]]]\n # Output of custom preprocessing (1 model): [[[\"b' A._J._Buckley'\", 0.91558367], [\"b' Guy_Garvey'\", 0.009039231], [\"b' Jeff_Corwin'\", 0.008346532], [\"b' Michael_Voltaggio'\", 0.0071733994], [\"b' Nick_Frost'\", 0.006603726]]]\n # (this) output of custom preprocessing (2 model): [[[\"b' A._J._Buckley'\", 0.91385096], [\"b' Guy_Garvey'\", 0.009176245], [\"b' Jeff_Corwin'\", 0.008781389], [\"b' Michael_Voltaggio'\", 0.0073467665], [\"b' Nick_Frost'\", 0.0065856054]]]\n\n\ndef create_preprocessor_input_metadata():\n input_meta = _metadata_fb.TensorMetadataT()\n input_meta.name = \"image preprocessing\"\n input_meta.description = (\n \"Input image to be preprocessed. The input image can be of any size.\"\n )\n input_meta.content = _metadata_fb.ContentT()\n input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()\n input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB\n input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties\n input_meta.processUnits = []\n input_stats = _metadata_fb.StatsT()\n input_stats.max = [255]\n input_stats.min = [0]\n input_meta.stats = input_stats\n return input_meta\n\n\ndef create_preprocessor_output_metadata():\n output_meta = _metadata_fb.TensorMetadataT()\n output_meta.name = \"Preprocessed image\"\n image_size = 224\n output_meta.description = f\"Preprocessed image, shaped 3 x {image_size} x {image_size}\"\n output_meta.content = _metadata_fb.ContentT()\n output_meta.content.content_properties = _metadata_fb.FeaturePropertiesT()\n output_meta.content.contentPropertiesType = (\n _metadata_fb.ContentProperties.FeatureProperties)\n return output_meta\n\n\ndef write_preprocessor_metadata(image_preprocessor_filename):\n metadata = _metadata_fb.ModelMetadataT()\n metadata.description = \"A preprocessing model used to channel-reverse, resize and finally depthwise normalize images prior to \" \\\n \"processing through the face recognition model, {0}. 
If you use this model, then don't do \" \\\n \"the preprocessing manually since this model does it for you.\".format(VggFaceMetadata.NAME)\n metadata.name = VggFaceMetadata.NAME + \" preprocessing\"\n metadata.author = \"Ben Butterworth\"\n metadata.license = \"Github: See https://github.com/popsa-hq/keras-vggface\"\n metadata.version = VggFaceMetadata.VERSION\n\n subgraph = _metadata_fb.SubGraphMetadataT()\n subgraph.inputTensorMetadata = [create_preprocessor_input_metadata()]\n subgraph.outputTensorMetadata = [create_preprocessor_output_metadata()]\n metadata.subgraphMetadata = [subgraph]\n\n save_metadata_to_model_file(metadata, image_preprocessor_filename)\n\n\nif __name__ == \"__main__\":\n # tensorflow_lite_example()\n # tensorflow_custom_preprocessing_example()\n\n # # First stage: Image preprocessing model\n image_preprocessor = create_preprocessing_model()\n preprocessor_filename = 'FaceEmbeddingsPreprocessing.tflite'\n create_tflite_model_file(image_preprocessor, preprocessor_filename)\n write_preprocessor_metadata(preprocessor_filename)\n\n # Second stage: Face vector calculation\n embeddings_model = VGGFace(model=\"senet50\", pooling=\"avg\", include_top=False, input_shape=(224, 224, 3))\n model_filename = 'FaceEmbeddings.tflite'\n create_tflite_model_file(embeddings_model, model_filename)\n write_metadata(model_filename)\n" ]
[ [ "numpy.expand_dims", "tensorflow.lite.TFLiteConverter.from_keras_model", "tensorflow.keras.preprocessing.image.load_img", "tensorflow.lite.Interpreter", "tensorflow.keras.preprocessing.image.img_to_array" ] ]
tamaswells/Nanocut_redistribute
[ "3103eaa3c015ab1c04fb254d51c263a00df90cae" ]
[ "src/nanocut/periodic_1D_prism.py" ]
[ "import numpy as np\nfrom nanocut.common import EPSILON, PERIODIC_TOLERANCE\nfrom nanocut.polyhedron import Polyhedron\nfrom nanocut.output import error\n\nclass Periodic1DPrism(Polyhedron):\n \"\"\"Class for periodic bodies bounded by a group of planes.\"\"\"\n \n\n def __init__(self, geometry, period, **kwargs):\n \"\"\"Construct Periodic1DPrism instance.\n \n Keyword args:\n shift_vector: Origin of the body.\n planes_normal: Plane definitions with normal vectors and distances.\n planes_miller: Plane definitions with miller indices and distances.\n \"\"\"\n # Check for plane normals not orthogonal to axis (plane cuts axis) \n self.periodicity = period\n axis = self.periodicity.get_axis(\"cartesian\")\n planes_normal = self.pop_planes(geometry, kwargs)\n projections = abs(np.dot(planes_normal[:,:3], axis.transpose())) \n if np.any(projections > EPSILON): \n error(\"Some plane(s) are not parallel to axis\")\n \n # Determine basal planes. Shift them with a small amount to make sure\n # atoms do not stay outside due to arithmetic errors.\n axisnorm = np.linalg.norm(axis[0])\n axis0 = axis[0] / axisnorm\n basal_planes = np.array(\n [[ axis0[0], axis0[1], axis0[2], -PERIODIC_TOLERANCE ],\n [ axis0[0], axis0[1], axis0[2], axisnorm + PERIODIC_TOLERANCE ]])\n \n # Extend planes by basal planes and call base class\n planes_normal = np.vstack(( basal_planes, planes_normal ))\n kwargs[\"planes_normal\"] = planes_normal\n kwargs[\"planes_normal_coordsys\"] = \"cartesian\"\n Polyhedron.__init__(self, geometry, period, **kwargs)\n \n\n def atoms_inside(self, atoms):\n \"\"\"Decides which atoms are inside the body (see Body class).\"\"\"\n \n atoms_inside_body = Polyhedron.atoms_inside(self, atoms)\n atoms_inside_body *= self.periodicity.mask_unique(\n atoms - self.shift_vector, atoms_inside_body)\n return atoms_inside_body\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.any", "numpy.vstack" ] ]
rmarx/quic_iot
[ "56e3c184bdaa20c065150c33851a5b6608987b8b" ]
[ "proxy/fiat_module/feature_selection.py" ]
[ "import json\nimport time\n\nfrom scapy.all import PcapReader\nfrom scapy.layers.inet import IP, TCP, UDP, ICMP, Ether\nimport scapy.all as sc\nsc.load_layer(\"tls\")\nimport sys\nimport os\nimport struct\nimport copy\nfrom collections import defaultdict\nimport hashlib\nimport hmac\nimport socket\n\nfrom string import digits\nfrom datetime import datetime\n\nimport numpy as np\n\nimport fiat_module.utils as utils\n\n# TCP flags\nFIN = 0x01\nSYN = 0x02\nRST = 0x04\nPSH = 0x08\nACK = 0x10\nURG = 0x20\nECE = 0x40\nCWR = 0x80\n# MEANINGFUL_FLAG = 0xff\nMEANINGFUL_FLAG = FIN | SYN | RST\n\ndef get_tls_version(pkt):\n ret = 0\n if TCP in pkt:\n try:\n if TLS in pkt:\n ret = pkt[TLS].version\n elif TLS in TLS(bytes(pkt[TCP].payload)):\n ret = TLS(bytes(pkt[TCP].payload)).version\n except Exception as e:\n print('proto error!', e, pkt)\n elif UDP in pkt:\n try:\n if TLS in pkt:\n ret = pkt[TLS].version\n elif TLS in TLS(bytes(pkt[UDP].payload)):\n ret = TLS(bytes(pkt[UDP].payload)).version\n except Exception as e:\n print('proto error!', e, pkt)\n return ret\n\nNUM_SHORT_FEATURE = 12 \ndef extract_feature_short(pkts, device_ip):\n # features:\n # dst_IP, dst_domain, \n # tcp/udp, flags, src_port, dst_port, \n # tls,\n # pkt size, inter-arrival time \n features = []\n\n pkt_sizes = []\n int_times = []\n last_ts = 0\n for pkt in pkts:\n pkt_sizes.append(len(pkt))\n if pkt[IP].src == device_ip:\n features.append(1)\n features.extend([int(dst) for dst in pkt[IP].dst.split('.')])\n # features.append(utils.LOCAL_DNS[pkt[IP].dst])\n if TCP in pkt:\n features.append(0)\n features.append(int(pkt[TCP].flags) & MEANINGFUL_FLAG)\n features.append(pkt[TCP].sport)\n features.append(pkt[TCP].dport)\n features.append(get_tls_version(pkt))\n # if TLS in pkt:\n # features.append(pkt[TLS].version)\n # elif TLS in TLS(pkt[TCP].payload):\n # features.append(TLS(pkt[TCP].payload).version)\n # else:\n # features.append(0)\n features.append(len(pkt))\n elif UDP in pkt:\n features.append(1)\n features.append(0)\n features.append(pkt[UDP].sport)\n features.append(pkt[UDP].dport)\n features.append(get_tls_version(pkt))\n # if TLS in pkt:\n # features.append(pkt[TLS].version)\n # elif TLS in TLS(pkt[UDP].payload):\n # features.append(TLS(pkt[UDP].payload).version)\n # else:\n # features.append(0)\n features.append(len(pkt))\n else:\n features.append([-1] * 6)\n else:\n features.append(0)\n features.extend([int(src) for src in pkt[IP].src.split('.')])\n # features.append(utils.LOCAL_DNS[pkt[IP].src])\n if TCP in pkt:\n features.append(0)\n features.append(int(pkt[TCP].flags) & MEANINGFUL_FLAG)\n features.append(pkt[TCP].dport)\n features.append(pkt[TCP].sport)\n features.append(get_tls_version(pkt))\n # if TLS in pkt:\n # features.append(pkt[TLS].version)\n # elif TLS in TLS(bytes(pkt[TCP].payload)):\n # features.append(TLS(bytes(pkt[TCP].payload)).version)\n # else:\n # features.append(0)\n features.append(len(pkt))\n elif UDP in pkt:\n features.append(1)\n features.append(0)\n features.append(pkt[UDP].dport)\n features.append(pkt[UDP].sport)\n features.append(get_tls_version(pkt))\n # if TLS in pkt:\n # features.append(pkt[TLS].version)\n # elif TLS in TLS(bytes(pkt[UDP].payload)):\n # features.append(TLS(bytes(pkt[UDP].payload)).version)\n # else:\n # features.append(0)\n features.append(len(pkt))\n else:\n features.append([-1] * 6)\n\n if last_ts == 0:\n features.append(0)\n else:\n features.append(float(pkt.time) - last_ts)\n int_times.append(float(pkt.time) - last_ts)\n last_ts = float(pkt.time)\n\n if len(pkts) < 
utils.SHORT_PKT_THRES:\n features.extend([-1] * NUM_SHORT_FEATURE * int(utils.SHORT_PKT_THRES - len(pkts)))\n # features.extend(\n # [-1] * int(\n # (utils.SHORT_PKT_THRES - len(pkts)) * (len(features) / len(pkts))\n # ))\n\n features.append(len(pkt_sizes))\n features.append(np.average(pkt_sizes))\n features.append(np.std(pkt_sizes))\n\n features.append(sum(int_times))\n features.append(np.average(int_times))\n features.append(np.std(int_times))\n\n return features\n\n\nNUM_LONG_FEATURE = 11 # 12\ndef extract_feature_long(pkts, device_ip):\n # features:\n # first 3 distinct dst ip, \n # tcp/udp, flags, src_port, dst_port, \n # tls,\n # pkt size, inter-arrival time \n features = []\n distinct_ip = []\n last_ts = 0\n inter_arrival_time = []\n pkt_sizes = []\n upload, download = [], []\n for pkt in pkts:\n if last_ts == 0:\n last_ts = float(pkt.time)\n else:\n inter_arrival_time.append(float(pkt.time) - last_ts)\n last_ts = float(pkt.time)\n pkt_sizes.append(len(pkt))\n\n # first 3 distinct ip\n if pkt[IP].src == device_ip:\n upload.append(len(pkt))\n if (pkt[IP].dst not in distinct_ip) and len(distinct_ip) < 3:\n distinct_ip.append(pkt[IP].dst)\n features.extend(pkt[IP].dst.split('.'))\n if TCP in pkt:\n features.append(0)\n features.append(int(pkt[TCP].flags))\n features.append(pkt[TCP].sport)\n features.append(pkt[TCP].dport)\n features.append(get_tls_version(pkt))\n # features.append(pkt[TLS].version if TLS in pkt else 0)\n features.append(len(pkt))\n elif UDP in pkt:\n features.append(1)\n features.append(0)\n features.append(pkt[UDP].sport)\n features.append(pkt[UDP].dport)\n features.append(get_tls_version(pkt))\n # features.append(0)\n features.append(len(pkt))\n else:\n features.append([-1] * 6)\n else:\n download.append(len(pkt))\n if (pkt[IP].src not in distinct_ip) and len(distinct_ip) < 3:\n distinct_ip.append(pkt[IP].src)\n features.extend(pkt[IP].src.split('.'))\n if TCP in pkt:\n features.append(0)\n features.append(int(pkt[TCP].flags))\n features.append(pkt[TCP].sport)\n features.append(pkt[TCP].dport)\n features.append(get_tls_version(pkt))\n # features.append(pkt[TLS].version if TLS in pkt else 0)\n features.append(len(pkt))\n elif UDP in pkt:\n features.append(1)\n features.append(0)\n features.append(pkt[UDP].sport)\n features.append(pkt[UDP].dport)\n features.append(get_tls_version(pkt))\n # features.append(0)\n features.append(len(pkt))\n else:\n features.append([-1] * 6)\n\n if len(features) < 3*NUM_LONG_FEATURE:\n features.extend([-1] * (3*NUM_LONG_FEATURE - len(features)))\n \n # statistics\n features.append(len(pkts))\n features.append(np.average(pkt_sizes))\n features.append(np.std(pkt_sizes))\n features.append(min(pkt_sizes))\n features.append(np.percentile(pkt_sizes, 25))\n features.append(np.percentile(pkt_sizes, 50))\n features.append(np.percentile(pkt_sizes, 75))\n features.append(max(pkt_sizes))\n\n features.append(sum(inter_arrival_time))\n features.append(np.average(inter_arrival_time))\n features.append(np.std(inter_arrival_time))\n features.append(min(inter_arrival_time))\n features.append(np.percentile(inter_arrival_time, 25))\n features.append(np.percentile(inter_arrival_time, 50))\n features.append(np.percentile(inter_arrival_time, 75))\n features.append(max(inter_arrival_time))\n\n features.append(float(sum(download)) / (sum(upload)+sum(download)))\n\n return features" ]
[ [ "numpy.std", "numpy.average", "numpy.percentile" ] ]
raymondyeh07/tv_layers_for_cv
[ "623379b249acc115ccb3ac0873fbf6d64ea9768f" ]
[ "tv_opt_layers/layers/l1_tv_1d_layer.py" ]
[ "\"\"\"Implements an 1D TVL1 layer.\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom tv_opt_layers.ops.proximity_tv_cuda import ProxTV_l1_cuda\n\n\nclass L1TV1DLayer(nn.Module):\n def __init__(self, lmbd_mode='learned', lmbd_init=-1, lmbd_zero=-1, num_channels=1,\n direction='row', dtype=torch.float):\n \"\"\"\n lmbd_zero: specifies lmbd when parameter = 0; this changes weight decay reg.\n \"\"\"\n super(L1TV1DLayer, self).__init__()\n assert lmbd_mode in ['learned', 'fixed', 'given']\n self.lmbd_mode = lmbd_mode\n self.lmbd_offset = 0\n if lmbd_zero >= 0:\n self.lmbd_offset = lmbd_zero + \\\n torch.log(-torch.expm1(-torch.tensor(lmbd_zero))+1e-8)\n self.num_channels = num_channels\n assert direction in ['row', 'col']\n self.direction = direction\n\n if lmbd_init < 0: # random initialize.\n lmbd_init = torch.rand(num_channels)\n # Inverse of softplus for correct initialization.\n self.lmbd_init = lmbd_init + \\\n torch.log(-torch.expm1(-torch.tensor(lmbd_init)))-self.lmbd_offset\n if lmbd_mode in ['learned', 'fixed']:\n self.lmbd = nn.parameter.Parameter(self.lmbd_init*torch.ones(1, num_channels, dtype=dtype),\n requires_grad=lmbd_mode == 'learned')\n else:\n self.lmbd = None\n self.solve = ProxTV_l1_cuda.apply\n\n def get_lmbd_val(self, input_lmbd=None):\n if input_lmbd is not None:\n return nn.functional.softplus(input_lmbd+self.lmbd_offset)\n else:\n return nn.functional.softplus(self.lmbd+self.lmbd_offset)\n\n def forward(self, y, input_lmbd=None, direction=None):\n \"\"\"\n y: (N, C, H, W) tensor.\n input_lmbd: (N,1) tensor.\n \"\"\"\n # Each channels are propcessed separately.\n if not direction:\n direction = self.direction\n\n if direction == 'col':\n y = y.transpose(-1, -2)\n\n N, C, H, W = y.shape\n\n y_flat = y.reshape(-1, W)\n\n if input_lmbd is not None:\n input_lmbd = input_lmbd.squeeze()\n if len(input_lmbd.shape) == 1:\n input_lmbd = input_lmbd.unsqueeze(-1).repeat(1, C, H).reshape(-1, 1)\n elif len(input_lmbd.shape) == 2:\n input_lmbd = input_lmbd.unsqueeze(-1).repeat(1, 1, H).reshape(-1, 1)\n else:\n assert False # This should not happen.\n else:\n input_lmbd = self.lmbd\n if self.num_channels != 1:\n input_lmbd = input_lmbd.unsqueeze(-1).repeat(N, 1, H).reshape(-1, 1)\n ret_flat = self.solve(y_flat, self.get_lmbd_val(input_lmbd=input_lmbd))\n ret = ret_flat.reshape(N, C, H, W)\n\n if direction == 'col':\n ret = ret.transpose(-1, -2)\n return ret\n" ]
[ [ "torch.tensor", "torch.ones", "torch.rand", "torch.nn.functional.softplus" ] ]
IBM/yaso-tsa
[ "148d2ba14c8213d9d946305bc558066028c43468" ]
[ "tests/test_CategoricalLabel.py" ]
[ "# © Copyright IBM Corporation 2021.\n#\n# LICENSE: Apache License 2.0 (Apache-2.0)\n# http://www.apache.org/licenses/LICENSE-2.0\n\nfrom unittest import TestCase\nfrom collections import Counter\n\nfrom yaso_tsa.infra.CategoricalLabel import CategoricalLabel\n\n\nclass TestCategoricalLabel(TestCase):\n def test_is_unanimous(self):\n categorical_label = CategoricalLabel(Counter(positive=5))\n self.assertTrue(categorical_label.is_unanimous())\n categorical_label = CategoricalLabel(Counter(positive=5, negative=2))\n self.assertFalse(categorical_label.is_unanimous())\n\n def test_is_inconclusive(self):\n categorical_label = CategoricalLabel(Counter(positive=5, negative=4))\n self.assertFalse(categorical_label.is_inconclusive)\n categorical_label = CategoricalLabel(Counter(positive=5, negative=5))\n self.assertTrue(categorical_label.is_inconclusive)\n categorical_label = CategoricalLabel(Counter(positive=5))\n self.assertFalse(categorical_label.is_inconclusive)\n\n def test_from_series_with_labels_to_columns(self):\n import pandas as pd\n labels = pd.Series({'positive_label': 2, 'negative_label': 1})\n label = CategoricalLabel.from_series(labels, labels_to_columns={\n 'positive': 'positive_label',\n 'negative': 'negative_label'\n })\n self.assertTrue(label.most_common_label, 'positive')\n self.assertTrue(label.most_common_count, 2)\n\n def test_from_series_with_label_column(self):\n import pandas as pd\n labels = pd.Series({'label': 'positive', 'other_column': 'some other data'})\n label = CategoricalLabel.from_series(labels, index_label='label')\n self.assertTrue(label.most_common_label, 'positive')\n self.assertTrue(label.most_common_count, 1)\n" ]
[ [ "pandas.Series" ] ]
dmavridis/catz
[ "5c5d2c451dc8128889c65e57f0465787d1b7f6c9" ]
[ "train.py" ]
[ "from keras.layers import Conv2D, UpSampling2D, MaxPooling2D\nfrom keras.models import Sequential\nfrom keras.callbacks import Callback\nimport random\nimport glob\nimport wandb\nfrom wandb.keras import WandbCallback\nimport subprocess\nimport os\nfrom PIL import Image\nimport numpy as np\nfrom keras import backend as K\n\nrun = wandb.init(project='catz')\nconfig = run.config\n\nconfig.num_epochs = 2\nconfig.batch_size = 32\nconfig.img_dir = \"images\"\nconfig.height = 96\nconfig.width = 96\n\nval_dir = 'catz/test'\ntrain_dir = 'catz/train'\n\n# automatically get the data if it doesn't exist\nif not os.path.exists(\"catz\"):\n print(\"Downloading catz dataset...\")\n subprocess.check_output(\n \"curl https://storage.googleapis.com/wandb/catz.tar.gz | tar xz\", shell=True)\n\n\nclass ImageCallback(Callback):\n def on_epoch_end(self, epoch, logs):\n validation_X, validation_y = next(\n my_generator(15, val_dir))\n output = self.model.predict(validation_X)\n wandb.log({\n \"input\": [wandb.Image(np.concatenate(np.split(c, 5, axis=2), axis=1)) for c in validation_X],\n \"output\": [wandb.Image(np.concatenate([validation_y[i], o], axis=1)) for i, o in enumerate(output)]\n }, commit=False)\n\n\ndef my_generator(batch_size, img_dir):\n \"\"\"A generator that returns 5 images plus a result image\"\"\"\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.width, config.height, 3 * 5))\n output_images = np.zeros((batch_size, config.width, config.height, 3))\n random.shuffle(cat_dirs)\n if ((counter+1)*batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-5]*\")\n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_images[i] = np.array(Image.open(\n cat_dirs[counter + i] + \"/cat_result.jpg\"))\n yield (input_images, output_images)\n counter += batch_size\n\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same',\n input_shape=(config.height, config.width, 5 * 3)))\nmodel.add(MaxPooling2D(2, 2))\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same'))\nmodel.add(UpSampling2D((2, 2)))\nmodel.add(Conv2D(3, (3, 3), activation='relu', padding='same'))\n\n\ndef perceptual_distance(y_true, y_pred):\n rmean = (y_true[:, :, :, 0] + y_pred[:, :, :, 0]) / 2\n r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]\n g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]\n b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]\n\n return K.mean(K.sqrt((((512+rmean)*r*r)/256) + 4*g*g + (((767-rmean)*b*b)/256)))\n\n\nmodel.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])\n\nmodel.fit_generator(my_generator(config.batch_size, train_dir),\n steps_per_epoch=len(\n glob.glob(train_dir + \"/*\")) // config.batch_size,\n epochs=config.num_epochs, callbacks=[\n ImageCallback(), WandbCallback()],\n validation_steps=len(glob.glob(val_dir + \"/*\")) // config.batch_size,\n validation_data=my_generator(config.batch_size, val_dir))\n" ]
[ [ "numpy.concatenate", "numpy.split", "numpy.zeros" ] ]
joshmurr/style-based-gan-pytorch
[ "d65e5f03805ffa56a38f3d187d8a8734a56b17d7" ]
[ "generate.py" ]
[ "import argparse\r\nimport math\r\n\r\nimport torch\r\nfrom torchvision import utils, transforms, io\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\nfrom model import StyledGenerator\r\n\r\n\r\n@torch.no_grad()\r\ndef get_mean_style(generator, device):\r\n\tmean_style = None\r\n\r\n\tfor i in range(10):\r\n\t\tstyle = generator.mean_style(torch.randn(256, 64).to(device))\r\n\r\n\t\tif mean_style is None:\r\n\t\t\tmean_style = style\r\n\r\n\t\telse:\r\n\t\t\tmean_style += style\r\n\r\n\tmean_style /= 10\r\n\treturn mean_style\r\n\r\n@torch.no_grad()\r\ndef sample(generator, step, mean_style, n_sample, device):\r\n\timage = generator(\r\n\t\ttorch.randn(n_sample, 64).to(device),\r\n\t\tstep=step,\r\n\t\talpha=1,\r\n\t\tmean_style=mean_style,\r\n\t\tstyle_weight=0.7,\r\n\t)\r\n\r\n\treturn image\r\n\r\n@torch.no_grad()\r\ndef linear_interpolate(code1, code2, alpha):\r\n\treturn code1 * alpha + code2 * (1 - alpha)\r\n\r\n\r\n@torch.no_grad()\r\ndef get_interp_frames(generator, step, mean_style, num_interps):\r\n\tfps = 25\r\n\tstep_size = 1.0/num_interps\r\n\tamounts = np.arange(0, 1, step_size)\r\n\r\n\tcodes = torch.randn(2, 64).to(device)\r\n\r\n\tall_zs = torch.stack([linear_interpolate(codes[0], codes[1], alpha) for alpha in amounts])\r\n\r\n\timages = generator(\r\n\t\tall_zs,\r\n\t\tstep=step,\r\n\t\talpha=1,\r\n\t\tmean_style=mean_style,\r\n\t\tstyle_weight=0.7,\r\n\t)\r\n\r\n\treturn images, codes\r\n\r\ndef normalize_frames(frames):\r\n\t# normed = torch.zeros(*frames.size())\r\n\t# for i, img in enumerate(frames):\r\n\t\t# _min = img.min()\r\n\t\t# _max = img.max()\r\n\t\t# img -= _min\r\n\t\t# img *= 255 / (_max - _min)\r\n\t\t# normed[i] = img\r\n\treturn frames.mul(255).add_(0.5).clamp_(0, 255)\r\n\r\ndef make_gif(frames, size=(256,256), name='latent_space_traversal.gif'):\r\n\tnormed_frames = normalize_frames(frames)\r\n\r\n\tfor img in normed_framed:\r\n\t\timg = img.permute(1,2,0).to('cpu', torch.uint8).numpy()\r\n\r\n\t\tall_imgs.append(Image.fromarray(img).resize(size))\r\n\r\n\tall_imgs[0].save(name, save_all=True, append_images=all_imgs[1:], duration=1000/fps, loop=0)\r\n\r\n\treturn images\r\n\r\ndef make_video(frames, size=(256,256), name='latent_space_traversal.mp4'):\r\n\tfps = 24\r\n\tnormed_frames = normalize_frames(frames)\r\n\tio.write_video(name, normed_frames.permute(0, 2, 3, 1).cpu(), fps)\r\n\r\n\r\n@torch.no_grad()\r\ndef style_mixing(generator, step, mean_style, n_source, n_target, device):\r\n\tsource_code = torch.randn(n_source, 64).to(device)\r\n\ttarget_code = torch.randn(n_target, 64).to(device)\r\n\tshape = 4 * 2 ** step\r\n\talpha = 1\r\n\r\n\timages = [torch.ones(1, 3, shape, shape).to(device) * -1]\r\n\r\n\tsource_image = generator(\r\n\t\tsource_code, step=step, alpha=alpha, mean_style=mean_style, style_weight=0.7\r\n\t)\r\n\ttarget_image = generator(\r\n\t\ttarget_code, step=step, alpha=alpha, mean_style=mean_style, style_weight=0.7\r\n\t)\r\n\r\n\timages.append(source_image)\r\n\r\n\tfor i in range(n_target):\r\n\t\timage = generator(\r\n\t\t\t[target_code[i].unsqueeze(0).repeat(n_source, 1), source_code],\r\n\t\t\tstep=step,\r\n\t\t\talpha=alpha,\r\n\t\t\tmean_style=mean_style,\r\n\t\t\tstyle_weight=0.7,\r\n\t\t\tmixing_range=(0, 1),\r\n\t\t)\r\n\t\timages.append(target_image[i].unsqueeze(0))\r\n\t\timages.append(image)\r\n\r\n\timages = torch.cat(images, 0)\r\n\r\n\treturn images\r\n\r\n\r\nif __name__ == '__main__':\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument('--size', type=int, default=256, help='size of the 
image')\r\n\tparser.add_argument('--n_row', type=int, default=3, help='number of rows of sample matrix')\r\n\tparser.add_argument('--n_col', type=int, default=5, help='number of columns of sample matrix')\r\n\tparser.add_argument('--state_dict', type=str, default='g_running', help='state dict')\r\n\tparser.add_argument('--output', type=str, default='images', help='image, gif or video')\r\n\tparser.add_argument('--frames', type=int, default=32, help='num frames for GIF or video')\r\n\tparser.add_argument('--name', type=str, default='latent_space_traversal', help='name of output')\r\n\tparser.add_argument('path', type=str, help='path to checkpoint file')\r\n\r\n\targs = parser.parse_args()\r\n\r\n\tdevice = 'cuda'\r\n\r\n\tgenerator = StyledGenerator().to(device)\r\n\tif 'train_step' in args.path:\r\n\t\tprint('Loading from train_step')\r\n\t\tgenerator.load_state_dict(torch.load(args.path)[args.state_dict])\r\n\telse:\r\n\t\tgenerator.load_state_dict(torch.load(args.path))\r\n\tgenerator.eval()\r\n\r\n\tmean_style = get_mean_style(generator, device)\r\n\r\n\tstep = int(math.log(args.size, 2)) - 2\r\n\r\n\tif args.output == 'gif':\r\n\t\tframes, _ = get_interp_frames(generator, step, mean_style, args.frames)\r\n\t\timg = make_gif(frames, name=args.name + '.gif')\r\n\telif args.output == 'video':\r\n\t\tframes, _ = get_interp_frames(generator, step, mean_style, args.frames)\r\n\t\timg = make_video(frames, name=args.name + '.mp4')\r\n\telif args.output == 'image':\r\n\t\timg = sample(generator, step, mean_style, args.n_row * args.n_col, device)\r\n\t\tutils.save_image(img, 'sample.png', nrow=args.n_col, normalize=True, range=(-1, 1))\r\n\telse:\r\n\t\tprint('Please choose image, gif or video for --output')\r\n\r\n\t# for j in range(20):\r\n\t# img = style_mixing(generator, step, mean_style, args.n_col, args.n_row, device)\r\n\t# utils.save_image(\r\n\t# img, f'sample_mixing_{j}.png', nrow=args.n_col + 1, normalize=True, range=(-1, 1)\r\n\t# )\r\n" ]
[ [ "torch.ones", "torch.load", "torch.cat", "numpy.arange", "torch.randn", "torch.no_grad" ] ]
rsese/dedupe
[ "cfba7c08f03a0949c5d72f0f4d8ebfb8e48e4507" ]
[ "dedupe/clustering.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport itertools\nfrom collections import defaultdict\nimport array\nimport logging\n\nimport numpy\nimport fastcluster\nimport hcluster\n\nlogger = logging.getLogger(__name__)\n\n\ndef connected_components(edgelist, max_components):\n\n if len(edgelist) == 0:\n raise StopIteration()\n\n components = union_find(edgelist['pairs'])\n\n for component in components:\n sub_graph = edgelist[component]\n n_components = len(numpy.unique(sub_graph['pairs']))\n\n if n_components > max_components:\n min_score = numpy.min(sub_graph['score'])\n min_score_logit = numpy.log(min_score) - numpy.log(1 - min_score)\n threshold = 1 / (1 + numpy.exp(-min_score_logit - 1))\n logger.warning('A component contained %s elements. '\n 'Components larger than %s are '\n 're-filtered. The threshold for this '\n 'filtering is %s' % (n_components,\n max_components,\n threshold))\n filtered_sub_graph = sub_graph[sub_graph['score'] > threshold]\n for sub_graph in connected_components(filtered_sub_graph,\n max_components):\n yield sub_graph\n else:\n yield sub_graph\n\n\ndef union_find(edgelist):\n\n root = {}\n components = {}\n component_size = {}\n\n it = numpy.nditer(edgelist, ['external_loop'])\n\n for i, (a, b) in enumerate(it):\n root_a = root.get(a)\n root_b = root.get(b)\n\n if root_a is None and root_b is None:\n # assuming that it will be a while before we are handling\n # edgelists of much more than 4 billion elements we will\n # use an the 'I' type\n components[a] = array.array('I', [i])\n component_size[a] = 2\n root[a] = root[b] = a\n elif root_a is None or root_b is None:\n if root_a is None:\n b = a\n root_a = root_b\n components[root_a].append(i)\n component_size[root_a] += 1\n root[b] = root_a\n elif root_a != root_b:\n if component_size[root_a] < component_size[root_b]:\n root_a, root_b = root_b, root_a\n\n components[root_a].extend(components[root_b])\n components[root_a].append(i)\n\n component_b = numpy.unique(edgelist[components[root_b]])\n\n for node in component_b:\n root[node] = root_a\n\n component_size[root_a] += len(component_b)\n\n del components[root_b]\n del component_size[root_b]\n\n else:\n components[root_a].append(i)\n\n return components.values()\n\n\ndef condensedDistance(dupes):\n '''\n Convert the pairwise list of distances in dupes to \"condensed\n distance matrix\" required by the hierarchical clustering\n algorithms. 
Also return a dictionary that maps the distance matrix\n to the record_ids.\n\n The formula for an index of the condensed matrix is\n\n index = {N choose 2}-{N-row choose 2} + (col-row-1)\n = N*(N-1)/2 - (N-row)*(N-row-1)/2 + col - row - 1\n ^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^\n matrix_length row_step\n\n where (row,col) is index of an uncondensed square N X N distance matrix.\n\n See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html\n '''\n\n candidate_set = numpy.unique(dupes['pairs'])\n\n i_to_id = dict(enumerate(candidate_set))\n\n ids = candidate_set.searchsorted(dupes['pairs'])\n row = ids[:, 0]\n col = ids[:, 1]\n\n N = len(candidate_set)\n matrix_length = N * (N - 1) / 2\n\n row_step = (N - row) * (N - row - 1) / 2\n index = matrix_length - row_step + col - row - 1\n\n condensed_distances = numpy.ones(int(matrix_length), 'f4')\n condensed_distances[index.astype(int)] = 1 - dupes['score']\n\n return i_to_id, condensed_distances, N\n\n\ndef cluster(dupes, threshold=.5, max_components=30000):\n '''\n Takes in a list of duplicate pairs and clusters them in to a\n list records that all refer to the same entity based on a given\n threshold\n\n Keyword arguments:\n threshold -- number betweent 0 and 1 (default is .5). lowering the\n number will increase precision, raising it will increase\n recall\n '''\n distance_threshold = 1 - threshold\n dupe_sub_graphs = connected_components(dupes, max_components)\n\n for sub_graph in dupe_sub_graphs:\n if len(sub_graph) > 1:\n\n i_to_id, condensed_distances, N = condensedDistance(sub_graph)\n\n linkage = fastcluster.linkage(condensed_distances,\n method='centroid',\n preserve_input=True)\n\n partition = hcluster.fcluster(linkage,\n distance_threshold,\n criterion='distance')\n\n clusters = defaultdict(list)\n\n for i, cluster_id in enumerate(partition):\n clusters[cluster_id].append(i)\n\n for cluster in clusters.values():\n if len(cluster) > 1:\n scores = confidences(cluster, condensed_distances, N)\n yield tuple(i_to_id[i] for i in cluster), scores\n\n else:\n (ids, score), = sub_graph\n if score > threshold:\n yield tuple(ids), (score,) * 2\n\n\ndef confidences(cluster, condensed_distances, d):\n '''\n We calculate a per record score that is similar to a standard\n deviation. The main reason is that these record scores can be\n used to calculate the standard deviation of an entire cluster,\n which is a reasonable metric for clusters.\n '''\n\n scores = dict.fromkeys(cluster, 0.0)\n squared_distances = condensed_distances ** 2\n for i, j in itertools.combinations(cluster, 2):\n index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1\n squared_dist = squared_distances[int(index)]\n scores[i] += squared_dist\n scores[j] += squared_dist\n scores = numpy.array([score for _, score in sorted(scores.items())])\n scores /= len(cluster) - 1\n scores = numpy.sqrt(scores)\n scores = 1 - scores\n return scores\n\n\ndef greedyMatching(dupes, threshold=0.5):\n A = set()\n B = set()\n\n dupes = dupes[dupes['score'] >= threshold]\n dupes.sort(order='score')\n dupes = dupes[::-1]\n\n for (a, b), score in dupes:\n if a not in A and b not in B:\n A.add(a)\n B.add(b)\n\n yield (a, b), score\n\n\ndef gazetteMatching(scored_blocks, n_matches=1):\n\n for block in scored_blocks:\n block.sort(order='score')\n block = block[::-1]\n\n if n_matches:\n yield block[:n_matches]\n else:\n yield block\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.nditer", "numpy.min", "numpy.unique", "numpy.exp" ] ]
NannyML/NannyML
[ "a6fc73f627d1e0c23c4ef7cc43bd653684c48e1a" ]
[ "tests/test_chunk.py" ]
[ "# Author: Niels Nuyttens <niels@nannyml.com>\n#\n# License: Apache Software License 2.0\n\n\"\"\"Tests for the chunking functionality.\"\"\"\nimport datetime\nimport math\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas import Timestamp\n\nfrom nannyml.chunk import Chunk, Chunker, CountBasedChunker, DefaultChunker, PeriodBasedChunker, SizeBasedChunker\nfrom nannyml.exceptions import ChunkerException, InvalidArgumentsException, MissingMetadataException\nfrom nannyml.metadata.base import (\n NML_METADATA_PARTITION_COLUMN_NAME,\n NML_METADATA_TARGET_COLUMN_NAME,\n NML_METADATA_TIMESTAMP_COLUMN_NAME,\n)\nfrom nannyml.metadata.binary_classification import NML_METADATA_PREDICTION_COLUMN_NAME\n\nrng = np.random.default_rng()\n\n\n@pytest.fixture\ndef sample_chunk() -> Chunk: # noqa: D103\n df = pd.DataFrame(rng.uniform(0, 100, size=(100, 4)), columns=list('ABCD'))\n chunk = Chunk(key='key', data=df)\n chunk.partition = 'reference'\n chunk.start_index = 0\n chunk.end_index = 100\n chunk.start_datetime = datetime.datetime.min\n chunk.end_datetime = datetime.datetime.max\n return chunk\n\n\n@pytest.fixture\ndef sample_chunk_data() -> pd.DataFrame: # noqa: D103\n data = pd.DataFrame(pd.date_range(start='1/6/2020', freq='10min', periods=20 * 1008), columns=['ordered_at'])\n data['week'] = data.ordered_at.dt.isocalendar().week - 1\n data['partition'] = 'reference'\n data.loc[data.week >= 11, ['partition']] = 'analysis'\n data[NML_METADATA_PARTITION_COLUMN_NAME] = data['partition'] # simulate preprocessing\n np.random.seed(13)\n data['f1'] = np.random.randn(data.shape[0])\n data['f2'] = np.random.rand(data.shape[0])\n data['f3'] = np.random.randint(4, size=data.shape[0])\n data['f4'] = np.random.randint(20, size=data.shape[0])\n data[NML_METADATA_PREDICTION_COLUMN_NAME] = np.random.randint(2, size=data.shape[0])\n data[NML_METADATA_TARGET_COLUMN_NAME] = np.random.randint(2, size=data.shape[0])\n data[NML_METADATA_TIMESTAMP_COLUMN_NAME] = data['ordered_at']\n\n # Rule 1b is the shifted feature, 75% 0 instead of 50%\n rule1a = {2: 0, 3: 1}\n rule1b = {2: 0, 3: 0}\n data.loc[data.week < 16, ['f3']] = data.loc[data.week < 16, ['f3']].replace(rule1a)\n data.loc[data.week >= 16, ['f3']] = data.loc[data.week >= 16, ['f3']].replace(rule1b)\n\n # Rule 2b is the shifted feature\n c1 = 'white'\n c2 = 'red'\n c3 = 'green'\n c4 = 'blue'\n\n rule2a = {\n 0: c1,\n 1: c1,\n 2: c1,\n 3: c1,\n 4: c1,\n 5: c2,\n 6: c2,\n 7: c2,\n 8: c2,\n 9: c2,\n 10: c3,\n 11: c3,\n 12: c3,\n 13: c3,\n 14: c3,\n 15: c4,\n 16: c4,\n 17: c4,\n 18: c4,\n 19: c4,\n }\n\n rule2b = {\n 0: c1,\n 1: c1,\n 2: c1,\n 3: c1,\n 4: c1,\n 5: c2,\n 6: c2,\n 7: c2,\n 8: c2,\n 9: c2,\n 10: c3,\n 11: c3,\n 12: c3,\n 13: c1,\n 14: c1,\n 15: c4,\n 16: c4,\n 17: c4,\n 18: c1,\n 19: c2,\n }\n\n data.loc[data.week < 16, ['f4']] = data.loc[data.week < 16, ['f4']].replace(rule2a)\n data.loc[data.week >= 16, ['f4']] = data.loc[data.week >= 16, ['f4']].replace(rule2b)\n\n data.loc[data.week >= 16, ['f1']] = data.loc[data.week >= 16, ['f1']] + 0.6\n data.loc[data.week >= 16, ['f2']] = np.sqrt(data.loc[data.week >= 16, ['f2']])\n\n return data\n\n\n@pytest.mark.parametrize(\n 'text',\n [\n 'key=key',\n 'data=pd.DataFrame[[100x4]]',\n 'is_transition=False',\n 'partition=reference',\n f'start_datetime={datetime.datetime.min}',\n f'end_datetime={datetime.datetime.max}',\n 'start_index=0',\n 'end_index=100',\n ],\n)\ndef test_chunk_repr_should_contain_attribute(sample_chunk, text): # noqa: D103\n sut = str(sample_chunk)\n 
assert text in sut\n\n\ndef test_chunk_len_should_return_data_length(sample_chunk): # noqa: D103\n sut = len(sample_chunk)\n assert sut == len(sample_chunk.data)\n\n\ndef test_chunk_len_should_return_0_for_empty_chunk(): # noqa: D103\n sut = len(Chunk(key='test', data=pd.DataFrame()))\n assert sut == 0\n\n\ndef test_chunker_should_log_warning_when_less_than_6_chunks(sample_chunk_data, caplog): # noqa: D103\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [Chunk(key='row0', data=data)]\n\n c = SimpleChunker()\n with pytest.warns(UserWarning, match=\"The resulting number of chunks is too low.\"):\n _ = c.split(sample_chunk_data)\n\n\ndef test_chunker_should_log_warning_when_some_chunks_are_underpopulated(sample_chunk_data, caplog): # noqa: D103\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [Chunk(key='row0', data=data.iloc[[0]])]\n\n c = SimpleChunker()\n with pytest.warns(UserWarning, match=\"The resulting list of chunks contains 1 underpopulated chunks.\"):\n _ = c.split(sample_chunk_data, minimum_chunk_size=100000)\n\n\ndef test_chunker_should_set_chunk_transition_flag_when_it_contains_observations_from_multiple_partitions( # noqa: D103\n sample_chunk_data,\n):\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [\n Chunk(key='[0:6665]', data=data.iloc[0:6666, :]),\n Chunk(key='[6666:13331]', data=data.iloc[6666:13332, :]),\n Chunk(key='[13332:20160]', data=data.iloc[13332:, :]),\n ]\n\n chunker = SimpleChunker()\n sut = chunker.split(data=sample_chunk_data)\n\n assert len(sut) == 3\n assert sut[0].is_transition is False\n assert sut[2].is_transition is False\n assert sut[1].is_transition\n\n\ndef test_chunker_should_set_index_boundaries(sample_chunk_data): # noqa: D103\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [\n Chunk(key='[0:6665]', data=data.iloc[0:6666, :]),\n Chunk(key='[6666:13331]', data=data.iloc[6666:13332, :]),\n Chunk(key='[13332:20160]', data=data.iloc[13332:, :]),\n ]\n\n chunker = SimpleChunker()\n sut = chunker.split(data=sample_chunk_data)\n assert sut[0].start_index == 0\n assert sut[0].end_index == 6665\n assert sut[1].start_index == 6666\n assert sut[1].end_index == 13331\n assert sut[2].start_index == 13332\n assert sut[2].end_index == 20159\n\n\ndef test_chunker_should_include_all_data_columns_by_default(sample_chunk_data): # noqa: D103\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [Chunk(key='row0', data=data)]\n\n c = SimpleChunker()\n sut = c.split(sample_chunk_data)[0].data.columns\n assert sorted(sut) == sorted(sample_chunk_data.columns)\n\n\ndef test_chunker_should_only_include_listed_columns_when_given_columns_param(sample_chunk_data): # noqa: D103\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [Chunk(key='row0', data=data)]\n\n columns = ['f1', 'f3', 'partition']\n c = SimpleChunker()\n sut = c.split(sample_chunk_data, columns=columns)[0].data.columns\n assert sorted(sut) == sorted(columns)\n\n\ndef test_chunker_should_raise_chunker_exception_upon_exception_during_inherited_split_execution( # noqa: D103\n sample_chunk_data,\n):\n class SimpleChunker(Chunker):\n def _split(self, 
data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n raise RuntimeError(\"oops, I broke it again\")\n\n c = SimpleChunker()\n with pytest.raises(ChunkerException):\n _ = c.split(sample_chunk_data)\n\n\ndef test_chunker_get_partition_should_raise_missing_metadata_exception_when_partition_column_not_present( # noqa: D103\n sample_chunk_data,\n):\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [Chunk(key='row0', data=data)]\n\n c = SimpleChunker()\n with pytest.raises(\n MissingMetadataException, match=f\"missing partition column '{NML_METADATA_PARTITION_COLUMN_NAME}'\"\n ):\n _ = c.split(pd.DataFrame(columns=['a', 'b', 'c', 'nml_meta_timestamp']))\n\n\ndef test_chunker_get_boundary_timestamps_should_raise_missing_metadata_exception_when_column_not_present( # noqa: D103\n sample_chunk_data,\n):\n class SimpleChunker(Chunker):\n def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:\n return [Chunk(key='row0', data=data)]\n\n c = SimpleChunker()\n with pytest.raises(\n MissingMetadataException, match=f\"missing timestamp column '{NML_METADATA_TIMESTAMP_COLUMN_NAME}'\"\n ):\n data = sample_chunk_data.drop(columns=[NML_METADATA_TIMESTAMP_COLUMN_NAME])\n _ = c.split(data)\n\n\ndef test_period_based_chunker_uses_metadata_timestamp_column_when_no_date_column_name_given( # noqa: D103\n sample_chunk_data,\n):\n chunker = PeriodBasedChunker()\n assert chunker.date_column_name == NML_METADATA_TIMESTAMP_COLUMN_NAME\n\n\ndef test_period_based_chunker_works_with_date_column_name(sample_chunk_data): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='ordered_at')\n sut = chunker.split(sample_chunk_data)\n assert len(sut) == 20\n assert len(sut[0]) == 1008\n\n\ndef test_period_based_chunker_works_with_non_default_offset(sample_chunk_data): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='ordered_at', offset='M')\n sut = chunker.split(sample_chunk_data)\n assert len(sut) == 5 # 20 weeks == 5 months\n\n\ndef test_period_based_chunker_works_with_empty_dataset(): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='date')\n sut = chunker.split(pd.DataFrame(columns=['date', 'nml_meta_timestamp', 'f1', 'f2', 'f3', 'f4']))\n assert len(sut) == 0\n\n\ndef test_period_based_chunker_fails_when_date_column_does_not_exist(sample_chunk_data): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='non_existent')\n with pytest.raises(ChunkerException, match=\"could not find date_column 'non_existent' in given data\"):\n _ = chunker.split(sample_chunk_data)\n\n\ndef test_period_based_chunker_fails_when_date_column_does_not_contain_dates(sample_chunk_data): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='f4')\n with pytest.raises(ChunkerException, match=\"could not parse date_column 'f4'\"):\n _ = chunker.split(sample_chunk_data)\n\n\ndef test_period_based_chunker_assigns_periods_to_chunk_keys(sample_chunk_data): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='ordered_at', offset='M')\n sut = chunker.split(sample_chunk_data)\n assert sut[0].key == '2020-01'\n assert sut[1].key == '2020-02'\n assert sut[-1].key == '2020-05'\n\n\ndef test_period_based_chunker_uses_periods_to_set_chunk_date_boundaries(sample_chunk_data): # noqa: D103\n chunker = PeriodBasedChunker(date_column_name='ordered_at', offset='M')\n sut = chunker.split(sample_chunk_data)\n assert sut[0].start_datetime == Timestamp(year=2020, month=1, day=1, hour=0, minute=0, 
second=0)\n assert sut[-1].end_datetime == Timestamp(\n year=2020, month=5, day=31, hour=23, minute=59, second=59, microsecond=999999, nanosecond=999\n )\n\n\ndef test_size_based_chunker_raises_exception_when_passed_nan_size(sample_chunk_data): # noqa: D103\n with pytest.raises(InvalidArgumentsException):\n _ = SizeBasedChunker(chunk_size='size?')\n\n\ndef test_size_based_chunker_raises_exception_when_passed_negative_size(sample_chunk_data): # noqa: D103\n with pytest.raises(InvalidArgumentsException):\n _ = SizeBasedChunker(chunk_size=-1)\n\n\ndef test_size_based_chunker_raises_exception_when_passed_zero_size(sample_chunk_data): # noqa: D103\n with pytest.raises(InvalidArgumentsException):\n _ = SizeBasedChunker(chunk_size=0)\n\n\ndef test_size_based_chunker_works_with_empty_dataset(): # noqa: D103\n chunker = SizeBasedChunker(chunk_size=100)\n sut = chunker.split(pd.DataFrame(columns=['date', 'nml_meta_timestamp', 'f1', 'f2', 'f3', 'f4']))\n assert len(sut) == 0\n\n\ndef test_size_based_chunker_returns_chunks_of_required_size(sample_chunk_data): # noqa: D103\n chunk_size = 1500\n chunker = SizeBasedChunker(chunk_size=chunk_size)\n sut = chunker.split(sample_chunk_data)\n assert len(sut[0]) == chunk_size\n assert len(sut) == sample_chunk_data.shape[0] // chunk_size\n\n\ndef test_size_based_chunker_uses_observations_to_set_chunk_date_boundaries(sample_chunk_data): # noqa: D103\n chunker = SizeBasedChunker(chunk_size=5000)\n sut = chunker.split(sample_chunk_data)\n assert sut[0].start_datetime == Timestamp(year=2020, month=1, day=6, hour=0, minute=0, second=0)\n assert sut[-1].end_datetime == Timestamp(year=2020, month=5, day=23, hour=21, minute=10, second=0)\n\n\ndef test_size_based_chunker_assigns_observation_range_to_chunk_keys(sample_chunk_data): # noqa: D103\n chunk_size = 1500\n last_chunk_start = (math.floor(sample_chunk_data.shape[0] / chunk_size) - 1) * chunk_size\n last_chunk_end = math.floor(sample_chunk_data.shape[0] / chunk_size) * chunk_size - 1\n\n chunker = SizeBasedChunker(chunk_size=chunk_size)\n sut = chunker.split(sample_chunk_data)\n assert sut[0].key == '[0:1499]'\n assert sut[1].key == '[1500:2999]'\n assert sut[-1].key == f'[{last_chunk_start}:{last_chunk_end}]'\n\n\ndef test_count_based_chunker_raises_exception_when_passed_nan_size(sample_chunk_data): # noqa: D103\n with pytest.raises(InvalidArgumentsException):\n _ = CountBasedChunker(chunk_count='size?')\n\n\ndef test_count_based_chunker_raises_exception_when_passed_negative_size(sample_chunk_data): # noqa: D103\n with pytest.raises(InvalidArgumentsException):\n _ = CountBasedChunker(chunk_count=-1)\n\n\ndef test_count_based_chunker_raises_exception_when_passed_zero_size(sample_chunk_data): # noqa: D103\n with pytest.raises(InvalidArgumentsException):\n _ = CountBasedChunker(chunk_count=0)\n\n\ndef test_count_based_chunker_works_with_empty_dataset(): # noqa: D103\n chunker = CountBasedChunker(chunk_count=5)\n sut = chunker.split(pd.DataFrame(columns=['date', 'nml_meta_timestamp', 'f1', 'f2', 'f3', 'f4']))\n assert len(sut) == 0\n\n\ndef test_count_based_chunker_returns_chunks_of_required_size(sample_chunk_data): # noqa: D103\n chunk_count = 5\n chunker = CountBasedChunker(chunk_count=chunk_count)\n sut = chunker.split(sample_chunk_data)\n assert len(sut[0]) == sample_chunk_data.shape[0] // chunk_count\n assert len(sut) == chunk_count\n\n\ndef test_count_based_chunker_uses_observations_to_set_chunk_date_boundaries(sample_chunk_data): # noqa: D103\n chunker = CountBasedChunker(chunk_count=20)\n sut = 
chunker.split(sample_chunk_data)\n assert sut[0].start_datetime == Timestamp(year=2020, month=1, day=6, hour=0, minute=0, second=0)\n assert sut[-1].end_datetime == Timestamp(year=2020, month=5, day=24, hour=23, minute=50, second=0)\n\n\ndef test_count_based_chunker_assigns_observation_range_to_chunk_keys(sample_chunk_data): # noqa: D103\n chunk_count = 5\n\n chunker = CountBasedChunker(chunk_count=chunk_count)\n sut = chunker.split(sample_chunk_data)\n assert sut[0].key == '[0:4031]'\n assert sut[1].key == '[4032:8063]'\n assert sut[-1].key == '[16128:20159]'\n\n\ndef test_default_chunker_uses_3_times_minimum_chunk_size_for_size(sample_chunk_data): # noqa: D103\n minimum_chunk_size = 300\n sut = DefaultChunker().split(sample_chunk_data, minimum_chunk_size=minimum_chunk_size)\n expected = minimum_chunk_size * 3\n assert len(sut) == sample_chunk_data.shape[0] // expected\n assert len(sut[0]) == expected\n assert len(sut[1]) == expected\n assert len(sut[-1]) == expected\n" ]
[ [ "numpy.sqrt", "numpy.random.seed", "pandas.DataFrame", "numpy.random.randn", "numpy.random.rand", "pandas.date_range", "pandas.Timestamp", "numpy.random.default_rng", "numpy.random.randint" ] ]
HCGB-IGTP/BacterialTyper
[ "215e29a0381d4ae616cf0a6462a04117dc30e293" ]
[ "BacterialTyper/report/Staphylococcus/agr_typing.py" ]
[ "#!/usr/bin/env python3\n##########################################################\n## Jose F. Sanchez ##\n## Copyright (C) 2019-2021 Lauro Sumoy Lab, IGTP, Spain ##\n##########################################################\n\"\"\"\nCreates agr typing\n\"\"\"\n## useful imports\nimport os\nimport sys\nfrom sys import argv\nfrom io import open\nfrom termcolor import colored\nimport pandas as pd\nfrom Bio import SeqIO\n\n## import my modules\nfrom BacterialTyper.config import set_config\n\n## import my HCGB module \nimport HCGB.functions.files_functions as HCGB_files\nimport HCGB.functions.time_functions as HCGB_time\nimport HCGB.functions.main_functions as HCGB_main\nimport HCGB.functions.system_call_functions as HCGB_sys\nimport HCGB.functions.aesthetics_functions as HCGB_aes\n\n##############\ndef help_options():\n print (\"\\nUSAGE: python %s path sample...\\n\" %os.path.realpath(__file__))\n\n##############################\ndef agrvate_caller(dict_assemblies, dict_folders, debug=False):\n \"\"\"Create agrvate call and control for parameters\"\"\"\n \n ## ATTENTION: agrvate needs to chdir to output folder\n path_here = os.getcwd()\n \n ## info2return\n agrvate_bin = set_config.get_exe('agrvate')\n info_dict={ 'agrvate database': os.path.join(os.path.basename(agrvate_bin), \"agrvate_databases\")}\n \n \n print (\"+ Checking agr genes for each sample retrieved...\")\n \n agrvate_results = pd.DataFrame()\n \n ## No need to optimize. There is a problem with the working dir of agrvate and we \n ## need to change every time.\n for name, assembly_file in dict_assemblies.items():\n report_folder = HCGB_files.create_folder(dict_folders[name])\n sample_folder = HCGB_files.create_subfolder('agr_typing', report_folder) \n ## check if previously done and succeeded\n filename_stamp = sample_folder + '/.success'\n if os.path.isfile(filename_stamp):\n stamp = HCGB_time.read_time_stamp(filename_stamp)\n print (colored(\"\\tA previous command generated results on: %s [%s]\" %(stamp, name), 'yellow'))\n info_sample = get_results_agrvate(assembly_file, sample_folder, name, debug) \n else:\n os.chdir(sample_folder)\n info_sample = agrvate_call(name, assembly_file, sample_folder, debug)\n \n if (info_sample.shape[0] == 0):\n print(\"+ Some error occurred with sample %s. 
Please re-run analysis or check log files.\" %name)\n else:\n ## success\n HCGB_time.print_time_stamp(filename_stamp)\n \n ## merge results\n agrvate_results = pd.concat([agrvate_results, info_sample], join='outer')\n \n print (\"+ Jobs finished\\n+ Collecting information for all samples...\")\n os.chdir(path_here)\n \n ## debug messages\n if debug:\n HCGB_aes.debug_message('agrvate_results', 'yellow')\n HCGB_main.print_all_pandaDF(agrvate_results)\n\n return(agrvate_results, info_dict)\n\n##############################\ndef agrvate_call(sample, assembly_file, folder, debug=False):\n \"\"\"agrvate call and check results.\"\"\"\n \n ## prepare call\n log_call = os.path.join(folder, \"agrvate_cmd.log\")\n err_call = os.path.join(folder, \"agrvate_cmd.err\")\n agrvate_bin = set_config.get_exe('agrvate')\n \n ## system call\n cmd_call = \"%s -i %s -m -f > %s 2> %s \" %(agrvate_bin, \n assembly_file,\n log_call, err_call) ## use mummer (-m) and force results folder (-f)\n status = HCGB_sys.system_call(cmd_call)\n \n if status:\n res = get_results_agrvate(assembly_file, folder, sample, debug)\n return (res)\n else:\n return(False)\n \n#########################################\ndef get_results_agrvate(assembly_file, folder, sample, debug=False):\n ## check results\n ## see https://github.com/VishnuRaghuram94/AgrVATE#results for additional details\n results = pd.DataFrame()\n \n ## check folder is created\n assembly_file_name = os.path.basename(assembly_file).split('.fna')[0] \n original_results_folder = os.path.join(folder, assembly_file_name + '-results')\n results_folder = os.path.join(folder, 'agrvate_results')\n \n ## rename folder\n if os.path.isdir(original_results_folder):\n print(\"+ Results folder generated OK\")\n print(\"+ Check results generated:\")\n \n ## rename folder\n os.rename(original_results_folder, results_folder)\n os.rename(os.path.join(folder, assembly_file_name + '.fna-error-report.tab'), os.path.join(results_folder, 'error_report.tab'))\n \n ## get results\n if (os.path.isdir(results_folder)):\n ## write to excel1\n file_name_Excel = os.path.join(folder, sample + '_agr_results.xlsx')\n writer_Excel = pd.ExcelWriter(file_name_Excel, engine='xlsxwriter') ## open excel handle\n \n ## get all files\n list_files = HCGB_main.get_fullpath_list(results_folder)\n \n ## summary tab\n summary_tab_file = [s for s in list_files if s.endswith(\"summary.tab\")][0]\n summary_tab = HCGB_main.get_data(summary_tab_file, '\\t', options=\"\")\n summary_tab['sample'] = sample\n \n ## columns\n #agr_group: gp1/gp2/gp3/gp4. 'u' means unknown. \n ## If multiple agr groups were found (col 5 = m), \n ## the displayed agr group is the majority/highest confidence. \n # match_score: maximum 15; 0 means untypeable; < 5 means low confidence.\n # canonical_agrD: 1 means canonical; 0 means non-canonical; u means unknown.\n # multiple_agr: s means single, m means multiple, u means unknown ) \n ## Multiple groups are found likely due to multiple S. 
aureus isolates in sequence\n # frameshifts: Number found in CDS of extracted agr operon ('u' if agr operon not extracted)\n \n ## debug messages\n if debug:\n HCGB_aes.debug_message(\"agrvate results: Summary tab file\", 'yellow')\n print(summary_tab_file)\n print(summary_tab)\n\n ## add summary results to all results\n del summary_tab['#filename']\n results = summary_tab.copy()\n\n ## save summary_tab into excel\n ## tab summary\n summary_tab.to_excel(writer_Excel, sheet_name='summary') ## write excel handle\n\n ## agr_gp tab\n agr_gp_tab_file = [s for s in list_files if s.endswith(\"agr_gp.tab\")][0]\n if HCGB_files.is_non_zero_file(agr_gp_tab_file):\n agr_gp_tab = HCGB_main.get_data(agr_gp_tab_file, '\\t', options='header=None')\n agr_gp_tab.columns = ['contig', 'agr', 'evalue', 'identity', 'start', 'end']\n agr_gp_tab['sample'] = sample\n \n ## columns\n ## Assembly Contig ID\n ## ID of matched agr group kmer\n ## evalue\n ## Percentage identity of match\n ## Start position of kmer alignment on input sequence\n ## End position of kmer alignment on input sequence\n \n ## debug messages\n if debug:\n HCGB_aes.debug_message(\"agrvate results: agr_gp file\", 'yellow')\n print(agr_gp_tab_file)\n print(agr_gp_tab)\n \n ## save agr_gp_tab file into excel\n ## tab operon\n agr_gp_tab.to_excel(writer_Excel, sheet_name='operon') ## write excel handle\n\n ## agr_operon fna\n try:\n agr_operon_fna_file = [s for s in list_files if s.endswith(\"agr_operon.fna\")][0]\n ## debug messages\n if debug:\n HCGB_aes.debug_message(\"agrvate results: agr_operon file\", 'yellow')\n print(agr_operon_fna_file)\n \n results['operon_fna'] = agr_operon_fna_file\n except:\n results['operon_fna'] = ''\n\n ## agr_operon fna\n error_report_file = [s for s in list_files if s.endswith(\"error_report.tab\")][0]\n error_report = HCGB_main.get_data(error_report_file, '\\t', options=\"\")\n del error_report['#input_name']\n\n ## debug messages\n if debug:\n HCGB_aes.debug_message(\"agrvate results: error_report.tab file\", 'yellow')\n print(error_report_file)\n print(error_report)\n \n ## save error_report file into excel\n ## tab steps\n error_report.to_excel(writer_Excel, sheet_name='steps') ## write excel handle\n \n ## merge results\n results = pd.concat([results, error_report], axis=1)\n\n ## close xlsx file\n writer_Excel.save() ## close excel handle\n \n ## add to pandas dataframe\n results['agr_operon_xlsx'] = file_name_Excel\n\n ## debug messages\n if debug:\n HCGB_aes.debug_message(\"agrvate results\", 'yellow')\n HCGB_main.print_all_pandaDF(results)\n \n return (results)\n\n##############\ndef help_options():\n print (\"\\nUSAGE: python %s name assembly_fasta folder...\\n\" %os.path.realpath(__file__))\n\n##############\ndef main():\n\n ## control if options provided or help\n if len(sys.argv) > 1:\n print (\"\")\n else:\n help_options()\n exit()\n \n name = argv[1]\n fasta_file = os.path.abspath(argv[2])\n folder = os.path.abspath(argv[3])\n debug=True\n\n ## path\n folder = HCGB_files.create_folder(folder)\n ## ATTENTION: agrvate needs to chdir to output folder\n os.chdir(folder)\n\n ###\n agrvate_call(name, fasta_file, folder, debug)\n \n\n'''******************************************'''\nif __name__== \"__main__\":\n main()\n\n" ]
[ [ "pandas.concat", "pandas.DataFrame", "pandas.ExcelWriter" ] ]
anuj-harisinghani/canary-nlp
[ "5225fa028f0f744cd6582f927f3990c1a50b1f9b" ]
[ "classes/handlers/ResultsHandler.py" ]
[ "import sys\nimport os\nimport pandas as pd\n\nMETRICS = 'metric'\nMODEL = 'model'\nACCURACY = 'acc'\nROC = 'roc'\nF1_SCORE = 'f1'\nPRECISION = 'precision'\nRECALL = 'recall'\nSETTINGS = 'settings'\nSPECIFICITY = 'specificity'\n\nACCURACY_SD = 'acc_sd'\nROC_SD = 'roc_sd'\nF1_SD = 'f1_sd'\nPREC_SD = 'prec_sd'\nREC_SD = 'rec_sd'\nSPEC_SD = 'spec_sd'\n\nRESULT_COLUMNS = [SETTINGS, MODEL, ACCURACY, ROC, F1_SCORE, PRECISION, RECALL, SPECIFICITY]\nRESULT_COLUMNS2 = [SETTINGS, MODEL, ACCURACY, ACCURACY_SD, ROC, ROC_SD, F1_SCORE, F1_SD, PRECISION, PREC_SD, RECALL, REC_SD, SPECIFICITY, SPEC_SD]\n\nclass ResultsHandler:\n def __init__(self):\n pass\n\n @staticmethod\n def compile_results(dataset_name: str, foldername: str):\n input_files = os.path.join(os.getcwd(), 'results', dataset_name, foldername)\n results_csv = pd.DataFrame(columns=RESULT_COLUMNS)\n for directory in os.listdir(input_files):\n if os.path.isdir(os.path.join(input_files, directory)):\n for filename in os.listdir(os.path.join(input_files, directory)):\n if filename.startswith('results'):\n \n if filename[-5:-4] == '_':\n suffix = 'overall'\n else:\n suffix = ''\n \n results = pd.read_csv(os.path.join(input_files, directory, filename))\n models = results.model.unique()\n\n for model in models:\n model_info = results[results[MODEL] == model]\n \n acc = model_info[model_info[METRICS] == ACCURACY]['1'].mean()\n roc = model_info[model_info[METRICS] == ROC]['1'].mean()\n f1_score = model_info[model_info[METRICS] == 'fms']['1'].mean()\n precision = model_info[model_info[METRICS] == PRECISION]['1'].mean()\n recall = model_info[model_info[METRICS] == RECALL]['1'].mean()\n specificity = model_info[model_info[METRICS] == SPECIFICITY]['1'].mean()\n \n results_csv = results_csv.append({\n SETTINGS: filename[20:-4] + suffix,\n MODEL: model,\n ACCURACY: acc,\n ROC: roc,\n F1_SCORE: f1_score,\n PRECISION: precision,\n RECALL: recall,\n SPECIFICITY: specificity\n }, ignore_index=True)\n \n ResultsHandler.average_seeds(results_csv, dataset_name, foldername)\n\n @staticmethod\n def average_seeds(results: pd.DataFrame, dataset_name: str, foldername: str):\n results_csv = pd.DataFrame(columns=RESULT_COLUMNS)\n settings = results.settings.unique()\n for setting in settings:\n setting_groups = results[results[SETTINGS] == setting]\n models = results.model.unique()\n \n for model in models:\n setting_model_info = setting_groups[setting_groups[MODEL] == model]\n \n acc = round(setting_model_info[ACCURACY].mean(), 2)\n roc = round(setting_model_info[ROC].mean(), 2)\n f1_score = round(setting_model_info[F1_SCORE].mean(), 2)\n precision = round(setting_model_info[PRECISION].mean(), 2)\n recall = round(setting_model_info[RECALL].mean(), 2)\n specificity = round(setting_model_info[SPECIFICITY].mean(), 2)\n\n acc_sd = round(setting_model_info[ACCURACY].std(), 2)\n roc_sd = round(setting_model_info[ROC].std(), 2)\n f1_sd = round(setting_model_info[F1_SCORE].std(), 2)\n prec_sd = round(setting_model_info[PRECISION].std(), 2)\n rec_sd = round(setting_model_info[RECALL].std(), 2)\n spec_sd = round(setting_model_info[SPECIFICITY].std(), 2)\n \n results_csv = results_csv.append({\n SETTINGS: setting,\n MODEL: model,\n ACCURACY: acc,\n ROC: roc,\n F1_SCORE: f1_score,\n PRECISION: precision,\n RECALL: recall,\n SPECIFICITY: specificity,\n \n ACCURACY_SD : acc_sd,\n ROC_SD : roc_sd,\n F1_SD : f1_sd,\n PREC_SD : prec_sd,\n REC_SD : rec_sd,\n SPEC_SD : spec_sd\n }, ignore_index=True)\n \n outfile = os.path.join(os.getcwd(), 'results', dataset_name, foldername, 
foldername+'.csv')\n results_csv.to_csv(outfile, index=False)\n\n" ]
[ [ "pandas.DataFrame" ] ]
lmillard79/DFUDA_2019_Adapter
[ "933410d197a87e7defed20a084bf8a250f4c1dca" ]
[ "03_Post_Adapter.py" ]
[ "\nfrom xml.etree.ElementTree import * # Import everything \n\nimport time\nfrom datetime import datetime\nimport csv\nimport os\n\n## from xlsxwriter.workbook import Workbook\n\nimport numpy as np\nimport pandas as pd ## Abbreviate it using the convention\npd.options.display.date_dayfirst=True # Important to some assumptions below\n\n\n## Worker Function for checking currency\ndef newest(path):\n files = os.listdir(path)\n paths = [os.path.join(path, basename) for basename in files]\n return max(paths, key=os.path.getctime)\n\n# It is a good idea to put your variables up front\nfewsNamespace=\"http://www.wldelft.nl/fews/PI\"\n\nregionHome = r'C:\\Users\\Lindsay\\Documents\\GitHub\\DFUDA_2019_Adapter/'\n\nExportXML = '3_Output/Export_fromGoldSim.xml'\n\n# populate the xml file\ndef xml(parameter, location, eventValues, dT64, unit):\n #sd = str(dateTime[0])\n sd = dateTime[0].strftime(\"%Y-%m-%d\")\n st = dateTime[0].strftime(\"%H:%M:%S\")\n ed = dateTime[-1].strftime(\"%Y-%m-%d\")\n et = dateTime[-1].strftime(\"%H:%M:%S\")\n DTtimeStep = dateTime[1]-dateTime[0]\n timeStep = int(DTtimeStep.total_seconds())\n \n # write XML output timeseries\n with open(regionHome+ExportXML,'a') as xf: # 'a+b'\n \n ## Write Header for each Series \n \n xf.write(' <series>\\n')\n xf.write(' <header>\\n')\n xf.write(' <type>instantaneous</type>\\n') \n \n loc_text = str(' <locationId>%s</locationId>\\n') % (location)\n xf.write(loc_text) \n \n par_text = str(' <parameterId>%s</parameterId>\\n') % (parameter)\n xf.write(par_text)\n \n timestep_text = str(' <timeStep unit=\"second\" multiplier=\"%s\"/>\\n') %(timeStep)\n xf.write(timestep_text) \n \n sd_text = str(' <startDate date=\"%s\" time=\"%s\"/>\\n') % (sd, st)\n xf.write(sd_text)\n \n ed_text = str(' <endDate date=\"%s\" time=\"%s\"/>\\n') % (ed, et)\n xf.write(ed_text)\n \n xf.write(' <missVal>-999.0</missVal>\\n')\n xf.write(' <stationName>Hydro Gauge 1</stationName>\\n') \n \n unit_text = str(' <units>%s</units>\\n') %(unit)\n xf.write(unit_text) \n \n xf.write(' </header>\\n')\n \n \n ## Write all the event values for each timestamp in the series\n for i in range(len(eventValues)):\n event_date = str(' <event date=\"%s\"') % (dateTime[i].strftime(\"%Y-%m-%d\"))\n event_time = str(' time=\"%s\"') % (dateTime[i].strftime(\"%H:%M:%S\"))\n event_value = str(' value=\"%s\"') % eventValues[i][0].round(2)\n event_flag = str(' flag=\"0\"/>\\n')\n \n event = str(event_date+event_time+event_value+event_flag)\n xf.write(event)\n \n xf.write(' </series>\\n')\n \n \n\nGoldSimOutputs = ['RoutedFlows.txt','RoutedLevels.txt']\n\nDF = pd.DataFrame()\n\n# write XML output header, once\nwith open(ExportXML,'w') as xf:\n xf.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n xf.write('<TimeSeries xmlns=\"http://www.wldelft.nl/fews/PI\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.wldelft.nl/fews/PI http://fews.wldelft.nl/schemas/version1.0/pi-schemas/pi_timeseries.xsd\" version=\"1.2\">\\n')\n xf.write(' <timeZone>10.0</timeZone>\\n')\n\nfor GS in GoldSimOutputs:\n fin = os.path.join(regionHome,'2_Model',GS)\n\n with open(fin,'r') as f:\n lines = f.readlines()\n parameter = lines[11].split()[1]\n unit = lines[12].split()[1]\n\n df = pd.read_table(fin,skiprows=14,header=None,names=['DateTime',parameter+'_'+unit,'Blank'],index_col=0,\n parse_dates=True,dayfirst=True)\n\n df = df.drop(['Blank'],axis=1) \n DF = pd.concat([DF,df],axis=1)\n \n locn = 'NPD'\n dateTime = df.index.to_pydatetime()\n 
xml(parameter,locn,df.values,dateTime,unit)\n\nwith open(ExportXML,'a') as xf:\n xf.write('</TimeSeries>\\n') \n\nax=DF.iloc[:,0].plot(legend=True)\nDF.iloc[:,1].plot(secondary_y=True,\n ax=ax,legend=True)\nax.set_ylabel(DF.columns[0])\nax.set_xlim('09-01-2011','13-01-2011')\nax.grid(which='both')\n\nfig = ax.get_figure()\nfig.savefig('GoldSim_TS.png',dpi=300)\n\n " ]
[ [ "pandas.read_table", "pandas.concat", "pandas.DataFrame" ] ]
shiwj16/SSINet
[ "7f285878a7798b7a8eeb3c64f99e2787ffa0b32e" ]
[ "pretrain_ppo_atari.py" ]
[ "import os\nimport numpy as np\n\nfrom src.ppo_atari import PPO\nfrom src.args import get_ppo_args\nfrom src.utils import get_dirs, write_arguments, seed\nfrom envs_utils.create_env import create_multiple_envs, create_single_env\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n\n\nif __name__ == '__main__':\n # set signle thread\n # os.environ['OMP_NUM_THREADS'] = '1'\n # os.environ['MKL_NUM_THREADS'] = '1'\n # get arguments\n args = get_ppo_args()\n model_dir, data_dir = get_dirs(args.dir_name)\n # video_dir = data_dir + '/video/train/'\n write_arguments(args, os.path.dirname(data_dir) + '/arguments.txt')\n # set seeds\n seed(args.seed)\n # start to create the environment\n envs = create_multiple_envs(args)\n\n # create trainer\n agent = PPO(envs.action_space.n, args)\n\n # start to train the network...\n evals = []\n best_eval_rew = -np.float(\"Inf\")\n episode_rewards = np.zeros((args.num_workers, ), dtype=np.float32)\n final_rewards = np.zeros((args.num_workers, ), dtype=np.float32)\n # get the observation\n batch_ob_shape = (args.num_workers * args.nsteps, ) + envs.observation_space.shape\n obs = np.zeros((args.num_workers, ) + envs.observation_space.shape, dtype=envs.observation_space.dtype.name)\n obs[:] = envs.reset()\n dones = [False for _ in range(args.num_workers)]\n # get total number of updates\n num_updates = args.total_frames // (args.nsteps * args.num_workers)\n for update in range(num_updates):\n mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []\n if args.lr_decay:\n agent._adjust_learning_rate(args.lr, update, num_updates)\n for step in range(args.nsteps):\n # prepocessing the state for the task where there are some black features.\n obs += 5*np.ones((args.num_workers, ) + envs.observation_space.shape, dtype=envs.observation_space.dtype.name)\n # predict\n values, actions = agent.predict(obs, is_training=True)\n # start to store information\n mb_obs.append(np.copy(obs))\n mb_actions.append(actions)\n mb_dones.append(dones)\n mb_values.append(values)\n # start to excute the actions in the environment\n obs, rewards, dones, _ = envs.step(actions)\n mb_rewards.append(rewards)\n # clear the observation\n for n, done in enumerate(dones):\n if done:\n obs[n] *= 0\n # process the rewards part -- display the rewards on the screen\n episode_rewards += rewards\n masks = np.array([0.0 if done_ else 1.0 for done_ in dones], dtype=np.float32)\n final_rewards *= masks\n final_rewards += (1 - masks) * episode_rewards\n episode_rewards *= masks\n \n # process the rollouts\n mb_obs = np.asarray(mb_obs, dtype=np.float32)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32)\n mb_actions = np.asarray(mb_actions, dtype=np.float32)\n mb_dones = np.asarray(mb_dones, dtype=np.bool)\n mb_values = np.asarray(mb_values, dtype=np.float32)\n # compute the last state value\n last_values, _ = agent.predict(obs, is_training=True)\n # start to compute advantages...\n mb_returns = np.zeros_like(mb_rewards)\n mb_advs = np.zeros_like(mb_rewards)\n lastgaelam = 0\n for t in reversed(range(args.nsteps)):\n if t == args.nsteps - 1:\n nextnonterminal = 1.0 - dones\n nextvalues = last_values\n else:\n nextnonterminal = 1.0 - mb_dones[t + 1]\n nextvalues = mb_values[t + 1]\n delta = mb_rewards[t] + args.gamma * nextvalues * nextnonterminal - mb_values[t]\n mb_advs[t] = lastgaelam = delta + args.gamma * args.tau * nextnonterminal * lastgaelam\n mb_returns = mb_advs + mb_values\n # after compute the returns, let's process the rollouts\n mb_obs = mb_obs.swapaxes(0, 
1).reshape(batch_ob_shape)\n mb_actions = mb_actions.swapaxes(0, 1).flatten()\n mb_returns = mb_returns.swapaxes(0, 1).flatten()\n mb_advs = mb_advs.swapaxes(0, 1).flatten()\n # start to update the network\n agent._update_network(mb_obs, mb_actions, mb_returns, mb_advs)\n\n # display the training information\n if update % args.display_interval == 0:\n mean_rewards = final_rewards.mean()\n print('Update: {} / {}, Rewards: {:.3f}, Min: {:.3f}, Max: {:.3f}'.format(\n update, num_updates, mean_rewards, final_rewards.min(), final_rewards.max()))\n # save data\n evals.append(mean_rewards)\n np.savez(data_dir+'/rewards.npz', evals)\n # save the model\n agent.save(\"final\", model_dir)\n if mean_rewards > best_eval_rew:\n best_eval_rew = mean_rewards\n agent.save(\"best\", model_dir)\n\n # close the environment\n envs.close()\n" ]
[ [ "numpy.savez", "numpy.asarray", "numpy.ones", "numpy.copy", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.float" ] ]
lermert/scipy
[ "b7dce8db4bb9833d7f3faee9d783c483295711cb" ]
[ "scipy/optimize/optimize.py" ]
[ "#__docformat__ = \"restructuredtext en\"\n# ******NOTICE***************\n# optimize.py module by Travis E. Oliphant\n#\n# You may copy and use this module as you see fit with no\n# guarantee implied provided you keep this notice in all copies.\n# *****END NOTICE************\n\n# A collection of optimization algorithms. Version 0.5\n# CHANGES\n# Added fminbound (July 2001)\n# Added brute (Aug. 2002)\n# Finished line search satisfying strong Wolfe conditions (Mar. 2004)\n# Updated strong Wolfe conditions line search to use\n# cubic-interpolation (Mar. 2004)\n\nfrom __future__ import division, print_function, absolute_import\n\n\n# Minimization routines\n\n__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',\n 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',\n 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',\n 'line_search', 'check_grad', 'OptimizeResult', 'show_options',\n 'OptimizeWarning']\n\n__docformat__ = \"restructuredtext en\"\n\nimport warnings\nimport sys\nimport numpy\nfrom scipy._lib.six import callable, xrange\nfrom numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,\n asarray, sqrt, Inf, asfarray, isinf)\nimport numpy as np\nfrom .linesearch import (line_search_wolfe1, line_search_wolfe2,\n line_search_wolfe2 as line_search,\n LineSearchWarning)\nfrom scipy._lib._util import getargspec_no_self as _getargspec\nfrom scipy._lib._util import MapWrapper\n\n\n# standard status messages of optimizers\n_status_message = {'success': 'Optimization terminated successfully.',\n 'maxfev': 'Maximum number of function evaluations has '\n 'been exceeded.',\n 'maxiter': 'Maximum number of iterations has been '\n 'exceeded.',\n 'pr_loss': 'Desired error not necessarily achieved due '\n 'to precision loss.'}\n\n\nclass MemoizeJac(object):\n \"\"\" Decorator that caches the value gradient of function each time it\n is called. \"\"\"\n def __init__(self, fun):\n self.fun = fun\n self.jac = None\n self.x = None\n\n def __call__(self, x, *args):\n self.x = numpy.asarray(x).copy()\n fg = self.fun(x, *args)\n self.jac = fg[1]\n return fg[0]\n\n def derivative(self, x, *args):\n if self.jac is not None and numpy.all(x == self.x):\n return self.jac\n else:\n self(x, *args)\n return self.jac\n\n\nclass OptimizeResult(dict):\n \"\"\" Represents the optimization result.\n\n Attributes\n ----------\n x : ndarray\n The solution of the optimization.\n success : bool\n Whether or not the optimizer exited successfully.\n status : int\n Termination status of the optimizer. Its value depends on the\n underlying solver. Refer to `message` for details.\n message : str\n Description of the cause of the termination.\n fun, jac, hess: ndarray\n Values of objective function, its Jacobian and its Hessian (if\n available). The Hessians may be approximations, see the documentation\n of the function in question.\n hess_inv : object\n Inverse of the objective function's Hessian; may be an approximation.\n Not available for all solvers. The type of this attribute may be\n either np.ndarray or scipy.sparse.linalg.LinearOperator.\n nfev, njev, nhev : int\n Number of evaluations of the objective functions and of its\n Jacobian and Hessian.\n nit : int\n Number of iterations performed by the optimizer.\n maxcv : float\n The maximum constraint violation.\n\n Notes\n -----\n There may be additional attributes not listed above depending of the\n specific solver. 
Since this class is essentially a subclass of dict\n with attribute accessors, one can see which attributes are available\n using the `keys()` method.\n \"\"\"\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n def __repr__(self):\n if self.keys():\n m = max(map(len, list(self.keys()))) + 1\n return '\\n'.join([k.rjust(m) + ': ' + repr(v)\n for k, v in sorted(self.items())])\n else:\n return self.__class__.__name__ + \"()\"\n\n def __dir__(self):\n return list(self.keys())\n\n\nclass OptimizeWarning(UserWarning):\n pass\n\n\ndef _check_unknown_options(unknown_options):\n if unknown_options:\n msg = \", \".join(map(str, unknown_options.keys()))\n # Stack level 4: this is called from _minimize_*, which is\n # called from another function in SciPy. Level 4 is the first\n # level in user code.\n warnings.warn(\"Unknown solver options: %s\" % msg, OptimizeWarning, 4)\n\n\ndef is_array_scalar(x):\n \"\"\"Test whether `x` is either a scalar or an array scalar.\n\n \"\"\"\n return np.size(x) == 1\n\n\n_epsilon = sqrt(numpy.finfo(float).eps)\n\n\ndef vecnorm(x, ord=2):\n if ord == Inf:\n return numpy.amax(numpy.abs(x))\n elif ord == -Inf:\n return numpy.amin(numpy.abs(x))\n else:\n return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)\n\n\ndef rosen(x):\n \"\"\"\n The Rosenbrock function.\n\n The function computed is::\n\n sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)\n\n Parameters\n ----------\n x : array_like\n 1-D array of points at which the Rosenbrock function is to be computed.\n\n Returns\n -------\n f : float\n The value of the Rosenbrock function.\n\n See Also\n --------\n rosen_der, rosen_hess, rosen_hess_prod\n\n Examples\n --------\n >>> from scipy.optimize import rosen\n >>> X = 0.1 * np.arange(10)\n >>> rosen(X)\n 76.56\n\n \"\"\"\n x = asarray(x)\n r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,\n axis=0)\n return r\n\n\ndef rosen_der(x):\n \"\"\"\n The derivative (i.e. gradient) of the Rosenbrock function.\n\n Parameters\n ----------\n x : array_like\n 1-D array of points at which the derivative is to be computed.\n\n Returns\n -------\n rosen_der : (N,) ndarray\n The gradient of the Rosenbrock function at `x`.\n\n See Also\n --------\n rosen, rosen_hess, rosen_hess_prod\n\n Examples\n --------\n >>> from scipy.optimize import rosen_der\n >>> X = 0.1 * np.arange(9)\n >>> rosen_der(X)\n array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. 
])\n\n \"\"\"\n x = asarray(x)\n xm = x[1:-1]\n xm_m1 = x[:-2]\n xm_p1 = x[2:]\n der = numpy.zeros_like(x)\n der[1:-1] = (200 * (xm - xm_m1**2) -\n 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))\n der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])\n der[-1] = 200 * (x[-1] - x[-2]**2)\n return der\n\n\ndef rosen_hess(x):\n \"\"\"\n The Hessian matrix of the Rosenbrock function.\n\n Parameters\n ----------\n x : array_like\n 1-D array of points at which the Hessian matrix is to be computed.\n\n Returns\n -------\n rosen_hess : ndarray\n The Hessian matrix of the Rosenbrock function at `x`.\n\n See Also\n --------\n rosen, rosen_der, rosen_hess_prod\n\n Examples\n --------\n >>> from scipy.optimize import rosen_hess\n >>> X = 0.1 * np.arange(4)\n >>> rosen_hess(X)\n array([[-38., 0., 0., 0.],\n [ 0., 134., -40., 0.],\n [ 0., -40., 130., -80.],\n [ 0., 0., -80., 200.]])\n\n \"\"\"\n x = atleast_1d(x)\n H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)\n diagonal = numpy.zeros(len(x), dtype=x.dtype)\n diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2\n diagonal[-1] = 200\n diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]\n H = H + numpy.diag(diagonal)\n return H\n\n\ndef rosen_hess_prod(x, p):\n \"\"\"\n Product of the Hessian matrix of the Rosenbrock function with a vector.\n\n Parameters\n ----------\n x : array_like\n 1-D array of points at which the Hessian matrix is to be computed.\n p : array_like\n 1-D array, the vector to be multiplied by the Hessian matrix.\n\n Returns\n -------\n rosen_hess_prod : ndarray\n The Hessian matrix of the Rosenbrock function at `x` multiplied\n by the vector `p`.\n\n See Also\n --------\n rosen, rosen_der, rosen_hess\n\n Examples\n --------\n >>> from scipy.optimize import rosen_hess_prod\n >>> X = 0.1 * np.arange(9)\n >>> p = 0.5 * np.arange(9)\n >>> rosen_hess_prod(X, p)\n array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.])\n\n \"\"\"\n x = atleast_1d(x)\n Hp = numpy.zeros(len(x), dtype=x.dtype)\n Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]\n Hp[1:-1] = (-400 * x[:-2] * p[:-2] +\n (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -\n 400 * x[1:-1] * p[2:])\n Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]\n return Hp\n\n\ndef wrap_function(function, args):\n ncalls = [0]\n if function is None:\n return ncalls, None\n\n def function_wrapper(*wrapper_args):\n ncalls[0] += 1\n return function(*(wrapper_args + args))\n\n return ncalls, function_wrapper\n\n\ndef fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):\n \"\"\"\n Minimize a function using the downhill simplex algorithm.\n\n This algorithm only uses function values, not derivatives or second\n derivatives.\n\n Parameters\n ----------\n func : callable func(x,*args)\n The objective function to be minimized.\n x0 : ndarray\n Initial guess.\n args : tuple, optional\n Extra arguments passed to func, i.e. 
``f(x,*args)``.\n xtol : float, optional\n Absolute error in xopt between iterations that is acceptable for\n convergence.\n ftol : number, optional\n Absolute error in func(xopt) between iterations that is acceptable for\n convergence.\n maxiter : int, optional\n Maximum number of iterations to perform.\n maxfun : number, optional\n Maximum number of function evaluations to make.\n full_output : bool, optional\n Set to True if fopt and warnflag outputs are desired.\n disp : bool, optional\n Set to True to print convergence messages.\n retall : bool, optional\n Set to True to return list of solutions at each iteration.\n callback : callable, optional\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n initial_simplex : array_like of shape (N + 1, N), optional\n Initial simplex. If given, overrides `x0`.\n ``initial_simplex[j,:]`` should contain the coordinates of\n the j-th vertex of the ``N+1`` vertices in the simplex, where\n ``N`` is the dimension.\n\n Returns\n -------\n xopt : ndarray\n Parameter that minimizes function.\n fopt : float\n Value of function at minimum: ``fopt = func(xopt)``.\n iter : int\n Number of iterations performed.\n funcalls : int\n Number of function calls made.\n warnflag : int\n 1 : Maximum number of function evaluations made.\n 2 : Maximum number of iterations reached.\n allvecs : list\n Solution at each iteration.\n\n See also\n --------\n minimize: Interface to minimization algorithms for multivariate\n functions. See the 'Nelder-Mead' `method` in particular.\n\n Notes\n -----\n Uses a Nelder-Mead simplex algorithm to find the minimum of function of\n one or more variables.\n\n This algorithm has a long history of successful use in applications.\n But it will usually be slower than an algorithm that uses first or\n second derivative information. In practice it can have poor\n performance in high-dimensional problems and is not robust to\n minimizing complicated functions. Additionally, there currently is no\n complete theory describing when the algorithm will successfully\n converge to the minimum, or how fast it will if it does. Both the ftol and\n xtol criteria must be met for convergence.\n\n Examples\n --------\n >>> def f(x):\n ... return x**2\n\n >>> from scipy import optimize\n\n >>> minimum = optimize.fmin(f, 1)\n Optimization terminated successfully.\n Current function value: 0.000000\n Iterations: 17\n Function evaluations: 34\n >>> minimum[0]\n -8.8817841970012523e-16\n\n References\n ----------\n .. [1] Nelder, J.A. and Mead, R. (1965), \"A simplex method for function\n minimization\", The Computer Journal, 7, pp. 308-313\n\n .. [2] Wright, M.H. (1996), \"Direct Search Methods: Once Scorned, Now\n Respectable\", in Numerical Analysis 1995, Proceedings of the\n 1995 Dundee Biennial Conference in Numerical Analysis, D.F.\n Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,\n Harlow, UK, pp. 
191-208.\n\n \"\"\"\n opts = {'xatol': xtol,\n 'fatol': ftol,\n 'maxiter': maxiter,\n 'maxfev': maxfun,\n 'disp': disp,\n 'return_all': retall,\n 'initial_simplex': initial_simplex}\n\n res = _minimize_neldermead(func, x0, args, callback=callback, **opts)\n if full_output:\n retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']\n if retall:\n retlist += (res['allvecs'], )\n return retlist\n else:\n if retall:\n return res['x'], res['allvecs']\n else:\n return res['x']\n\n\ndef _minimize_neldermead(func, x0, args=(), callback=None,\n maxiter=None, maxfev=None, disp=False,\n return_all=False, initial_simplex=None,\n xatol=1e-4, fatol=1e-4, adaptive=False,\n **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n Nelder-Mead algorithm.\n\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter, maxfev : int\n Maximum allowed number of iterations and function evaluations.\n Will default to ``N*200``, where ``N`` is the number of\n variables, if neither `maxiter` or `maxfev` is set. If both\n `maxiter` and `maxfev` are set, minimization will stop at the\n first reached.\n initial_simplex : array_like of shape (N + 1, N)\n Initial simplex. If given, overrides `x0`.\n ``initial_simplex[j,:]`` should contain the coordinates of\n the j-th vertex of the ``N+1`` vertices in the simplex, where\n ``N`` is the dimension.\n xatol : float, optional\n Absolute error in xopt between iterations that is acceptable for\n convergence.\n fatol : number, optional\n Absolute error in func(xopt) between iterations that is acceptable for\n convergence.\n adaptive : bool, optional\n Adapt algorithm parameters to dimensionality of problem. Useful for\n high-dimensional minimization [1]_.\n\n References\n ----------\n .. [1] Gao, F. and Han, L.\n Implementing the Nelder-Mead simplex algorithm with adaptive\n parameters. 2012. Computational Optimization and Applications.\n 51:1, pp. 259-277\n\n \"\"\"\n if 'ftol' in unknown_options:\n warnings.warn(\"ftol is deprecated for Nelder-Mead,\"\n \" use fatol instead. If you specified both, only\"\n \" fatol is used.\",\n DeprecationWarning)\n if (np.isclose(fatol, 1e-4) and\n not np.isclose(unknown_options['ftol'], 1e-4)):\n # only ftol was probably specified, use it.\n fatol = unknown_options['ftol']\n unknown_options.pop('ftol')\n if 'xtol' in unknown_options:\n warnings.warn(\"xtol is deprecated for Nelder-Mead,\"\n \" use xatol instead. 
If you specified both, only\"\n \" xatol is used.\",\n DeprecationWarning)\n if (np.isclose(xatol, 1e-4) and\n not np.isclose(unknown_options['xtol'], 1e-4)):\n # only xtol was probably specified, use it.\n xatol = unknown_options['xtol']\n unknown_options.pop('xtol')\n\n _check_unknown_options(unknown_options)\n maxfun = maxfev\n retall = return_all\n\n fcalls, func = wrap_function(func, args)\n\n if adaptive:\n dim = float(len(x0))\n rho = 1\n chi = 1 + 2/dim\n psi = 0.75 - 1/(2*dim)\n sigma = 1 - 1/dim\n else:\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n\n nonzdelt = 0.05\n zdelt = 0.00025\n\n x0 = asfarray(x0).flatten()\n\n if initial_simplex is None:\n N = len(x0)\n\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt)*y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n else:\n sim = np.asfarray(initial_simplex).copy()\n if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:\n raise ValueError(\"`initial_simplex` should be an array of shape (N+1,N)\")\n if len(x0) != sim.shape[1]:\n raise ValueError(\"Size of `initial_simplex` is not consistent with `x0`\")\n N = sim.shape[1]\n\n if retall:\n allvecs = [sim[0]]\n\n # If neither are set, then set both to default\n if maxiter is None and maxfun is None:\n maxiter = N * 200\n maxfun = N * 200\n elif maxiter is None:\n # Convert remaining Nones, to np.inf, unless the other is np.inf, in\n # which case use the default to avoid unbounded iteration\n if maxfun == np.inf:\n maxiter = N * 200\n else:\n maxiter = np.inf\n elif maxfun is None:\n if maxiter == np.inf:\n maxfun = N * 200\n else:\n maxfun = np.inf\n\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n # sort so sim[0,:] has the lowest function value\n sim = numpy.take(sim, ind, 0)\n\n iterations = 1\n\n while (fcalls[0] < maxfun and iterations < maxiter):\n if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and\n numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):\n break\n\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n if retall:\n allvecs.append(sim[0])\n\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n msg = _status_message['maxfev']\n if disp:\n print('Warning: ' + msg)\n elif iterations >= maxiter:\n warnflag = 2\n msg = _status_message['maxiter']\n if disp:\n print('Warning: ' + msg)\n else:\n msg = 
_status_message['success']\n if disp:\n print(msg)\n print(\" Current function value: %f\" % fval)\n print(\" Iterations: %d\" % iterations)\n print(\" Function evaluations: %d\" % fcalls[0])\n\n result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],\n status=warnflag, success=(warnflag == 0),\n message=msg, x=x, final_simplex=(sim, fsim))\n if retall:\n result['allvecs'] = allvecs\n return result\n\n\ndef _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):\n \"\"\"\n See ``approx_fprime``. An optional initial function value arg is added.\n\n \"\"\"\n if f0 is None:\n f0 = f(*((xk,) + args))\n grad = numpy.zeros((len(xk),), float)\n ei = numpy.zeros((len(xk),), float)\n for k in range(len(xk)):\n ei[k] = 1.0\n d = epsilon * ei\n df = (f(*((xk + d,) + args)) - f0) / d[k]\n if not np.isscalar(df):\n try:\n df = df.item()\n except (ValueError, AttributeError):\n raise ValueError(\"The user-provided \"\n \"objective function must \"\n \"return a scalar value.\")\n grad[k] = df\n ei[k] = 0.0\n return grad\n\n\ndef approx_fprime(xk, f, epsilon, *args):\n \"\"\"Finite-difference approximation of the gradient of a scalar function.\n\n Parameters\n ----------\n xk : array_like\n The coordinate vector at which to determine the gradient of `f`.\n f : callable\n The function of which to determine the gradient (partial derivatives).\n Should take `xk` as first argument, other arguments to `f` can be\n supplied in ``*args``. Should return a scalar, the value of the\n function at `xk`.\n epsilon : array_like\n Increment to `xk` to use for determining the function gradient.\n If a scalar, uses the same finite difference delta for all partial\n derivatives. If an array, should contain one value per element of\n `xk`.\n \\\\*args : args, optional\n Any other arguments that are to be passed to `f`.\n\n Returns\n -------\n grad : ndarray\n The partial derivatives of `f` to `xk`.\n\n See Also\n --------\n check_grad : Check correctness of gradient function against approx_fprime.\n\n Notes\n -----\n The function gradient is determined by the forward finite difference\n formula::\n\n f(xk[i] + epsilon[i]) - f(xk[i])\n f'[i] = ---------------------------------\n epsilon[i]\n\n The main use of `approx_fprime` is in scalar function optimizers like\n `fmin_bfgs`, to determine numerically the Jacobian of a function.\n\n Examples\n --------\n >>> from scipy import optimize\n >>> def func(x, c0, c1):\n ... \"Coordinate vector `x` should be an array of size two.\"\n ... return c0 * x[0]**2 + c1*x[1]**2\n\n >>> x = np.ones(2)\n >>> c0, c1 = (1, 200)\n >>> eps = np.sqrt(np.finfo(float).eps)\n >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)\n array([ 2. , 400.00004198])\n\n \"\"\"\n return _approx_fprime_helper(xk, f, epsilon, args=args)\n\n\ndef check_grad(func, grad, x0, *args, **kwargs):\n \"\"\"Check the correctness of a gradient function by comparing it against a\n (forward) finite-difference approximation of the gradient.\n\n Parameters\n ----------\n func : callable ``func(x0, *args)``\n Function whose derivative is to be checked.\n grad : callable ``grad(x0, *args)``\n Gradient of `func`.\n x0 : ndarray\n Points to check `grad` against forward difference approximation of grad\n using `func`.\n args : \\\\*args, optional\n Extra arguments passed to `func` and `grad`.\n epsilon : float, optional\n Step size used for the finite difference approximation. 
It defaults to\n ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.\n\n Returns\n -------\n err : float\n The square root of the sum of squares (i.e. the 2-norm) of the\n difference between ``grad(x0, *args)`` and the finite difference\n approximation of `grad` using func at the points `x0`.\n\n See Also\n --------\n approx_fprime\n\n Examples\n --------\n >>> def func(x):\n ... return x[0]**2 - 0.5 * x[1]**3\n >>> def grad(x):\n ... return [2 * x[0], -1.5 * x[1]**2]\n >>> from scipy.optimize import check_grad\n >>> check_grad(func, grad, [1.5, -1.5])\n 2.9802322387695312e-08\n\n \"\"\"\n step = kwargs.pop('epsilon', _epsilon)\n if kwargs:\n raise ValueError(\"Unknown keyword arguments: %r\" %\n (list(kwargs.keys()),))\n return sqrt(sum((grad(x0, *args) -\n approx_fprime(x0, func, step, *args))**2))\n\n\ndef approx_fhess_p(x0, p, fprime, epsilon, *args):\n f2 = fprime(*((x0 + epsilon*p,) + args))\n f1 = fprime(*((x0,) + args))\n return (f2 - f1) / epsilon\n\n\nclass _LineSearchError(RuntimeError):\n pass\n\n\ndef _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,\n **kwargs):\n \"\"\"\n Same as line_search_wolfe1, but fall back to line_search_wolfe2 if\n suitable step length is not found, and raise an exception if a\n suitable step length is not found.\n\n Raises\n ------\n _LineSearchError\n If no suitable step size is found\n\n \"\"\"\n\n extra_condition = kwargs.pop('extra_condition', None)\n\n ret = line_search_wolfe1(f, fprime, xk, pk, gfk,\n old_fval, old_old_fval,\n **kwargs)\n\n if ret[0] is not None and extra_condition is not None:\n xp1 = xk + ret[0] * pk\n if not extra_condition(ret[0], xp1, ret[3], ret[5]):\n # Reject step if extra_condition fails\n ret = (None,)\n\n if ret[0] is None:\n # line search failed: try different one.\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', LineSearchWarning)\n kwargs2 = {}\n for key in ('c1', 'c2', 'amax'):\n if key in kwargs:\n kwargs2[key] = kwargs[key]\n ret = line_search_wolfe2(f, fprime, xk, pk, gfk,\n old_fval, old_old_fval,\n extra_condition=extra_condition,\n **kwargs2)\n\n if ret[0] is None:\n raise _LineSearchError()\n\n return ret\n\n\ndef fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,\n epsilon=_epsilon, maxiter=None, full_output=0, disp=1,\n retall=0, callback=None):\n \"\"\"\n Minimize a function using the BFGS algorithm.\n\n Parameters\n ----------\n f : callable f(x,*args)\n Objective function to be minimized.\n x0 : ndarray\n Initial guess.\n fprime : callable f'(x,*args), optional\n Gradient of f.\n args : tuple, optional\n Extra arguments passed to f and fprime.\n gtol : float, optional\n Gradient norm must be less than gtol before successful termination.\n norm : float, optional\n Order of norm (Inf is max, -Inf is min)\n epsilon : int or ndarray, optional\n If fprime is approximated, use this value for the step size.\n callback : callable, optional\n An optional user-supplied function to call after each\n iteration. Called as callback(xk), where xk is the\n current parameter vector.\n maxiter : int, optional\n Maximum number of iterations to perform.\n full_output : bool, optional\n If True,return fopt, func_calls, grad_calls, and warnflag\n in addition to xopt.\n disp : bool, optional\n Print convergence message if True.\n retall : bool, optional\n Return a list of results at each iteration if True.\n\n Returns\n -------\n xopt : ndarray\n Parameters which minimize f, i.e. 
f(xopt) == fopt.\n fopt : float\n Minimum value.\n gopt : ndarray\n Value of gradient at minimum, f'(xopt), which should be near 0.\n Bopt : ndarray\n Value of 1/f''(xopt), i.e. the inverse hessian matrix.\n func_calls : int\n Number of function_calls made.\n grad_calls : int\n Number of gradient calls made.\n warnflag : integer\n 1 : Maximum number of iterations exceeded.\n 2 : Gradient and/or function calls not changing.\n allvecs : list\n The value of xopt at each iteration. Only returned if retall is True.\n\n See also\n --------\n minimize: Interface to minimization algorithms for multivariate\n functions. See the 'BFGS' `method` in particular.\n\n Notes\n -----\n Optimize the function, f, whose gradient is given by fprime\n using the quasi-Newton method of Broyden, Fletcher, Goldfarb,\n and Shanno (BFGS)\n\n References\n ----------\n Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.\n\n \"\"\"\n opts = {'gtol': gtol,\n 'norm': norm,\n 'eps': epsilon,\n 'disp': disp,\n 'maxiter': maxiter,\n 'return_all': retall}\n\n res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)\n\n if full_output:\n retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],\n res['nfev'], res['njev'], res['status'])\n if retall:\n retlist += (res['allvecs'], )\n return retlist\n else:\n if retall:\n return res['x'], res['allvecs']\n else:\n return res['x']\n\n\ndef _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,\n gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,\n disp=False, return_all=False,\n **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n BFGS algorithm.\n\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter : int\n Maximum number of iterations to perform.\n gtol : float\n Gradient norm must be less than `gtol` before successful\n termination.\n norm : float\n Order of norm (Inf is max, -Inf is min).\n eps : float or ndarray\n If `jac` is approximated, use this value for the step size.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n f = fun\n fprime = jac\n epsilon = eps\n retall = return_all\n\n x0 = asarray(x0).flatten()\n if x0.ndim == 0:\n x0.shape = (1,)\n if maxiter is None:\n maxiter = len(x0) * 200\n func_calls, f = wrap_function(f, args)\n\n old_fval = f(x0)\n\n if fprime is None:\n grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))\n else:\n grad_calls, myfprime = wrap_function(fprime, args)\n gfk = myfprime(x0)\n k = 0\n N = len(x0)\n I = numpy.eye(N, dtype=int)\n Hk = I\n\n # Sets the initial step guess to dx ~ 1\n old_old_fval = old_fval + np.linalg.norm(gfk) / 2\n\n xk = x0\n if retall:\n allvecs = [x0]\n warnflag = 0\n gnorm = vecnorm(gfk, ord=norm)\n while (gnorm > gtol) and (k < maxiter):\n pk = -numpy.dot(Hk, gfk)\n try:\n alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \\\n _line_search_wolfe12(f, myfprime, xk, pk, gfk,\n old_fval, old_old_fval, amin=1e-100, amax=1e100)\n except _LineSearchError:\n # Line search failed to find a better solution.\n warnflag = 2\n break\n\n xkp1 = xk + alpha_k * pk\n if retall:\n allvecs.append(xkp1)\n sk = xkp1 - xk\n xk = xkp1\n if gfkp1 is None:\n gfkp1 = myfprime(xkp1)\n\n yk = gfkp1 - gfk\n gfk = gfkp1\n if callback is not None:\n callback(xk)\n k += 1\n gnorm = vecnorm(gfk, ord=norm)\n if (gnorm <= gtol):\n break\n\n if not numpy.isfinite(old_fval):\n # We correctly found +-Inf as optimal value, or something went\n # wrong.\n warnflag = 2\n break\n\n try: # this was handled in numeric, let it remaines 
for more safety\n rhok = 1.0 / (numpy.dot(yk, sk))\n except ZeroDivisionError:\n rhok = 1000.0\n if disp:\n print(\"Divide-by-zero encountered: rhok assumed large\")\n if isinf(rhok): # this is patch for numpy\n rhok = 1000.0\n if disp:\n print(\"Divide-by-zero encountered: rhok assumed large\")\n A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok\n A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok\n Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *\n sk[numpy.newaxis, :])\n\n fval = old_fval\n if np.isnan(fval):\n # This can happen if the first call to f returned NaN;\n # the loop is then never entered.\n warnflag = 2\n\n if warnflag == 2:\n msg = _status_message['pr_loss']\n elif k >= maxiter:\n warnflag = 1\n msg = _status_message['maxiter']\n else:\n msg = _status_message['success']\n\n if disp:\n print(\"%s%s\" % (\"Warning: \" if warnflag != 0 else \"\", msg))\n print(\" Current function value: %f\" % fval)\n print(\" Iterations: %d\" % k)\n print(\" Function evaluations: %d\" % func_calls[0])\n print(\" Gradient evaluations: %d\" % grad_calls[0])\n\n result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],\n njev=grad_calls[0], status=warnflag,\n success=(warnflag == 0), message=msg, x=xk,\n nit=k)\n if retall:\n result['allvecs'] = allvecs\n return result\n\n\ndef fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,\n maxiter=None, full_output=0, disp=1, retall=0, callback=None,\n parallel_rank=0, itargs=None):\n \"\"\"\n Minimize a function using a nonlinear conjugate gradient algorithm.\n\n Parameters\n ----------\n f : callable, ``f(x, *args)``\n Objective function to be minimized. Here `x` must be a 1-D array of\n the variables that are to be changed in the search for a minimum, and\n `args` are the other (fixed) parameters of `f`.\n x0 : ndarray\n A user-supplied initial estimate of `xopt`, the optimal value of `x`.\n It must be a 1-D array of values.\n fprime : callable, ``fprime(x, *args)``, optional\n A function that returns the gradient of `f` at `x`. Here `x` and `args`\n are as described above for `f`. The returned value must be a 1-D array.\n Defaults to None, in which case the gradient is approximated\n numerically (see `epsilon`, below).\n args : tuple, optional\n Parameter values passed to `f` and `fprime`. Must be supplied whenever\n additional fixed parameters are needed to completely specify the\n functions `f` and `fprime`.\n gtol : float, optional\n Stop when the norm of the gradient is less than `gtol`.\n norm : float, optional\n Order to use for the norm of the gradient\n (``-np.Inf`` is min, ``np.Inf`` is max).\n epsilon : float or ndarray, optional\n Step size(s) to use when `fprime` is approximated numerically. Can be a\n scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the\n floating point machine precision. Usually ``sqrt(eps)`` is about\n 1.5e-8.\n maxiter : int, optional\n Maximum number of iterations to perform. Default is ``200 * len(x0)``.\n full_output : bool, optional\n If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in\n addition to `xopt`. 
See the Returns section below for additional\n information on optional return values.\n disp : bool, optional\n If True, return a convergence message, followed by `xopt`.\n retall : bool, optional\n If True, add to the returned values the results of each iteration.\n callback : callable, optional\n An optional user-supplied function, called after each iteration.\n Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.\n\n Returns\n -------\n xopt : ndarray\n Parameters which minimize f, i.e. ``f(xopt) == fopt``.\n fopt : float, optional\n Minimum value found, f(xopt). Only returned if `full_output` is True.\n func_calls : int, optional\n The number of function_calls made. Only returned if `full_output`\n is True.\n grad_calls : int, optional\n The number of gradient calls made. Only returned if `full_output` is\n True.\n warnflag : int, optional\n Integer value with warning status, only returned if `full_output` is\n True.\n\n 0 : Success.\n\n 1 : The maximum number of iterations was exceeded.\n\n 2 : Gradient and/or function calls were not changing. May indicate\n that precision was lost, i.e., the routine did not converge.\n\n allvecs : list of ndarray, optional\n List of arrays, containing the results at each iteration.\n Only returned if `retall` is True.\n\n See Also\n --------\n minimize : common interface to all `scipy.optimize` algorithms for\n unconstrained and constrained minimization of multivariate\n functions. It provides an alternative way to call\n ``fmin_cg``, by specifying ``method='CG'``.\n\n Notes\n -----\n This conjugate gradient algorithm is based on that of Polak and Ribiere\n [1]_.\n\n Conjugate gradient methods tend to work better when:\n\n 1. `f` has a unique global minimizing point, and no local minima or\n other stationary points,\n 2. `f` is, at least locally, reasonably well approximated by a\n quadratic function of the variables,\n 3. `f` is continuous and has a continuous gradient,\n 4. `fprime` is not too large, e.g., has a norm less than 1000,\n 5. The initial guess, `x0`, is reasonably close to `f` 's global\n minimizing point, `xopt`.\n\n References\n ----------\n .. [1] Wright & Nocedal, \"Numerical Optimization\", 1999, pp. 120-122.\n\n Examples\n --------\n Example 1: seek the minimum value of the expression\n ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values\n of the parameters and an initial guess ``(u, v) = (0, 0)``.\n\n >>> args = (2, 3, 7, 8, 9, 10) # parameter values\n >>> def f(x, *args):\n ... u, v = x\n ... a, b, c, d, e, f = args\n ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f\n >>> def gradf(x, *args):\n ... u, v = x\n ... a, b, c, d, e, f = args\n ... gu = 2*a*u + b*v + d # u-component of the gradient\n ... gv = b*u + 2*c*v + e # v-component of the gradient\n ... return np.asarray((gu, gv))\n >>> x0 = np.asarray((0, 0)) # Initial guess.\n >>> from scipy import optimize\n >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)\n Optimization terminated successfully.\n Current function value: 1.617021\n Iterations: 4\n Function evaluations: 8\n Gradient evaluations: 8\n >>> res1\n array([-1.80851064, -0.25531915])\n\n Example 2: solve the same problem using the `minimize` function.\n (This `myopts` dictionary shows all of the available options,\n although in practice only non-default values would be needed.\n The returned value will be a dictionary.)\n\n >>> opts = {'maxiter' : None, # default value.\n ... 'disp' : True, # non-default value.\n ... 'gtol' : 1e-5, # default value.\n ... 
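# note: 'eps' is consulted only when 'jac' is omitted, so it is inert here\n    ...         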
'norm' : np.inf, # default value.\n ... 'eps' : 1.4901161193847656e-08} # default value.\n >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,\n ... method='CG', options=opts)\n Optimization terminated successfully.\n Current function value: 1.617021\n Iterations: 4\n Function evaluations: 8\n Gradient evaluations: 8\n >>> res2.x # minimum found\n array([-1.80851064, -0.25531915])\n\n \"\"\"\n opts = {'gtol': gtol,\n 'norm': norm,\n 'eps': epsilon,\n 'disp': disp,\n 'maxiter': maxiter,\n 'return_all': retall,\n 'parallel_rank': parallel_rank,\n 'itargs': itargs} # laura\n\n res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)\n\n if full_output:\n retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']\n if retall:\n retlist += (res['allvecs'], )\n return retlist\n else:\n if retall:\n return res['x'], res['allvecs']\n else:\n return res['x']\n\n\ndef _minimize_cg(fun, x0, args=(), jac=None, callback=None,\n gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,\n disp=False, return_all=False, parallel_rank=0, itargs=None,\n **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n conjugate gradient algorithm.\n\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter : int\n Maximum number of iterations to perform.\n gtol : float\n Gradient norm must be less than `gtol` before successful\n termination.\n norm : float\n Order of norm (Inf is max, -Inf is min).\n eps : float or ndarray\n If `jac` is approximated, use this value for the step size.\n\n \"\"\"\n print('Rank {} starts optimizer.'.format(parallel_rank))\n # laura modified 11Jul19\n _check_unknown_options(unknown_options)\n f = fun\n fprime = jac\n epsilon = eps\n retall = return_all\n\n x0 = asarray(x0).flatten()\n if maxiter is None:\n maxiter = len(x0) * 200\n func_calls, f = wrap_function(f, args)\n if fprime is None:\n grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))\n else:\n grad_calls, myfprime = wrap_function(fprime, args)\n gfk = myfprime(x0)\n k = 0\n xk = x0\n\n # Sets the initial step guess to dx ~ 1\n old_fval = f(xk)\n old_old_fval = old_fval + np.linalg.norm(gfk) / 2\n\n if retall:\n allvecs = [xk]\n warnflag = 0\n pk = -gfk\n gnorm = vecnorm(gfk, ord=norm)\n\n sigma_3 = 0.01\n\n while (gnorm > gtol) and (k < maxiter):\n deltak = numpy.dot(gfk, gfk)\n\n cached_step = [None]\n\n def polak_ribiere_powell_step(alpha, gfkp1=None):\n xkp1 = xk + alpha * pk\n if gfkp1 is None:\n gfkp1 = myfprime(xkp1)\n yk = gfkp1 - gfk\n beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)\n pkp1 = -gfkp1 + beta_k * pk\n gnorm = vecnorm(gfkp1, ord=norm)\n return (alpha, xkp1, pkp1, gfkp1, gnorm)\n\n def descent_condition(alpha, xkp1, fp1, gfkp1):\n # Polak-Ribiere+ needs an explicit check of a sufficient\n # descent condition, which is not guaranteed by strong Wolfe.\n #\n # See Gilbert & Nocedal, \"Global convergence properties of\n # conjugate gradient methods for optimization\",\n # SIAM J. 
Optimization 2, 21 (1992).\n cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1)\n alpha, xk, pk, gfk, gnorm = cached_step\n\n # Accept step if it leads to convergence.\n if gnorm <= gtol:\n return True\n\n # Accept step if sufficient descent condition applies.\n return numpy.dot(pk, gfk) <= -sigma_3 * numpy.dot(gfk, gfk)\n\n try:\n alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \\\n _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,\n old_old_fval, c2=0.4, amin=1e-100, amax=1e100,\n extra_condition=descent_condition)\n except _LineSearchError:\n # Line search failed to find a better solution.\n warnflag = 2\n break\n\n # Reuse already computed results if possible\n if alpha_k == cached_step[0]:\n alpha_k, xk, pk, gfk, gnorm = cached_step\n else:\n alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1)\n\n if retall:\n allvecs.append(xk)\n if callback is not None:\n callback(xk, parallel_rank, itargs) # laura\n k += 1\n\n fval = old_fval\n if warnflag == 2:\n msg = _status_message['pr_loss']\n elif k >= maxiter:\n warnflag = 1\n msg = _status_message['maxiter']\n else:\n msg = _status_message['success']\n\n if parallel_rank == 0:\n if disp:\n print(\"%s%s\" % (\"Warning: \" if warnflag != 0 else \"\", msg))\n print(\" Current function value: %f\" % fval)\n print(\" Iterations: %d\" % k)\n print(\" Function evaluations: %d\" % func_calls[0])\n print(\" Gradient evaluations: %d\" % grad_calls[0])\n\n result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],\n njev=grad_calls[0], status=warnflag,\n success=(warnflag == 0), message=msg, x=xk,\n nit=k)\n if retall:\n result['allvecs'] = allvecs\n return result\n else:\n return()\n\n\ndef fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,\n epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,\n callback=None):\n \"\"\"\n Unconstrained minimization of a function using the Newton-CG method.\n\n Parameters\n ----------\n f : callable ``f(x, *args)``\n Objective function to be minimized.\n x0 : ndarray\n Initial guess.\n fprime : callable ``f'(x, *args)``\n Gradient of f.\n fhess_p : callable ``fhess_p(x, p, *args)``, optional\n Function which computes the Hessian of f times an\n arbitrary vector, p.\n fhess : callable ``fhess(x, *args)``, optional\n Function to compute the Hessian matrix of f.\n args : tuple, optional\n Extra arguments passed to f, fprime, fhess_p, and fhess\n (the same set of extra arguments is supplied to all of\n these functions).\n epsilon : float or ndarray, optional\n If fhess is approximated, use this value for the step size.\n callback : callable, optional\n An optional user-supplied function which is called after\n each iteration. Called as callback(xk), where xk is the\n current parameter vector.\n avextol : float, optional\n Convergence is assumed when the average relative error in\n the minimizer falls below this amount.\n maxiter : int, optional\n Maximum number of iterations to perform.\n full_output : bool, optional\n If True, return the optional outputs.\n disp : bool, optional\n If True, print convergence message.\n retall : bool, optional\n If True, return a list of results at each iteration.\n\n Returns\n -------\n xopt : ndarray\n Parameters which minimize f, i.e. ``f(xopt) == fopt``.\n fopt : float\n Value of the function at xopt, i.e. 
``fopt = f(xopt)``.\n fcalls : int\n Number of function calls made.\n gcalls : int\n Number of gradient calls made.\n hcalls : int\n Number of hessian calls made.\n warnflag : int\n Warnings generated by the algorithm.\n 1 : Maximum number of iterations exceeded.\n allvecs : list\n The result at each iteration, if retall is True (see below).\n\n See also\n --------\n minimize: Interface to minimization algorithms for multivariate\n functions. See the 'Newton-CG' `method` in particular.\n\n Notes\n -----\n Only one of `fhess_p` or `fhess` need to be given. If `fhess`\n is provided, then `fhess_p` will be ignored. If neither `fhess`\n nor `fhess_p` is provided, then the hessian product will be\n approximated using finite differences on `fprime`. `fhess_p`\n must compute the hessian times an arbitrary vector. If it is not\n given, finite-differences on `fprime` are used to compute\n it.\n\n Newton-CG methods are also called truncated Newton methods. This\n function differs from scipy.optimize.fmin_tnc because\n\n 1. scipy.optimize.fmin_ncg is written purely in python using numpy\n and scipy while scipy.optimize.fmin_tnc calls a C function.\n 2. scipy.optimize.fmin_ncg is only for unconstrained minimization\n while scipy.optimize.fmin_tnc is for unconstrained minimization\n or box constrained minimization. (Box constraints give\n lower and upper bounds for each variable separately.)\n\n References\n ----------\n Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.\n\n \"\"\"\n opts = {'xtol': avextol,\n 'eps': epsilon,\n 'maxiter': maxiter,\n 'disp': disp,\n 'return_all': retall}\n\n res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,\n callback=callback, **opts)\n\n if full_output:\n retlist = (res['x'], res['fun'], res['nfev'], res['njev'],\n res['nhev'], res['status'])\n if retall:\n retlist += (res['allvecs'], )\n return retlist\n else:\n if retall:\n return res['x'], res['allvecs']\n else:\n return res['x']\n\n\ndef _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,\n callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,\n disp=False, return_all=False,\n **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n Newton-CG algorithm.\n\n Note that the `jac` parameter (Jacobian) is required.\n\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n xtol : float\n Average relative error in solution `xopt` acceptable for\n convergence.\n maxiter : int\n Maximum number of iterations to perform.\n eps : float or ndarray\n If `jac` is approximated, use this value for the step size.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n if jac is None:\n raise ValueError('Jacobian is required for Newton-CG method')\n f = fun\n fprime = jac\n fhess_p = hessp\n fhess = hess\n avextol = xtol\n epsilon = eps\n retall = return_all\n\n def terminate(warnflag, msg):\n if disp:\n print(msg)\n print(\" Current function value: %f\" % old_fval)\n print(\" Iterations: %d\" % k)\n print(\" Function evaluations: %d\" % fcalls[0])\n print(\" Gradient evaluations: %d\" % gcalls[0])\n print(\" Hessian evaluations: %d\" % hcalls)\n fval = old_fval\n result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0],\n njev=gcalls[0], nhev=hcalls, status=warnflag,\n success=(warnflag == 0), message=msg, x=xk,\n nit=k)\n if retall:\n result['allvecs'] = allvecs\n return result\n\n x0 = asarray(x0).flatten()\n fcalls, f = wrap_function(f, args)\n gcalls, fprime = wrap_function(fprime, args)\n hcalls = 0\n if maxiter is None:\n 
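# default iteration budgets scale with the problem size: 200 outer\n        # Newton steps per variable, plus a separate cap on the inner CG loop\n        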
maxiter = len(x0)*200\n cg_maxiter = 20*len(x0)\n\n xtol = len(x0) * avextol\n update = [2 * xtol]\n xk = x0\n if retall:\n allvecs = [xk]\n k = 0\n gfk = None\n old_fval = f(x0)\n old_old_fval = None\n float64eps = numpy.finfo(numpy.float64).eps\n while numpy.add.reduce(numpy.abs(update)) > xtol:\n if k >= maxiter:\n msg = \"Warning: \" + _status_message['maxiter']\n return terminate(1, msg)\n # Compute a search direction pk by applying the CG method to\n # del2 f(xk) p = - grad f(xk) starting from 0.\n b = -fprime(xk)\n maggrad = numpy.add.reduce(numpy.abs(b))\n eta = numpy.min([0.5, numpy.sqrt(maggrad)])\n termcond = eta * maggrad\n xsupi = zeros(len(x0), dtype=x0.dtype)\n ri = -b\n psupi = -ri\n i = 0\n dri0 = numpy.dot(ri, ri)\n\n if fhess is not None: # you want to compute hessian once.\n A = fhess(*(xk,) + args)\n hcalls = hcalls + 1\n\n for k2 in xrange(cg_maxiter):\n if numpy.add.reduce(numpy.abs(ri)) <= termcond:\n break\n if fhess is None:\n if fhess_p is None:\n Ap = approx_fhess_p(xk, psupi, fprime, epsilon)\n else:\n Ap = fhess_p(xk, psupi, *args)\n hcalls = hcalls + 1\n else:\n Ap = numpy.dot(A, psupi)\n # check curvature\n Ap = asarray(Ap).squeeze() # get rid of matrices...\n curv = numpy.dot(psupi, Ap)\n if 0 <= curv <= 3 * float64eps:\n break\n elif curv < 0:\n if (i > 0):\n break\n else:\n # fall back to steepest descent direction\n xsupi = dri0 / (-curv) * b\n break\n alphai = dri0 / curv\n xsupi = xsupi + alphai * psupi\n ri = ri + alphai * Ap\n dri1 = numpy.dot(ri, ri)\n betai = dri1 / dri0\n psupi = -ri + betai * psupi\n i = i + 1\n dri0 = dri1 # update numpy.dot(ri,ri) for next time.\n else:\n # curvature keeps increasing, bail out\n msg = (\"Warning: CG iterations didn't converge. The Hessian is not \"\n \"positive definite.\")\n return terminate(3, msg)\n\n pk = xsupi # search direction is solution to system.\n gfk = -b # gradient at xk\n\n try:\n alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \\\n _line_search_wolfe12(f, fprime, xk, pk, gfk,\n old_fval, old_old_fval)\n except _LineSearchError:\n # Line search failed to find a better solution.\n msg = \"Warning: \" + _status_message['pr_loss']\n return terminate(2, msg)\n\n update = alphak * pk\n xk = xk + update # upcast if necessary\n if callback is not None:\n callback(xk)\n if retall:\n allvecs.append(xk)\n k += 1\n else:\n msg = _status_message['success']\n return terminate(0, msg)\n\n\ndef fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,\n full_output=0, disp=1):\n \"\"\"Bounded minimization for scalar functions.\n\n Parameters\n ----------\n func : callable f(x,*args)\n Objective function to be minimized (must accept and return scalars).\n x1, x2 : float or array scalar\n The optimization bounds.\n args : tuple, optional\n Extra arguments passed to function.\n xtol : float, optional\n The convergence tolerance.\n maxfun : int, optional\n Maximum number of function evaluations allowed.\n full_output : bool, optional\n If True, return optional outputs.\n disp : int, optional\n If non-zero, print messages.\n 0 : no message printing.\n 1 : non-convergence notification messages only.\n 2 : print a message on convergence too.\n 3 : print iteration results.\n\n\n Returns\n -------\n xopt : ndarray\n Parameters (over given interval) which minimize the\n objective function.\n fval : number\n The function value at the minimum point.\n ierr : int\n An error flag (0 if converged, 1 if maximum number of\n function calls reached).\n numfunc : int\n The number of function calls made.\n\n See also\n --------\n 
minimize_scalar: Interface to minimization algorithms for scalar\n univariate functions. See the 'Bounded' `method` in particular.\n\n Notes\n -----\n Finds a local minimizer of the scalar function `func` in the\n interval x1 < xopt < x2 using Brent's method. (See `brent`\n for auto-bracketing).\n\n Examples\n --------\n `fminbound` finds the minimum of the function in the given range.\n The following examples illustrate the same\n\n >>> def f(x):\n ... return x**2\n\n >>> from scipy import optimize\n\n >>> minimum = optimize.fminbound(f, -1, 2)\n >>> minimum\n 0.0\n >>> minimum = optimize.fminbound(f, 1, 2)\n >>> minimum\n 1.0000059608609866\n \"\"\"\n options = {'xatol': xtol,\n 'maxiter': maxfun,\n 'disp': disp}\n\n res = _minimize_scalar_bounded(func, (x1, x2), args, **options)\n if full_output:\n return res['x'], res['fun'], res['status'], res['nfev']\n else:\n return res['x']\n\n\ndef _minimize_scalar_bounded(func, bounds, args=(),\n xatol=1e-5, maxiter=500, disp=0,\n **unknown_options):\n \"\"\"\n Options\n -------\n maxiter : int\n Maximum number of iterations to perform.\n disp: int, optional\n If non-zero, print messages.\n 0 : no message printing.\n 1 : non-convergence notification messages only.\n 2 : print a message on convergence too.\n 3 : print iteration results.\n xatol : float\n Absolute error in solution `xopt` acceptable for convergence.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n maxfun = maxiter\n # Test bounds are of correct form\n if len(bounds) != 2:\n raise ValueError('bounds must have two elements.')\n x1, x2 = bounds\n\n if not (is_array_scalar(x1) and is_array_scalar(x2)):\n raise ValueError(\"Optimisation bounds must be scalars\"\n \" or array scalars.\")\n if x1 > x2:\n raise ValueError(\"The lower bound exceeds the upper bound.\")\n\n flag = 0\n header = ' Func-count x f(x) Procedure'\n step = ' initial'\n\n sqrt_eps = sqrt(2.2e-16)\n golden_mean = 0.5 * (3.0 - sqrt(5.0))\n a, b = x1, x2\n fulc = a + golden_mean * (b - a)\n nfc, xf = fulc, fulc\n rat = e = 0.0\n x = xf\n fx = func(x, *args)\n num = 1\n fmin_data = (1, xf, fx)\n\n ffulc = fnfc = fx\n xm = 0.5 * (a + b)\n tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0\n tol2 = 2.0 * tol1\n\n if disp > 2:\n print(\" \")\n print(header)\n print(\"%5.0f %12.6g %12.6g %s\" % (fmin_data + (step,)))\n\n while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):\n golden = 1\n # Check for parabolic fit\n if numpy.abs(e) > tol1:\n golden = 0\n r = (xf - nfc) * (fx - ffulc)\n q = (xf - fulc) * (fx - fnfc)\n p = (xf - fulc) * q - (xf - nfc) * r\n q = 2.0 * (q - r)\n if q > 0.0:\n p = -p\n q = numpy.abs(q)\n r = e\n e = rat\n\n # Check for acceptability of parabola\n if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and\n (p < q * (b - xf))):\n rat = (p + 0.0) / q\n x = xf + rat\n step = ' parabolic'\n\n if ((x - a) < tol2) or ((b - x) < tol2):\n si = numpy.sign(xm - xf) + ((xm - xf) == 0)\n rat = tol1 * si\n else: # do a golden section step\n golden = 1\n\n if golden: # Do a golden-section step\n if xf >= xm:\n e = a - xf\n else:\n e = b - xf\n rat = golden_mean*e\n step = ' golden'\n\n si = numpy.sign(rat) + (rat == 0)\n x = xf + si * numpy.max([numpy.abs(rat), tol1])\n fu = func(x, *args)\n num += 1\n fmin_data = (num, x, fu)\n if disp > 2:\n print(\"%5.0f %12.6g %12.6g %s\" % (fmin_data + (step,)))\n\n if fu <= fx:\n if x >= xf:\n a = xf\n else:\n b = xf\n fulc, ffulc = nfc, fnfc\n nfc, fnfc = xf, fx\n xf, fx = x, fu\n else:\n if x < xf:\n a = x\n else:\n b = x\n if (fu <= fnfc) or (nfc == xf):\n fulc, 
ffulc = nfc, fnfc\n nfc, fnfc = x, fu\n elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):\n fulc, ffulc = x, fu\n\n xm = 0.5 * (a + b)\n tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0\n tol2 = 2.0 * tol1\n\n if num >= maxfun:\n flag = 1\n break\n\n fval = fx\n if disp > 0:\n _endprint(x, flag, fval, maxfun, xatol, disp)\n\n result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),\n message={0: 'Solution found.',\n 1: 'Maximum number of function calls '\n 'reached.'}.get(flag, ''),\n x=xf, nfev=num)\n\n return result\n\n\nclass Brent:\n #need to rethink design of __init__\n def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,\n full_output=0):\n self.func = func\n self.args = args\n self.tol = tol\n self.maxiter = maxiter\n self._mintol = 1.0e-11\n self._cg = 0.3819660\n self.xmin = None\n self.fval = None\n self.iter = 0\n self.funcalls = 0\n\n # need to rethink design of set_bracket (new options, etc)\n def set_bracket(self, brack=None):\n self.brack = brack\n\n def get_bracket_info(self):\n #set up\n func = self.func\n args = self.args\n brack = self.brack\n ### BEGIN core bracket_info code ###\n ### carefully DOCUMENT any CHANGES in core ##\n if brack is None:\n xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)\n elif len(brack) == 2:\n xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],\n xb=brack[1], args=args)\n elif len(brack) == 3:\n xa, xb, xc = brack\n if (xa > xc): # swap so xa < xc can be assumed\n xc, xa = xa, xc\n if not ((xa < xb) and (xb < xc)):\n raise ValueError(\"Not a bracketing interval.\")\n fa = func(*((xa,) + args))\n fb = func(*((xb,) + args))\n fc = func(*((xc,) + args))\n if not ((fb < fa) and (fb < fc)):\n raise ValueError(\"Not a bracketing interval.\")\n funcalls = 3\n else:\n raise ValueError(\"Bracketing interval must be \"\n \"length 2 or 3 sequence.\")\n ### END core bracket_info code ###\n\n return xa, xb, xc, fa, fb, fc, funcalls\n\n def optimize(self):\n # set up for optimization\n func = self.func\n xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()\n _mintol = self._mintol\n _cg = self._cg\n #################################\n #BEGIN CORE ALGORITHM\n #################################\n x = w = v = xb\n fw = fv = fx = func(*((x,) + self.args))\n if (xa < xc):\n a = xa\n b = xc\n else:\n a = xc\n b = xa\n deltax = 0.0\n funcalls += 1\n iter = 0\n while (iter < self.maxiter):\n tol1 = self.tol * numpy.abs(x) + _mintol\n tol2 = 2.0 * tol1\n xmid = 0.5 * (a + b)\n # check for convergence\n if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):\n break\n # XXX In the first iteration, rat is only bound in the true case\n # of this conditional. This used to cause an UnboundLocalError\n # (gh-4140). 
It should be set before the if (but to what?).\n if (numpy.abs(deltax) <= tol1):\n if (x >= xmid):\n deltax = a - x # do a golden section step\n else:\n deltax = b - x\n rat = _cg * deltax\n else: # do a parabolic step\n tmp1 = (x - w) * (fx - fv)\n tmp2 = (x - v) * (fx - fw)\n p = (x - v) * tmp2 - (x - w) * tmp1\n tmp2 = 2.0 * (tmp2 - tmp1)\n if (tmp2 > 0.0):\n p = -p\n tmp2 = numpy.abs(tmp2)\n dx_temp = deltax\n deltax = rat\n # check parabolic fit\n if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and\n (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):\n rat = p * 1.0 / tmp2 # if parabolic step is useful.\n u = x + rat\n if ((u - a) < tol2 or (b - u) < tol2):\n if xmid - x >= 0:\n rat = tol1\n else:\n rat = -tol1\n else:\n if (x >= xmid):\n deltax = a - x # if it's not do a golden section step\n else:\n deltax = b - x\n rat = _cg * deltax\n\n if (numpy.abs(rat) < tol1): # update by at least tol1\n if rat >= 0:\n u = x + tol1\n else:\n u = x - tol1\n else:\n u = x + rat\n fu = func(*((u,) + self.args)) # calculate new output value\n funcalls += 1\n\n if (fu > fx): # if it's bigger than current\n if (u < x):\n a = u\n else:\n b = u\n if (fu <= fw) or (w == x):\n v = w\n w = u\n fv = fw\n fw = fu\n elif (fu <= fv) or (v == x) or (v == w):\n v = u\n fv = fu\n else:\n if (u >= x):\n a = x\n else:\n b = x\n v = w\n w = x\n x = u\n fv = fw\n fw = fx\n fx = fu\n\n iter += 1\n #################################\n #END CORE ALGORITHM\n #################################\n\n self.xmin = x\n self.fval = fx\n self.iter = iter\n self.funcalls = funcalls\n\n def get_result(self, full_output=False):\n if full_output:\n return self.xmin, self.fval, self.iter, self.funcalls\n else:\n return self.xmin\n\n\ndef brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):\n \"\"\"\n Given a function of one-variable and a possible bracket, return\n the local minimum of the function isolated to a fractional precision\n of tol.\n\n Parameters\n ----------\n func : callable f(x,*args)\n Objective function.\n args : tuple, optional\n Additional arguments (if present).\n brack : tuple, optional\n Either a triple (xa,xb,xc) where xa<xb<xc and func(xb) <\n func(xa), func(xc) or a pair (xa,xb) which are used as a\n starting interval for a downhill bracket search (see\n `bracket`). Providing the pair (xa,xb) does not always mean\n the obtained solution will satisfy xa<=x<=xb.\n tol : float, optional\n Stop if between iteration change is less than `tol`.\n full_output : bool, optional\n If True, return all output args (xmin, fval, iter,\n funcalls).\n maxiter : int, optional\n Maximum number of iterations in solution.\n\n Returns\n -------\n xmin : ndarray\n Optimum point.\n fval : float\n Optimum value.\n iter : int\n Number of iterations.\n funcalls : int\n Number of objective function evaluations made.\n\n See also\n --------\n minimize_scalar: Interface to minimization algorithms for scalar\n univariate functions. See the 'Brent' `method` in particular.\n\n Notes\n -----\n Uses inverse parabolic interpolation when possible to speed up\n convergence of golden section method.\n\n Does not ensure that the minimum lies in the range specified by\n `brack`. See `fminbound`.\n\n Examples\n --------\n We illustrate the behaviour of the function when `brack` is of\n size 2 and 3 respectively. In the case where `brack` is of the\n form (xa,xb), we can see for the given values, the output need\n not necessarily lie in the range (xa,xb).\n\n >>> def f(x):\n ... 
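# convex quadratic test objective with its minimum at x = 0\n    ...     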
return x**2\n\n >>> from scipy import optimize\n\n >>> minimum = optimize.brent(f,brack=(1,2))\n >>> minimum\n 0.0\n >>> minimum = optimize.brent(f,brack=(-1,0.5,2))\n >>> minimum\n -2.7755575615628914e-17\n\n \"\"\"\n options = {'xtol': tol,\n 'maxiter': maxiter}\n res = _minimize_scalar_brent(func, brack, args, **options)\n if full_output:\n return res['x'], res['fun'], res['nit'], res['nfev']\n else:\n return res['x']\n\n\ndef _minimize_scalar_brent(func, brack=None, args=(),\n xtol=1.48e-8, maxiter=500,\n **unknown_options):\n \"\"\"\n Options\n -------\n maxiter : int\n Maximum number of iterations to perform.\n xtol : float\n Relative error in solution `xopt` acceptable for convergence.\n\n Notes\n -----\n Uses inverse parabolic interpolation when possible to speed up\n convergence of golden section method.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n tol = xtol\n if tol < 0:\n raise ValueError('tolerance should be >= 0, got %r' % tol)\n\n brent = Brent(func=func, args=args, tol=tol,\n full_output=True, maxiter=maxiter)\n brent.set_bracket(brack)\n brent.optimize()\n x, fval, nit, nfev = brent.get_result(full_output=True)\n return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,\n success=nit < maxiter)\n\n\ndef golden(func, args=(), brack=None, tol=_epsilon,\n full_output=0, maxiter=5000):\n \"\"\"\n Return the minimum of a function of one variable using golden section\n method.\n\n Given a function of one variable and a possible bracketing interval,\n return the minimum of the function isolated to a fractional precision of\n tol.\n\n Parameters\n ----------\n func : callable func(x,*args)\n Objective function to minimize.\n args : tuple, optional\n Additional arguments (if present), passed to func.\n brack : tuple, optional\n Triple (a,b,c), where (a<b<c) and func(b) <\n func(a),func(c). If bracket consists of two numbers (a,\n c), then they are assumed to be a starting interval for a\n downhill bracket search (see `bracket`); it doesn't always\n mean that obtained solution will satisfy a<=x<=c.\n tol : float, optional\n x tolerance stop criterion\n full_output : bool, optional\n If True, return optional outputs.\n maxiter : int\n Maximum number of iterations to perform.\n\n See also\n --------\n minimize_scalar: Interface to minimization algorithms for scalar\n univariate functions. See the 'Golden' `method` in particular.\n\n Notes\n -----\n Uses analog of bisection method to decrease the bracketed\n interval.\n\n Examples\n --------\n We illustrate the behaviour of the function when `brack` is of\n size 2 and 3 respectively. In the case where `brack` is of the\n form (xa,xb), we can see for the given values, the output need\n not necessarily lie in the range ``(xa, xb)``.\n\n >>> def f(x):\n ... 
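# quadratic objective; golden-section search uses function values only\n    ...     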
return x**2\n\n >>> from scipy import optimize\n\n >>> minimum = optimize.golden(f, brack=(1, 2))\n >>> minimum\n 1.5717277788484873e-162\n >>> minimum = optimize.golden(f, brack=(-1, 0.5, 2))\n >>> minimum\n -1.5717277788484873e-162\n\n \"\"\"\n options = {'xtol': tol, 'maxiter': maxiter}\n res = _minimize_scalar_golden(func, brack, args, **options)\n if full_output:\n return res['x'], res['fun'], res['nfev']\n else:\n return res['x']\n\n\ndef _minimize_scalar_golden(func, brack=None, args=(),\n xtol=_epsilon, maxiter=5000, **unknown_options):\n \"\"\"\n Options\n -------\n maxiter : int\n Maximum number of iterations to perform.\n xtol : float\n Relative error in solution `xopt` acceptable for convergence.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n tol = xtol\n if brack is None:\n xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)\n elif len(brack) == 2:\n xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],\n xb=brack[1], args=args)\n elif len(brack) == 3:\n xa, xb, xc = brack\n if (xa > xc): # swap so xa < xc can be assumed\n xc, xa = xa, xc\n if not ((xa < xb) and (xb < xc)):\n raise ValueError(\"Not a bracketing interval.\")\n fa = func(*((xa,) + args))\n fb = func(*((xb,) + args))\n fc = func(*((xc,) + args))\n if not ((fb < fa) and (fb < fc)):\n raise ValueError(\"Not a bracketing interval.\")\n funcalls = 3\n else:\n raise ValueError(\"Bracketing interval must be length 2 or 3 sequence.\")\n\n _gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))\n _gC = 1.0 - _gR\n x3 = xc\n x0 = xa\n if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):\n x1 = xb\n x2 = xb + _gC * (xc - xb)\n else:\n x2 = xb\n x1 = xb - _gC * (xb - xa)\n f1 = func(*((x1,) + args))\n f2 = func(*((x2,) + args))\n funcalls += 2\n nit = 0\n for i in xrange(maxiter):\n if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)):\n break\n if (f2 < f1):\n x0 = x1\n x1 = x2\n x2 = _gR * x1 + _gC * x3\n f1 = f2\n f2 = func(*((x2,) + args))\n else:\n x3 = x2\n x2 = x1\n x1 = _gR * x2 + _gC * x0\n f2 = f1\n f1 = func(*((x1,) + args))\n funcalls += 1\n nit += 1\n if (f1 < f2):\n xmin = x1\n fval = f1\n else:\n xmin = x2\n fval = f2\n\n return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,\n success=nit < maxiter)\n\n\ndef bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):\n \"\"\"\n Bracket the minimum of the function.\n\n Given a function and distinct initial points, search in the\n downhill direction (as defined by the initital points) and return\n new points xa, xb, xc that bracket the minimum of the function\n f(xa) > f(xb) < f(xc). It doesn't always mean that obtained\n solution will satisfy xa<=x<=xb\n\n Parameters\n ----------\n func : callable f(x,*args)\n Objective function to minimize.\n xa, xb : float, optional\n Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.\n args : tuple, optional\n Additional arguments (if present), passed to `func`.\n grow_limit : float, optional\n Maximum grow limit. Defaults to 110.0\n maxiter : int, optional\n Maximum number of iterations to perform. 
Defaults to 1000.\n\n Returns\n -------\n xa, xb, xc : float\n Bracket.\n fa, fb, fc : float\n Objective function values in bracket.\n funcalls : int\n Number of function evaluations made.\n\n \"\"\"\n _gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0\n _verysmall_num = 1e-21\n fa = func(*(xa,) + args)\n fb = func(*(xb,) + args)\n if (fa < fb): # Switch so fa > fb\n xa, xb = xb, xa\n fa, fb = fb, fa\n xc = xb + _gold * (xb - xa)\n fc = func(*((xc,) + args))\n funcalls = 3\n iter = 0\n while (fc < fb):\n tmp1 = (xb - xa) * (fb - fc)\n tmp2 = (xb - xc) * (fb - fa)\n val = tmp2 - tmp1\n if numpy.abs(val) < _verysmall_num:\n denom = 2.0 * _verysmall_num\n else:\n denom = 2.0 * val\n w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom\n wlim = xb + grow_limit * (xc - xb)\n if iter > maxiter:\n raise RuntimeError(\"Too many iterations.\")\n iter += 1\n if (w - xc) * (xb - w) > 0.0:\n fw = func(*((w,) + args))\n funcalls += 1\n if (fw < fc):\n xa = xb\n xb = w\n fa = fb\n fb = fw\n return xa, xb, xc, fa, fb, fc, funcalls\n elif (fw > fb):\n xc = w\n fc = fw\n return xa, xb, xc, fa, fb, fc, funcalls\n w = xc + _gold * (xc - xb)\n fw = func(*((w,) + args))\n funcalls += 1\n elif (w - wlim)*(wlim - xc) >= 0.0:\n w = wlim\n fw = func(*((w,) + args))\n funcalls += 1\n elif (w - wlim)*(xc - w) > 0.0:\n fw = func(*((w,) + args))\n funcalls += 1\n if (fw < fc):\n xb = xc\n xc = w\n w = xc + _gold * (xc - xb)\n fb = fc\n fc = fw\n fw = func(*((w,) + args))\n funcalls += 1\n else:\n w = xc + _gold * (xc - xb)\n fw = func(*((w,) + args))\n funcalls += 1\n xa = xb\n xb = xc\n xc = w\n fa = fb\n fb = fc\n fc = fw\n return xa, xb, xc, fa, fb, fc, funcalls\n\n\ndef _linesearch_powell(func, p, xi, tol=1e-3):\n \"\"\"Line-search algorithm using fminbound.\n\n Find the minimium of the function ``func(x0+ alpha*direc)``.\n\n \"\"\"\n def myfunc(alpha):\n return func(p + alpha*xi)\n alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)\n xi = alpha_min*xi\n return squeeze(fret), p + xi, xi\n\n\ndef fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,\n maxfun=None, full_output=0, disp=1, retall=0, callback=None,\n direc=None):\n \"\"\"\n Minimize a function using modified Powell's method.\n\n This method only uses function values, not derivatives.\n\n Parameters\n ----------\n func : callable f(x,*args)\n Objective function to be minimized.\n x0 : ndarray\n Initial guess.\n args : tuple, optional\n Extra arguments passed to func.\n xtol : float, optional\n Line-search error tolerance.\n ftol : float, optional\n Relative error in ``func(xopt)`` acceptable for convergence.\n maxiter : int, optional\n Maximum number of iterations to perform.\n maxfun : int, optional\n Maximum number of function evaluations to make.\n full_output : bool, optional\n If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and\n ``warnflag`` are returned.\n disp : bool, optional\n If True, print convergence messages.\n retall : bool, optional\n If True, return a list of the solution at each iteration.\n callback : callable, optional\n An optional user-supplied function, called after each\n iteration. Called as ``callback(xk)``, where ``xk`` is the\n current parameter vector.\n direc : ndarray, optional\n Initial fitting step and parameter order set as an (N, N) array, where N\n is the number of fitting parameters in `x0`. Defaults to step size 1.0\n fitting all parameters simultaneously (``np.ones((N, N))``). 
To\n prevent initial consideration of values in a step or to change initial\n step size, set to 0 or desired step size in the Jth position in the Mth\n block, where J is the position in `x0` and M is the desired evaluation\n step, with steps being evaluated in index order. Step size and ordering\n will change freely as minimization proceeds.\n\n Returns\n -------\n xopt : ndarray\n Parameter which minimizes `func`.\n fopt : number\n Value of function at minimum: ``fopt = func(xopt)``.\n direc : ndarray\n Current direction set.\n iter : int\n Number of iterations.\n funcalls : int\n Number of function calls made.\n warnflag : int\n Integer warning flag:\n 1 : Maximum number of function evaluations.\n 2 : Maximum number of iterations.\n allvecs : list\n List of solutions at each iteration.\n\n See also\n --------\n minimize: Interface to unconstrained minimization algorithms for\n multivariate functions. See the 'Powell' method in particular.\n\n Notes\n -----\n Uses a modification of Powell's method to find the minimum of\n a function of N variables. Powell's method is a conjugate\n direction method.\n\n The algorithm has two loops. The outer loop merely iterates over the inner\n loop. The inner loop minimizes over each current direction in the direction\n set. At the end of the inner loop, if certain conditions are met, the\n direction that gave the largest decrease is dropped and replaced with the\n difference between the current estimated x and the estimated x from the\n beginning of the inner-loop.\n\n The technical conditions for replacing the direction of greatest\n increase amount to checking that\n\n 1. No further gain can be made along the direction of greatest increase\n from that iteration.\n 2. The direction of greatest increase accounted for a large sufficient\n fraction of the decrease in the function value from that iteration of\n the inner loop.\n\n References\n ----------\n Powell M.J.D. (1964) An efficient method for finding the minimum of a\n function of several variables without calculating derivatives,\n Computer Journal, 7 (2):155-162.\n\n Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:\n Numerical Recipes (any edition), Cambridge University Press\n\n Examples\n --------\n >>> def f(x):\n ... 
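# one-variable quadratic; Powell's method evaluates only f, never f'\n    ...     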
return x**2\n\n >>> from scipy import optimize\n\n >>> minimum = optimize.fmin_powell(f, -1)\n Optimization terminated successfully.\n Current function value: 0.000000\n Iterations: 2\n Function evaluations: 18\n >>> minimum\n array(0.0)\n\n \"\"\"\n opts = {'xtol': xtol,\n 'ftol': ftol,\n 'maxiter': maxiter,\n 'maxfev': maxfun,\n 'disp': disp,\n 'direc': direc,\n 'return_all': retall}\n\n res = _minimize_powell(func, x0, args, callback=callback, **opts)\n\n if full_output:\n retlist = (res['x'], res['fun'], res['direc'], res['nit'],\n res['nfev'], res['status'])\n if retall:\n retlist += (res['allvecs'], )\n return retlist\n else:\n if retall:\n return res['x'], res['allvecs']\n else:\n return res['x']\n\n\ndef _minimize_powell(func, x0, args=(), callback=None,\n xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,\n disp=False, direc=None, return_all=False,\n **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n modified Powell algorithm.\n\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n xtol : float\n Relative error in solution `xopt` acceptable for convergence.\n ftol : float\n Relative error in ``fun(xopt)`` acceptable for convergence.\n maxiter, maxfev : int\n Maximum allowed number of iterations and function evaluations.\n Will default to ``N*1000``, where ``N`` is the number of\n variables, if neither `maxiter` or `maxfev` is set. If both\n `maxiter` and `maxfev` are set, minimization will stop at the\n first reached.\n direc : ndarray\n Initial set of direction vectors for the Powell method.\n\n \"\"\"\n _check_unknown_options(unknown_options)\n maxfun = maxfev\n retall = return_all\n # we need to use a mutable object here that we can update in the\n # wrapper function\n fcalls, func = wrap_function(func, args)\n x = asarray(x0).flatten()\n if retall:\n allvecs = [x]\n N = len(x)\n # If neither are set, then set both to default\n if maxiter is None and maxfun is None:\n maxiter = N * 1000\n maxfun = N * 1000\n elif maxiter is None:\n # Convert remaining Nones, to np.inf, unless the other is np.inf, in\n # which case use the default to avoid unbounded iteration\n if maxfun == np.inf:\n maxiter = N * 1000\n else:\n maxiter = np.inf\n elif maxfun is None:\n if maxiter == np.inf:\n maxfun = N * 1000\n else:\n maxfun = np.inf\n\n if direc is None:\n direc = eye(N, dtype=float)\n else:\n direc = asarray(direc, dtype=float)\n\n fval = squeeze(func(x))\n x1 = x.copy()\n iter = 0\n ilist = list(range(N))\n while True:\n fx = fval\n bigind = 0\n delta = 0.0\n for i in ilist:\n direc1 = direc[i]\n fx2 = fval\n fval, x, direc1 = _linesearch_powell(func, x, direc1,\n tol=xtol * 100)\n if (fx2 - fval) > delta:\n delta = fx2 - fval\n bigind = i\n iter += 1\n if callback is not None:\n callback(x)\n if retall:\n allvecs.append(x)\n bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20\n if 2.0 * (fx - fval) <= bnd:\n break\n if fcalls[0] >= maxfun:\n break\n if iter >= maxiter:\n break\n\n # Construct the extrapolated point\n direc1 = x - x1\n x2 = 2*x - x1\n x1 = x.copy()\n fx2 = squeeze(func(x2))\n\n if (fx > fx2):\n t = 2.0*(fx + fx2 - 2.0*fval)\n temp = (fx - fval - delta)\n t *= temp*temp\n temp = fx - fx2\n t -= delta*temp*temp\n if t < 0.0:\n fval, x, direc1 = _linesearch_powell(func, x, direc1,\n tol=xtol*100)\n direc[bigind] = direc[-1]\n direc[-1] = direc1\n\n warnflag = 0\n if fcalls[0] >= maxfun:\n warnflag = 1\n msg = _status_message['maxfev']\n if disp:\n print(\"Warning: \" + msg)\n elif iter >= maxiter:\n 
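# too many outer iterations: report non-convergence via warnflag 2\n        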
warnflag = 2\n msg = _status_message['maxiter']\n if disp:\n print(\"Warning: \" + msg)\n else:\n msg = _status_message['success']\n if disp:\n print(msg)\n print(\" Current function value: %f\" % fval)\n print(\" Iterations: %d\" % iter)\n print(\" Function evaluations: %d\" % fcalls[0])\n\n x = squeeze(x)\n\n result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],\n status=warnflag, success=(warnflag == 0),\n message=msg, x=x)\n if retall:\n result['allvecs'] = allvecs\n return result\n\n\ndef _endprint(x, flag, fval, maxfun, xtol, disp):\n if flag == 0:\n if disp > 1:\n print(\"\\nOptimization terminated successfully;\\n\"\n \"The returned value satisfies the termination criteria\\n\"\n \"(using xtol = \", xtol, \")\")\n if flag == 1:\n if disp:\n print(\"\\nMaximum number of function evaluations exceeded --- \"\n \"increase maxfun argument.\\n\")\n return\n\n\ndef brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,\n disp=False, workers=1):\n \"\"\"Minimize a function over a given range by brute force.\n\n Uses the \"brute force\" method, i.e. computes the function's value\n at each point of a multidimensional grid of points, to find the global\n minimum of the function.\n\n The function is evaluated everywhere in the range with the datatype of the\n first call to the function, as enforced by the ``vectorize`` NumPy\n function. The value and type of the function evaluation returned when\n ``full_output=True`` are affected in addition by the ``finish`` argument\n (see Notes).\n\n The brute force approach is inefficient because the number of grid points\n increases exponentially - the number of grid points to evaluate is\n ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even\n moderately sized problems can take a long time to run, and/or run into\n memory limitations.\n\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the\n form ``f(x, *args)``, where ``x`` is the argument in\n the form of a 1-D array and ``args`` is a tuple of any\n additional fixed parameters needed to completely specify\n the function.\n ranges : tuple\n Each component of the `ranges` tuple must be either a\n \"slice object\" or a range tuple of the form ``(low, high)``.\n The program uses these to create the grid of points on which\n the objective function will be computed. See `Note 2` for\n more detail.\n args : tuple, optional\n Any additional fixed parameters needed to completely specify\n the function.\n Ns : int, optional\n Number of grid points along the axes, if not otherwise\n specified. See `Note2`.\n full_output : bool, optional\n If True, return the evaluation grid and the objective function's\n values on it.\n finish : callable, optional\n An optimization function that is called with the result of brute force\n minimization as initial guess. `finish` should take `func` and\n the initial guess as positional arguments, and take `args` as\n keyword arguments. It may additionally take `full_output`\n and/or `disp` as keyword arguments. Use None if no \"polishing\"\n function is to be used. 
See Notes for more details.\n disp : bool, optional\n Set to True to print convergence messages from the `finish` callable.\n workers : int or map-like callable, optional\n If `workers` is an int the grid is subdivided into `workers`\n sections and evaluated in parallel (uses\n `multiprocessing.Pool <multiprocessing>`).\n Supply `-1` to use all cores available to the Process.\n Alternatively supply a map-like callable, such as\n `multiprocessing.Pool.map` for evaluating the grid in parallel.\n This evaluation is carried out as ``workers(func, iterable)``.\n Requires that `func` be pickleable.\n\n .. versionadded:: 1.3.0\n\n Returns\n -------\n x0 : ndarray\n A 1-D array containing the coordinates of a point at which the\n objective function had its minimum value. (See `Note 1` for\n which point is returned.)\n fval : float\n Function value at the point `x0`. (Returned when `full_output` is\n True.)\n grid : tuple\n Representation of the evaluation grid. It has the same\n length as `x0`. (Returned when `full_output` is True.)\n Jout : ndarray\n Function values at each point of the evaluation\n grid, `i.e.`, ``Jout = func(*grid)``. (Returned\n when `full_output` is True.)\n\n See Also\n --------\n basinhopping, differential_evolution\n\n Notes\n -----\n *Note 1*: The program finds the gridpoint at which the lowest value\n of the objective function occurs. If `finish` is None, that is the\n point returned. When the global minimum occurs within (or not very far\n outside) the grid's boundaries, and the grid is fine enough, that\n point will be in the neighborhood of the global minimum.\n\n However, users often employ some other optimization program to\n \"polish\" the gridpoint values, `i.e.`, to seek a more precise\n (local) minimum near `brute's` best gridpoint.\n The `brute` function's `finish` option provides a convenient way to do\n that. Any polishing program used must take `brute's` output as its\n initial guess as a positional argument, and take `brute's` input values\n for `args` as keyword arguments, otherwise an error will be raised.\n It may additionally take `full_output` and/or `disp` as keyword arguments.\n\n `brute` assumes that the `finish` function returns either an\n `OptimizeResult` object or a tuple in the form:\n ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing\n value of the argument, ``Jmin`` is the minimum value of the objective\n function, \"...\" may be some other returned values (which are not used\n by `brute`), and ``statuscode`` is the status code of the `finish` program.\n\n Note that when `finish` is not None, the values returned are those\n of the `finish` program, *not* the gridpoint ones. Consequently,\n while `brute` confines its search to the input grid points,\n the `finish` program's results usually will not coincide with any\n gridpoint, and may fall outside the grid's boundary. Thus, if a\n minimum only needs to be found over the provided grid points, make\n sure to pass in `finish=None`.\n\n *Note 2*: The grid of points is a `numpy.mgrid` object.\n For `brute` the `ranges` and `Ns` inputs have the following effect.\n Each component of the `ranges` tuple can be either a slice object or a\n two-tuple giving a range of values, such as (0, 5). If the component is a\n slice object, `brute` uses it directly. 
If the component is a two-tuple\n range, `brute` internally converts it to a slice object that interpolates\n `Ns` points from its low-value to its high-value, inclusive.\n\n Examples\n --------\n We illustrate the use of `brute` to seek the global minimum of a function\n of two variables that is given as the sum of a positive-definite\n quadratic and two deep \"Gaussian-shaped\" craters. Specifically, define\n the objective function `f` as the sum of three other functions,\n ``f = f1 + f2 + f3``. We suppose each of these has a signature\n ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions\n are as defined below.\n\n >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)\n >>> def f1(z, *params):\n ... x, y = z\n ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params\n ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)\n\n >>> def f2(z, *params):\n ... x, y = z\n ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params\n ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))\n\n >>> def f3(z, *params):\n ... x, y = z\n ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params\n ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))\n\n >>> def f(z, *params):\n ... return f1(z, *params) + f2(z, *params) + f3(z, *params)\n\n Thus, the objective function may have local minima near the minimum\n of each of the three functions of which it is composed. To\n use `fmin` to polish its gridpoint result, we may then continue as\n follows:\n\n >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))\n >>> from scipy import optimize\n >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,\n ... finish=optimize.fmin)\n >>> resbrute[0] # global minimum\n array([-1.05665192, 1.80834843])\n >>> resbrute[1] # function value at global minimum\n -3.4085818767\n\n Note that if `finish` had been set to None, we would have gotten the\n gridpoint [-1.0 1.75] where the rounded function value is -2.892.\n\n \"\"\"\n N = len(ranges)\n if N > 40:\n raise ValueError(\"Brute Force not possible with more \"\n \"than 40 variables.\")\n lrange = list(ranges)\n for k in range(N):\n if type(lrange[k]) is not type(slice(None)):\n if len(lrange[k]) < 3:\n lrange[k] = tuple(lrange[k]) + (complex(Ns),)\n lrange[k] = slice(*lrange[k])\n if (N == 1):\n lrange = lrange[0]\n\n grid = np.mgrid[lrange]\n\n # obtain an array of parameters that is iterable by a map-like callable\n inpt_shape = grid.shape\n if (N > 1):\n grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T\n\n wrapped_func = _Brute_Wrapper(func, args)\n\n # iterate over input arrays, possibly in parallel\n with MapWrapper(pool=workers) as mapper:\n Jout = np.array(list(mapper(wrapped_func, grid)))\n if (N == 1):\n grid = (grid,)\n Jout = np.squeeze(Jout)\n elif (N > 1):\n Jout = np.reshape(Jout, inpt_shape[1:])\n grid = np.reshape(grid.T, inpt_shape)\n\n Nshape = shape(Jout)\n\n indx = argmin(Jout.ravel(), axis=-1)\n Nindx = zeros(N, int)\n xmin = zeros(N, float)\n for k in range(N - 1, -1, -1):\n thisN = Nshape[k]\n Nindx[k] = indx % Nshape[k]\n indx = indx // thisN\n for k in range(N):\n xmin[k] = grid[k][tuple(Nindx)]\n\n Jmin = Jout[tuple(Nindx)]\n if (N == 1):\n grid = grid[0]\n xmin = xmin[0]\n\n if callable(finish):\n # set up kwargs for `finish` function\n finish_args = _getargspec(finish).args\n finish_kwargs = dict()\n if 'full_output' in finish_args:\n finish_kwargs['full_output'] = 1\n if 'disp' in finish_args:\n finish_kwargs['disp'] = disp\n elif 'options' in finish_args:\n # pass 'disp' as 
`options`\n # (e.g. if `finish` is `minimize`)\n finish_kwargs['options'] = {'disp': disp}\n\n # run minimizer\n res = finish(func, xmin, args=args, **finish_kwargs)\n\n if isinstance(res, OptimizeResult):\n xmin = res.x\n Jmin = res.fun\n success = res.success\n else:\n xmin = res[0]\n Jmin = res[1]\n success = res[-1] == 0\n if not success:\n if disp:\n print(\"Warning: Either final optimization did not succeed \"\n \"or `finish` does not return `statuscode` as its last \"\n \"argument.\")\n\n if full_output:\n return xmin, Jmin, grid, Jout\n else:\n return xmin\n\n\nclass _Brute_Wrapper(object):\n \"\"\"\n Object to wrap user cost function for optimize.brute, allowing picklability\n \"\"\"\n def __init__(self, f, args):\n self.f = f\n self.args = [] if args is None else args\n\n def __call__(self, x):\n # flatten needed for one dimensional case.\n return self.f(np.asarray(x).flatten(), *self.args)\n\n\ndef show_options(solver=None, method=None, disp=True):\n \"\"\"\n Show documentation for additional options of optimization solvers.\n\n These are method-specific options that can be supplied through the\n ``options`` dict.\n\n Parameters\n ----------\n solver : str\n Type of optimization solver. One of 'minimize', 'minimize_scalar',\n 'root', or 'linprog'.\n method : str, optional\n If not given, shows all methods of the specified solver. Otherwise,\n show only the options for the specified method. Valid values\n corresponds to methods' names of respective solver (e.g. 'BFGS' for\n 'minimize').\n disp : bool, optional\n Whether to print the result rather than returning it.\n\n Returns\n -------\n text\n Either None (for disp=True) or the text string (disp=False)\n\n Notes\n -----\n The solver-specific methods are:\n\n `scipy.optimize.minimize`\n\n - :ref:`Nelder-Mead <optimize.minimize-neldermead>`\n - :ref:`Powell <optimize.minimize-powell>`\n - :ref:`CG <optimize.minimize-cg>`\n - :ref:`BFGS <optimize.minimize-bfgs>`\n - :ref:`Newton-CG <optimize.minimize-newtoncg>`\n - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`\n - :ref:`TNC <optimize.minimize-tnc>`\n - :ref:`COBYLA <optimize.minimize-cobyla>`\n - :ref:`SLSQP <optimize.minimize-slsqp>`\n - :ref:`dogleg <optimize.minimize-dogleg>`\n - :ref:`trust-ncg <optimize.minimize-trustncg>`\n\n `scipy.optimize.root`\n\n - :ref:`hybr <optimize.root-hybr>`\n - :ref:`lm <optimize.root-lm>`\n - :ref:`broyden1 <optimize.root-broyden1>`\n - :ref:`broyden2 <optimize.root-broyden2>`\n - :ref:`anderson <optimize.root-anderson>`\n - :ref:`linearmixing <optimize.root-linearmixing>`\n - :ref:`diagbroyden <optimize.root-diagbroyden>`\n - :ref:`excitingmixing <optimize.root-excitingmixing>`\n - :ref:`krylov <optimize.root-krylov>`\n - :ref:`df-sane <optimize.root-dfsane>`\n\n `scipy.optimize.minimize_scalar`\n\n - :ref:`brent <optimize.minimize_scalar-brent>`\n - :ref:`golden <optimize.minimize_scalar-golden>`\n - :ref:`bounded <optimize.minimize_scalar-bounded>`\n\n `scipy.optimize.linprog`\n\n - :ref:`simplex <optimize.linprog-simplex>`\n - :ref:`interior-point <optimize.linprog-interior-point>`\n\n \"\"\"\n import textwrap\n\n doc_routines = {\n 'minimize': (\n ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),\n ('cg', 'scipy.optimize.optimize._minimize_cg'),\n ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),\n ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),\n ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),\n ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),\n ('newton-cg', 
'scipy.optimize.optimize._minimize_newtoncg'),\n ('powell', 'scipy.optimize.optimize._minimize_powell'),\n ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),\n ('tnc', 'scipy.optimize.tnc._minimize_tnc'),\n ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),\n ),\n 'root': (\n ('hybr', 'scipy.optimize.minpack._root_hybr'),\n ('lm', 'scipy.optimize._root._root_leastsq'),\n ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),\n ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),\n ('anderson', 'scipy.optimize._root._root_anderson_doc'),\n ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),\n ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),\n ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),\n ('krylov', 'scipy.optimize._root._root_krylov_doc'),\n ('df-sane', 'scipy.optimize._spectral._root_df_sane'),\n ),\n 'root_scalar': (\n ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'),\n ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'),\n ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'),\n ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'),\n ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'),\n ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'),\n ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'),\n ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'),\n ),\n 'linprog': (\n ('simplex', 'scipy.optimize._linprog._linprog_simplex'),\n ('interior-point', 'scipy.optimize._linprog._linprog_ip'),\n ),\n 'minimize_scalar': (\n ('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),\n ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),\n ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),\n ),\n }\n\n if solver is None:\n text = [\"\\n\\n\\n========\\n\", \"minimize\\n\", \"========\\n\"]\n text.append(show_options('minimize', disp=False))\n text.extend([\"\\n\\n===============\\n\", \"minimize_scalar\\n\",\n \"===============\\n\"])\n text.append(show_options('minimize_scalar', disp=False))\n text.extend([\"\\n\\n\\n====\\n\", \"root\\n\",\n \"====\\n\"])\n text.append(show_options('root', disp=False))\n text.extend(['\\n\\n\\n=======\\n', 'linprog\\n',\n '=======\\n'])\n text.append(show_options('linprog', disp=False))\n text = \"\".join(text)\n else:\n solver = solver.lower()\n if solver not in doc_routines:\n raise ValueError('Unknown solver %r' % (solver,))\n\n if method is None:\n text = []\n for name, _ in doc_routines[solver]:\n text.extend([\"\\n\\n\" + name, \"\\n\" + \"=\"*len(name) + \"\\n\\n\"])\n text.append(show_options(solver, name, disp=False))\n text = \"\".join(text)\n else:\n method = method.lower()\n methods = dict(doc_routines[solver])\n if method not in methods:\n raise ValueError(\"Unknown method %r\" % (method,))\n name = methods[method]\n\n # Import function object\n parts = name.split('.')\n mod_name = \".\".join(parts[:-1])\n __import__(mod_name)\n obj = getattr(sys.modules[mod_name], parts[-1])\n\n # Get doc\n doc = obj.__doc__\n if doc is not None:\n text = textwrap.dedent(doc).strip()\n else:\n text = \"\"\n\n if disp:\n print(text)\n return\n else:\n return text\n\n\ndef main():\n import time\n\n times = []\n algor = []\n x0 = [0.8, 1.2, 0.7]\n print(\"Nelder-Mead Simplex\")\n print(\"===================\")\n start = time.time()\n x = fmin(rosen, x0)\n print(x)\n times.append(time.time() - start)\n algor.append('Nelder-Mead Simplex\\t')\n\n print()\n 
print(\"Powell Direction Set Method\")\n print(\"===========================\")\n start = time.time()\n x = fmin_powell(rosen, x0)\n print(x)\n times.append(time.time() - start)\n algor.append('Powell Direction Set Method.')\n\n print()\n print(\"Nonlinear CG\")\n print(\"============\")\n start = time.time()\n x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)\n print(x)\n times.append(time.time() - start)\n algor.append('Nonlinear CG \\t')\n\n print()\n print(\"BFGS Quasi-Newton\")\n print(\"=================\")\n start = time.time()\n x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)\n print(x)\n times.append(time.time() - start)\n algor.append('BFGS Quasi-Newton\\t')\n\n print()\n print(\"BFGS approximate gradient\")\n print(\"=========================\")\n start = time.time()\n x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)\n print(x)\n times.append(time.time() - start)\n algor.append('BFGS without gradient\\t')\n\n print()\n print(\"Newton-CG with Hessian product\")\n print(\"==============================\")\n start = time.time()\n x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)\n print(x)\n times.append(time.time() - start)\n algor.append('Newton-CG with hessian product')\n\n print()\n print(\"Newton-CG with full Hessian\")\n print(\"===========================\")\n start = time.time()\n x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)\n print(x)\n times.append(time.time() - start)\n algor.append('Newton-CG with full hessian')\n\n print()\n print(\"\\nMinimizing the Rosenbrock function of order 3\\n\")\n print(\" Algorithm \\t\\t\\t Seconds\")\n print(\"===========\\t\\t\\t =========\")\n for k in range(len(algor)):\n print(algor[k], \"\\t -- \", times[k])\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.take", "numpy.sqrt", "numpy.asarray", "numpy.squeeze", "numpy.all", "numpy.zeros_like", "numpy.reshape", "numpy.eye", "numpy.add.reduce", "numpy.finfo", "numpy.atleast_1d", "numpy.asfarray", "numpy.size", "scipy._lib._util.getargspec_no_self", "numpy.zeros", "scipy._lib._util.MapWrapper", "numpy.isclose", "numpy.min", "numpy.isnan", "scipy._lib.six.xrange", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.abs", "numpy.isfinite", "numpy.linalg.norm", "numpy.sign", "scipy._lib.six.callable", "numpy.shape", "numpy.isscalar", "numpy.prod", "numpy.isinf" ] ]
slowbull/a3c
[ "d146ff10fc06d9278957872d882f6eb06751f41b" ]
[ "train.py" ]
[ "import math\nimport os\nimport sys\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom envs import create_atari_env\nfrom model import ActorCritic\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\n\n\ndef ensure_shared_grads(model, shared_model):\n for param, shared_param in zip(model.parameters(), shared_model.parameters()):\n if shared_param.grad is not None:\n return\n shared_param._grad = param.grad\n\n\ndef train(rank, args, shared_model, optimizer=None):\n torch.manual_seed(args.seed + rank)\n\n env = create_atari_env(args.env_name)\n env.seed(args.seed + rank)\n\n model = ActorCritic(env.observation_space.shape[0], env.action_space)\n\n if optimizer is None:\n optimizer = optim.Adam(shared_model.parameters(), lr=args.lr)\n\n model.train()\n\n state = env.reset()\n state = torch.from_numpy(state)\n done = True\n\n episode_length = 0\n while True:\n episode_length += 1\n # Sync with the shared model\n model.load_state_dict(shared_model.state_dict())\n if done:\n cx = Variable(torch.zeros(1, 256))\n hx = Variable(torch.zeros(1, 256))\n else:\n cx = Variable(cx.data)\n hx = Variable(hx.data)\n\n values = []\n log_probs = []\n rewards = []\n entropies = []\n\n for step in range(args.num_steps):\n value, logit, (hx, cx) = model(\n (Variable(state.unsqueeze(0)), (hx, cx)))\n prob = F.softmax(logit)\n log_prob = F.log_softmax(logit)\n entropy = -(log_prob * prob).sum(1)\n entropies.append(entropy)\n\n action = prob.multinomial().data\n log_prob = log_prob.gather(1, Variable(action))\n\n state, reward, done, _ = env.step(action.numpy())\n done = done or episode_length >= args.max_episode_length\n reward = max(min(reward, 1), -1)\n\n if done:\n episode_length = 0\n state = env.reset()\n\n state = torch.from_numpy(state)\n values.append(value)\n log_probs.append(log_prob)\n rewards.append(reward)\n\n if done:\n break\n\n R = torch.zeros(1, 1)\n if not done:\n value, _, _ = model((Variable(state.unsqueeze(0)), (hx, cx)))\n R = value.data\n\n values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n R = Variable(R)\n gae = torch.zeros(1, 1)\n for i in reversed(range(len(rewards))):\n R = args.gamma * R + rewards[i]\n advantage = R - values[i]\n value_loss = value_loss + 0.5 * advantage.pow(2)\n\n # Generalized Advantage Estimataion\n delta_t = rewards[i] + args.gamma * \\\n values[i + 1].data - values[i].data\n gae = gae * args.gamma * args.tau + delta_t\n\n policy_loss = policy_loss - \\\n log_probs[i] * Variable(gae) - 0.01 * entropies[i]\n\n optimizer.zero_grad()\n\n (policy_loss + 0.5 * value_loss).backward()\n torch.nn.utils.clip_grad_norm(model.parameters(), 40)\n\n ensure_shared_grads(model, shared_model)\n optimizer.step()\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.functional.log_softmax", "torch.zeros", "torch.manual_seed", "torch.from_numpy", "torch.autograd.Variable" ] ]
pevisscher/camelot
[ "d93423e0dc78b64e3f2881714c91709d9241d163" ]
[ "camelot/image_processing.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\n\n\ndef adaptive_threshold(imagename, process_background=False, blocksize=15, c=-2):\n \"\"\"Thresholds an image using OpenCV's adaptiveThreshold.\n\n Parameters\n ----------\n imagename : string\n Path to image file.\n process_background : bool, optional (default: False)\n Whether or not to process lines that are in background.\n blocksize : int, optional (default: 15)\n Size of a pixel neighborhood that is used to calculate a\n threshold value for the pixel: 3, 5, 7, and so on.\n\n For more information, refer `OpenCV's adaptiveThreshold <https://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#adaptivethreshold>`_.\n c : int, optional (default: -2)\n Constant subtracted from the mean or weighted mean.\n Normally, it is positive but may be zero or negative as well.\n\n For more information, refer `OpenCV's adaptiveThreshold <https://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#adaptivethreshold>`_.\n\n Returns\n -------\n img : object\n numpy.ndarray representing the original image.\n threshold : object\n numpy.ndarray representing the thresholded image.\n\n \"\"\"\n img = cv2.imread(imagename)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n if process_background:\n threshold = cv2.adaptiveThreshold(\n gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blocksize, c\n )\n else:\n threshold = cv2.adaptiveThreshold(\n np.invert(gray),\n 255,\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY,\n blocksize,\n c,\n )\n return img, threshold\n\n\ndef find_lines(\n threshold, regions=None, direction=\"horizontal\", line_scale=15, iterations=0\n):\n \"\"\"Finds horizontal and vertical lines by applying morphological\n transformations on an image.\n\n Parameters\n ----------\n threshold : object\n numpy.ndarray representing the thresholded image.\n regions : list, optional (default: None)\n List of page regions that may contain tables of the form x1,y1,x2,y2\n where (x1, y1) -> left-top and (x2, y2) -> right-bottom\n in image coordinate space.\n direction : string, optional (default: 'horizontal')\n Specifies whether to find vertical or horizontal lines.\n line_scale : int, optional (default: 15)\n Factor by which the page dimensions will be divided to get\n smallest length of lines that should be detected.\n\n The larger this value, smaller the detected lines. 
Making it\n too large will lead to text being detected as lines.\n iterations : int, optional (default: 0)\n Number of times for erosion/dilation is applied.\n\n For more information, refer `OpenCV's dilate <https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#dilate>`_.\n\n Returns\n -------\n dmask : object\n numpy.ndarray representing pixels where vertical/horizontal\n lines lie.\n lines : list\n List of tuples representing vertical/horizontal lines with\n coordinates relative to a left-top origin in\n image coordinate space.\n\n \"\"\"\n lines = []\n\n if direction == \"vertical\":\n size = threshold.shape[0] // line_scale\n el = cv2.getStructuringElement(cv2.MORPH_RECT, (1, size))\n elif direction == \"horizontal\":\n size = threshold.shape[1] // line_scale\n el = cv2.getStructuringElement(cv2.MORPH_RECT, (size, 1))\n elif direction is None:\n raise ValueError(\"Specify direction as either 'vertical' or 'horizontal'\")\n\n if regions is not None:\n region_mask = np.zeros(threshold.shape)\n for region in regions:\n x, y, w, h = region\n region_mask[y : y + h, x : x + w] = 1\n threshold = np.multiply(threshold, region_mask)\n\n threshold = cv2.erode(threshold, el)\n threshold = cv2.dilate(threshold, el)\n dmask = cv2.dilate(threshold, el, iterations=iterations)\n\n try:\n _, contours, _ = cv2.findContours(\n threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n except ValueError:\n # for opencv backward compatibility\n contours, _ = cv2.findContours(\n threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n\n for c in contours:\n x, y, w, h = cv2.boundingRect(c)\n x1, x2 = x, x + w\n y1, y2 = y, y + h\n if direction == \"vertical\":\n lines.append(((x1 + x2) // 2, y2, (x1 + x2) // 2, y1))\n elif direction == \"horizontal\":\n lines.append((x1, (y1 + y2) // 2, x2, (y1 + y2) // 2))\n\n return dmask, lines\n\n\ndef find_contours(vertical, horizontal):\n \"\"\"Finds table boundaries using OpenCV's findContours.\n\n Parameters\n ----------\n vertical : object\n numpy.ndarray representing pixels where vertical lines lie.\n horizontal : object\n numpy.ndarray representing pixels where horizontal lines lie.\n\n Returns\n -------\n cont : list\n List of tuples representing table boundaries. Each tuple is of\n the form (x, y, w, h) where (x, y) -> left-top, w -> width and\n h -> height in image coordinate space.\n\n \"\"\"\n mask = vertical + horizontal\n\n try:\n __, contours, __ = cv2.findContours(\n mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n except ValueError:\n # for opencv backward compatibility\n contours, __ = cv2.findContours(\n mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n # sort in reverse based on contour area and use first 10 contours\n contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n\n cont = []\n for c in contours:\n c_poly = cv2.approxPolyDP(c, 3, True)\n x, y, w, h = cv2.boundingRect(c_poly)\n cont.append((x, y, w, h))\n return cont\n\n\ndef find_joints(contours, vertical, horizontal):\n \"\"\"Finds joints/intersections present inside each table boundary.\n\n Parameters\n ----------\n contours : list\n List of tuples representing table boundaries. 
Each tuple is of\n the form (x, y, w, h) where (x, y) -> left-top, w -> width and\n h -> height in image coordinate space.\n vertical : object\n numpy.ndarray representing pixels where vertical lines lie.\n horizontal : object\n numpy.ndarray representing pixels where horizontal lines lie.\n\n Returns\n -------\n tables : dict\n Dict with table boundaries as keys and list of intersections\n in that boundary as their value.\n Keys are of the form (x1, y1, x2, y2) where (x1, y1) -> lb\n and (x2, y2) -> rt in image coordinate space.\n\n \"\"\"\n joints = np.multiply(vertical, horizontal)\n tables = {}\n for c in contours:\n x, y, w, h = c\n roi = joints[y : y + h, x : x + w]\n try:\n __, jc, __ = cv2.findContours(\n roi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE\n )\n except ValueError:\n # for opencv backward compatibility\n jc, __ = cv2.findContours(\n roi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE\n )\n if len(jc) <= 4: # remove contours with less than 4 joints\n continue\n joint_coords = []\n for j in jc:\n jx, jy, jw, jh = cv2.boundingRect(j)\n c1, c2 = x + (2 * jx + jw) // 2, y + (2 * jy + jh) // 2\n joint_coords.append((c1, c2))\n tables[(x, y + h, x + w, y)] = joint_coords\n\n return tables\n" ]
[ [ "numpy.invert", "numpy.zeros", "numpy.multiply" ] ]
ishaan-narula/coursera-deep-learning-specialisation
[ "ee33cd3cb5b0b72beadd73d9ec51acd97f1398ff" ]
[ "4 Coursera - Convolutional Neural Networks/Programming Assignments/Week 1 - Foundations of Convolutional Neural Networks/W1A2/test_utils.py" ]
[ "import numpy as np\nfrom termcolor import colored\n\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Dropout \nfrom tensorflow.keras.layers import Conv2DTranspose\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.layers import ZeroPadding2D\nfrom tensorflow.keras.layers import Dense\n\n\n# Compare the two inputs\ndef comparator(learner, instructor):\n for a, b in zip(learner, instructor):\n if tuple(a) != tuple(b):\n print(colored(\"Test failed\", attrs=['bold']),\n \"\\n Expected value \\n\\n\", colored(f\"{b}\", \"green\"), \n \"\\n\\n does not match the input value: \\n\\n\", \n colored(f\"{a}\", \"red\"))\n raise AssertionError(\"Error in test\") \n print(colored(\"All tests passed!\", \"green\"))\n\n# extracts the description of a given model\ndef summary(model):\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n result = []\n for layer in model.layers:\n descriptors = [layer.__class__.__name__, layer.output_shape, layer.count_params()]\n if (type(layer) == Conv2D):\n descriptors.append(layer.padding)\n descriptors.append(layer.activation.__name__)\n descriptors.append(layer.kernel_initializer.__class__.__name__)\n if (type(layer) == MaxPooling2D):\n descriptors.append(layer.pool_size)\n descriptors.append(layer.strides)\n descriptors.append(layer.padding)\n if (type(layer) == Dropout):\n descriptors.append(layer.rate)\n if (type(layer) == ZeroPadding2D):\n descriptors.append(layer.padding)\n if (type(layer) == Dense):\n descriptors.append(layer.activation.__name__)\n result.append(descriptors)\n return result\n\ndef datatype_check(expected_output, target_output, error):\n success = 0\n if isinstance(target_output, dict):\n for key in target_output.keys():\n try:\n success += datatype_check(expected_output[key], \n target_output[key], error)\n except:\n print(\"Error: {} in variable {}. 
Got {} but expected type {}\".format(error,\n key, type(target_output[key]), type(expected_output[key])))\n if success == len(target_output.keys()):\n return 1\n else:\n return 0\n elif isinstance(target_output, tuple) or isinstance(target_output, list):\n for i in range(len(target_output)):\n try: \n success += datatype_check(expected_output[i], \n target_output[i], error)\n except:\n print(\"Error: {} in variable {}, expected type: {} but expected type {}\".format(error,\n i, type(target_output[i]), type(expected_output[i])))\n if success == len(target_output):\n return 1\n else:\n return 0\n \n else:\n assert isinstance(target_output, type(expected_output))\n return 1\n \ndef equation_output_check(expected_output, target_output, error):\n success = 0\n if isinstance(target_output, dict):\n for key in target_output.keys():\n try:\n success += equation_output_check(expected_output[key], \n target_output[key], error)\n except:\n print(\"Error: {} for variable {}.\".format(error,\n key))\n if success == len(target_output.keys()):\n return 1\n else:\n return 0\n elif isinstance(target_output, tuple) or isinstance(target_output, list):\n for i in range(len(target_output)):\n try: \n success += equation_output_check(expected_output[i], \n target_output[i], error)\n except:\n print(\"Error: {} for variable in position {}.\".format(error, i))\n if success == len(target_output):\n return 1\n else:\n return 0\n \n else:\n if hasattr(target_output, 'shape'):\n np.testing.assert_array_almost_equal(target_output, expected_output)\n else:\n assert target_output == expected_output\n return 1\n \ndef shape_check(expected_output, target_output, error):\n success = 0\n if isinstance(target_output, dict):\n for key in target_output.keys():\n try:\n success += shape_check(expected_output[key], \n target_output[key], error)\n except:\n print(\"Error: {} for variable {}.\".format(error, key))\n if success == len(target_output.keys()):\n return 1\n else:\n return 0\n elif isinstance(target_output, tuple) or isinstance(target_output, list):\n for i in range(len(target_output)):\n try: \n success += shape_check(expected_output[i], \n target_output[i], error)\n except:\n print(\"Error: {} for variable {}.\".format(error, i))\n if success == len(target_output):\n return 1\n else:\n return 0\n \n else:\n if hasattr(target_output, 'shape'):\n assert target_output.shape == expected_output.shape\n return 1\n \ndef single_test(test_cases, target):\n success = 0\n for test_case in test_cases:\n try:\n if test_case['name'] == \"datatype_check\":\n assert isinstance(target(*test_case['input']),\n type(test_case[\"expected\"]))\n success += 1\n if test_case['name'] == \"equation_output_check\":\n assert np.allclose(test_case[\"expected\"],\n target(*test_case['input']))\n success += 1\n if test_case['name'] == \"shape_check\":\n assert test_case['expected'].shape == target(*test_case['input']).shape\n success += 1\n except:\n print(\"Error: \" + test_case['error'])\n \n if success == len(test_cases):\n print(\"\\033[92m All tests passed.\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', len(test_cases) - success, \" Tests failed\")\n raise AssertionError(\"Not all tests were passed for {}. 
Check your equations and avoid using global variables inside the function.\".format(target.__name__))\n \ndef multiple_test(test_cases, target):\n success = 0\n for test_case in test_cases:\n try:\n target_answer = target(*test_case['input']) \n if test_case['name'] == \"datatype_check\":\n success += datatype_check(test_case['expected'], target_answer, test_case['error'])\n if test_case['name'] == \"equation_output_check\":\n success += equation_output_check(test_case['expected'], target_answer, test_case['error'])\n if test_case['name'] == \"shape_check\":\n success += shape_check(test_case['expected'], target_answer, test_case['error'])\n except:\n print(\"Error: \" + test_case['error'])\n \n if success == len(test_cases):\n print(\"\\033[92m All tests passed.\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', len(test_cases) - success, \" Tests failed\")\n raise AssertionError(\"Not all tests were passed for {}. Check your equations and avoid using global variables inside the function.\".format(target.__name__))\n \n \n \n" ]
[ [ "numpy.testing.assert_array_almost_equal" ] ]
airflow-plugins/spreadsheet_plugin
[ "bcb75666db14272dc31032920bb17263e6a6def0" ]
[ "operators/s3_to_spreadsheet_operator.py" ]
[ "from airflow.utils.decorators import apply_defaults\nfrom airflow.hooks.S3_hook import S3Hook\nfrom airflow.models import BaseOperator\nimport pandas as pd\nfrom BoxPlugin.hooks.box_hook import BoxHook\n\n\nclass S3ToSpreadsheetOperator(BaseOperator):\n \"\"\"\n S3 to Spreadsheet Operator\n :param input_s3_conn_id: The input s3 connection id.\n :type input_s3_conn_id: string\n :param input_s3_bucket: The input s3 bucket.\n :type input_s3_bucket: string\n :param input_s3_key: The input s3 key. This can be formatted\n as:\n 1) String - To be used with a\n single file with default 'Sheet1'\n naming convention.\n 2) Dictionary - To be used with a\n single file with a distinct sheet\n name to be used in the final\n spreadsheet.\n (e.g. \"{\"key_name\": \"sheet_name\"}\")\n 3) List of dictionaries - To be\n used with multiple files each\n going into a separate sheet.\n (e.g. \"[\n {\"key_name1\": \"sheet_name1\"},\n {\"key_name2\": \"sheet_name2\"},\n {\"key_name3\": \"sheet_name3\"},\n ])\n :type input_s3_key: string\n :param input_file_type: The file type of the input file.\n (JSON/CSV)\n :type input_file_type: string\n :type output_destination: The output destination. Currently,\n accepts \"S3\" (default) or \"Box\".\n :param output_destination: string\n :param output_conn_id: The output connection id.\n :type output_conn_id: string\n :param output_s3_bucket: The output s3 bucket. Only used if\n output_destination set to \"S3\".\n :type output_s3_bucket: string\n :param output_s3_key: The output s3 key. Only used if\n output_destination set to \"S3\".\n :type output_s3_key: string\n :param box_folder_id: The relevant box folder id. This value\n can be found in the URL when inside the\n Box UI. By default, this value will be\n set to 0 (i.e. home directory.)\n :type box_folder_id: string\n :param box_file_name: The file name in Box. This cannot be\n the same as any file already in the\n destination Box folder.\n :type box_file_name: string\n :param output_format: The output file format. 
Currently, only\n accepts \"excel\".\n :type output_format: string\n :param output_payload: The output payload, a self-defined\n dictionary of dataframe parameters to\n pass into output functions.\n :type output_payload: string\n :param filters: Key-Value pairs that filters the pandas\n dataframe prior to creating the Excel\n file.\n :type filters: dictionary\n :param append_fields: Key-Value pairs that get appended to\n the pandas dataframe prior to creating\n the Excel file.\n :type append_fields: dictionary\n \"\"\"\n\n template_fields = ['input_s3_key',\n 'output_s3_key',\n 'output_payload',\n 'filters',\n 'append_fields',\n 'output_box_file_name']\n\n @apply_defaults\n def __init__(self,\n input_s3_conn_id,\n input_s3_bucket,\n input_s3_key,\n input_file_type,\n output_destination='S3',\n output_conn_id=None,\n output_s3_bucket=None,\n output_s3_key=None,\n output_box_folder_id='0',\n output_box_file_name=None,\n output_format=None,\n output_payload=None,\n filters=None,\n append_fields=None,\n *args,\n **kwargs):\n super(S3ToSpreadsheetOperator, self).__init__(*args, **kwargs)\n self.input_s3_conn_id = input_s3_conn_id\n self.input_s3_bucket = input_s3_bucket\n self.input_s3_key = input_s3_key\n self.input_file_type = input_file_type\n self.output_destination = output_destination\n self.output_conn_id = output_conn_id\n self.output_s3_bucket = output_s3_bucket\n self.output_s3_key = output_s3_key\n self.output_box_folder_id = output_box_folder_id\n self.output_box_file_name = output_box_file_name\n self.output_format = output_format\n self.output_payload = output_payload\n self.filters = filters\n self.append_fields = append_fields\n\n if self.input_file_type.lower() not in ('json', 'csv'):\n raise Exception('Unsupported input file type.')\n\n if self.output_format.lower() not in ('excel'):\n raise Exception('Unsupported output file format.')\n\n if self.output_destination.lower() not in ('s3', 'box'):\n raise Exception('Unsupported output destination.')\n\n def execute(self, context):\n input_s3 = S3Hook(s3_conn_id=self.input_s3_conn_id)\n if self.output_format == 'excel':\n w = pd.ExcelWriter('temp.xlsx')\n print(self.input_s3_key)\n print(type(self.input_s3_key))\n if isinstance(self.input_s3_key, list):\n for i in self.input_s3_key:\n for k, v in i.items():\n input_key = \\\n (input_s3.get_key(k,\n bucket_name=self.input_s3_bucket))\n df = self.read_file(input_key)\n df.to_excel(w, sheet_name=v, **self.output_payload)\n else:\n if isinstance(self.input_s3_key, dict):\n for k, v in self.input_s3_key.items():\n input_key = input_s3.get_key(k,\n bucket_name=self.input_s3_bucket)\n df = self.read_file(input_key)\n df.to_excel(w, sheet_name=v, **self.output_payload)\n else:\n print(self.input_s3_key)\n input_key = input_s3.get_key(self.input_s3_key,\n bucket_name=self.input_s3_bucket)\n df = self.read_file(input_key)\n print(df.head())\n df.to_excel(w, **self.output_payload)\n input_s3.connection.close()\n w.save()\n if self.output_destination.lower() == 's3':\n output_s3 = S3Hook(s3_conn_id=self.output_conn_id)\n output_s3.load_file(\n filename='temp.xlsx',\n key=self.output_s3_key,\n bucket_name=self.output_s3_bucket,\n replace=True)\n output_s3.connection.close()\n elif self.output_destination.lower() == 'box':\n box_hook = BoxHook(self.output_conn_id)\n print(self.output_box_file_name)\n box_hook.upload_file(folder_id=self.output_box_folder_id,\n file_path='temp.xlsx',\n file_name=self.output_box_file_name)\n\n def read_file(self, input_key):\n if self.input_file_type.lower() == 
'json':\n print('INPUT KEY')\n print(input_key)\n df = pd.read_json(input_key\n .get_contents_as_string(encoding='utf-8'),\n orient='records')\n elif self.input_file_type.lower() == 'csv':\n df = pd.read_csv(input_key, low_memory=False)\n # Apply a mapping function to escape invalid characters\n df = (df.applymap(lambda x: x.encode('unicode_escape')\n .decode('utf-8')\n if isinstance(x, str) else x))\n if self.filters:\n for i in self.filters:\n if i in df.columns.values.tolist():\n df = df[df[i] == self.filters[i]]\n # Append on any user-defined fields if they exist\n if self.append_fields:\n for i in self.append_fields:\n df[i] = self.append_fields[i]\n return df\n" ]
[ [ "pandas.read_csv", "pandas.ExcelWriter" ] ]
nitrawf/recommender
[ "66f30665a3bfae9fd719d0947fa3306ecaf5b63e" ]
[ "scraper.py" ]
[ "import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport os\nimport pandas as pd\nimport time\n\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef get_details(player):\n\tuser_id = player.find(class_ = \"ranking-page-table__user-link-text js-usercard\").get('data-user-id')\n\trank = (\"\".join((player.find(class_ = \"ranking-page-table__column ranking-page-table__column--rank\").get_text()).split())).replace(\"#\", \"\")\n\tname = \"\".join((player.find(class_ = \"ranking-page-table__user-link-text js-usercard\").get_text()).split())\n\tmore_details = player.find_all(class_ = \"ranking-page-table__column ranking-page-table__column--dimmed\")\n\tacc = \"\".join((more_details[0].get_text()).split())\n\tplaycount = \"\".join((more_details[1].get_text()).split())\n\tpp = (\"\".join((player.find(class_ = \"ranking-page-table__column ranking-page-table__column--focused\").get_text()).split())).replace(\",\", \"\")\n\tentry = {\"user_id\" : user_id, \"rank\" : rank, \"name\" : name, \"accuracy\" : acc, \"playcount\" : playcount, \"pp\" : pp}\n\treturn entry\n\ndef get_country_links():\n\tcountry_links = []\n\tfor pg_no in range(1,3):\n\t\tcountry_page = requests.get(\"https://osu.ppy.sh/rankings/osu/country?page=\" + str(pg_no))\n\t\tsoup = BeautifulSoup(country_page.content, 'html.parser')\n\t\tcountries = soup.find_all('a', class_ = 'ranking-page-table__user-link')\n\t\tfor country in countries:\n\t\t\tcountry_links.append(country.get('href'))\n\treturn country_links\n\ndef get_player_records():\n\tcountry_links = get_country_links()\n\tplayer_details = []\n\n\tfor country in country_links:\n\t\tflag = True\n\t\tpgno = 1\n\t\twhile flag is True:\n\t\t\ttry:\n\t\t\t\tprint(f'Currently Scraping:{country.split(\"=\")[1]}')\n\t\t\t\tpage = requests.get(country+\"&page=\"+str(pgno))\n\t\t\t\tsoup = BeautifulSoup(page.content, 'html.parser')\n\t\t\t\ttable = soup.find('tbody')\n\t\t\t\tplayers = table.find_all('tr')\t\n\t\t\t\tfor player in players:\t\t\t\t\n\t\t\t\t\tentry = get_details(player)\n\t\t\t\t\tif (len(player_details)+1) % 3000 == 0:\n\t\t\t\t\t\ttime.sleep(60)\n\t\t\t\t\tif int(entry['pp']) < 5000:\n\t\t\t\t\t\tflag = False\n\t\t\t\t\t\tbreak\n\t\t\t\t\tplayer_details.append(entry)\n\t\t\t\tprint(f'Total number of records created = {len(player_details)}')\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tpgno += 1\n\treturn(player_details)\n\t\n\t\n\ndef get_top_10k():\n\tpre_dump = []\n\tfor pgno in range(1,201):\n\t\tpage = requests.get(\"https://osu.ppy.sh/rankings/osu/performance?page=\"+str(pgno))\n\t\tcls()\n\t\tprint(f\"Page {pgno} out of 200\")\n\t\ttry:\n\t\t\tsoup = BeautifulSoup(page.content, 'html.parser')\n\t\t\ttable = soup.find('tbody')\n\t\t\tplayers = table.find_all('tr')\n\t\t\tfor player in players:\n\t\t\t\tentry = get_details(player)\n\t\t\t\tpre_dump.append(entry)\n\t\texcept: \n\t\t\tpass\n\twith open(\"player_records.json\", \"w\") as write_file:\n\t\tjson.dump(pre_dump, write_file)\n\n\nplayer_details = get_player_records()\nwith open(\"player_records.json\", \"w\") as write_file:\n\tjson.dump(player_details, write_file)\ndf = pd.DataFrame(player_details)\nprint(df.head(50))\nprint(df.tail(50))\n" ]
[ [ "pandas.DataFrame" ] ]
mitmedialab/color_tag_tracker
[ "9eddbb76d41ca6ea531a8b7c7c76792312be1ab4" ]
[ "color_tag_tracker_webcam_test.py" ]
[ "import numpy as np\nimport cv2\nfrom color_tag_tracker import find_tags\n\nframes = 5000\n\ncam_mtx = np.load('mtx.npy')\ncam_dist = np.load('dist.npy')\n\ncamera = cv2.VideoCapture(0)\n\nprint(\"Starting test\")\n\nfor i in range(frames):\n if i % 50 is 0:\n print(i)\n\n # Read image from camera\n _, image = camera.read()\n\n # Attempt to find tag in image\n tags = find_tags(image, cam_mtx, cam_dist, display_img=True)\n\n if len(tags) == 0:\n continue\n\n # If any are found, print pose of first tag\n tag_id, r_vec, t_vec = tags[0]\n print('Tag id: ' + str(tag_id))\n print('Rotation vector:')\n print(r_vec)\n print('Translation vector:')\n print(t_vec)\n\nprint(\"Finished test\")\n" ]
[ [ "numpy.load" ] ]
worldveil/tensorflow
[ "f5de234d7f601214443f371e90fbadc8f128bb9a" ]
[ "tensorflow/contrib/py2tf/conversion.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"High level conversion support.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.contrib.py2tf import config\nfrom tensorflow.contrib.py2tf import naming\nfrom tensorflow.contrib.py2tf.convert import call_trees\nfrom tensorflow.contrib.py2tf.convert import control_flow\nfrom tensorflow.contrib.py2tf.convert import gradients_function\nfrom tensorflow.contrib.py2tf.convert import logical_expressions\nfrom tensorflow.contrib.py2tf.convert import print_functions\nfrom tensorflow.contrib.py2tf.convert import side_effect_guards\nfrom tensorflow.contrib.py2tf.pyct import parser\nfrom tensorflow.contrib.py2tf.pyct.static_analysis import access\nfrom tensorflow.contrib.py2tf.pyct.static_analysis import live_values\nfrom tensorflow.contrib.py2tf.pyct.static_analysis import type_info\n\n\nclass ConversionMap(object):\n \"\"\"ConversionMaps keep track of converting function hierarchies.\n\n Attributes:\n dependency_cache: dict[object]: ast; maps original objects to their\n converted AST\n name_map: dict[string]: string; maps original objects to the name of\n their converted counterparts\n \"\"\"\n\n def __init__(self):\n self.dependency_cache = {}\n self.name_map = {}\n\n def new_namer(self, global_symbols):\n return naming.Namer(global_symbols, self.name_map)\n\n def update_name_map(self, namer):\n for o, name in namer.renamed_calls.items():\n if o in self.name_map:\n if self.name_map[o] != name:\n raise ValueError(\n 'Calls to %s were converted using multiple names (%s). This is '\n 'possible when an object with one of these names already '\n 'existed. To fix, avoid using any of these names.')\n else:\n self.name_map[o] = name\n\n def add_to_cache(self, original_object, converted_ast):\n self.dependency_cache[original_object] = converted_ast\n\n\ndef object_to_graph(o, conversion_map, value_hints):\n \"\"\"Compile a Python object into equivalent TensorFlow.\n\n The function will also recursively compile all the objects that `o`\n references, updating `dependency_cache`.\n\n This function is reentrant, and relies on dependency_cache to avoid\n generating duplicate code.\n\n Args:\n o: A Python object.\n conversion_map: A ConversionMap object.\n value_hints: A dict containing value hints for symbols like function\n parameters.\n\n Returns:\n A tuple (ast, new_name):\n * ast: An AST representing an object with interface equivalent to `o`,\n but which when executed it creates TF a graph.\n * new_name: The symbol name under which the new object can be found.\n\n Raises:\n ValueError: if the object is not supported.\n \"\"\"\n if callable(o):\n return function_to_graph(o, conversion_map, value_hints)\n raise ValueError(\n 'Unsupported object type %s. 
Only functions are supported for now.')\n\n\ndef function_to_graph(f, conversion_map, param_value_hints):\n \"\"\"Specialization of `object_to_graph` for callable functions.\"\"\"\n node = parser.parse_object(f).body[0]\n node_globals = six.get_function_globals(f)\n\n # This is needed for non-global functions.\n closure = six.get_function_closure(f)\n if closure:\n for e in closure:\n if callable(e.cell_contents):\n fn = e.cell_contents\n node_globals[fn.__name__] = fn\n\n namer = conversion_map.new_namer(node_globals)\n node = node_to_graph(node, namer, node_globals, param_value_hints)\n\n # Simulate a rename to ensure the top level is in the name map. This is needed\n # for top level functions, and it also helps the consistency verification made\n # by update_name_map.\n namer.compiled_function_name(f.__name__, f)\n\n conversion_map.add_to_cache(f, node)\n conversion_map.update_name_map(namer)\n\n # Recursively convert any remaining dependencies.\n for obj in conversion_map.name_map.keys():\n if obj not in conversion_map.dependency_cache:\n object_to_graph(obj, conversion_map, None)\n return node, conversion_map.name_map[f]\n\n\ndef node_to_graph(node, namer, namespace, value_hints):\n \"\"\"Convert Python code to equivalent TF graph mode code.\n\n Args:\n node: A Python AST node representing the code to convert.\n namer: A naming.Namer object.\n namespace: Dict mapping symbol names to their corresponding live objects.\n value_hints: A dict containing value hints for symbols like function\n parameters.\n\n Returns:\n A tuple (node, deps):\n * node: A Python ast node, representing the converted code.\n * deps: A set of strings, the fully qualified names of object\n dependencies that this node has.\n \"\"\"\n # TODO(mdan): Get rid of this.\n node = gradients_function.transform(node)\n\n node = access.resolve(node)\n node = live_values.resolve(node, namespace, config.PYTHON_LITERALS)\n node = type_info.resolve(node, value_hints)\n\n # TODO(mdan): Factor out common elements.\n # These include:\n # * keeping track of symbols that have been created\n # * marking nodes (e.g. py_func wrappers) to suppress further processing\n\n node = print_functions.transform(node)\n node = call_trees.transform(node, namer, config.DEFAULT_UNCOMPILED_MODULES)\n node = control_flow.transform(node, namer)\n node = logical_expressions.transform(node)\n node = side_effect_guards.transform(node, namer)\n\n return node\n" ]
[ [ "tensorflow.contrib.py2tf.pyct.parser.parse_object", "tensorflow.contrib.py2tf.convert.control_flow.transform", "tensorflow.contrib.py2tf.convert.logical_expressions.transform", "tensorflow.contrib.py2tf.convert.side_effect_guards.transform", "tensorflow.contrib.py2tf.naming.Namer", "tensorflow.contrib.py2tf.pyct.static_analysis.type_info.resolve", "tensorflow.contrib.py2tf.convert.gradients_function.transform", "tensorflow.contrib.py2tf.convert.print_functions.transform", "tensorflow.contrib.py2tf.convert.call_trees.transform", "tensorflow.contrib.py2tf.pyct.static_analysis.live_values.resolve", "tensorflow.contrib.py2tf.pyct.static_analysis.access.resolve" ] ]
klauscc/tfcx
[ "df96fd433f13f277d831da6ffc91163566bb4a56", "df96fd433f13f277d831da6ffc91163566bb4a56" ]
[ "network/unet.py", "callbacks/ckpt_callbacks.py" ]
[ "# -*- coding: utf-8 -*-\n#================================================================\n# God Bless You.\n#\n# author: klaus\n# email: chengfeng2333@gmail.com\n# created date: 2019/09/26\n# description:\n#\n#================================================================\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom .. import layers as mi_layers\nfrom ..layers.conv_layers import *\n\n\ndef pytorch_conv_initializer(shape, dtype=None):\n dim = len(shape)\n c_in = shape[-2]\n k = 1.0\n for i in range(dim - 1):\n k *= shape[i]\n limit = np.sqrt(1 / k)\n return K.random_uniform(shape, -limit, limit)\n\n\nclass Pix2PixUnet(tf.keras.layers.Layer):\n \"\"\"Unet\n \n Args:\n init_filters: Int. The initial filter number.\n kernel_size: Int. Kernel size used by conv layers.\n num_downsample: Int. The downsample times. Default to 3.\n **kwargs: The other arguments passed to the Layer.\n \n \"\"\"\n\n def __init__(self,\n init_filters,\n output_channels,\n kernel_size,\n norm_type=\"batchnorm\",\n num_downsample=4,\n **kwargs):\n super(Pix2PixUnet, self).__init__(**kwargs)\n\n self.down_stack = []\n self.up_stack = []\n self.down_stack = [\n downsample(init_filters, kernel_size, norm_type, first_down=True), # / (64,64,64) \n downsample(init_filters * 2, kernel_size, norm_type), # / (32,32,128) \n downsample(init_filters * 4, kernel_size, norm_type), # / (16,16,256) \n ]\n\n self.bridge = tf.keras.Sequential([\n tf.keras.layers.MaxPool2D((2, 2), padding=\"same\"),\n conv_norm_relu(init_filters * 8, kernel_size=kernel_size, norm_type=norm_type),\n Conv2DTranspose(init_filters * 4, kernel_size, strides=2, padding='same')\n ])\n\n self.up_stack = [\n self.bridge, # (16,16,256)\n upsample(init_filters * 2, kernel_size, norm_type), # / (32,32,128) \n upsample(init_filters, kernel_size, norm_type), # / (64,64,64) \n ]\n\n self.last = tf.keras.Sequential([\n Conv2D(init_filters, kernel_size, padding=\"same\", use_bias=False),\n tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5),\n tf.keras.layers.ReLU(),\n Conv2D(output_channels, kernel_size, padding=\"same\")\n ]) # (64,64,output_channels)\n\n self.concat = tf.keras.layers.Concatenate()\n\n def call(self, x, training=None):\n\n skips = []\n for down in self.down_stack:\n x = down(x, training)\n skips.append(x)\n\n skips = reversed(skips)\n for up, skip in zip(self.up_stack, skips):\n x = up(x, training)\n x = self.concat([x, skip])\n x = self.last(x, training)\n return x\n\n\ndef conv_norm_relu(filters,\n *args,\n kernel_size=3,\n norm_type=\"batchnorm\",\n apply_norm=True,\n apply_dropout=False,\n **kwargs):\n net = tf.keras.Sequential()\n if norm_type.lower() == \"batchnorm\":\n norm_layer = tf.keras.layers.BatchNormalization\n elif norm_type.lower() == \"instancenorm\":\n norm_layer = InstanceNormalization\n else:\n norm_layer = None\n net.add(Conv2D(filters, kernel_size, padding=\"same\", use_bias=False, *args, **kwargs))\n if norm_layer and apply_norm:\n net.add(norm_layer(momentum=0.9, epsilon=1e-5))\n net.add(tf.keras.layers.ReLU())\n if apply_dropout:\n net.add(tf.keras.layers.Dropout(0.3))\n\n return net\n\n\ndef downsample(filters, size, norm_type='batchnorm', apply_norm=True, first_down=False):\n \"\"\"Downsamples an input.\n Conv2D => Batchnorm => LeakyRelu\n Args:\n filters: number of filters\n size: filter size\n norm_type: Normalization type; either 'batchnorm' or 'instancenorm'.\n apply_norm: If True, adds the batchnorm layer\n Returns:\n Downsample Sequential Model\n \"\"\"\n # 
initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n if not first_down:\n result.add(tf.keras.layers.MaxPool2D((2, 2), padding=\"same\"))\n result.add(Conv2D(filters, size, strides=1, padding='same', use_bias=False))\n\n if apply_norm:\n if norm_type.lower() == 'batchnorm':\n result.add(tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5))\n elif norm_type.lower() == 'instancenorm':\n result.add(InstanceNormalization())\n\n result.add(tf.keras.layers.ReLU())\n\n return result\n\n\ndef upsample(filters, size, norm_type='batchnorm', apply_dropout=False):\n \"\"\"Upsamples an input.\n Conv2DTranspose => Batchnorm => Dropout => Relu\n Args:\n filters: number of filters\n size: filter size\n norm_type: Normalization type; either 'batchnorm' or 'instancenorm'.\n apply_dropout: If True, adds the dropout layer\n Returns:\n Upsample Sequential Model\n \"\"\"\n\n # initializer = tf.random_normal_initializer(0., 0.02)\n\n if norm_type == \"batchnorm\":\n norm_layer = tf.keras.layers.BatchNormalization\n elif norm_type == \"instancenorm\":\n norm_layer = InstanceNormalization\n else:\n norm_layer = None\n\n result = tf.keras.Sequential()\n result.add(Conv2D(filters * 2, size, strides=1, padding=\"same\", use_bias=False))\n if norm_layer:\n result.add(norm_layer(momentum=0.9, epsilon=1e-5))\n result.add(tf.keras.layers.ReLU())\n if apply_dropout:\n result.add(tf.keras.layers.Dropout(0.5))\n result.add(Conv2DTranspose(filters, size, strides=2, padding='same'))\n return result\n", "# -*- coding: utf-8 -*-\n#================================================================\n# God Bless You.\n#\n# author: klaus\n# email: chengfeng2333@gmail.com\n# created date: 2019/09/26\n# description:\n#\n#================================================================\n\nimport tensorflow as tf\n\n\nclass CheckpointCallback(tf.keras.callbacks.Callback):\n \"\"\"save and load tf checkpoints\"\"\"\n\n def __init__(self, filepath, ckpt=None, save_freq=1, max_to_keep=100):\n super(CheckpointCallback, self).__init__()\n self.filepath = filepath\n self.save_freq = save_freq\n self.max_to_keep = max_to_keep\n self.ckpt = ckpt\n self.ckpt_manager = None\n\n def set_ckpt_manager(self):\n if self.ckpt is None:\n self.ckpt = tf.train.Checkpoint(step=tf.Variable(0),\n optimizer=self.model.optimizer,\n model=self.model)\n if self.ckpt_manager is None:\n self.ckpt_manager = tf.train.CheckpointManager(self.ckpt,\n self.filepath,\n max_to_keep=self.max_to_keep)\n return self.ckpt_manager\n\n def on_train_batch_end(self, batch, logs):\n if self.ckpt is None:\n self.set_ckpt_manager()\n self.ckpt.step.assign_add(1)\n\n def on_epoch_end(self, epoch, logs):\n if (epoch + 1) % self.save_freq == 0:\n manager = self.set_ckpt_manager()\n save_path = manager.save(checkpoint_number=epoch)\n print(\"Save checkpoint for epoch {}/ step {} : {}\".format(epoch, int(self.ckpt.step),\n save_path))\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.ReLU", "numpy.sqrt", "tensorflow.keras.Sequential", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.backend.random_uniform", "tensorflow.keras.layers.Dropout" ], [ "tensorflow.train.CheckpointManager", "tensorflow.Variable" ] ]
WhuEven/multi_hyp_cc
[ "53a6bc438b865d606f5e6a53a442efbd8a04fe5b" ]
[ "loss/ffcc.py" ]
[ "# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nimport torch\nimport math\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom core.utils import *\nfrom numpy.linalg import norm\nimport torch.nn.functional as F\n\n# google FFCC loss\nclass Ffcc(nn.Module):\n def __init__(self, conf, logistic_loss_epochs,\n logistic_loss_mult=2.5, bvm_mult=2.5,\n regularization_mult=0.5):\n logistic_loss_mult = 2**logistic_loss_mult\n bvm_mult = 2**bvm_mult\n\n super(Ffcc, self).__init__()\n self._conf = conf\n self._bin_size = self._conf['log_uv_warp_histogram']['bin_size']\n\n self._logistic_loss_epochs = logistic_loss_epochs\n self._logistic_loss_mult = logistic_loss_mult\n self._bvm_mult = bvm_mult\n self._regularization_mult = regularization_mult\n\n def forward(self, outputs, data, model):\n labels = Variable(data['illuminant_log_uv'], requires_grad=False)\n mu = outputs['mu']\n sigma = outputs['sigma']\n\n regularization_term = 0\n for name, param in model.named_parameters():\n if 'conv' not in name:\n regularization_term += (param*param).sum()\n\n # they actually use 2 losses, logistic regression for some epochs,\n # then, BVM\n if data['epoch'] < self._logistic_loss_epochs:\n # logistic loss\n gt_pdf = data['gt_pdf']\n bin_probability_logits = outputs['bin_probability_logits'].squeeze(1)\n logsoft = F.log_softmax(bin_probability_logits.view(bin_probability_logits.shape[0], -1), 1).view_as(bin_probability_logits)\n logistic_loss_positive = (gt_pdf*logsoft).view(bin_probability_logits.shape[0], -1).sum(1)\n data_term = -self._logistic_loss_mult*logistic_loss_positive.mean()\n else:\n # bivariate von mises\n dif = (labels - mu).unsqueeze(-1)\n\n sigma_inv = torch.inverse(sigma)\n fitting_loss = torch.sum(torch.mul(torch.matmul(sigma_inv, dif), dif).squeeze(-1), 1)\n logdet = batch_logdet2x2(sigma)\n loss_bvm = 0.5*(fitting_loss + logdet + 2*math.log(2*math.pi))\n loss_bvm_min = math.log(2*math.pi*outputs['bivariate_von_mises_epsilon']*self._bin_size*self._bin_size)\n l = loss_bvm - loss_bvm_min\n data_term = self._bvm_mult*l.mean()\n\n return data_term + self._regularization_mult*regularization_term\n" ]
[ [ "torch.inverse", "torch.matmul", "torch.autograd.Variable" ] ]
Katsumata420/generic-pretrained-GEC
[ "c7c6391e8d033ac784f72490d1aceabacb27ad43" ]
[ "BART-GEC/translate.py" ]
[ "import torch\nfrom fairseq.models.bart import BARTModel\n\nimport sys\nimport os\n\nassert len(sys.argv) == 4, \"translate.py model_dir input_text output_dir\"\nmodel_dir = sys.argv[1]\ninput_text = sys.argv[2]\noutput_dir = sys.argv[3]\noutput_path = os.path.join(output_dir, \"hyp.txt\")\n\n\nbart = BARTModel.from_pretrained(\n model_dir,\n checkpoint_file='checkpoint_best.pt',\n data_name_or_path='gec_data-bin'\n)\n\nbart.cuda()\nbart.eval()\nbart.half()\ncount = 1\nbsz = 32\nwith open(input_text) as source, open(output_path, 'w') as fout:\n sline = source.readline().strip()\n slines = [sline]\n for sline in source:\n if count % bsz == 0:\n with torch.no_grad():\n hypotheses_batch = bart.sample(slines, beam=1)\n\n for hypothesis in hypotheses_batch:\n fout.write(hypothesis + '\\n')\n fout.flush()\n slines = []\n\n slines.append(sline.strip())\n count += 1\n if slines != []:\n hypotheses_batch = bart.sample(slines, beam=1)\n for hypothesis in hypotheses_batch:\n fout.write(hypothesis + '\\n')\n fout.flush()\nprint(\"fin\")\n" ]
[ [ "torch.no_grad" ] ]
LukasMosser/MPyS
[ "e1be4ab9ca77fb4fbcd8481cb67605586161a05e" ]
[ "mpys/ds/nearest_neighbor_search.py" ]
[ "from numba import jit\nimport numpy as np\nimport bottleneck as btn\n\n\n@jit(nopython=True, nogil=True)\ndef compute_distances_brute_force_2d(current_node, traced_path, distances):\n for i in range(len(traced_path)):\n distances[i] = (traced_path[i][0] - current_node[0]) * (traced_path[i][0] - current_node[0]) \\\n + (traced_path[i][1] - current_node[1]) * (traced_path[i][1] - current_node[1])\n return distances\n\n\n@jit(nopython=True, nogil=True)\ndef compute_distances_brute_force_3d(current_node, traced_path, distances):\n for i in range(len(traced_path)):\n distances[i] = (traced_path[i][0] - current_node[0]) * (traced_path[i][0] - current_node[0]) \\\n + (traced_path[i][1] - current_node[1]) * (traced_path[i][1] - current_node[1]) \\\n + (traced_path[i][2] - current_node[2]) * (traced_path[i][2] - current_node[2])\n return distances\n\nfrom scipy.spatial import cKDTree\n\ndef compute_distances_kd_tree(current_node, traced_path, n_neighbors):\n tree = cKDTree(traced_path)\n return tree.query(current_node, k=n_neighbors)\n\n@jit(nopython=True, nogil=True)\ndef shell_search_jit(current_node, n_neighbors, shape, image):\n nx, ny = current_node\n shapea, shapeb = shape\n max_shape = None\n shell = None\n if shapea > shapeb:\n max_shape = shapea\n else:\n max_shape = shapeb\n for i in xrange(max_shape):\n low_x = nx-i-1\n if low_x <= 0:\n low_x = 0\n\n high_x = nx+i+2\n if high_x >= shapea:\n high_x = shapea\n\n low_y = ny-i-1\n if low_y <= 0:\n low_y = 0\n\n high_y = ny+i+2\n if high_y >= shapeb:\n high_y = shapeb\n\n shell = image[low_x:high_x, low_y:high_y]\n #flat_shell = shell.flatten()\n count = 0\n node_ids = np.ones((n_neighbors+1, 2))\n for k in xrange(shell.shape[0]):\n for l in xrange(shell.shape[1]):\n val = shell[k, l]\n if val == 0 or val == 1:\n count += 1\n node_ids[count-1][0] = low_x+k\n node_ids[count-1][1] = low_y+l\n# node_ids.append([int(), int(low_y+l)])\n if count >= n_neighbors:\n break\n if count >= n_neighbors:\n break\n if count >= n_neighbors:\n break\n return node_ids\n\ndef brute_force_search(current_node, traced_path, n_neighbors, simulation_grid):\n distances = np.zeros(len(traced_path), dtype=np.int64)\n distances = compute_distances_brute_force_2d(current_node, traced_path, distances)\n nearest_neighbor_ids = btn.argpartition(distances, kth=n_neighbors)\n\n\n nearest_point_ids = nearest_neighbor_ids[0:n_neighbors]\n\n nearest_points = [traced_path[i] for i in nearest_point_ids]\n\n return nearest_points\n\ndef shell_search(current_node, traced_path, n_neighbors, simulation_grid):\n\n return shell_search_jit(current_node, n_neighbors, simulation_grid.shape, simulation_grid)\n\nclass NearestNeighborSearch(object):\n def __init__(self, nearest_neighbor_method=shell_search, n_neighbors=30):\n self.n_neighbors = n_neighbors\n\n self.nearest_neighbor_search = nearest_neighbor_method\n\n self.distances = None\n\n self.nearest_neighbor_ids = None\n\n def find(self, current_node, traced_path, simulation_grid):\n\n self.nodes = self.nearest_neighbor_search(current_node, traced_path, self.n_neighbors, simulation_grid)\n\n return self.nodes\n" ]
[ [ "scipy.spatial.cKDTree", "numpy.ones" ] ]
namabilly/iLOCuS
[ "761fe4162a9fb551f43d887c3ae9d448c3cc8c14" ]
[ "RL/driver_func_test.py" ]
[ "import numpy as np\r\nclass DriverSim:\r\n def __init__(self):\r\n pass\r\n\r\n def react(self, pricing):\r\n return np.random.rand(4,15,15), False\r\n \r\n def reset(self):\r\n return np.random.rand(4,15,15)\r\n" ]
[ [ "numpy.random.rand" ] ]
cmbi/Benchmarking_splice_prediction_tools
[ "d0b50220091c031c46656c7feac120e3f972d890", "d0b50220091c031c46656c7feac120e3f972d890" ]
[ "DSSP/DSSP_DI_input.py", "analysis_variants.py" ]
[ "#import\nimport pandas as pd\nimport numpy as np\nimport pybedtools\nfrom Bio import SeqIO\nimport hgvs\nfrom hgvs.easy import parser\nimport sys\nsys.path.insert(1, '../')\nfrom functions import reverse_sequence\n\ngene = 'ABCA4'\nlength = 140\n\n# define variables\nexcel_file = '../data/variant_scores.xlsx'\ngenome = 'GRCh37'\nchromosome = 1\ndataset = gene + '_DI'\n# reference fasta can be downloaded from http://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/dna/Homo_sapiens.GRCh37.75.dna.chromosome.1.fa.gz\nreference_fasta = '../references/Homo_sapiens.GRCh37.dna.chromosome.1.fa'\n\n# Read in the data \n# Read the second column of the excel sheet and store the variants in a list\ndf = pd.read_excel(excel_file, dataset ,index_col=None, engine='openpyxl')\n\n# store the variant information\ninfo = []\nhp = hgvs.parser.Parser()\nfor i in range(len(df['genomic variant'])):\n variant = df['genomic variant'][i]\n v = hp.parse_hgvs_variant('Chr' + str(chromosome) + 'GRCh37:' + variant)\n \n # Store the variant information so that it can be accessed separately\n var_info = []\n var_info.append(v.posedit.pos.start.base)\n var_info.append(v.posedit.pos.end.base)\n var_info.append(v.posedit.edit.ref)\n var_info.append(v.posedit.edit.alt)\n \n # add if it affects a donor or acceptor\n var_info.append(df['affects'][i])\n\n # add the distance to the splice site\n var_info.append(df['position ss'][i])\n\n info.append(var_info)\n\n# 2) Create the BED file\n# The BED file defines the sequence range that is written to the fasta file later on\nwith open ((dataset + '.bed'), 'w') as file:\n for i in range(len(info)):\n # An input for acceptor site should be a 140-mer string with the AG at positions 69 and 70\n if df['affects'][i] == 'acceptor':\n var_loc = info[i][0]\n loc = var_loc - info[i][5] + 1\n \n # An input for donor site should be a 140-mer string with the GT at positions 71 and 72\n else:\n var_loc = info[i][0]\n loc = var_loc + info[i][5]\n \n # If the variant contains a deletion, a longer sequence is needed to end up with a 140nt long sequence\n if 'del' in df['cDNA variant'][i]:\n # get the length of the deletion\n l = info[i][1] - info[i][0]\n # For deletions affecting the donor site additional bases are added to the end of the sequence\n if df['affects'][i] == 'donor':\n file.write('chr' + str(chromosome) + '\\t' + str(loc-(length//2+2)) + '\\t' + str(loc+(length//2-1)+l) + '\\t\\t\\t' + '-' + '\\n')\n # For deletions affecting the acceptor site additonal bases are added to the beginning of the sequence\n else:\n file.write('chr' + str(chromosome) + '\\t' + str(loc-(length//2+1)-l) + '\\t' + str(loc+(length//2)) + '\\t\\t\\t' + '-' + '\\n')\n \n else: \n file.write('chr' + str(chromosome) + '\\t' + str(loc-(length//2+1)) + '\\t' + str(loc+(length//2-1)) + '\\t\\t\\t' + '-' + '\\n')\n\n\n#Get the sequence for each variant and store it in a fasta file\na = pybedtools.BedTool((dataset + '.bed'))\na = a.sequence(fi = reference_fasta, fo = (dataset + '.fa.out'))\n\nfasta_sequences = SeqIO.parse(open((dataset + '.fa.out')),'fasta')\n# open the new fasta file to save the mutated sequences\nwith open ((dataset + '_donor.fa.out'), 'w') as file:\n with open ((dataset + '_acceptor.fa.out'), 'w') as file2:\n i = 0\n for fasta in fasta_sequences:\n # get the name and the sequence\n name, sequence = fasta.id, str(fasta.seq)\n wt_sequence = reverse_sequence(sequence)[140:]\n\n # get the location of the variant\n if df['affects'][i] == 'acceptor':\n loc = length//2 + info[i][5] - 1\n else:\n loc = 
length//2 - info[i][5]\n\n # variants where one base is changed\n if info[i][0] == info[i][1] and info[i][2] != '':\n assert sequence[loc] == info[i][2]\n # change the base at the variant position\n l = list(sequence)\n l[loc] = info[i][3]\n s = ''.join(l)\n # test if the base at the variant position in the sequence is now the same as the mutated base\n assert s[loc] == info[i][3] \n\n # filter for variants where one single base is deleted\n elif info[i][0] == info[i][1]:\n s = sequence[:loc+1] + sequence[(loc+2):]\n\n # handle deletions with more bases\n else:\n size = info[i][1] - info[i][0] + 1\n s = sequence[:loc + 1] + sequence[(loc + size + 1):]\n\n s = reverse_sequence(s)\n\n # Check if the bases at the donor/acceptor position are correct\n if df['affects'][i] == 'acceptor':\n assert s[68:70] == 'AG'\n else:\n assert s[70:72] in ['GT','GC']\n\n # write the result to a file\n if df['affects'][i] == 'acceptor':\n file2.write('>' + df['cDNA variant'][i] + '\\n' + wt_sequence + '\\n') \n file2.write('>' + df['cDNA variant'][i] + '_var\\n' + s + '\\n') \n else:\n file.write('>' + df['cDNA variant'][i] + '\\n' + wt_sequence + '\\n') \n file.write('>' + df['cDNA variant'][i] + '_var\\n' + s + '\\n') \n\n i += 1", "# import\nimport numpy as np\nimport pandas as pd\nfrom numpy import nan\nfrom collections import Counter\nfrom functions import read_scores_from_excel\n\n# define the variants that should be analyzed (ABCA4_NCSS, ABCA4_DI or MYBPC3_NCSS)\nvariants = 'ABCA4_NCSS'\n\n# calculate the missing scores for each tool\ndf = pd.read_excel('data/variant_scores.xlsx', variants, engine='openpyxl')\ndf.isnull().sum()\n\n# replace missing values with 0\ndf = df.replace(nan, 0)\n\n# print statistics about the variants\nprint('# variants: ', df.shape[0])\nprint('# non splice altering variants: ',\n df[df['% Mutant RNA'] <= 20].count()['cDNA variant'])\nprint('# splice altering variants: ',\n df[df['% Mutant RNA'] > 20].count()['cDNA variant'])\nprint('donor: ', df[df['affects'] == 'donor'].count()['cDNA variant'])\nprint('acceptor: ', df[df['affects'] == 'acceptor'].count()['cDNA variant'])\n\n# print the number of variants that affect the SAS and SDS\ndonor = df['affects'] == \"donor\"\nacceptor = df['affects'] == \"acceptor\"\nsa = df['% Mutant RNA'] > 20\nnsa = df['% Mutant RNA'] <= 20\nprint('SDS, splice altering: ', df[donor & sa].shape[0])\nprint('SDS, normal splicing: ', df[donor & nsa].shape[0])\nprint('SAS, splice altering: ', df[acceptor & sa].shape[0])\nprint('SAS, normal splicing: ', df[acceptor & nsa].shape[0])\n\n# print the position of the variants in the NCSS\nlocations = []\n# only calculate the distribution for NCSS variants\nif 'NCSS' in variants:\n for index in df.index:\n # get the ss positions\n pos = df.at[index, 'position ss']\n # check if the variant alters splicing\n if df.at[index, '% Mutant RNA'] > 20:\n sa = 'splice altering'\n else:\n sa = 'normal splicing'\n # check if the variant affects the donor or acceptor\n affects = df.at[index, 'affects']\n locations.append((str(pos) + ' ' + sa + ' ' + affects))\n# count how often a variant is located at a certain position in the NCSS motif and print the result\nprint(Counter(locations))\n" ]
[ [ "pandas.read_excel" ], [ "pandas.read_excel" ] ]
Akssi/fps-rl
[ "3437ba6d1289350865bc4c65d6cc0e884897dd27" ]
[ "models/DQN.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass DQN(nn.Module):\n def __init__(self, dims, n_actions, conv_net):\n super(DQN, self).__init__()\n self.conv_net = conv_net\n self.n_actions = n_actions\n self.layers = nn.ModuleList()\n prev_dim = dims[0]\n for dim in dims[1:]:\n self.layers.append(nn.Linear(prev_dim, dim))\n self.layers.append(nn.ReLU())\n prev_dim = dim\n self.layers.append(nn.Linear(prev_dim, n_actions))\n\n def forward(self, x):\n x = self.conv_net(x)\n x = x.view(x.size(0), -1)\n for layer in self.layers:\n x = layer(x)\n return x\n\n\nclass DuelingDQN(nn.Module):\n def __init__(self, dims, n_actions, conv_net):\n super(DuelingDQN, self).__init__()\n self.conv_net = conv_net\n self.n_actions = n_actions\n self.layers = nn.ModuleList()\n prev_dim = dims[0]\n for dim in dims[1:]:\n self.layers.append(nn.Linear(prev_dim, dim))\n self.layers.append(nn.ReLU())\n prev_dim = dim\n self.layer_advantage = nn.Linear(prev_dim, n_actions)\n self.layer_value = nn.Linear(prev_dim, 1)\n\n def forward(self, x):\n x = self.conv_net(x)\n x = x.view(x.size(0), -1)\n for layer in self.layers:\n x = layer(x)\n v = self.layer_value(x)\n a = self.layer_advantage(x)\n x = (\n v.expand(x.size(0), self.n_actions)\n + a\n - a.mean(1).unsqueeze(1).expand(x.size(0), self.n_actions)\n )\n return x\n\n\nclass ReccurentDDQN(nn.Module):\n def __init__(self, input_dim, hidden_dim, n_layer, n_actions, conv_net):\n super(ReccurentDDQN, self).__init__()\n self.conv_net = conv_net\n self.n_actions = n_actions\n self.layers = nn.ModuleList()\n self.input_dim = input_dim\n\n self.layer_lstm = nn.LSTM(input_dim, hidden_dim, n_layer)\n self.layer_activation = nn.ReLU()\n self.layer_advantage = nn.Linear(hidden_dim, n_actions)\n self.layer_value = nn.Linear(hidden_dim, 1)\n\n def forward(self, x):\n # Convolutions\n conv_feats = torch.zeros(x.size(0), x.size(1), self.input_dim).to(x.device)\n for i in range(x.size(0)):\n conv_feats[i] = self.conv_net(x[i]).view(x[i].size(0), -1)\n # LSTM\n x, _ = self.layer_lstm(conv_feats)\n\n # Distributed FC\n out = []\n for i in range(x.size(0)):\n x[i] = self.layer_activation(x[i])\n v = self.layer_value(x[i])\n a = self.layer_advantage(x[i])\n out.append(\n v.expand(x[i].size(0), self.n_actions)\n + a\n - a.mean(1).unsqueeze(1).expand(x[i].size(0), self.n_actions)\n )\n return torch.stack(out)\n" ]
[ [ "torch.nn.LSTM", "torch.nn.ModuleList", "torch.nn.Linear", "torch.stack", "torch.nn.ReLU" ] ]
HackintoshMan/IndigoMQTTBridge
[ "d0d610fac45486167bcc8c9ab7e5d83ba3528e70" ]
[ "GreenSkyMQTTBridge.indigoPlugin/Contents/Server Plugin/jsonpickle/ext/numpy.py" ]
[ "from __future__ import absolute_import\nimport ast\nimport sys\nimport zlib\nimport warnings\n\nimport numpy as np\n\nfrom ..handlers import BaseHandler, register, unregister\nfrom ..compat import numeric_types\nfrom ..util import b64decode, b64encode\nfrom .. import compat\n\n__all__ = ['register_handlers', 'unregister_handlers']\n\nnative_byteorder = '<' if sys.byteorder == 'little' else '>'\n\n\ndef get_byteorder(arr):\n \"\"\"translate equals sign to native order\"\"\"\n byteorder = arr.dtype.byteorder\n return native_byteorder if byteorder == '=' else byteorder\n\n\nclass NumpyBaseHandler(BaseHandler):\n\n def flatten_dtype(self, dtype, data):\n if hasattr(dtype, 'tostring'):\n data['dtype'] = dtype.tostring()\n else:\n dtype = compat.ustr(dtype)\n prefix = '(numpy.record, '\n if dtype.startswith(prefix):\n dtype = dtype[len(prefix):-1]\n data['dtype'] = dtype\n\n def restore_dtype(self, data):\n dtype = data['dtype']\n if dtype.startswith(('{', '[')):\n dtype = ast.literal_eval(dtype)\n return np.dtype(dtype)\n\n\nclass NumpyDTypeHandler(NumpyBaseHandler):\n\n def flatten(self, obj, data):\n self.flatten_dtype(obj, data)\n return data\n\n def restore(self, data):\n return self.restore_dtype(data)\n\n\nclass NumpyGenericHandler(NumpyBaseHandler):\n\n def flatten(self, obj, data):\n self.flatten_dtype(obj.dtype.newbyteorder('N'), data)\n data['value'] = self.context.flatten(obj.tolist(), reset=False)\n return data\n\n def restore(self, data):\n value = self.context.restore(data['value'], reset=False)\n return self.restore_dtype(data).type(value)\n\n\nclass NumpyNDArrayHandler(NumpyBaseHandler):\n \"\"\"Stores arrays as text representation, without regard for views\n \"\"\"\n def flatten_flags(self, obj, data):\n if obj.flags.writeable is False:\n data['writeable'] = False\n\n def restore_flags(self, data, arr):\n if not data.get('writeable', True):\n arr.flags.writeable = False\n\n def flatten(self, obj, data):\n self.flatten_dtype(obj.dtype.newbyteorder('N'), data)\n self.flatten_flags(obj, data)\n data['values'] = self.context.flatten(obj.tolist(), reset=False)\n if 0 in obj.shape:\n # add shape information explicitly as it cannot be\n # inferred from an empty list\n data['shape'] = obj.shape\n return data\n\n def restore(self, data):\n values = self.context.restore(data['values'], reset=False)\n arr = np.array(\n values,\n dtype=self.restore_dtype(data),\n order=data.get('order', 'C')\n )\n shape = data.get('shape', None)\n if shape is not None:\n arr = arr.reshape(shape)\n\n self.restore_flags(data, arr)\n return arr\n\n\nclass NumpyNDArrayHandlerBinary(NumpyNDArrayHandler):\n \"\"\"stores arrays with size greater than 'size_threshold' as\n (optionally) compressed base64\n\n Notes\n -----\n This would be easier to implement using np.save/np.load, but\n that would be less language-agnostic\n \"\"\"\n\n def __init__(self, size_threshold=16, compression=zlib):\n \"\"\"\n :param size_threshold: nonnegative int or None\n valid values for 'size_threshold' are all nonnegative\n integers and None\n if size_threshold is None, values are always stored as nested lists\n :param compression: a compression module or None\n valid values for 'compression' are {zlib, bz2, None}\n if compresion is None, no compression is applied\n \"\"\"\n self.size_threshold = size_threshold\n self.compression = compression\n\n def flatten_byteorder(self, obj, data):\n byteorder = obj.dtype.byteorder\n if byteorder != '|':\n data['byteorder'] = get_byteorder(obj)\n\n def restore_byteorder(self, data, arr):\n 
byteorder = data.get('byteorder', None)\n if byteorder:\n arr.dtype = arr.dtype.newbyteorder(byteorder)\n\n def flatten(self, obj, data):\n \"\"\"encode numpy to json\"\"\"\n if self.size_threshold is None or self.size_threshold >= obj.size:\n # encode as text\n data = super(NumpyNDArrayHandlerBinary, self).flatten(obj, data)\n else:\n # encode as binary\n if hasattr(obj, 'tobytes'):\n # numpy docstring is lacking as of 1.11.2,\n # but this is the option we need\n buffer = obj.tobytes(order='a')\n else:\n # numpy < 1.9 compatibility\n buffer = obj.tostring(order='a')\n if self.compression:\n buffer = self.compression.compress(buffer)\n data['values'] = b64encode(buffer)\n data['shape'] = obj.shape\n self.flatten_dtype(obj.dtype.newbyteorder('N'), data)\n self.flatten_byteorder(obj, data)\n self.flatten_flags(obj, data)\n\n if not obj.flags.c_contiguous:\n data['order'] = 'F'\n\n return data\n\n def restore(self, data):\n \"\"\"decode numpy from json\"\"\"\n values = data['values']\n if isinstance(values, list):\n # decode text representation\n arr = super(NumpyNDArrayHandlerBinary, self).restore(data)\n elif isinstance(values, numeric_types):\n # single-value array\n arr = np.array([values], dtype=self.restore_dtype(data))\n else:\n # decode binary representation\n buffer = b64decode(values)\n if self.compression:\n buffer = self.compression.decompress(buffer)\n arr = np.ndarray(\n buffer=buffer,\n dtype=self.restore_dtype(data),\n shape=data.get('shape'),\n order=data.get('order', 'C')\n ).copy() # make a copy, to force the result to own the data\n self.restore_byteorder(data, arr)\n self.restore_flags(data, arr)\n\n return arr\n\n\nclass NumpyNDArrayHandlerView(NumpyNDArrayHandlerBinary):\n \"\"\"Pickles references inside ndarrays, or array-views\n\n Notes\n -----\n The current implementation has some restrictions.\n\n 'base' arrays, or arrays which are viewed by other arrays,\n must be f-or-c-contiguous.\n This is not such a large restriction in practice, because all\n numpy array creation is c-contiguous by default.\n Relaxing this restriction would be nice though; especially if\n it can be done without bloating the design too much.\n\n Furthermore, ndarrays which are views of array-like objects\n implementing __array_interface__,\n but which are not themselves nd-arrays, are deepcopied with\n a warning (by default),\n as we cannot guarantee whatever custom logic such classes\n implement is correctly reproduced.\n \"\"\"\n def __init__(self, mode='warn', size_threshold=16, compression=zlib):\n \"\"\"\n :param mode: {'warn', 'raise', 'ignore'}\n How to react when encountering array-like objects whos\n references we cannot safely serialize\n :param size_threshold: nonnegative int or None\n valid values for 'size_threshold' are all nonnegative\n integers and None\n if size_threshold is None, values are always stored as nested lists\n :param compression: a compression module or None\n valid values for 'compression' are {zlib, bz2, None}\n if compresion is None, no compression is applied\n \"\"\"\n super(NumpyNDArrayHandlerView, self).__init__(\n size_threshold, compression)\n self.mode = mode\n\n def flatten(self, obj, data):\n \"\"\"encode numpy to json\"\"\"\n base = obj.base\n if base is None and obj.flags.forc:\n # store by value\n data = super(NumpyNDArrayHandlerView, self).flatten(obj, data)\n # ensure that views on arrays stored as text\n # are interpreted correctly\n if not obj.flags.c_contiguous:\n data['order'] = 'F'\n elif isinstance(base, np.ndarray) and base.flags.forc:\n # 
store by reference\n data['base'] = self.context.flatten(base, reset=False)\n\n offset = obj.ctypes.data - base.ctypes.data\n if offset:\n data['offset'] = offset\n\n if not obj.flags.c_contiguous:\n data['strides'] = obj.strides\n\n data['shape'] = obj.shape\n self.flatten_dtype(obj.dtype.newbyteorder('N'), data)\n self.flatten_flags(obj, data)\n\n if get_byteorder(obj) != '|':\n byteorder = (\n 'S' if get_byteorder(obj) != get_byteorder(base) else None)\n if byteorder:\n data['byteorder'] = byteorder\n\n if self.size_threshold is None or self.size_threshold >= obj.size:\n # not used in restore since base is present, but\n # include values for human-readability\n super(NumpyNDArrayHandlerBinary, self).flatten(obj, data)\n else:\n # store a deepcopy or fail\n if self.mode == 'warn':\n msg = (\n \"ndarray is defined by reference to an object \"\n \"we do not know how to serialize. \"\n \"A deep copy is serialized instead, breaking \"\n \"memory aliasing.\"\n )\n warnings.warn(msg)\n elif self.mode == 'raise':\n msg = (\n \"ndarray is defined by reference to an object we do \"\n \"not know how to serialize.\"\n )\n raise ValueError(msg)\n data = super(NumpyNDArrayHandlerView, self) \\\n .flatten(obj.copy(), data)\n\n return data\n\n def restore(self, data):\n \"\"\"decode numpy from json\"\"\"\n base = data.get('base', None)\n if base is None:\n # decode array with owndata=True\n arr = super(NumpyNDArrayHandlerView, self).restore(data)\n else:\n # decode array view, which references the data of another array\n base = self.context.restore(base, reset=False)\n assert base.flags.forc, \\\n \"Current implementation assumes base is C or F contiguous\"\n\n arr = np.ndarray(\n buffer=base.data,\n dtype=self.restore_dtype(data).newbyteorder(\n data.get('byteorder', '|')),\n shape=data.get('shape'),\n offset=data.get('offset', 0),\n strides=data.get('strides', None)\n )\n\n self.restore_flags(data, arr)\n\n return arr\n\n\ndef register_handlers():\n register(np.dtype, NumpyDTypeHandler, base=True)\n register(np.generic, NumpyGenericHandler, base=True)\n register(np.ndarray, NumpyNDArrayHandlerView(), base=True)\n\n\ndef unregister_handlers():\n unregister(np.dtype)\n unregister(np.generic)\n unregister(np.ndarray)\n" ]
[ [ "numpy.dtype" ] ]
w-rfrsh/leads-recommender
[ "38706f1e9eb080a294c455394f7d09e8b200702f" ]
[ "src/models/recommender.py" ]
[ "import os\nimport pandas as pd\nfrom src.evaluation.evaluate import similarity_metric\n\nusefull_cols = ['sg_uf', 'nm_meso_regiao', 'nm_micro_regiao', 'fl_rm', 'setor', 'nm_segmento',\n 'de_natureza_juridica', 'de_nivel_atividade', 'idade_empresa_anos', 'vl_faturamento_estimado_aux']\n\n\ndef generate_leads(processed_portfolio, processed_market, model):\n \"\"\"Generate recommendations via kneighbors, remove duplicates and\n ids already included in portfolio.\n\n Args:\n processed_portfolio (pandas.core.frame.DataFrame): Processed portfolio dataframe.\n processed_market (pandas.core.frame.DataFrame): Processed market dataframe.\n model (sklearn.neighbors._unsupervised.NearestNeighbors): NN model.\n\n Returns:\n Panda Series with recommended ids\n \"\"\"\n\n dist, indices = model.kneighbors(processed_portfolio.dropna())\n\n leads = pd.DataFrame(list(zip(processed_market.index[indices.flatten()], dist.flatten())),\n columns=['COMPANY ID', 'DISTANCE'])\n leads = leads.sort_values('DISTANCE').set_index('COMPANY ID')\n\n leads = leads.loc[~leads.index.duplicated(keep='first')]\n leads = leads.drop([x for x in leads.index if x in processed_portfolio.index])\n\n return leads\n\n\ndef build_leads_df(raw_market, leads):\n \"\"\"Build raw dataframe and a fancy dataframe(with the usefulls columns).\n The fancy dataframe makes the viewing / decision-making experience more user-friendly,\n since only columns with relevance to these processes are displayed.\n\n Args:\n raw_market (pandas.core.frame.DataFrame): Processed portfolio dataframe.\n leads (pandas.core.series.Series): Leads ids.\n\n Returns:\n A raw dataframe and a dataframe proper to visualization, both composed by the recommended companies.\n \"\"\"\n\n raw_leads = raw_market.reindex(leads.index)\n\n # Build leads dataframe with usefull columns\n df_leads = raw_market[usefull_cols].reindex(raw_leads.index)\n df_leads.reset_index(level=0, inplace=True) # Reset index\n df_leads.index = pd.RangeIndex(1, df_leads.shape[0]+1) # Set start at 1\n df_leads.columns = ['ID', 'UF', 'MESO REGIAO', 'MICRO REGIAO', 'RM', 'SETOR', 'SEGMENTO',\n 'NATUREZA JURIDICA', 'NIVEL DE ATIVIDADE', 'IDADE', 'FATURAMENTO ESTIMADO']\n\n return raw_leads, df_leads\n\n\ndef color_reliability(val):\n \"\"\"Returns different shades of green for different levels of similarity.\"\"\"\n\n if val == 'EXTREMA':\n color='#1f7a1f'\n return f'background-color: {color}'\n elif val == 'ALTA':\n color='#85e085'\n return f'background-color: {color}'\n elif val == 'MÉDIA':\n color='#d8ebb5'\n return f'background-color: {color}'\n else:\n pass\n\n\ndef colorize_df(df, processed_portfolio, processed_leads):\n \"\"\"Calculates similariry metric and make a stylized dataframe, with a new column\n colorized according the levels of similarity\"\"\"\n\n df['CONFIABILIDADE'] = similarity_metric(processed_portfolio, processed_leads)\n return df.style.applymap(color_reliability, subset=['CONFIABILIDADE'])\n\n\ndef save_leads(raw_leads, df_leads):\n \"\"\"Save raw and fancy leads dataframe as .csv. Also save just the IDs in a .txt file\"\"\"\n\n if not os.path.exists('output'):\n os.mkdir('output')\n\n raw_leads.to_csv('output/raw_leads.csv')\n df_leads.to_csv('output/leads.csv')\n with open('output/leads_id.txt', 'w') as f:\n for item in df_leads['ID']:\n f.write(\"%s\\n\" % item)\n" ]
[ [ "pandas.RangeIndex" ] ]
ak710/Stocks-Dash
[ "c916dce1327a4c9d328da8437daff78acbdde169" ]
[ "dash.py" ]
[ "from altair.vegalite.v4.schema.core import Step\nimport yfinance as yf\nimport streamlit as st\nimport wealthsimple\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport ta\nimport backtrader as bt\n\ndef format_number(number):\n return f\"{number:,}\"\n\n\nst.sidebar.write(\"Navigation\")\nscreen = st.sidebar.selectbox(\"View\",(\"Overview\",\"Fundamentals\",\"Wealthsimple\",\"Stock Compare\",\"Technical Analysis\"))\nst.title(screen)\nsymbol = st.sidebar.text_input(\"Symbol\",value=\"AAPL\")\nticker = yf.Ticker(symbol)\n\nif screen == \"Overview\":\n col1,col2 = st.beta_columns([1,4])\n with col1:\n st.image(ticker.info.get('logo_url'))\n with col2:\n st.subheader(ticker.info.get('longName')) \n st.subheader(\"Industry\")\n st.write(ticker.info.get('industry'))\n st.subheader(\"Traded Exchange\")\n st.write(ticker.info.get('exchange'))\n st.write(ticker.info.get('market'))\n st.subheader(\"Summary\")\n with st.beta_expander(label=\"Expand\", expanded=False):\n st.write(ticker.info.get('longBusinessSummary'))\n\nif screen == \"Fundamentals\":\n st.subheader(ticker.info.get('longName')) \n col1,col2,col3 = st.beta_columns(3)\n with col1:\n st.header(\"Basic Values\")\n st.subheader(\"Market Cap\")\n if ticker.info.get('marketCap') is not None:\n st.write(format_number(ticker.info.get('marketCap')))\n else:\n st.write(\"No Value\")\n\n st.subheader(\"EPS\")\n if ticker.info.get('trailingEps') is not None:\n st.write(format_number(ticker.info.get('trailingEps')))\n else:\n st.write(\"No Value\")\n\n st.subheader(\"Dividend\")\n if ticker.info.get('dividendRate') is not None:\n st.write(format_number(ticker.info.get('dividendRate')))\n else:\n st.write('No Value')\n\n st.subheader(\"Volume\")\n if ticker.info.get('volume') is not None:\n st.write(format_number(ticker.info.get('volume')))\n else:\n st.write('No Value')\n \n st.subheader(\"P/E Ratio\")\n if ticker.info.get('trailingPE') is not None:\n st.write(format_number(ticker.info.get('trailingPE')))\n else:\n st.write('No Value')\n\n with col2:\n st.header(\"Prices\")\n st.subheader(\"Yesterday's Price\") \n yest = ticker.info.get('previousClose')\n st.write(format_number(yest))\n\n st.subheader(\"Opening Price\") \n open = ticker.info.get('regularMarketOpen')\n st.write(format_number(open))\n\n st.subheader(\"Closing Price\")\n close = ticker.info.get('regularMarketPrice')\n st.write(format_number(open))\n\n st.subheader(\"% Change\")\n change = (close - yest)/(yest)*100\n st.write(format_number(change))\n\n st.subheader(\"Day High/Low\")\n st.write(ticker.info.get('dayLow'),\"/\",ticker.info.get('dayHigh'))\n\n with col3:\n st.header(\"Fun Stats\")\n\n st.subheader(\"Shorted Shares\")\n if ticker.info.get('sharesShort') is not None:\n st.write(format_number(ticker.info.get('sharesShort')))\n else:\n st.write('No Value')\n\n st.subheader(\"Shorted Ratio\")\n if ticker.info.get('shortRatio') is not None:\n st.write(format_number(ticker.info.get('shortRatio')))\n else:\n st.write('No Value')\n \n st.subheader(\"200 Day Average\")\n if ticker.info.get('twoHundredDayAverage') is not None:\n st.write(format_number(ticker.info.get('twoHundredDayAverage')))\n else:\n st.write('No Value')\n\n st.subheader(\"50 Day Average\")\n if ticker.info.get('fiftyDayAverage') is not None:\n st.write(format_number(ticker.info.get('fiftyDayAverage')))\n else:\n st.write('No Value')\n\n st.subheader(\"Beta\")\n if ticker.info.get('beta') is not None:\n st.write(format_number(ticker.info.get('beta')))\n else:\n st.write('No 
Value')\n\n st.subheader(\"PEG Ratio\")\n if ticker.info.get('pegRatio') is not None:\n st.write(format_number(ticker.info.get('pegRatio')))\n else:\n st.write('No Value')\n \n time_period = st.selectbox(\"Select Timeframe\",(\"1d\",\"5d\",\"1mo\",\"3mo\",\"6mo\",\"1y\",\"2y\",\"5y\", \"10y\", \"ytd\",\"max\"))\n if time_period == \"1d\" or \"5d\" or \"1mo\":\n interval = st.selectbox(\"Select Interval\",(\"1m\",\"2m\",\"5m\",\"15m\",\"30m\",\"60m\",\"90m\",\"1h\",\"1d\",\"5d\",\"1wk\"))\n else: \n interval = st.selectbox(\"Select Interval\",(\"1d\",\"5d\",\"1wk\",\"1mo\",\"3mo\"))\n\n df = pd.DataFrame(ticker.history(period=time_period,interval=interval))\n st.line_chart(data=df['Close'], width=0, height=0, use_container_width=True)\n\n\nif screen == \"Wealthsimple\":\n email = st.text_input(\"Enter Email\")\n password = st.text_input(\"Enter Password\",type=\"password\")\n tfa = st.text_input(\"Enter 2FA code:\")\n\n try:\n def my_two_factor_function():\n MFACode = tfa\n return MFACode\n\n ws = wealthsimple.WSTrade(\n email,\n password,\n two_factor_callback=my_two_factor_function,\n )\n except:\n pass\n else:\n st.header(\"Wealthsimple info\")\n st.subheader(\"Positions\")\n #st.write(ws.get_account_ids())\n #st.write(ws.get_account(\"tfsa-gjcsjvzu\"))\n\n positions = ws.get_positions(\"tfsa-gjcsjvzu\")\n # st.write(positions[0])\n\n stock_vals = {}\n stock_name = []\n symbols = []\n avg_daily_vol = []\n stock_country = []\n exchange = []\n old_value = []\n currency = []\n quantity = []\n closing_price = []\n daily_high = []\n daily_low = []\n daily_vol = []\n\n for i in range(len(positions)):\n stock_name.append(positions[i]['stock']['name'])\n symbols.append(positions[i]['stock']['symbol'])\n stock_country.append(positions[i]['stock']['country_of_issue'])\n exchange.append(positions[i]['stock']['primary_exchange'])\n currency.append(positions[i]['market_book_value']['currency'])\n old_value.append(positions[i]['market_book_value']['amount'])\n quantity.append(float(positions[i]['quantity']))\n closing_price.append(float(positions[i]['quote']['amount']))\n daily_high.append(positions[i]['quote']['high'])\n daily_low.append(positions[i]['quote']['low'])\n avg_daily_vol.append(positions[i]['stock']['avg_daily_volume_last_month'])\n daily_vol.append(positions[i]['quote']['volume'])\n\n today_value = []\n for num1, num2 in zip(quantity, closing_price):\n \t today_value.append(num1 * num2)\n\n\n stock_vals[\"Name\"] = stock_name\n stock_vals[\"Symbol\"] = symbols\n stock_vals[\"Country\"] = stock_country\n stock_vals[\"Exchange\"] = exchange\n stock_vals[\"Currency\"] = currency\n stock_vals[\"Value At Buy\"] = old_value\n stock_vals[\"Value Today\"] = today_value\n stock_vals[\"Quantity\"] = quantity\n stock_vals[\"Closing Price\"] = closing_price\n stock_vals[\"Daily High\"] = daily_high\n stock_vals[\"Daily Low\"] = daily_low\n stock_vals[\"Average Daily Volume\"] = avg_daily_vol\n stock_vals[\"Daily Volume\"] = daily_vol\n\n portfolio = pd.DataFrame(stock_vals)\n st.write(portfolio)\n\n st.subheader(\"Portfolio Breakdown\")\n # show_chart = st.selectbox(\"select stock\", symbols)\n fig, ax = plt.subplots()\n ax = plt.pie(today_value, labels = symbols, autopct='%1.1f%%')\n st.pyplot(fig)\n\n # st.write(\"Deposits\")\n # st.write(ws.get_deposits())\n st.header(\"History\")\n portfolio_amount = {}\n history = []\n dates = []\n historical_vals = ws.get_account_history(\"tfsa-gjcsjvzu\")[\"results\"]\n for i in range(len(historical_vals)):\n dates.append(historical_vals[i][\"date\"])\n 
history.append(historical_vals[i][\"value\"]['amount'])\n\n\n portfolio_amount[\"Date\"] = dates\n portfolio_amount[\"Value\"] = history\n \n performance = pd.DataFrame(portfolio_amount)\n # st.write(performance)\n st.line_chart(data=performance, width=0, height=0, use_container_width=True)\n # st.write(\"Get activities\")\n # st.write(ws.get_activities())\n\nif screen == \"Stock Compare\":\n col1,col2,col3,col4,col5 = st.beta_columns(5)\n with col1:\n symbol1 = st.text_input(\"Symbol 1\",value = \"AAPL\",max_chars=10)\n with col2:\n symbol2 = st.text_input(\"Symbol 2\", value = \"MSFT\",max_chars=10)\n with col3:\n symbol3 = st.text_input(\"Symbol 3\", value = \"TSLA\",max_chars=10)\n with col4:\n symbol4 = st.text_input(\"Symbol 4\", value = \"AMZN\",max_chars=10)\n with col5:\n symbol5 = st.text_input(\"Symbol 5\",value = \"GOOG\",max_chars=10)\n\n start = st.date_input(\"Select Start Date\")\n end = st.date_input(\"Select End Date\")\n\n data = (yf.download(f\"{symbol1} {symbol2} {symbol3} {symbol4} {symbol5}\", start=start, end=end))\n # st.write(data)\n st.line_chart(data=data['Close'], width=0, height=0, use_container_width=True)\n\n # df = pd.DataFrame(ticker.history(period=time_period,interval=interval))\n # st.line_chart(data=df['Close'], width=0, height=0, use_container_width=True)\n\nif screen == \"Technical Analysis\":\n stock = yf.Ticker(\"SPY\")\n stock_data = stock.history(period=\"4y\")\n\n # print(stock_data.head())\n\n\n class SMA200Strategy(bt.Strategy):\n def __init__(self):\n self.etf = self.datas[0]\n self.etf_sma = bt.ind.SMA(self.etf.lines.close, period=200)\n self.crossover = bt.ind.CrossOver(self.etf, self.etf_sma)\n\n def next(self):\n if self.crossover > 0:\n self.order_target_percent(self.etf, target=0.9)\n elif self.crossover < 0:\n self.order_target_percent(self.etf, target=0)\n\n\n cerebro = bt.Cerebro()\n\n cerebro.broker.setcash(100000)\n cerebro.adddata(\n bt.feeds.PandasData(dataname=stock_data))\n cerebro.addstrategy(SMA200Strategy)\n\n cerebro.run()\n cerebro.plot()" ]
[ [ "matplotlib.pyplot.pie", "matplotlib.pyplot.subplots", "pandas.DataFrame" ] ]
NijatZeynalov/Building-Neural-Network-architectures-from-scratch
[ "c64ff9599f92d5ae1b7a3426dda2de8fc195d93e" ]
[ "vgg16.py" ]
[ "import tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\nclass Block(tf.keras.Model):\n def __init__(self, filters, kernel_size, repetitions, pool_size=2, strides=2):\n super(Block, self).__init__()\n self.filters = filters\n self.kernel_size = kernel_size\n self.repetitions = repetitions\n\n # Define a conv2D_0, conv2D_1, etc based on the number of repetitions\n for i in range(self.repetitions):\n # Define a Conv2D layer, specifying filters, kernel_size, activation and padding.\n vars(self)[f'conv2D_{i}'] = tf.keras.layers.Conv2D(filters, kernel_size, activation='relu', padding='same')\n\n # Define the max pool layer that will be added after the Conv2D blocks\n self.max_pool = tf.keras.layers.MaxPooling2D(pool_size, strides=strides)\n\n def call(self, inputs):\n # access the class's conv2D_0 layer\n conv2D_0 = self.conv2D_0\n\n # Connect the conv2D_0 layer to inputs\n x = self.conv2D_0(inputs)\n\n # for the remaining conv2D_i layers from 1 to `repetitions` they will be connected to the previous layer\n for i in range(1, self.repetitions):\n # access conv2D_i by formatting the integer `i`. (hint: check how these were saved using `vars()` earlier)\n conv2D_i = vars(self)[f'conv2D_{i}']\n\n # Use the conv2D_i and connect it to the previous layer\n x = conv2D_i(x)\n\n # Finally, add the max_pool layer\n max_pool = self.max_pool(x)\n\n return max_pool\n\n\n\nclass MyVGG(tf.keras.Model):\n\n def __init__(self, num_classes):\n super(MyVGG, self).__init__()\n\n # Creating blocks of VGG with the following\n # (filters, kernel_size, repetitions) configurations\n self.block_a = Block(filters=64, kernel_size=3, repetitions=2)\n self.block_b = Block(filters=128, kernel_size=3, repetitions=2)\n self.block_c = Block(filters=256, kernel_size=3, repetitions=3)\n self.block_d = Block(filters=512, kernel_size=3, repetitions=3)\n self.block_e = Block(filters=512, kernel_size=3, repetitions=3)\n\n # Classification head\n # Define a Flatten layer\n self.flatten = tf.keras.layers.Flatten()\n # Create a Dense layer with 256 units and ReLU as the activation function\n self.fc = tf.keras.layers.Dense(256, activation='relu')\n # Finally add the softmax classifier using a Dense layer\n self.classifier =tf.keras.layers.Dense(num_classes, activation='softmax')\n\n def call(self, inputs):\n # Chain all the layers one after the other\n x = self.block_a(inputs)\n x = self.block_b(x)\n x = self.block_c(x)\n x = self.block_d(x)\n x = self.block_e(x)\n x = self.flatten(x)\n x = self.fc(x)\n x = self.classifier(x)\n return x\n \ndef preprocess(features):\n return tf.cast(features['image'], tf.float32) / 255., features['label']\n\nmodel = MyVGG(10)\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n# load and preprocess the dataset\ndataset = tfds.load('mnist', split=tfds.Split.TRAIN, data_dir='./data')\ndataset = dataset.map(preprocess).batch(32)\n\n# train the model.\nmodel.fit(dataset, epochs=1)\n\nresnet.summary()\n" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.cast", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Flatten" ] ]
yutiansut/ray
[ "4157bcb80b169779f43a549dd503911d95507cc2" ]
[ "python/ray/worker.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport atexit\nimport cloudpickle as pickle\nimport collections\nimport colorama\nimport copy\nimport hashlib\nimport inspect\nimport json\nimport numpy as np\nimport os\nimport redis\nimport signal\nimport sys\nimport threading\nimport time\nimport traceback\n\n# Ray modules\nimport pyarrow\nimport pyarrow.plasma as plasma\nimport ray.experimental.state as state\nimport ray.serialization as serialization\nimport ray.services as services\nimport ray.signature as signature\nimport ray.local_scheduler\nimport ray.plasma\nfrom ray.utils import FunctionProperties, random_string, binary_to_hex\n\nSCRIPT_MODE = 0\nWORKER_MODE = 1\nPYTHON_MODE = 2\nSILENT_MODE = 3\n\nLOG_POINT = 0\nLOG_SPAN_START = 1\nLOG_SPAN_END = 2\n\nERROR_KEY_PREFIX = b\"Error:\"\nDRIVER_ID_LENGTH = 20\nERROR_ID_LENGTH = 20\n\n# This must match the definition of NIL_ACTOR_ID in task.h.\nNIL_ID = 20 * b\"\\xff\"\nNIL_LOCAL_SCHEDULER_ID = NIL_ID\nNIL_FUNCTION_ID = NIL_ID\nNIL_ACTOR_ID = NIL_ID\n\n# When performing ray.get, wait 1 second before attemping to reconstruct and\n# fetch the object again.\nGET_TIMEOUT_MILLISECONDS = 1000\n\n# This must be kept in sync with the `error_types` array in\n# common/state/error_table.h.\nOBJECT_HASH_MISMATCH_ERROR_TYPE = b\"object_hash_mismatch\"\nPUT_RECONSTRUCTION_ERROR_TYPE = b\"put_reconstruction\"\n\n# This must be kept in sync with the `scheduling_state` enum in common/task.h.\nTASK_STATUS_RUNNING = 8\n\n\nclass FunctionID(object):\n def __init__(self, function_id):\n self.function_id = function_id\n\n def id(self):\n return self.function_id\n\n\nclass RayTaskError(Exception):\n \"\"\"An object used internally to represent a task that threw an exception.\n\n If a task throws an exception during execution, a RayTaskError is stored in\n the object store for each of the task's outputs. 
When an object is\n retrieved from the object store, the Python method that retrieved it checks\n to see if the object is a RayTaskError and if it is then an exception is\n thrown propagating the error message.\n\n Currently, we either use the exception attribute or the traceback attribute\n but not both.\n\n Attributes:\n function_name (str): The name of the function that failed and produced\n the RayTaskError.\n exception (Exception): The exception object thrown by the failed task.\n traceback_str (str): The traceback from the exception.\n \"\"\"\n\n def __init__(self, function_name, exception, traceback_str):\n \"\"\"Initialize a RayTaskError.\"\"\"\n self.function_name = function_name\n if (isinstance(exception, RayGetError) or\n isinstance(exception, RayGetArgumentError)):\n self.exception = exception\n else:\n self.exception = None\n self.traceback_str = traceback_str\n\n def __str__(self):\n \"\"\"Format a RayTaskError as a string.\"\"\"\n if self.traceback_str is None:\n # This path is taken if getting the task arguments failed.\n return (\"Remote function {}{}{} failed with:\\n\\n{}\"\n .format(colorama.Fore.RED, self.function_name,\n colorama.Fore.RESET, self.exception))\n else:\n # This path is taken if the task execution failed.\n return (\"Remote function {}{}{} failed with:\\n\\n{}\"\n .format(colorama.Fore.RED, self.function_name,\n colorama.Fore.RESET, self.traceback_str))\n\n\nclass RayGetError(Exception):\n \"\"\"An exception used when get is called on an output of a failed task.\n\n Attributes:\n objectid (lib.ObjectID): The ObjectID that get was called on.\n task_error (RayTaskError): The RayTaskError object created by the\n failed task.\n \"\"\"\n\n def __init__(self, objectid, task_error):\n \"\"\"Initialize a RayGetError object.\"\"\"\n self.objectid = objectid\n self.task_error = task_error\n\n def __str__(self):\n \"\"\"Format a RayGetError as a string.\"\"\"\n return (\"Could not get objectid {}. It was created by remote function \"\n \"{}{}{} which failed with:\\n\\n{}\"\n .format(self.objectid, colorama.Fore.RED,\n self.task_error.function_name, colorama.Fore.RESET,\n self.task_error))\n\n\nclass RayGetArgumentError(Exception):\n \"\"\"An exception used when a task's argument was produced by a failed task.\n\n Attributes:\n argument_index (int): The index (zero indexed) of the failed argument\n in present task's remote function call.\n function_name (str): The name of the function for the current task.\n objectid (lib.ObjectID): The ObjectID that was passed in as the\n argument.\n task_error (RayTaskError): The RayTaskError object created by the\n failed task.\n \"\"\"\n\n def __init__(self, function_name, argument_index, objectid, task_error):\n \"\"\"Initialize a RayGetArgumentError object.\"\"\"\n self.argument_index = argument_index\n self.function_name = function_name\n self.objectid = objectid\n self.task_error = task_error\n\n def __str__(self):\n \"\"\"Format a RayGetArgumentError as a string.\"\"\"\n return (\"Failed to get objectid {} as argument {} for remote function \"\n \"{}{}{}. It was created by remote function {}{}{} which \"\n \"failed with:\\n{}\".format(self.objectid, self.argument_index,\n colorama.Fore.RED,\n self.function_name,\n colorama.Fore.RESET,\n colorama.Fore.RED,\n self.task_error.function_name,\n colorama.Fore.RESET,\n self.task_error))\n\n\nclass Worker(object):\n \"\"\"A class used to define the control flow of a worker process.\n\n Note:\n The methods in this class are considered unexposed to the user. 
The\n functions outside of this class are considered exposed.\n\n Attributes:\n functions (Dict[str, Callable]): A dictionary mapping the name of a\n remote function to the remote function itself. This is the set of\n remote functions that can be executed by this worker.\n connected (bool): True if Ray has been started and False otherwise.\n mode: The mode of the worker. One of SCRIPT_MODE, PYTHON_MODE,\n SILENT_MODE, and WORKER_MODE.\n cached_remote_functions (List[Tuple[str, str]]): A list of pairs\n representing the remote functions that were defined before the\n worker called connect. The first element is the name of the remote\n function, and the second element is the serialized remote function.\n When the worker eventually does call connect, if it is a driver, it\n will export these functions to the scheduler. If\n cached_remote_functions is None, that means that connect has been\n called already.\n cached_functions_to_run (List): A list of functions to run on all of\n the workers that should be exported as soon as connect is called.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize a Worker object.\"\"\"\n # The functions field is a dictionary that maps a driver ID to a\n # dictionary of functions that have been registered for that driver\n # (this inner dictionary maps function IDs to a tuple of the function\n # name and the function itself). This should only be used on workers\n # that execute remote functions.\n self.functions = collections.defaultdict(lambda: {})\n # The function_properties field is a dictionary that maps a driver ID\n # to a dictionary of functions that have been registered for that\n # driver (this inner dictionary maps function IDs to a tuple of the\n # number of values returned by that function, the number of CPUs\n # required by that function, and the number of GPUs required by that\n # function). This is used when submitting a function (which can be done\n # both on workers and on drivers).\n self.function_properties = collections.defaultdict(lambda: {})\n # This is a dictionary mapping driver ID to a dictionary that maps\n # remote function IDs for that driver to a counter of the number of\n # times that remote function has been executed on this worker. The\n # counter is incremented every time the function is executed on this\n # worker. When the counter reaches the maximum number of executions\n # allowed for a particular function, the worker is killed.\n self.num_task_executions = collections.defaultdict(lambda: {})\n self.connected = False\n self.mode = None\n self.cached_remote_functions = []\n self.cached_functions_to_run = []\n self.fetch_and_register_actor = None\n self.make_actor = None\n self.actors = {}\n self.actor_task_counter = 0\n # Whether an actor instance has been loaded yet. The actor counts as\n # loaded once it has either executed its first task or successfully\n # resumed from a checkpoint.\n self.actor_loaded = False\n # This field is used to report actor checkpoint failure for the last\n # task assigned. Workers are not assigned a task on startup, so we\n # initialize to False.\n self.actor_checkpoint_failed = False\n # TODO(swang): This is a hack to prevent the object store from evicting\n # dummy objects. Once we allow object pinning in the store, we may\n # remove this variable.\n self.actor_pinned_objects = None\n\n def set_mode(self, mode):\n \"\"\"Set the mode of the worker.\n\n The mode SCRIPT_MODE should be used if this Worker is a driver that is\n being run as a Python script or interactively in a shell. 
It will print\n information about task failures.\n\n The mode WORKER_MODE should be used if this Worker is not a driver. It\n will not print information about tasks.\n\n The mode PYTHON_MODE should be used if this Worker is a driver and if\n you want to run the driver in a manner equivalent to serial Python for\n debugging purposes. It will not send remote function calls to the\n scheduler and will insead execute them in a blocking fashion.\n\n The mode SILENT_MODE should be used only during testing. It does not\n print any information about errors because some of the tests\n intentionally fail.\n\n args:\n mode: One of SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, and\n SILENT_MODE.\n \"\"\"\n self.mode = mode\n\n def store_and_register(self, object_id, value, depth=100):\n \"\"\"Store an object and attempt to register its class if needed.\n\n Args:\n object_id: The ID of the object to store.\n value: The value to put in the object store.\n depth: The maximum number of classes to recursively register.\n\n Raises:\n Exception: An exception is raised if the attempt to store the\n object fails. This can happen if there is already an object\n with the same ID in the object store or if the object store is\n full.\n \"\"\"\n counter = 0\n while True:\n if counter == depth:\n raise Exception(\"Ray exceeded the maximum number of classes \"\n \"that it will recursively serialize when \"\n \"attempting to serialize an object of \"\n \"type {}.\".format(type(value)))\n counter += 1\n try:\n self.plasma_client.put(value, pyarrow.plasma.ObjectID(\n object_id.id()), self.serialization_context)\n break\n except pyarrow.SerializationCallbackError as e:\n try:\n _register_class(type(e.example_object))\n warning_message = (\"WARNING: Serializing objects of type \"\n \"{} by expanding them as dictionaries \"\n \"of their fields. This behavior may \"\n \"be incorrect in some cases.\"\n .format(type(e.example_object)))\n print(warning_message)\n except (serialization.RayNotDictionarySerializable,\n pickle.pickle.PicklingError):\n _register_class(type(e.example_object), use_pickle=True)\n warning_message = (\"WARNING: Falling back to serializing \"\n \"objects of type {} by using pickle. \"\n \"This may be inefficient.\"\n .format(type(e.example_object)))\n print(warning_message)\n\n def put_object(self, object_id, value):\n \"\"\"Put value in the local object store with object id objectid.\n\n This assumes that the value for objectid has not yet been placed in the\n local object store.\n\n Args:\n object_id (object_id.ObjectID): The object ID of the value to be\n put.\n value: The value to put in the object store.\n\n Raises:\n Exception: An exception is raised if the attempt to store the\n object fails. This can happen if there is already an object\n with the same ID in the object store or if the object store is\n full.\n \"\"\"\n # Make sure that the value is not an object ID.\n if isinstance(value, ray.local_scheduler.ObjectID):\n raise Exception(\"Calling 'put' on an ObjectID is not allowed \"\n \"(similarly, returning an ObjectID from a remote \"\n \"function is not allowed). If you really want to \"\n \"do this, you can wrap the ObjectID in a list and \"\n \"call 'put' on it (or return it).\")\n\n # Serialize and put the object in the object store.\n try:\n self.store_and_register(object_id, value)\n except pyarrow.PlasmaObjectExists as e:\n # The object already exists in the object store, so there is no\n # need to add it again. 
TODO(rkn): We need to compare the hashes\n # and make sure that the objects are in fact the same. We also\n # should return an error code to the caller instead of printing a\n # message.\n print(\"This object already exists in the object store.\")\n\n def retrieve_and_deserialize(self, object_ids, timeout, error_timeout=10):\n start_time = time.time()\n # Only send the warning once.\n warning_sent = False\n while True:\n try:\n # We divide very large get requests into smaller get requests\n # so that a single get request doesn't block the store for a\n # long time, if the store is blocked, it can block the manager\n # as well as a consequence.\n results = []\n get_request_size = 10000\n for i in range(0, len(object_ids), get_request_size):\n results += self.plasma_client.get(\n object_ids[i:(i + get_request_size)],\n timeout,\n self.serialization_context)\n return results\n except pyarrow.DeserializationCallbackError as e:\n # Wait a little bit for the import thread to import the class.\n # If we currently have the worker lock, we need to release it\n # so that the import thread can acquire it.\n if self.mode == WORKER_MODE:\n self.lock.release()\n time.sleep(0.01)\n if self.mode == WORKER_MODE:\n self.lock.acquire()\n\n if time.time() - start_time > error_timeout:\n warning_message = (\"This worker or driver is waiting to \"\n \"receive a class definition so that it \"\n \"can deserialize an object from the \"\n \"object store. This may be fine, or it \"\n \"may be a bug.\")\n if not warning_sent:\n self.push_error_to_driver(self.task_driver_id.id(),\n \"wait_for_class\",\n warning_message)\n warning_sent = True\n\n def get_object(self, object_ids):\n \"\"\"Get the value or values in the object store associated with the IDs.\n\n Return the values from the local object store for object_ids. This will\n block until all the values for object_ids have been written to the\n local object store.\n\n Args:\n object_ids (List[object_id.ObjectID]): A list of the object IDs\n whose values should be retrieved.\n \"\"\"\n # Make sure that the values are object IDs.\n for object_id in object_ids:\n if not isinstance(object_id, ray.local_scheduler.ObjectID):\n raise Exception(\"Attempting to call `get` on the value {}, \"\n \"which is not an ObjectID.\".format(object_id))\n # Do an initial fetch for remote objects. We divide the fetch into\n # smaller fetches so as to not block the manager for a prolonged period\n # of time in a single call.\n fetch_request_size = 10000\n plain_object_ids = [plasma.ObjectID(object_id.id())\n for object_id in object_ids]\n for i in range(0, len(object_ids), fetch_request_size):\n self.plasma_client.fetch(\n plain_object_ids[i:(i + fetch_request_size)])\n\n # Get the objects. We initially try to get the objects immediately.\n final_results = self.retrieve_and_deserialize(plain_object_ids, 0)\n # Construct a dictionary mapping object IDs that we haven't gotten yet\n # to their original index in the object_ids argument.\n unready_ids = dict((plain_object_ids[i].binary(), i) for (i, val) in\n enumerate(final_results)\n if val is plasma.ObjectNotAvailable)\n was_blocked = (len(unready_ids) > 0)\n # Try reconstructing any objects we haven't gotten yet. 
Try to get them\n # until at least GET_TIMEOUT_MILLISECONDS milliseconds passes, then\n # repeat.\n while len(unready_ids) > 0:\n for unready_id in unready_ids:\n self.local_scheduler_client.reconstruct_object(unready_id)\n # Do another fetch for objects that aren't available locally yet,\n # in case they were evicted since the last fetch. We divide the\n # fetch into smaller fetches so as to not block the manager for a\n # prolonged period of time in a single call.\n object_ids_to_fetch = list(map(\n plasma.ObjectID, unready_ids.keys()))\n for i in range(0, len(object_ids_to_fetch), fetch_request_size):\n self.plasma_client.fetch(\n object_ids_to_fetch[i:(i + fetch_request_size)])\n results = self.retrieve_and_deserialize(\n object_ids_to_fetch,\n max([GET_TIMEOUT_MILLISECONDS, int(0.01 * len(unready_ids))]))\n # Remove any entries for objects we received during this iteration\n # so we don't retrieve the same object twice.\n for i, val in enumerate(results):\n if val is not plasma.ObjectNotAvailable:\n object_id = object_ids_to_fetch[i].binary()\n index = unready_ids[object_id]\n final_results[index] = val\n unready_ids.pop(object_id)\n\n # If there were objects that we weren't able to get locally, let the\n # local scheduler know that we're now unblocked.\n if was_blocked:\n self.local_scheduler_client.notify_unblocked()\n\n assert len(final_results) == len(object_ids)\n return final_results\n\n def submit_task(self, function_id, args, actor_id=None, actor_counter=0,\n is_actor_checkpoint_method=False):\n \"\"\"Submit a remote task to the scheduler.\n\n Tell the scheduler to schedule the execution of the function with ID\n function_id with arguments args. Retrieve object IDs for the outputs of\n the function from the scheduler and immediately return them.\n\n Args:\n function_id: The ID of the function to execute.\n args: The arguments to pass into the function. Arguments can be\n object IDs or they can be values. 
If they are values, they must\n be serializable objecs.\n actor_id: The ID of the actor that this task is for.\n actor_counter: The counter of the actor task.\n is_actor_checkpoint_method: True if this is an actor checkpoint\n task and false otherwise.\n \"\"\"\n with log_span(\"ray:submit_task\", worker=self):\n check_main_thread()\n actor_id = (ray.local_scheduler.ObjectID(NIL_ACTOR_ID)\n if actor_id is None else actor_id)\n # Put large or complex arguments that are passed by value in the\n # object store first.\n args_for_local_scheduler = []\n for arg in args:\n if isinstance(arg, ray.local_scheduler.ObjectID):\n args_for_local_scheduler.append(arg)\n elif ray.local_scheduler.check_simple_value(arg):\n args_for_local_scheduler.append(arg)\n else:\n args_for_local_scheduler.append(put(arg))\n\n # Look up the various function properties.\n function_properties = self.function_properties[\n self.task_driver_id.id()][function_id.id()]\n\n # Submit the task to local scheduler.\n task = ray.local_scheduler.Task(\n self.task_driver_id,\n ray.local_scheduler.ObjectID(function_id.id()),\n args_for_local_scheduler,\n function_properties.num_return_vals,\n self.current_task_id,\n self.task_index,\n actor_id,\n actor_counter,\n is_actor_checkpoint_method,\n [function_properties.num_cpus, function_properties.num_gpus,\n function_properties.num_custom_resource])\n # Increment the worker's task index to track how many tasks have\n # been submitted by the current task so far.\n self.task_index += 1\n self.local_scheduler_client.submit(task)\n\n return task.returns()\n\n def run_function_on_all_workers(self, function):\n \"\"\"Run arbitrary code on all of the workers.\n\n This function will first be run on the driver, and then it will be\n exported to all of the workers to be run. It will also be run on any\n new workers that register later. If ray.init has not been called yet,\n then cache the function and export it later.\n\n Args:\n function (Callable): The function to run on all of the workers. It\n should not take any arguments. If it returns anything, its\n return values will not be used.\n \"\"\"\n check_main_thread()\n # If ray.init has not been called yet, then cache the function and\n # export it when connect is called. Otherwise, run the function on all\n # workers.\n if self.mode is None:\n self.cached_functions_to_run.append(function)\n else:\n # Attempt to pickle the function before we need it. This could\n # fail, and it is more convenient if the failure happens before we\n # actually run the function locally.\n pickled_function = pickle.dumps(function)\n\n function_to_run_id = hashlib.sha1(pickled_function).digest()\n key = b\"FunctionsToRun:\" + function_to_run_id\n # First run the function on the driver. Pass in the number of\n # workers on this node that have already started executing this\n # remote function, and increment that value. 
Subtract 1 so that the\n # counter starts at 0.\n counter = self.redis_client.hincrby(self.node_ip_address,\n key, 1) - 1\n # We always run the task locally.\n function({\"counter\": counter, \"worker\": self})\n # Check if the function has already been put into redis.\n function_exported = self.redis_client.setnx(b\"Lock:\" + key, 1)\n if not function_exported:\n # In this case, the function has already been exported, so\n # we don't need to export it again.\n return\n # Run the function on all workers.\n self.redis_client.hmset(key,\n {\"driver_id\": self.task_driver_id.id(),\n \"function_id\": function_to_run_id,\n \"function\": pickled_function})\n self.redis_client.rpush(\"Exports\", key)\n # TODO(rkn): If the worker fails after it calls setnx and before it\n # successfully completes the hmset and rpush, then the program will\n # most likely hang. This could be fixed by making these three\n # operations into a transaction (or by implementing a custom\n # command that does all three things).\n\n def push_error_to_driver(self, driver_id, error_type, message, data=None):\n \"\"\"Push an error message to the driver to be printed in the background.\n\n Args:\n driver_id: The ID of the driver to push the error message to.\n error_type (str): The type of the error.\n message (str): The message that will be printed in the background\n on the driver.\n data: This should be a dictionary mapping strings to strings. It\n will be serialized with json and stored in Redis.\n \"\"\"\n error_key = ERROR_KEY_PREFIX + driver_id + b\":\" + random_string()\n data = {} if data is None else data\n self.redis_client.hmset(error_key, {\"type\": error_type,\n \"message\": message,\n \"data\": data})\n self.redis_client.rpush(\"ErrorKeys\", error_key)\n\n def _wait_for_function(self, function_id, driver_id, timeout=10):\n \"\"\"Wait until the function to be executed is present on this worker.\n\n This method will simply loop until the import thread has imported the\n relevant function. If we spend too long in this loop, that may indicate\n a problem somewhere and we will push an error message to the user.\n\n If this worker is an actor, then this will wait until the actor has\n been defined.\n\n Args:\n is_actor (bool): True if this worker is an actor, and false\n otherwise.\n function_id (str): The ID of the function that we want to execute.\n driver_id (str): The ID of the driver to push the error message to\n if this times out.\n \"\"\"\n start_time = time.time()\n # Only send the warning once.\n warning_sent = False\n while True:\n with self.lock:\n if (self.actor_id == NIL_ACTOR_ID and\n (function_id.id() in self.functions[driver_id])):\n break\n elif self.actor_id != NIL_ACTOR_ID and (self.actor_id in\n self.actors):\n break\n if time.time() - start_time > timeout:\n warning_message = (\"This worker was asked to execute a \"\n \"function that it does not have \"\n \"registered. You may have to restart \"\n \"Ray.\")\n if not warning_sent:\n self.push_error_to_driver(driver_id,\n \"wait_for_function\",\n warning_message)\n warning_sent = True\n time.sleep(0.001)\n\n def _get_arguments_for_execution(self, function_name, serialized_args):\n \"\"\"Retrieve the arguments for the remote function.\n\n This retrieves the values for the arguments to the remote function that\n were passed in as object IDs. Argumens that were passed by value are\n not changed. 
This is called by the worker that is executing the remote\n function.\n\n Args:\n function_name (str): The name of the remote function whose\n arguments are being retrieved.\n serialized_args (List): The arguments to the function. These are\n either strings representing serialized objects passed by value\n or they are ObjectIDs.\n\n Returns:\n The retrieved arguments in addition to the arguments that were\n passed by value.\n\n Raises:\n RayGetArgumentError: This exception is raised if a task that\n created one of the arguments failed.\n \"\"\"\n arguments = []\n for (i, arg) in enumerate(serialized_args):\n if isinstance(arg, ray.local_scheduler.ObjectID):\n # get the object from the local object store\n argument = self.get_object([arg])[0]\n if isinstance(argument, RayTaskError):\n # If the result is a RayTaskError, then the task that\n # created this object failed, and we should propagate the\n # error message here.\n raise RayGetArgumentError(function_name, i, arg, argument)\n else:\n # pass the argument by value\n argument = arg\n\n arguments.append(argument)\n return arguments\n\n def _store_outputs_in_objstore(self, objectids, outputs):\n \"\"\"Store the outputs of a remote function in the local object store.\n\n This stores the values that were returned by a remote function in the\n local object store. If any of the return values are object IDs, then\n these object IDs are aliased with the object IDs that the scheduler\n assigned for the return values. This is called by the worker that\n executes the remote function.\n\n Note:\n The arguments objectids and outputs should have the same length.\n\n Args:\n objectids (List[ObjectID]): The object IDs that were assigned to\n the outputs of the remote function call.\n outputs (Tuple): The value returned by the remote function. If the\n remote function was supposed to only return one value, then its\n output was wrapped in a tuple with one element prior to being\n passed into this function.\n \"\"\"\n for i in range(len(objectids)):\n self.put_object(objectids[i], outputs[i])\n\n def _process_task(self, task):\n \"\"\"Execute a task assigned to this worker.\n\n This method deserializes a task from the scheduler, and attempts to\n execute the task. If the task succeeds, the outputs are stored in the\n local object store. If the task throws an exception, RayTaskError\n objects are stored in the object store to represent the failed task\n (these will be retrieved by calls to get or by subsequent tasks that\n use the outputs of this task).\n \"\"\"\n # The ID of the driver that this task belongs to. 
This is needed so\n # that if the task throws an exception, we propagate the error\n # message to the correct driver.\n self.task_driver_id = task.driver_id()\n self.current_task_id = task.task_id()\n self.current_function_id = task.function_id().id()\n self.task_index = 0\n self.put_index = 0\n function_id = task.function_id()\n args = task.arguments()\n return_object_ids = task.returns()\n if task.actor_id().id() != NIL_ACTOR_ID:\n dummy_return_id = return_object_ids.pop()\n function_name, function_executor = (self.functions\n [self.task_driver_id.id()]\n [function_id.id()])\n\n # Get task arguments from the object store.\n try:\n with log_span("ray:task:get_arguments", worker=self):\n arguments = self._get_arguments_for_execution(function_name,\n args)\n except (RayGetError, RayGetArgumentError) as e:\n self._handle_process_task_failure(function_id, return_object_ids,\n e, None)\n return\n except Exception as e:\n self._handle_process_task_failure(\n function_id, return_object_ids, e,\n format_error_message(traceback.format_exc()))\n return\n\n # Execute the task.\n try:\n with log_span("ray:task:execute", worker=self):\n if task.actor_id().id() == NIL_ACTOR_ID:\n outputs = function_executor.executor(arguments)\n else:\n outputs = function_executor(\n dummy_return_id, task.actor_counter(),\n self.actors[task.actor_id().id()],\n *arguments)\n except Exception as e:\n # Determine whether the exception occurred during a task, not an\n # actor method.\n task_exception = task.actor_id().id() == NIL_ACTOR_ID\n traceback_str = format_error_message(traceback.format_exc(),\n task_exception=task_exception)\n self._handle_process_task_failure(function_id, return_object_ids,\n e, traceback_str)\n return\n\n # Store the outputs in the local object store.\n try:\n with log_span("ray:task:store_outputs", worker=self):\n # If this was an actor task, then the last object ID returned\n # by the task is a dummy output that was already popped off of\n # return_object_ids above, so its length is the correct number\n # of return values here.\n num_returns = len(return_object_ids)\n if num_returns == 1:\n outputs = (outputs,)\n self._store_outputs_in_objstore(return_object_ids, outputs)\n except Exception as e:\n self._handle_process_task_failure(\n function_id, return_object_ids, e,\n format_error_message(traceback.format_exc()))\n\n def _handle_process_task_failure(self, function_id, return_object_ids,\n error, backtrace):\n function_name, _ = self.functions[\n self.task_driver_id.id()][function_id.id()]\n failure_object = RayTaskError(function_name, error, backtrace)\n failure_objects = [failure_object for _ in\n range(len(return_object_ids))]\n self._store_outputs_in_objstore(return_object_ids, failure_objects)\n # Log the error message.\n self.push_error_to_driver(self.task_driver_id.id(), "task",\n str(failure_object),\n data={"function_id": function_id.id(),\n "function_name": function_name})\n\n def _wait_for_and_process_task(self, task):\n """Wait for a task to be ready and process the task.\n\n Args:\n task: The task to execute.\n """\n function_id = task.function_id()\n # Wait until the function to be executed has actually been registered\n # on this worker.
We will push warnings to the user if we spend too\n # long in this loop.\n with log_span(\"ray:wait_for_function\", worker=self):\n self._wait_for_function(function_id, task.driver_id().id())\n\n # Execute the task.\n # TODO(rkn): Consider acquiring this lock with a timeout and pushing a\n # warning to the user if we are waiting too long to acquire the lock\n # because that may indicate that the system is hanging, and it'd be\n # good to know where the system is hanging.\n log(event_type=\"ray:acquire_lock\", kind=LOG_SPAN_START, worker=self)\n with self.lock:\n log(event_type=\"ray:acquire_lock\", kind=LOG_SPAN_END,\n worker=self)\n\n function_name, _ = (self.functions[task.driver_id().id()]\n [function_id.id()])\n contents = {\"function_name\": function_name,\n \"task_id\": task.task_id().hex(),\n \"worker_id\": binary_to_hex(self.worker_id)}\n with log_span(\"ray:task\", contents=contents, worker=self):\n self._process_task(task)\n\n # Push all of the log events to the global state store.\n flush_log()\n\n # Increase the task execution counter.\n (self.num_task_executions[task.driver_id().id()]\n [function_id.id()]) += 1\n\n reached_max_executions = (\n self.num_task_executions[task.driver_id().id()]\n [function_id.id()] ==\n self.function_properties[task.driver_id().id()]\n [function_id.id()].max_calls)\n if reached_max_executions:\n ray.worker.global_worker.local_scheduler_client.disconnect()\n os._exit(0)\n\n def _get_next_task_from_local_scheduler(self):\n \"\"\"Get the next task from the local scheduler.\n\n Returns:\n A task from the local scheduler.\n \"\"\"\n with log_span(\"ray:get_task\", worker=self):\n task = self.local_scheduler_client.get_task(\n self.actor_checkpoint_failed)\n # We assume that the task is not a checkpoint, or that if it is,\n # that the task will succeed. 
The checkpoint task executor is\n # responsible for reporting task failure to the local scheduler.\n self.actor_checkpoint_failed = False\n\n # Automatically restrict the GPUs available to this task.\n os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(\n [str(i) for i in ray.get_gpu_ids()])\n\n return task\n\n def main_loop(self):\n """The main loop a worker runs to receive and execute tasks."""\n\n def exit(signum, frame):\n cleanup(worker=self)\n sys.exit(0)\n\n signal.signal(signal.SIGTERM, exit)\n\n check_main_thread()\n while True:\n task = self._get_next_task_from_local_scheduler()\n self._wait_for_and_process_task(task)\n\n\ndef get_gpu_ids():\n """Get the IDs of the GPUs that are available to the worker.\n\n Each ID is an integer in the range [0, NUM_GPUS - 1], where NUM_GPUS is the\n number of GPUs that the node has.\n """\n return global_worker.local_scheduler_client.gpu_ids()\n\n\ndef _webui_url_helper(client):\n """Retrieve the URL of the web UI from Redis.\n\n Args:\n client: A redis client to use to query the primary Redis shard.\n\n Returns:\n The URL of the web UI as a string.\n """\n result = client.hmget("webui", "url")[0]\n return result.decode("ascii") if result is not None else result\n\n\ndef get_webui_url():\n """Get the URL to access the web UI.\n\n Note that the URL does not specify which node the web UI is on.\n\n Returns:\n The URL of the web UI as a string.\n """\n return _webui_url_helper(global_worker.redis_client)\n\n\nglobal_worker = Worker()\n"""Worker: The global Worker object for this worker process.\n\nWe use a global Worker object to ensure that there is a single worker object\nper worker process.\n"""\n\nglobal_state = state.GlobalState()\n\n\nclass RayConnectionError(Exception):\n pass\n\n\ndef check_main_thread():\n """Check that we are currently on the main thread.\n\n Raises:\n Exception: An exception is raised if this is called on a thread other\n than the main thread.\n """\n if threading.current_thread().getName() != "MainThread":\n raise Exception("The Ray methods are not thread safe and must be "\n "called from the main thread. This method was called "\n "from thread {}."\n .format(threading.current_thread().getName()))\n\n\ndef check_connected(worker=global_worker):\n """Check if the worker is connected.\n\n Raises:\n Exception: An exception is raised if the worker is not connected.\n """\n if not worker.connected:\n raise RayConnectionError("This command cannot be called before Ray "\n "has been started. 
You can start Ray with \"\n \"'ray.init()'.\")\n\n\ndef print_failed_task(task_status):\n \"\"\"Print information about failed tasks.\n\n Args:\n task_status (Dict): A dictionary containing the name, operationid, and\n error message for a failed task.\n \"\"\"\n print(\"\"\"\n Error: Task failed\n Function Name: {}\n Task ID: {}\n Error Message: \\n{}\n \"\"\".format(task_status[\"function_name\"], task_status[\"operationid\"],\n task_status[\"error_message\"]))\n\n\ndef error_applies_to_driver(error_key, worker=global_worker):\n \"\"\"Return True if the error is for this driver and false otherwise.\"\"\"\n # TODO(rkn): Should probably check that this is only called on a driver.\n # Check that the error key is formatted as in push_error_to_driver.\n assert len(error_key) == (len(ERROR_KEY_PREFIX) + DRIVER_ID_LENGTH + 1 +\n ERROR_ID_LENGTH), error_key\n # If the driver ID in the error message is a sequence of all zeros, then\n # the message is intended for all drivers.\n generic_driver_id = DRIVER_ID_LENGTH * b\"\\x00\"\n driver_id = error_key[len(ERROR_KEY_PREFIX):(len(ERROR_KEY_PREFIX) +\n DRIVER_ID_LENGTH)]\n return (driver_id == worker.task_driver_id.id() or\n driver_id == generic_driver_id)\n\n\ndef error_info(worker=global_worker):\n \"\"\"Return information about failed tasks.\"\"\"\n check_connected(worker)\n check_main_thread()\n error_keys = worker.redis_client.lrange(\"ErrorKeys\", 0, -1)\n errors = []\n for error_key in error_keys:\n if error_applies_to_driver(error_key, worker=worker):\n error_contents = worker.redis_client.hgetall(error_key)\n # If the error is an object hash mismatch, look up the function\n # name for the nondeterministic task. TODO(rkn): Change this so\n # that we don't have to look up additional information. Ideally all\n # relevant information would already be in error_contents.\n error_type = error_contents[b\"type\"]\n if error_type in [OBJECT_HASH_MISMATCH_ERROR_TYPE,\n PUT_RECONSTRUCTION_ERROR_TYPE]:\n function_id = error_contents[b\"data\"]\n if function_id == NIL_FUNCTION_ID:\n function_name = b\"Driver\"\n else:\n task_driver_id = worker.task_driver_id\n function_name = worker.redis_client.hget(\n (b\"RemoteFunction:\" + task_driver_id.id() +\n b\":\" + function_id),\n \"name\")\n error_contents[b\"data\"] = function_name\n errors.append(error_contents)\n\n return errors\n\n\ndef _initialize_serialization(worker=global_worker):\n \"\"\"Initialize the serialization library.\n\n This defines a custom serializer for object IDs and also tells ray to\n serialize several exception classes that we define for error handling.\n \"\"\"\n worker.serialization_context = pyarrow.SerializationContext()\n\n # Define a custom serializer and deserializer for handling Object IDs.\n def objectid_custom_serializer(obj):\n return obj.id()\n\n def objectid_custom_deserializer(serialized_obj):\n return ray.local_scheduler.ObjectID(serialized_obj)\n\n worker.serialization_context.register_type(\n ray.local_scheduler.ObjectID, 20 * b\"\\x00\", pickle=False,\n custom_serializer=objectid_custom_serializer,\n custom_deserializer=objectid_custom_deserializer)\n\n # Define a custom serializer and deserializer for handling numpy arrays\n # that contain objects.\n def array_custom_serializer(obj):\n return obj.tolist(), obj.dtype.str\n\n def array_custom_deserializer(serialized_obj):\n return np.array(serialized_obj[0], dtype=np.dtype(serialized_obj[1]))\n\n worker.serialization_context.register_type(\n np.ndarray, 20 * b\"\\x01\", pickle=False,\n 
custom_serializer=array_custom_serializer,\n custom_deserializer=array_custom_deserializer)\n\n def ordered_dict_custom_serializer(obj):\n return list(obj.keys()), list(obj.values())\n\n def ordered_dict_custom_deserializer(obj):\n return collections.OrderedDict(zip(obj[0], obj[1]))\n\n worker.serialization_context.register_type(\n collections.OrderedDict, 20 * b\"\\x02\", pickle=False,\n custom_serializer=ordered_dict_custom_serializer,\n custom_deserializer=ordered_dict_custom_deserializer)\n\n def default_dict_custom_serializer(obj):\n return list(obj.keys()), list(obj.values()), obj.default_factory\n\n def default_dict_custom_deserializer(obj):\n return collections.defaultdict(obj[2], zip(obj[0], obj[1]))\n\n worker.serialization_context.register_type(\n collections.defaultdict, 20 * b\"\\x03\", pickle=False,\n custom_serializer=default_dict_custom_serializer,\n custom_deserializer=default_dict_custom_deserializer)\n\n def _serialize_pandas_series(s):\n import pandas as pd\n # TODO: serializing Series without extra copy\n serialized = pyarrow.serialize_pandas(pd.DataFrame({s.name: s}))\n return {\n 'type': 'Series',\n 'data': serialized.to_pybytes()\n }\n\n def _serialize_pandas_dataframe(df):\n return {\n 'type': 'DataFrame',\n 'data': pyarrow.serialize_pandas(df).to_pybytes()\n }\n\n def _deserialize_callback_pandas(data):\n deserialized = pyarrow.deserialize_pandas(data['data'])\n type_ = data['type']\n if type_ == 'Series':\n return deserialized[deserialized.columns[0]]\n elif type_ == 'DataFrame':\n return deserialized\n else:\n raise ValueError(type_)\n\n try:\n import pandas as pd\n worker.serialization_context.register_type(\n pd.Series, 'pandas.Series',\n custom_serializer=_serialize_pandas_series,\n custom_deserializer=_deserialize_callback_pandas)\n\n worker.serialization_context.register_type(\n pd.DataFrame, 'pandas.DataFrame',\n custom_serializer=_serialize_pandas_dataframe,\n custom_deserializer=_deserialize_callback_pandas)\n except ImportError:\n # no pandas\n pass\n\n if worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n # These should only be called on the driver because _register_class\n # will export the class to all of the workers.\n _register_class(RayTaskError)\n _register_class(RayGetError)\n _register_class(RayGetArgumentError)\n # Tell Ray to serialize lambdas with pickle.\n _register_class(type(lambda: 0), use_pickle=True)\n # Tell Ray to serialize types with pickle.\n _register_class(type(int), use_pickle=True)\n\n\ndef get_address_info_from_redis_helper(redis_address, node_ip_address):\n redis_ip_address, redis_port = redis_address.split(\":\")\n # For this command to work, some other client (on the same machine as\n # Redis) must have run \"CONFIG SET protected-mode no\".\n redis_client = redis.StrictRedis(host=redis_ip_address,\n port=int(redis_port))\n # The client table prefix must be kept in sync with the file\n # \"src/common/redis_module/ray_redis_module.cc\" where it is defined.\n REDIS_CLIENT_TABLE_PREFIX = \"CL:\"\n client_keys = redis_client.keys(\"{}*\".format(REDIS_CLIENT_TABLE_PREFIX))\n # Filter to live clients on the same node and do some basic checking.\n plasma_managers = []\n local_schedulers = []\n for key in client_keys:\n info = redis_client.hgetall(key)\n\n # Ignore clients that were deleted.\n deleted = info[b\"deleted\"]\n deleted = bool(int(deleted))\n if deleted:\n continue\n\n assert b\"ray_client_id\" in info\n assert b\"node_ip_address\" in info\n assert b\"client_type\" in info\n if info[b\"node_ip_address\"].decode(\"ascii\") == 
node_ip_address:\n if info[b\"client_type\"].decode(\"ascii\") == \"plasma_manager\":\n plasma_managers.append(info)\n elif info[b\"client_type\"].decode(\"ascii\") == \"local_scheduler\":\n local_schedulers.append(info)\n # Make sure that we got at least one plasma manager and local scheduler.\n assert len(plasma_managers) >= 1\n assert len(local_schedulers) >= 1\n # Build the address information.\n object_store_addresses = []\n for manager in plasma_managers:\n address = manager[b\"address\"].decode(\"ascii\")\n port = services.get_port(address)\n object_store_addresses.append(\n services.ObjectStoreAddress(\n name=manager[b\"store_socket_name\"].decode(\"ascii\"),\n manager_name=manager[b\"manager_socket_name\"].decode(\"ascii\"),\n manager_port=port))\n scheduler_names = [\n scheduler[b\"local_scheduler_socket_name\"].decode(\"ascii\")\n for scheduler in local_schedulers]\n client_info = {\"node_ip_address\": node_ip_address,\n \"redis_address\": redis_address,\n \"object_store_addresses\": object_store_addresses,\n \"local_scheduler_socket_names\": scheduler_names,\n # Web UI should be running.\n \"webui_url\": _webui_url_helper(redis_client)}\n return client_info\n\n\ndef get_address_info_from_redis(redis_address, node_ip_address, num_retries=5):\n counter = 0\n while True:\n try:\n return get_address_info_from_redis_helper(redis_address,\n node_ip_address)\n except Exception as e:\n if counter == num_retries:\n raise\n # Some of the information may not be in Redis yet, so wait a little\n # bit.\n print(\"Some processes that the driver needs to connect to have \"\n \"not registered with Redis, so retrying. Have you run \"\n \"'ray start' on this node?\")\n time.sleep(1)\n counter += 1\n\n\ndef _init(address_info=None,\n start_ray_local=False,\n object_id_seed=None,\n num_workers=None,\n num_local_schedulers=None,\n object_store_memory=None,\n driver_mode=SCRIPT_MODE,\n redirect_output=False,\n start_workers_from_local_scheduler=True,\n num_cpus=None,\n num_gpus=None,\n num_custom_resource=None,\n num_redis_shards=None,\n plasma_directory=None,\n huge_pages=False):\n \"\"\"Helper method to connect to an existing Ray cluster or start a new one.\n\n This method handles two cases. Either a Ray cluster already exists and we\n just attach this driver to it, or we start all of the processes associated\n with a Ray cluster and attach to the newly started cluster.\n\n Args:\n address_info (dict): A dictionary with address information for\n processes in a partially-started Ray cluster. If\n start_ray_local=True, any processes not in this dictionary will be\n started. If provided, an updated address_info dictionary will be\n returned to include processes that are newly started.\n start_ray_local (bool): If True then this will start any processes not\n already in address_info, including Redis, a global scheduler, local\n scheduler(s), object store(s), and worker(s). It will also kill\n these processes when Python exits. If False, this will attach to an\n existing Ray cluster.\n object_id_seed (int): Used to seed the deterministic generation of\n object IDs. The same value can be used across multiple runs of the\n same job in order to generate the object IDs in a consistent\n manner. However, the same ID should not be used for different jobs.\n num_workers (int): The number of workers to start. 
This is only\n provided if start_ray_local is True.\n num_local_schedulers (int): The number of local schedulers to start.\n This is only provided if start_ray_local is True.\n object_store_memory: The amount of memory (in bytes) to start the\n object store with.\n driver_mode (bool): The mode in which to start the driver. This should\n be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.\n redirect_output (bool): True if stdout and stderr for all the processes\n should be redirected to files and false otherwise.\n start_workers_from_local_scheduler (bool): If this flag is True, then\n start the initial workers from the local scheduler. Else, start\n them from Python. The latter case is for debugging purposes only.\n num_cpus: A list containing the number of CPUs the local schedulers\n should be configured with.\n num_gpus: A list containing the number of GPUs the local schedulers\n should be configured with.\n num_custom_resource: A list containing the quantity of a user-defined\n custom resource that the local schedulers should be configured\n with.\n num_redis_shards: The number of Redis shards to start in addition to\n the primary Redis shard.\n plasma_directory: A directory where the Plasma memory mapped files will\n be created.\n huge_pages: Boolean flag indicating whether to start the Object\n Store with hugetlbfs support. Requires plasma_directory.\n\n Returns:\n Address information about the started processes.\n\n Raises:\n Exception: An exception is raised if an inappropriate combination of\n arguments is passed in.\n \"\"\"\n check_main_thread()\n if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:\n raise Exception(\"Driver_mode must be in [ray.SCRIPT_MODE, \"\n \"ray.PYTHON_MODE, ray.SILENT_MODE].\")\n\n # Get addresses of existing services.\n if address_info is None:\n address_info = {}\n else:\n assert isinstance(address_info, dict)\n node_ip_address = address_info.get(\"node_ip_address\")\n redis_address = address_info.get(\"redis_address\")\n\n # Start any services that do not yet exist.\n if driver_mode == PYTHON_MODE:\n # If starting Ray in PYTHON_MODE, don't start any other processes.\n pass\n elif start_ray_local:\n # In this case, we launch a scheduler, a new object store, and some\n # workers, and we connect to them. We do not launch any processes that\n # are already registered in address_info.\n # Use the address 127.0.0.1 in local mode.\n node_ip_address = (\"127.0.0.1\" if node_ip_address is None\n else node_ip_address)\n # Use 1 local scheduler if num_local_schedulers is not provided. If\n # existing local schedulers are provided, use that count as\n # num_local_schedulers.\n local_schedulers = address_info.get(\"local_scheduler_socket_names\", [])\n if num_local_schedulers is None:\n if len(local_schedulers) > 0:\n num_local_schedulers = len(local_schedulers)\n else:\n num_local_schedulers = 1\n # Use 1 additional redis shard if num_redis_shards is not provided.\n num_redis_shards = 1 if num_redis_shards is None else num_redis_shards\n # Start the scheduler, object store, and some workers. 
These will be\n # killed by the call to cleanup(), which happens when the Python script\n # exits.\n address_info = services.start_ray_head(\n address_info=address_info,\n node_ip_address=node_ip_address,\n num_workers=num_workers,\n num_local_schedulers=num_local_schedulers,\n object_store_memory=object_store_memory,\n redirect_output=redirect_output,\n start_workers_from_local_scheduler=(\n start_workers_from_local_scheduler),\n num_cpus=num_cpus,\n num_gpus=num_gpus,\n num_custom_resource=num_custom_resource,\n num_redis_shards=num_redis_shards,\n plasma_directory=plasma_directory,\n huge_pages=huge_pages)\n else:\n if redis_address is None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"redis_address must be provided.\")\n if num_workers is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"num_workers must not be provided.\")\n if num_local_schedulers is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"num_local_schedulers must not be provided.\")\n if (num_cpus is not None or num_gpus is not None or\n num_custom_resource is not None):\n raise Exception(\"When connecting to an existing cluster, resource \"\n \"labels (e.g., num_gpus, num_cpus, \"\n \"num_custom_resource) must not be provided.\")\n if num_redis_shards is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"num_redis_shards must not be provided.\")\n if object_store_memory is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"object_store_memory must not be provided.\")\n if plasma_directory is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"plasma_directory must not be provided.\")\n if huge_pages:\n raise Exception(\"When connecting to an existing cluster, \"\n \"huge_pages must not be provided.\")\n # Get the node IP address if one is not provided.\n if node_ip_address is None:\n node_ip_address = services.get_node_ip_address(redis_address)\n # Get the address info of the processes to connect to from Redis.\n address_info = get_address_info_from_redis(redis_address,\n node_ip_address)\n\n # Connect this driver to Redis, the object store, and the local scheduler.\n # Choose the first object store and local scheduler if there are multiple.\n # The corresponding call to disconnect will happen in the call to cleanup()\n # when the Python script exits.\n if driver_mode == PYTHON_MODE:\n driver_address_info = {}\n else:\n driver_address_info = {\n \"node_ip_address\": node_ip_address,\n \"redis_address\": address_info[\"redis_address\"],\n \"store_socket_name\": (\n address_info[\"object_store_addresses\"][0].name),\n \"manager_socket_name\": (\n address_info[\"object_store_addresses\"][0].manager_name),\n \"local_scheduler_socket_name\": (\n address_info[\"local_scheduler_socket_names\"][0]),\n \"webui_url\": address_info[\"webui_url\"]}\n connect(driver_address_info, object_id_seed=object_id_seed,\n mode=driver_mode, worker=global_worker, actor_id=NIL_ACTOR_ID)\n return address_info\n\n\ndef init(redis_address=None, node_ip_address=None, object_id_seed=None,\n num_workers=None, driver_mode=SCRIPT_MODE, redirect_output=False,\n num_cpus=None, num_gpus=None, num_custom_resource=None,\n num_redis_shards=None,\n plasma_directory=None, huge_pages=False):\n \"\"\"Connect to an existing Ray cluster or start one and connect to it.\n\n This method handles two cases. 
Either a Ray cluster already exists and we\n just attach this driver to it, or we start all of the processes associated\n with a Ray cluster and attach to the newly started cluster.\n\n Args:\n node_ip_address (str): The IP address of the node that we are on.\n redis_address (str): The address of the Redis server to connect to. If\n this address is not provided, then this command will start Redis, a\n global scheduler, a local scheduler, a plasma store, a plasma\n manager, and some workers. It will also kill these processes when\n Python exits.\n object_id_seed (int): Used to seed the deterministic generation of\n object IDs. The same value can be used across multiple runs of the\n same job in order to generate the object IDs in a consistent\n manner. However, the same ID should not be used for different jobs.\n num_workers (int): The number of workers to start. This is only\n provided if redis_address is not provided.\n driver_mode (bool): The mode in which to start the driver. This should\n be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.\n redirect_output (bool): True if stdout and stderr for all the processes\n should be redirected to files and false otherwise.\n num_cpus (int): Number of cpus the user wishes all local schedulers to\n be configured with.\n num_gpus (int): Number of gpus the user wishes all local schedulers to\n be configured with.\n num_custom_resource (int): The quantity of a user-defined custom\n resource that the local scheduler should be configured with. This\n flag is experimental and is subject to changes in the future.\n num_redis_shards: The number of Redis shards to start in addition to\n the primary Redis shard.\n plasma_directory: A directory where the Plasma memory mapped files will\n be created.\n huge_pages: Boolean flag indicating whether to start the Object\n Store with hugetlbfs support. Requires plasma_directory.\n\n Returns:\n Address information about the started processes.\n\n Raises:\n Exception: An exception is raised if an inappropriate combination of\n arguments is passed in.\n \"\"\"\n info = {\"node_ip_address\": node_ip_address,\n \"redis_address\": redis_address}\n return _init(address_info=info, start_ray_local=(redis_address is None),\n num_workers=num_workers, driver_mode=driver_mode,\n redirect_output=redirect_output, num_cpus=num_cpus,\n num_gpus=num_gpus, num_custom_resource=num_custom_resource,\n num_redis_shards=num_redis_shards,\n plasma_directory=plasma_directory,\n huge_pages=huge_pages)\n\n\ndef cleanup(worker=global_worker):\n \"\"\"Disconnect the worker, and terminate any processes started in init.\n\n This will automatically run at the end when a Python process that uses Ray\n exits. It is ok to run this twice in a row. 
Note that we manually call\n services.cleanup() in the tests because we need to start and stop many\n clusters in the tests, but the import and exit only happen once.\n """\n disconnect(worker)\n if hasattr(worker, "local_scheduler_client"):\n del worker.local_scheduler_client\n if hasattr(worker, "plasma_client"):\n worker.plasma_client.disconnect()\n\n if worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n # If this is a driver, push the finish time to Redis and clean up any\n # other services that were started with the driver.\n worker.redis_client.hmset(b"Drivers:" + worker.worker_id,\n {"end_time": time.time()})\n services.cleanup()\n else:\n # If this is not a driver, make sure there are no orphan processes,\n # besides possibly the worker itself.\n for process_type, processes in services.all_processes.items():\n if process_type == services.PROCESS_TYPE_WORKER:\n assert len(processes) <= 1\n else:\n assert len(processes) == 0\n\n worker.set_mode(None)\n\n\natexit.register(cleanup)\n\n# Define a custom excepthook so that if the driver exits with an exception, we\n# can push that exception to Redis.\nnormal_excepthook = sys.excepthook\n\n\ndef custom_excepthook(type, value, tb):\n # If this is a driver, push the exception to redis.\n if global_worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n error_message = "".join(traceback.format_tb(tb))\n global_worker.redis_client.hmset(b"Drivers:" + global_worker.worker_id,\n {"exception": error_message})\n # Call the normal excepthook.\n normal_excepthook(type, value, tb)\n\n\nsys.excepthook = custom_excepthook\n\n\ndef print_error_messages(worker):\n """Print error messages in the background on the driver.\n\n This runs in a separate thread on the driver and prints error messages in\n the background.\n """\n # TODO(rkn): All error messages should have a "component" field indicating\n # which process the error came from (e.g., a worker or a plasma store).\n # Currently all error messages come from workers.\n\n helpful_message = """\n You can inspect errors by running\n\n ray.error_info()\n\n If this driver is hanging, start a new one with\n\n ray.init(redis_address="{}")\n """.format(worker.redis_address)\n\n worker.error_message_pubsub_client = worker.redis_client.pubsub()\n # Exports that are published after the call to\n # error_message_pubsub_client.psubscribe and before the call to\n # error_message_pubsub_client.listen will still be processed in the loop.\n worker.error_message_pubsub_client.psubscribe("__keyspace@0__:ErrorKeys")\n num_errors_received = 0\n\n # Get the exports that occurred before the call to psubscribe.\n with worker.lock:\n error_keys = worker.redis_client.lrange("ErrorKeys", 0, -1)\n for error_key in error_keys:\n if error_applies_to_driver(error_key, worker=worker):\n error_message = worker.redis_client.hget(\n error_key, "message").decode("ascii")\n print(error_message)\n print(helpful_message)\n num_errors_received += 1\n\n try:\n for msg in worker.error_message_pubsub_client.listen():\n with worker.lock:\n for error_key in worker.redis_client.lrange(\n "ErrorKeys", num_errors_received, -1):\n if error_applies_to_driver(error_key, worker=worker):\n error_message = worker.redis_client.hget(\n error_key, "message").decode("ascii")\n print(error_message)\n print(helpful_message)\n num_errors_received += 1\n except redis.ConnectionError:\n # When Redis terminates the listen call will throw a ConnectionError,\n # which we catch here.\n pass\n\n\ndef fetch_and_register_remote_function(key, worker=global_worker):\n """Import a remote function."""\n (driver_id, function_id_str, function_name,\n serialized_function, num_return_vals, module, num_cpus,\n num_gpus, num_custom_resource, max_calls) = worker.redis_client.hmget(\n key, ["driver_id",\n "function_id",\n "name",\n "function",\n "num_return_vals",\n "module",\n "num_cpus",\n "num_gpus",\n "num_custom_resource",\n "max_calls"])\n function_id = ray.local_scheduler.ObjectID(function_id_str)\n function_name = function_name.decode("ascii")\n function_properties = FunctionProperties(\n num_return_vals=int(num_return_vals),\n num_cpus=int(num_cpus),\n num_gpus=int(num_gpus),\n num_custom_resource=int(num_custom_resource),\n max_calls=int(max_calls))\n module = module.decode("ascii")\n\n # This is a placeholder in case the function can't be unpickled. This will\n # be overwritten if the function is successfully registered.\n def f():\n raise Exception("This function was not imported properly.")\n remote_f_placeholder = remote(function_id=function_id)(lambda *xs: f())\n worker.functions[driver_id][function_id.id()] = (function_name,\n remote_f_placeholder)\n worker.function_properties[driver_id][function_id.id()] = (\n function_properties)\n worker.num_task_executions[driver_id][function_id.id()] = 0\n\n try:\n function = pickle.loads(serialized_function)\n except:\n # If an exception was thrown when the remote function was imported, we\n # record the traceback and notify the scheduler of the failure.\n traceback_str = format_error_message(traceback.format_exc())\n # Log the error message.\n worker.push_error_to_driver(driver_id, "register_remote_function",\n traceback_str,\n data={"function_id": function_id.id(),\n "function_name": function_name})\n else:\n # TODO(rkn): Why is the below line necessary?\n function.__module__ = module\n worker.functions[driver_id][function_id.id()] = (\n function_name, remote(function_id=function_id)(function))\n # Add the function to the function table.\n worker.redis_client.rpush(b"FunctionTable:" + function_id.id(),\n worker.worker_id)\n\n\ndef fetch_and_execute_function_to_run(key, worker=global_worker):\n """Run an arbitrary function on the worker."""\n driver_id, serialized_function = worker.redis_client.hmget(\n key, ["driver_id", "function"])\n # Get the number of workers on this node that have already started\n # executing this remote function, and increment that value.
Subtract 1 so\n # the counter starts at 0.\n counter = worker.redis_client.hincrby(worker.node_ip_address, key, 1) - 1\n try:\n # Deserialize the function.\n function = pickle.loads(serialized_function)\n # Run the function.\n function({\"counter\": counter, \"worker\": worker})\n except:\n # If an exception was thrown when the function was run, we record the\n # traceback and notify the scheduler of the failure.\n traceback_str = traceback.format_exc()\n # Log the error message.\n name = function.__name__ if (\"function\" in locals() and\n hasattr(function, \"__name__\")) else \"\"\n worker.push_error_to_driver(driver_id, \"function_to_run\",\n traceback_str, data={\"name\": name})\n\n\ndef import_thread(worker, mode):\n worker.import_pubsub_client = worker.redis_client.pubsub()\n # Exports that are published after the call to\n # import_pubsub_client.psubscribe and before the call to\n # import_pubsub_client.listen will still be processed in the loop.\n worker.import_pubsub_client.psubscribe(\"__keyspace@0__:Exports\")\n # Keep track of the number of imports that we've imported.\n num_imported = 0\n\n # Get the exports that occurred before the call to psubscribe.\n with worker.lock:\n export_keys = worker.redis_client.lrange(\"Exports\", 0, -1)\n for key in export_keys:\n num_imported += 1\n\n # Handle the driver case first.\n if mode != WORKER_MODE:\n if key.startswith(b\"FunctionsToRun\"):\n fetch_and_execute_function_to_run(key, worker=worker)\n # Continue because FunctionsToRun are the only things that the\n # driver should import.\n continue\n\n if key.startswith(b\"RemoteFunction\"):\n fetch_and_register_remote_function(key, worker=worker)\n elif key.startswith(b\"FunctionsToRun\"):\n fetch_and_execute_function_to_run(key, worker=worker)\n elif key.startswith(b\"ActorClass\"):\n # If this worker is an actor that is supposed to construct this\n # class, fetch the actor and class information and construct\n # the class.\n class_id = key.split(b\":\", 1)[1]\n if (worker.actor_id != NIL_ACTOR_ID and\n worker.class_id == class_id):\n worker.fetch_and_register_actor(key, worker)\n else:\n raise Exception(\"This code should be unreachable.\")\n\n try:\n for msg in worker.import_pubsub_client.listen():\n with worker.lock:\n if msg[\"type\"] == \"psubscribe\":\n continue\n assert msg[\"data\"] == b\"rpush\"\n num_imports = worker.redis_client.llen(\"Exports\")\n assert num_imports >= num_imported\n for i in range(num_imported, num_imports):\n num_imported += 1\n key = worker.redis_client.lindex(\"Exports\", i)\n\n # Handle the driver case first.\n if mode != WORKER_MODE:\n if key.startswith(b\"FunctionsToRun\"):\n with log_span(\"ray:import_function_to_run\",\n worker=worker):\n fetch_and_execute_function_to_run(\n key, worker=worker)\n # Continue because FunctionsToRun are the only things\n # that the driver should import.\n continue\n\n if key.startswith(b\"RemoteFunction\"):\n with log_span(\"ray:import_remote_function\",\n worker=worker):\n fetch_and_register_remote_function(key,\n worker=worker)\n elif key.startswith(b\"FunctionsToRun\"):\n with log_span(\"ray:import_function_to_run\",\n worker=worker):\n fetch_and_execute_function_to_run(key,\n worker=worker)\n elif key.startswith(b\"Actor\"):\n # Only get the actor if the actor ID matches the actor\n # ID of this worker.\n actor_id, = worker.redis_client.hmget(key, \"actor_id\")\n if worker.actor_id == actor_id:\n worker.fetch_and_register[\"Actor\"](key, worker)\n else:\n raise Exception(\"This code should be unreachable.\")\n 
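# Note that keys are only ever appended to the "Exports" list (via\n # rpush), so the index-based scan above never skips or re-reads an\n # export.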
except redis.ConnectionError:\n # When Redis terminates the listen call will throw a ConnectionError,\n # which we catch here.\n pass\n\n\ndef connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,\n actor_id=NIL_ACTOR_ID):\n """Connect this worker to the local scheduler, to Plasma, and to Redis.\n\n Args:\n info (dict): A dictionary with address of the Redis server and the\n sockets of the plasma store, plasma manager, and local scheduler.\n object_id_seed: A seed to use to make the generation of object IDs\n deterministic.\n mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE,\n PYTHON_MODE, and SILENT_MODE.\n actor_id: The ID of the actor running on this worker. If this worker is\n not an actor, then this is NIL_ACTOR_ID.\n """\n check_main_thread()\n # Do some basic checking to make sure we didn't call ray.init twice.\n error_message = "Perhaps you called ray.init twice by accident?"\n assert not worker.connected, error_message\n assert worker.cached_functions_to_run is not None, error_message\n assert worker.cached_remote_functions is not None, error_message\n # Initialize some fields.\n worker.worker_id = random_string()\n worker.actor_id = actor_id\n worker.connected = True\n worker.set_mode(mode)\n # The worker.events field is used to aggregate logging information and\n # display it in the web UI. Note that Python lists are protected by the\n # GIL, which is important because we will append to this field from\n # multiple threads.\n worker.events = []\n # If running Ray in PYTHON_MODE, there is no need to call create_worker\n # or to start the worker service.\n if mode == PYTHON_MODE:\n return\n # Set the node IP address.\n worker.node_ip_address = info["node_ip_address"]\n worker.redis_address = info["redis_address"]\n\n # Create a Redis client.\n redis_ip_address, redis_port = info["redis_address"].split(":")\n worker.redis_client = redis.StrictRedis(host=redis_ip_address,\n port=int(redis_port))\n worker.lock = threading.Lock()\n\n # Check the RedirectOutput key in Redis and based on its value redirect\n # worker output and error to their own files.\n if mode == WORKER_MODE:\n # This key is set in services.py when Redis is started.\n redirect_worker_output_val = worker.redis_client.get("RedirectOutput")\n if (redirect_worker_output_val is not None and\n int(redirect_worker_output_val) == 1):\n redirect_worker_output = 1\n else:\n redirect_worker_output = 0\n if redirect_worker_output:\n log_stdout_file, log_stderr_file = services.new_log_files("worker",\n True)\n sys.stdout = log_stdout_file\n sys.stderr = log_stderr_file\n services.record_log_files_in_redis(info["redis_address"],\n info["node_ip_address"],\n [log_stdout_file,\n log_stderr_file])\n\n # Create an object for interfacing with the global state.\n global_state._initialize_global_state(redis_ip_address, int(redis_port))\n\n # Register the worker with Redis.\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n # The concept of a driver is the same as the concept of a "job".\n # Register the driver/job with Redis here.\n import __main__ as main\n driver_info = {\n "node_ip_address": worker.node_ip_address,\n "driver_id": worker.worker_id,\n "start_time": time.time(),\n "plasma_store_socket": info["store_socket_name"],\n "plasma_manager_socket": info["manager_socket_name"],\n "local_scheduler_socket": info["local_scheduler_socket_name"]}\n driver_info["name"] = (main.__file__ if hasattr(main, "__file__")\n else "INTERACTIVE MODE")
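# Store the driver metadata in a per-driver Redis hash; cleanup() and\n # the custom excepthook defined above update this same hash later on.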
worker.redis_client.hmset(b\"Drivers:\" + worker.worker_id, driver_info)\n if not worker.redis_client.exists(\"webui\"):\n worker.redis_client.hmset(\"webui\", {\"url\": info[\"webui_url\"]})\n is_worker = False\n elif mode == WORKER_MODE:\n # Register the worker with Redis.\n worker_dict = {\n \"node_ip_address\": worker.node_ip_address,\n \"plasma_store_socket\": info[\"store_socket_name\"],\n \"plasma_manager_socket\": info[\"manager_socket_name\"],\n \"local_scheduler_socket\": info[\"local_scheduler_socket_name\"]}\n if redirect_worker_output:\n worker_dict[\"stdout_file\"] = os.path.abspath(log_stdout_file.name)\n worker_dict[\"stderr_file\"] = os.path.abspath(log_stderr_file.name)\n worker.redis_client.hmset(b\"Workers:\" + worker.worker_id, worker_dict)\n is_worker = True\n else:\n raise Exception(\"This code should be unreachable.\")\n\n # Create an object store client.\n worker.plasma_client = plasma.connect(info[\"store_socket_name\"],\n info[\"manager_socket_name\"],\n 64)\n # Create the local scheduler client.\n if worker.actor_id != NIL_ACTOR_ID:\n num_gpus = int(worker.redis_client.hget(b\"Actor:\" + actor_id,\n \"num_gpus\"))\n else:\n num_gpus = 0\n worker.local_scheduler_client = ray.local_scheduler.LocalSchedulerClient(\n info[\"local_scheduler_socket_name\"], worker.worker_id, worker.actor_id,\n is_worker, num_gpus)\n\n # If this is a driver, set the current task ID, the task driver ID, and set\n # the task index to 0.\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n # If the user provided an object_id_seed, then set the current task ID\n # deterministically based on that seed (without altering the state of\n # the user's random number generator). Otherwise, set the current task\n # ID randomly to avoid object ID collisions.\n numpy_state = np.random.get_state()\n if object_id_seed is not None:\n np.random.seed(object_id_seed)\n else:\n # Try to use true randomness.\n np.random.seed(None)\n worker.current_task_id = ray.local_scheduler.ObjectID(\n np.random.bytes(20))\n # When tasks are executed on remote workers in the context of multiple\n # drivers, the task driver ID is used to keep track of which driver is\n # responsible for the task so that error messages will be propagated to\n # the correct driver.\n worker.task_driver_id = ray.local_scheduler.ObjectID(worker.worker_id)\n # Reset the state of the numpy random number generator.\n np.random.set_state(numpy_state)\n # Set other fields needed for computing task IDs.\n worker.task_index = 0\n worker.put_index = 0\n\n # Create an entry for the driver task in the task table. This task is\n # added immediately with status RUNNING. This allows us to push errors\n # related to this driver task back to the driver. 
For example, if the\n # driver creates an object that is later evicted, we should notify the\n # user that we're unable to reconstruct the object, since we cannot\n # rerun the driver.\n nil_actor_counter = 0\n driver_task = ray.local_scheduler.Task(\n worker.task_driver_id,\n ray.local_scheduler.ObjectID(NIL_FUNCTION_ID),\n [],\n 0,\n worker.current_task_id,\n worker.task_index,\n ray.local_scheduler.ObjectID(NIL_ACTOR_ID),\n nil_actor_counter,\n False,\n [0, 0, 0])\n global_state._execute_command(\n driver_task.task_id(),\n \"RAY.TASK_TABLE_ADD\",\n driver_task.task_id().id(),\n TASK_STATUS_RUNNING,\n NIL_LOCAL_SCHEDULER_ID,\n ray.local_scheduler.task_to_string(driver_task))\n # Set the driver's current task ID to the task ID assigned to the\n # driver task.\n worker.current_task_id = driver_task.task_id()\n\n # If this is an actor, get the ID of the corresponding class for the actor.\n if worker.actor_id != NIL_ACTOR_ID:\n actor_key = b\"Actor:\" + worker.actor_id\n class_id = worker.redis_client.hget(actor_key, \"class_id\")\n worker.class_id = class_id\n # Store a list of the dummy outputs produced by actor tasks, to pin the\n # dummy outputs in the object store.\n worker.actor_pinned_objects = {}\n\n # Initialize the serialization library. This registers some classes, and so\n # it must be run before we export all of the cached remote functions.\n _initialize_serialization()\n\n # Start a thread to import exports from the driver or from other workers.\n # Note that the driver also has an import thread, which is used only to\n # import custom class definitions from calls to _register_class that happen\n # under the hood on workers.\n t = threading.Thread(target=import_thread, args=(worker, mode))\n # Making the thread a daemon causes it to exit when the main thread exits.\n t.daemon = True\n t.start()\n\n # If this is a driver running in SCRIPT_MODE, start a thread to print error\n # messages asynchronously in the background. Ideally the scheduler would\n # push messages to the driver's worker service, but we ran into bugs when\n # trying to properly shutdown the driver's worker service, so we are\n # temporarily using this implementation which constantly queries the\n # scheduler for new error messages.\n if mode == SCRIPT_MODE:\n t = threading.Thread(target=print_error_messages, args=(worker,))\n # Making the thread a daemon causes it to exit when the main thread\n # exits.\n t.daemon = True\n t.start()\n\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n # Add the directory containing the script that is running to the Python\n # paths of the workers. Also add the current directory. Note that this\n # assumes that the directory structures on the machines in the clusters\n # are the same.\n script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))\n current_directory = os.path.abspath(os.path.curdir)\n worker.run_function_on_all_workers(\n lambda worker_info: sys.path.insert(1, script_directory))\n worker.run_function_on_all_workers(\n lambda worker_info: sys.path.insert(1, current_directory))\n # TODO(rkn): Here we first export functions to run, then remote\n # functions. The order matters. For example, one of the functions to\n # run may set the Python path, which is needed to import a module used\n # to define a remote function. We may want to change the order to\n # simply be the order in which the exports were defined on the driver.\n # In addition, we will need to retain the ability to decide what the\n # first few exports are (mostly to set the Python path). 
Additionally,\n # note that the first exports to be defined on the driver will be the\n # ones defined in separate modules that are imported by the driver.\n # Export cached functions_to_run.\n for function in worker.cached_functions_to_run:\n worker.run_function_on_all_workers(function)\n # Export cached remote functions to the workers.\n for info in worker.cached_remote_functions:\n (function_id, func_name, func,\n func_invoker, function_properties) = info\n export_remote_function(function_id, func_name, func, func_invoker,\n function_properties, worker)\n worker.cached_functions_to_run = None\n worker.cached_remote_functions = None\n\n\ndef disconnect(worker=global_worker):\n \"\"\"Disconnect this worker from the scheduler and object store.\"\"\"\n # Reset the list of cached remote functions so that if more remote\n # functions are defined and then connect is called again, the remote\n # functions will be exported. This is mostly relevant for the tests.\n worker.connected = False\n worker.cached_functions_to_run = []\n worker.cached_remote_functions = []\n worker.serialization_context = pyarrow.SerializationContext()\n\n\ndef register_class(cls, use_pickle=False, worker=global_worker):\n raise Exception(\"The function ray.register_class is deprecated. It should \"\n \"be safe to remove any calls to this function.\")\n\n\ndef _register_class(cls, use_pickle=False, worker=global_worker):\n \"\"\"Enable serialization and deserialization for a particular class.\n\n This method runs the register_class function defined below on every worker,\n which will enable ray to properly serialize and deserialize objects of\n this class.\n\n Args:\n cls (type): The class that ray should serialize.\n use_pickle (bool): If False then objects of this class will be\n serialized by turning their __dict__ fields into a dictionary. 
If\n True, then objects of this class will be serialized using pickle.\n\n Raises:\n Exception: An exception is raised if pickle=False and the class cannot\n be efficiently serialized by Ray.\n \"\"\"\n if not use_pickle:\n # In this case, the class ID will be used to deduplicate the class\n # across workers.\n class_id = hashlib.sha1(pickle.dumps(cls)).digest()\n else:\n # In this case, the class ID only needs to be meaningful on this worker\n # and not across workers.\n class_id = random_string()\n\n def register_class_for_serialization(worker_info):\n worker_info[\"worker\"].serialization_context.register_type(\n cls, class_id, pickle=use_pickle)\n\n if not use_pickle:\n # Raise an exception if cls cannot be serialized efficiently by Ray.\n serialization.check_serializable(cls)\n worker.run_function_on_all_workers(register_class_for_serialization)\n else:\n # Since we are pickling objects of this class, we don't actually need\n # to ship the class definition.\n register_class_for_serialization({\"worker\": worker})\n\n\nclass RayLogSpan(object):\n \"\"\"An object used to enable logging a span of events with a with statement.\n\n Attributes:\n event_type (str): The type of the event being logged.\n contents: Additional information to log.\n \"\"\"\n def __init__(self, event_type, contents=None, worker=global_worker):\n \"\"\"Initialize a RayLogSpan object.\"\"\"\n self.event_type = event_type\n self.contents = contents\n self.worker = worker\n\n def __enter__(self):\n \"\"\"Log the beginning of a span event.\"\"\"\n log(event_type=self.event_type,\n contents=self.contents,\n kind=LOG_SPAN_START,\n worker=self.worker)\n\n def __exit__(self, type, value, tb):\n \"\"\"Log the end of a span event. Log any exception that occurred.\"\"\"\n if type is None:\n log(event_type=self.event_type, kind=LOG_SPAN_END,\n worker=self.worker)\n else:\n log(event_type=self.event_type,\n contents={\"type\": str(type),\n \"value\": value,\n \"traceback\": traceback.format_exc()},\n kind=LOG_SPAN_END,\n worker=self.worker)\n\n\ndef log_span(event_type, contents=None, worker=global_worker):\n return RayLogSpan(event_type, contents=contents, worker=worker)\n\n\ndef log_event(event_type, contents=None, worker=global_worker):\n log(event_type, kind=LOG_POINT, contents=contents, worker=worker)\n\n\ndef log(event_type, kind, contents=None, worker=global_worker):\n \"\"\"Log an event to the global state store.\n\n This adds the event to a buffer of events locally. The buffer can be\n flushed and written to the global state store by calling flush_log().\n\n Args:\n event_type (str): The type of the event.\n contents: More general data to store with the event.\n kind (int): Either LOG_POINT, LOG_SPAN_START, or LOG_SPAN_END. This is\n LOG_POINT if the event being logged happens at a single point in\n time. It is LOG_SPAN_START if we are starting to log a span of\n time, and it is LOG_SPAN_END if we are finishing logging a span of\n time.\n \"\"\"\n # TODO(rkn): This code currently takes around half a microsecond. Since we\n # call it tens of times per task, this adds up. 
We will need to redo the\n # logging code, perhaps in C.\n contents = {} if contents is None else contents\n assert isinstance(contents, dict)\n # Make sure all of the keys and values in the dictionary are strings.\n contents = {str(k): str(v) for k, v in contents.items()}\n worker.events.append((time.time(), event_type, kind, contents))\n\n\ndef flush_log(worker=global_worker):\n \"\"\"Send the logged worker events to the global state store.\"\"\"\n event_log_key = b\"event_log:\" + worker.worker_id\n event_log_value = json.dumps(worker.events)\n worker.local_scheduler_client.log_event(event_log_key,\n event_log_value,\n time.time())\n worker.events = []\n\n\ndef get(object_ids, worker=global_worker):\n \"\"\"Get a remote object or a list of remote objects from the object store.\n\n This method blocks until the object corresponding to the object ID is\n available in the local object store. If this object is not in the local\n object store, it will be shipped from an object store that has it (once the\n object has been created). If object_ids is a list, then the objects\n corresponding to each object in the list will be returned.\n\n Args:\n object_ids: Object ID of the object to get or a list of object IDs to\n get.\n\n Returns:\n A Python object or a list of Python objects.\n \"\"\"\n check_connected(worker)\n with log_span(\"ray:get\", worker=worker):\n check_main_thread()\n\n if worker.mode == PYTHON_MODE:\n # In PYTHON_MODE, ray.get is the identity operation (the input will\n # actually be a value not an objectid).\n return object_ids\n if isinstance(object_ids, list):\n values = worker.get_object(object_ids)\n for i, value in enumerate(values):\n if isinstance(value, RayTaskError):\n raise RayGetError(object_ids[i], value)\n return values\n else:\n value = worker.get_object([object_ids])[0]\n if isinstance(value, RayTaskError):\n # If the result is a RayTaskError, then the task that created\n # this object failed, and we should propagate the error message\n # here.\n raise RayGetError(object_ids, value)\n return value\n\n\ndef put(value, worker=global_worker):\n \"\"\"Store an object in the object store.\n\n Args:\n value: The Python object to be stored.\n\n Returns:\n The object ID assigned to this value.\n \"\"\"\n check_connected(worker)\n with log_span(\"ray:put\", worker=worker):\n check_main_thread()\n\n if worker.mode == PYTHON_MODE:\n # In PYTHON_MODE, ray.put is the identity operation.\n return value\n object_id = worker.local_scheduler_client.compute_put_id(\n worker.current_task_id, worker.put_index)\n worker.put_object(object_id, value)\n worker.put_index += 1\n return object_id\n\n\ndef wait(object_ids, num_returns=1, timeout=None, worker=global_worker):\n \"\"\"Return a list of IDs that are ready and a list of IDs that are not.\n\n If timeout is set, the function returns either when the requested number of\n IDs are ready or when the timeout is reached, whichever occurs first. If it\n is not set, the function simply waits until that number of objects is ready\n and returns that exact number of objectids.\n\n This method returns two lists. The first list consists of object IDs that\n correspond to objects that are stored in the object store. The second list\n corresponds to the rest of the object IDs (which may or may not be ready).\n\n Args:\n object_ids (List[ObjectID]): List of object IDs for objects that may or\n may not be ready. 
Note that these IDs must be unique.\n num_returns (int): The number of object IDs that should be returned.\n timeout (int): The maximum amount of time in milliseconds to wait\n before returning.\n\n Returns:\n A list of object IDs that are ready and a list of the remaining object\n IDs.\n \"\"\"\n check_connected(worker)\n with log_span(\"ray:wait\", worker=worker):\n check_main_thread()\n\n # When Ray is run in PYTHON_MODE, all functions are run immediately,\n # so all objects in object_id are ready.\n if worker.mode == PYTHON_MODE:\n return object_ids[:num_returns], object_ids[num_returns:]\n\n # TODO(rkn): This is a temporary workaround for\n # https://github.com/ray-project/ray/issues/997. However, it should be\n # fixed in Arrow instead of here.\n if len(object_ids) == 0:\n return [], []\n\n object_id_strs = [plasma.ObjectID(object_id.id())\n for object_id in object_ids]\n timeout = timeout if timeout is not None else 2 ** 30\n ready_ids, remaining_ids = worker.plasma_client.wait(object_id_strs,\n timeout,\n num_returns)\n ready_ids = [ray.local_scheduler.ObjectID(object_id.binary())\n for object_id in ready_ids]\n remaining_ids = [ray.local_scheduler.ObjectID(object_id.binary())\n for object_id in remaining_ids]\n return ready_ids, remaining_ids\n\n\ndef format_error_message(exception_message, task_exception=False):\n \"\"\"Improve the formatting of an exception thrown by a remote function.\n\n This method takes a traceback from an exception and makes it nicer by\n removing a few uninformative lines and adding some space to indent the\n remaining lines nicely.\n\n Args:\n exception_message (str): A message generated by traceback.format_exc().\n\n Returns:\n A string of the formatted exception message.\n \"\"\"\n lines = exception_message.split(\"\\n\")\n if task_exception:\n # For errors that occur inside of tasks, remove lines 1, 2, 3, and 4,\n # which are always the same, they just contain information about the\n # main loop.\n lines = lines[0:1] + lines[5:]\n return \"\\n\".join(lines)\n\n\ndef _submit_task(function_id, args, worker=global_worker):\n \"\"\"This is a wrapper around worker.submit_task.\n\n We use this wrapper so that in the remote decorator, we can call\n _submit_task instead of worker.submit_task. The difference is that when we\n attempt to serialize remote functions, we don't attempt to serialize the\n worker object, which cannot be serialized.\n \"\"\"\n return worker.submit_task(function_id, args)\n\n\ndef _mode(worker=global_worker):\n \"\"\"This is a wrapper around worker.mode.\n\n We use this wrapper so that in the remote decorator, we can call _mode()\n instead of worker.mode. 
The difference is that when we attempt to serialize\n remote functions, we don't attempt to serialize the worker object, which\n cannot be serialized.\n \"\"\"\n return worker.mode\n\n\ndef export_remote_function(function_id, func_name, func, func_invoker,\n function_properties, worker=global_worker):\n check_main_thread()\n if _mode(worker) not in [SCRIPT_MODE, SILENT_MODE]:\n raise Exception(\"export_remote_function can only be called on a \"\n \"driver.\")\n\n worker.function_properties[\n worker.task_driver_id.id()][function_id.id()] = function_properties\n task_driver_id = worker.task_driver_id\n key = b\"RemoteFunction:\" + task_driver_id.id() + b\":\" + function_id.id()\n\n # Work around limitations of Python pickling.\n func_name_global_valid = func.__name__ in func.__globals__\n func_name_global_value = func.__globals__.get(func.__name__)\n # Allow the function to reference itself as a global variable\n func.__globals__[func.__name__] = func_invoker\n try:\n pickled_func = pickle.dumps(func)\n finally:\n # Undo our changes\n if func_name_global_valid:\n func.__globals__[func.__name__] = func_name_global_value\n else:\n del func.__globals__[func.__name__]\n\n worker.redis_client.hmset(key, {\n \"driver_id\": worker.task_driver_id.id(),\n \"function_id\": function_id.id(),\n \"name\": func_name,\n \"module\": func.__module__,\n \"function\": pickled_func,\n \"num_return_vals\": function_properties.num_return_vals,\n \"num_cpus\": function_properties.num_cpus,\n \"num_gpus\": function_properties.num_gpus,\n \"num_custom_resource\": function_properties.num_custom_resource,\n \"max_calls\": function_properties.max_calls})\n worker.redis_client.rpush(\"Exports\", key)\n\n\ndef in_ipython():\n \"\"\"Return true if we are in an IPython interpreter and false otherwise.\"\"\"\n try:\n __IPYTHON__\n return True\n except NameError:\n return False\n\n\ndef compute_function_id(func_name, func):\n \"\"\"Compute an function ID for a function.\n\n Args:\n func_name: The name of the function (this includes the module name plus\n the function name).\n func: The actual function.\n\n Returns:\n This returns the function ID.\n \"\"\"\n function_id_hash = hashlib.sha1()\n # Include the function name in the hash.\n function_id_hash.update(func_name.encode(\"ascii\"))\n # If we are running a script or are in IPython, include the source code in\n # the hash. If we are in a regular Python interpreter we skip this part\n # because the source code is not accessible.\n import __main__ as main\n if hasattr(main, \"__file__\") or in_ipython():\n function_id_hash.update(inspect.getsource(func).encode(\"ascii\"))\n # Compute the function ID.\n function_id = function_id_hash.digest()\n assert len(function_id) == 20\n function_id = FunctionID(function_id)\n\n return function_id\n\n\ndef remote(*args, **kwargs):\n \"\"\"This decorator is used to define remote functions and to define actors.\n\n Args:\n num_return_vals (int): The number of object IDs that a call to this\n function should return.\n num_cpus (int): The number of CPUs needed to execute this function.\n num_gpus (int): The number of GPUs needed to execute this function.\n num_custom_resource (int): The quantity of a user-defined custom\n resource that is needed to execute this function. 
This flag is\n experimental and is subject to changes in the future.\n max_calls (int): The maximum number of tasks of this kind that can be\n run on a worker before the worker needs to be restarted.\n checkpoint_interval (int): The number of tasks to run between\n checkpoints of the actor state.\n \"\"\"\n worker = global_worker\n\n def make_remote_decorator(num_return_vals, num_cpus, num_gpus,\n num_custom_resource, max_calls,\n checkpoint_interval, func_id=None):\n def remote_decorator(func_or_class):\n if inspect.isfunction(func_or_class):\n function_properties = FunctionProperties(\n num_return_vals=num_return_vals,\n num_cpus=num_cpus,\n num_gpus=num_gpus,\n num_custom_resource=num_custom_resource,\n max_calls=max_calls)\n return remote_function_decorator(func_or_class,\n function_properties)\n if inspect.isclass(func_or_class):\n return worker.make_actor(func_or_class, num_cpus, num_gpus,\n checkpoint_interval)\n raise Exception(\"The @ray.remote decorator must be applied to \"\n \"either a function or to a class.\")\n\n def remote_function_decorator(func, function_properties):\n func_name = \"{}.{}\".format(func.__module__, func.__name__)\n if func_id is None:\n function_id = compute_function_id(func_name, func)\n else:\n function_id = func_id\n\n def func_call(*args, **kwargs):\n \"\"\"This runs immediately when a remote function is called.\"\"\"\n check_connected()\n check_main_thread()\n args = signature.extend_args(function_signature, args, kwargs)\n\n if _mode() == PYTHON_MODE:\n # In PYTHON_MODE, remote calls simply execute the function.\n # We copy the arguments to prevent the function call from\n # mutating them and to match the usual behavior of\n # immutable remote objects.\n result = func(*copy.deepcopy(args))\n return result\n objectids = _submit_task(function_id, args)\n if len(objectids) == 1:\n return objectids[0]\n elif len(objectids) > 1:\n return objectids\n\n def func_executor(arguments):\n \"\"\"This gets run when the remote function is executed.\"\"\"\n result = func(*arguments)\n return result\n\n def func_invoker(*args, **kwargs):\n \"\"\"This is used to invoke the function.\"\"\"\n raise Exception(\"Remote functions cannot be called directly. 
\"\n \"Instead of running '{}()', try '{}.remote()'.\"\n .format(func_name, func_name))\n func_invoker.remote = func_call\n func_invoker.executor = func_executor\n func_invoker.is_remote = True\n func_name = \"{}.{}\".format(func.__module__, func.__name__)\n func_invoker.func_name = func_name\n if sys.version_info >= (3, 0):\n func_invoker.__doc__ = func.__doc__\n else:\n func_invoker.func_doc = func.func_doc\n\n signature.check_signature_supported(func)\n function_signature = signature.extract_signature(func)\n\n # Everything ready - export the function\n if worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n export_remote_function(function_id, func_name, func,\n func_invoker, function_properties)\n elif worker.mode is None:\n worker.cached_remote_functions.append((function_id, func_name,\n func, func_invoker,\n function_properties))\n return func_invoker\n\n return remote_decorator\n\n num_return_vals = (kwargs[\"num_return_vals\"] if \"num_return_vals\"\n in kwargs else 1)\n num_cpus = kwargs[\"num_cpus\"] if \"num_cpus\" in kwargs else 1\n num_gpus = kwargs[\"num_gpus\"] if \"num_gpus\" in kwargs else 0\n num_custom_resource = (kwargs[\"num_custom_resource\"]\n if \"num_custom_resource\" in kwargs else 0)\n max_calls = kwargs[\"max_calls\"] if \"max_calls\" in kwargs else 0\n checkpoint_interval = (kwargs[\"checkpoint_interval\"]\n if \"checkpoint_interval\" in kwargs else -1)\n\n if _mode() == WORKER_MODE:\n if \"function_id\" in kwargs:\n function_id = kwargs[\"function_id\"]\n return make_remote_decorator(num_return_vals, num_cpus, num_gpus,\n num_custom_resource, max_calls,\n checkpoint_interval, function_id)\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # This is the case where the decorator is just @ray.remote.\n return make_remote_decorator(\n num_return_vals, num_cpus,\n num_gpus, num_custom_resource,\n max_calls, checkpoint_interval)(args[0])\n else:\n # This is the case where the decorator is something like\n # @ray.remote(num_return_vals=2).\n error_string = (\"The @ray.remote decorator must be applied either \"\n \"with no arguments and no parentheses, for example \"\n \"'@ray.remote', or it must be applied using some of \"\n \"the arguments 'num_return_vals', 'num_cpus', \"\n \"'num_gpus', num_custom_resource, or 'max_calls', \"\n \"like '@ray.remote(num_return_vals=2)'.\")\n assert (len(args) == 0 and\n (\"num_return_vals\" in kwargs or\n \"num_cpus\" in kwargs or\n \"num_gpus\" in kwargs or\n \"num_custom_resource\" in kwargs or\n \"max_calls\" in kwargs or\n \"checkpoint_interval\" in kwargs)), error_string\n for key in kwargs:\n assert key in [\"num_return_vals\", \"num_cpus\",\n \"num_gpus\", \"num_custom_resource\", \"max_calls\",\n \"checkpoint_interval\"], error_string\n assert \"function_id\" not in kwargs\n return make_remote_decorator(num_return_vals, num_cpus, num_gpus,\n num_custom_resource, max_calls,\n checkpoint_interval)\n" ]
[ [ "numpy.random.get_state", "numpy.random.seed", "pandas.DataFrame", "numpy.dtype", "numpy.random.bytes", "numpy.random.set_state" ] ]
stefanbschneider/keras-rl
[ "216c3145f3dc4d17877be26ca2185ce7db462bad" ]
[ "examples/ddpg_mujoco.py" ]
[ "import numpy as np\n\nimport gym\nfrom gym import wrappers\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Flatten, Input, Concatenate\nfrom keras.optimizers import Adam\n\nfrom rl.processors import WhiteningNormalizerProcessor\nfrom rl.agents import DDPGAgent\nfrom rl.memory import SequentialMemory\nfrom rl.random import OrnsteinUhlenbeckProcess\n\n\nclass MujocoProcessor(WhiteningNormalizerProcessor):\n def process_action(self, action):\n return np.clip(action, -1., 1.)\n\n\nENV_NAME = 'HalfCheetah-v2'\n\n\n# Get the environment and extract the number of actions.\nenv = gym.make(ENV_NAME)\nenv = wrappers.Monitor(env, '/tmp/{}'.format(ENV_NAME), force=True)\nnp.random.seed(123)\nenv.seed(123)\nassert len(env.action_space.shape) == 1\nnb_actions = env.action_space.shape[0]\n\n# Next, we build a very simple model.\nactor = Sequential()\nactor.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nactor.add(Dense(400))\nactor.add(Activation('relu'))\nactor.add(Dense(300))\nactor.add(Activation('relu'))\nactor.add(Dense(nb_actions))\nactor.add(Activation('tanh'))\nprint(actor.summary())\n\naction_input = Input(shape=(nb_actions,), name='action_input')\nobservation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\nflattened_observation = Flatten()(observation_input)\nx = Dense(400)(flattened_observation)\nx = Activation('relu')(x)\nx = Concatenate()([x, action_input])\nx = Dense(300)(x)\nx = Activation('relu')(x)\nx = Dense(1)(x)\nx = Activation('linear')(x)\ncritic = Model(inputs=[action_input, observation_input], outputs=x)\nprint(critic.summary())\n\n# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n# even the metrics!\nmemory = SequentialMemory(limit=100000, window_length=1)\nrandom_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.1)\nagent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,\n memory=memory, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,\n random_process=random_process, gamma=.99, target_model_update=1e-3,\n processor=MujocoProcessor())\nagent.compile([Adam(lr=1e-4), Adam(lr=1e-3)], metrics=['mae'])\n\n# Okay, now it's time to learn something! We visualize the training here for show, but this\n# slows down training quite a lot. You can always safely abort the training prematurely using\n# Ctrl + C.\nagent.fit(env, nb_steps=1000000, visualize=False, verbose=1)\n\n# After training is done, we save the final weights.\nagent.save_weights('ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True)\n\n# Finally, evaluate our algorithm for 5 episodes.\nagent.test(env, nb_episodes=5, visualize=True, nb_max_episode_steps=200)\n" ]
[ [ "numpy.random.seed", "numpy.clip" ] ]
yangyangxusheng/Sarcasm-style-tranfer
[ "5619a226e952bde0b6964a75b6e4d35ec42ca937" ]
[ "Predict_the_level_of_sarcasm.py" ]
[ "import torch\r\nimport json\r\nfrom run_pplm_discrim_train import predict,Discriminator\r\nfrom pplm_classification_head import ClassificationHead\r\n\r\nEPSILON = 1e-10\r\npretrained_model = 'gpt2-medium'\r\nidx2class = ['0', '1']\r\n\r\ndef load_classifier_head(weights_path, meta_path, device):\r\n with open(meta_path, 'r', encoding=\"utf8\") as f:\r\n meta_params = json.load(f)\r\n classifier_head = ClassificationHead(\r\n class_size=meta_params['class_size'],\r\n embed_size=meta_params['embed_size']\r\n ).to(device)\r\n classifier_head.load_state_dict(\r\n torch.load(weights_path, map_location=device))\r\n classifier_head.eval()\r\n return classifier_head, meta_params\r\n\r\ndef load_discriminator(weights_path, meta_path, device):\r\n classifier_head, meta_param = load_classifier_head(\r\n weights_path, meta_path, device\r\n )\r\n discriminator = Discriminator(\r\n pretrained_model=meta_param['pretrained_model'],\r\n classifier_head=classifier_head,\r\n cached_mode=False,\r\n device=device\r\n )\r\n return discriminator, meta_param\r\n\r\ndiscriminator, meta = load_discriminator(weights_path='headlines_sarcasm_discriminator(7).pt', meta_path='sarcasm_classifier_head_meta.json', device='cpu')\r\n# sentence = 'the biggest fails of the first 100 years'\r\n\r\ndef predict_the_score_of_sentence(sentence):\r\n predict(sentence, discriminator, idx2class, cached=False, device='cpu')\r\n\r\n# predict_the_score_of_sentence(sentence)" ]
[ [ "torch.load" ] ]
saist1993/parseq
[ "dce90d06d14ffbb0a471849f04c373a173475d3a" ]
[ "parseq/scripts/lcquad_vib.py" ]
[ "import math\nimport os\nimport random\nimport re\nimport sys\nfrom abc import ABC\nfrom functools import partial\nfrom typing import *\n\nimport dill as dill\nimport torch\nimport numpy as np\nimport ujson\n\nimport qelos as q\nfrom allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper\nfrom nltk import PorterStemmer, Tree\n\nfrom torch.utils.data import DataLoader\n\n# from funcparse.decoding import TransitionModel, TFActionSeqDecoder, LSTMCellTransition, BeamActionSeqDecoder, \\\n# GreedyActionSeqDecoder, TFTokenSeqDecoder\n# from funcparse.grammar import FuncGrammar, passtr_to_pas\n# from funcparse.states import FuncTreeState, FuncTreeStateBatch, BasicState, BasicStateBatch\n# from funcparse.vocab import VocabBuilder, SentenceEncoder, FuncQueryEncoder\n# from funcparse.nn import TokenEmb, PtrGenOutput, SumPtrGenOutput, BasicGenOutput\nfrom parseq.decoding import SeqDecoder, BeamDecoder, BeamTransition\nfrom parseq.eval import CELoss, SeqAccuracies, make_array_of_metrics, DerivedAccuracy, TreeAccuracy, StatePenalty\nfrom parseq.grammar import prolog_to_pas, lisp_to_pas, pas_to_prolog, pas_to_tree, tree_size, tree_to_prolog, \\\n tree_to_lisp, lisp_to_tree\nfrom parseq.nn import TokenEmb, BasicGenOutput, PtrGenOutput, PtrGenOutput2, load_pretrained_embeddings, GRUEncoder, \\\n LSTMEncoder\nfrom parseq.states import DecodableState, BasicDecoderState, State, TreeDecoderState, ListState\nfrom parseq.transitions import TransitionModel, LSTMCellTransition, LSTMTransition, GRUTransition\nfrom parseq.vib import VIB, VIB_seq\nfrom parseq.vocab import SequenceEncoder, Vocab\n\n\ndef stem_id_words(pas, idparents, stem=False, strtok=None):\n if stem is True:\n assert(not isinstance(pas, tuple))\n if not isinstance(pas, tuple):\n if stem is True:\n assert(isinstance(pas, str))\n if re.match(r\"'([^']+)'\", pas):\n pas = re.match(r\"'([^']+)'\", pas).group(1)\n pas = strtok(pas)\n return [(\"str\", pas)]\n else:\n return [pas]\n else:\n return [pas]\n else:\n tostem = pas[0] in idparents\n children = [stem_id_words(k, idparents, stem=tostem, strtok=strtok)\n for k in pas[1]]\n children = [a for b in children for a in b]\n return [(pas[0], children)]\n\n\ndef stem_id_words_tree(tree:Tree, idparents, stem=False, strtok=None):\n if stem is True:\n assert(len(tree) == 0) # should be leaf\n if len(tree) == 0:\n if stem is True:\n if re.match(r\"'([^']+)'\", tree.label()):\n pas = re.match(r\"'([^']+)'\", tree.label()).group(1)\n pas = strtok(pas)\n return [Tree(\"str\", pas)]\n else:\n return [tree]\n else:\n return [tree]\n else:\n tostem = tree.label() in idparents\n children = [stem_id_words_tree(k, idparents, stem=tostem, strtok=strtok)\n for k in tree]\n children = [a for b in children for a in b]\n return [Tree(tree.label(), children)]\n\n\ndef pas2toks(pas):\n if not isinstance(pas, tuple):\n return [pas]\n else:\n children = [pas2toks(k) for k in pas[1]]\n ret = [pas[0]] if pas[0] != \"@NAMELESS@\" else []\n ret.insert(0, \"(\")\n for child in children:\n ret += child\n # ret.append(\",\")\n # ret.pop(-1)\n ret.append(\")\")\n return ret\n\ndef tree2toks(tree:Tree):\n if len(tree) == 0:\n return [tree.label()]\n else:\n children = [tree2toks(x) for x in tree]\n ret = [tree.label()] if tree.label() != \"@NAMELESS@\" else []\n ret.insert(0, \"(\")\n for child in children:\n ret += child\n ret.append(\")\")\n return ret\n\n\ndef basic_query_tokenizer(x:str, strtok=None):\n pas = lisp_to_pas(x)\n # idpreds = set(\"_cityid _countryid _stateid _riverid _placeid\".split(\" \"))\n # idpreds = 
set(\"cityid stateid countryid riverid placeid\".split(\" \"))\n idpreds = set()\n pas = stem_id_words(pas, idpreds, strtok=strtok)[0]\n ret = pas2toks(pas)\n return ret\n\n\ndef tree_query_tokenizer(x:Tree, strtok=None):\n idpreds = set()\n _x = stem_id_words_tree(x, idpreds, strtok=strtok)[0]\n ret = tree2toks(_x)\n return ret\n\n\ndef try_basic_query_tokenizer():\n stemmer = PorterStemmer()\n x = \"answer(cityid('new york', _))\"\n y = basic_query_tokenizer(x, strtok=lambda x: [stemmer.stem(xe) for xe in x.split()])\n # print(y)\n\n\nclass LCQuaDnoENTDataset(object):\n def __init__(self,\n p=\"../../datasets/lcquad/\",\n sentence_encoder:SequenceEncoder=None,\n min_freq:int=2,\n splits=None, **kw):\n super(LCQuaDnoENTDataset, self).__init__(**kw)\n self._simplify_filters = True # if True, filter expressions are converted to orderless and-expressions\n self._initialize(p, sentence_encoder, min_freq)\n self.splits_proportions = splits\n\n def lines_to_examples(self, lines:List[str]):\n maxsize_before = 0\n avgsize_before = []\n maxsize_after = 0\n avgsize_after = []\n afterstring = set()\n\n def convert_to_lispstr(_x):\n splits = _x.split()\n assert(sum([1 if xe == \"~\" else 0 for xe in splits]) == 1)\n assert(splits[1] == \"~\")\n splits = [\",\" if xe == \"&\" else xe for xe in splits]\n pstr = f\"{splits[0]} ({' '.join(splits[2:])})\"\n return pstr\n\n ret = []\n ltp = None\n j = 0\n for i, line in enumerate(lines):\n question = line[\"question\"]\n query = line[\"logical_form\"]\n query = convert_to_lispstr(query)\n z, ltp = prolog_to_pas(query, ltp)\n if z is not None:\n ztree = pas_to_tree(z)\n maxsize_before = max(maxsize_before, tree_size(ztree))\n avgsize_before.append(tree_size(ztree))\n lf = ztree\n ret.append((question, lf))\n # print(f\"Example {j}:\")\n # print(ret[-1][0])\n # print(ret[-1][1])\n # print()\n ltp = None\n maxsize_after = max(maxsize_after, tree_size(lf))\n avgsize_after.append(tree_size(lf))\n j += 1\n\n avgsize_before = sum(avgsize_before) / len(avgsize_before)\n avgsize_after = sum(avgsize_after) / len(avgsize_after)\n\n print(\"Sizes ({j} examples):\")\n # print(f\"\\t Max, Avg size before: {maxsize_before}, {avgsize_before}\")\n print(f\"\\t Max, Avg size: {maxsize_after}, {avgsize_after}\")\n\n return ret\n\n def _initialize(self, p, sentence_encoder:SequenceEncoder, min_freq:int):\n self.data = {}\n self.sentence_encoder = sentence_encoder\n\n jp = os.path.join(p, \"lcquad_dataset.json\")\n with open(jp, \"r\") as f:\n examples = ujson.load(f)\n\n examples = self.lines_to_examples(examples)\n\n questions, queries = tuple(zip(*examples))\n trainlen = int(round(0.8 * len(examples)))\n validlen = int(round(0.1 * len(examples)))\n testlen = int(round(0.1 * len(examples)))\n splits = [\"train\"] * trainlen + [\"valid\"] * validlen + [\"test\"] * testlen\n random.seed(1337)\n random.shuffle(splits)\n assert(len(splits) == len(examples))\n\n self.query_encoder = SequenceEncoder(tokenizer=partial(tree_query_tokenizer, strtok=sentence_encoder.tokenizer), add_end_token=True)\n\n # build vocabularies\n for i, (question, query, split) in enumerate(zip(questions, queries, splits)):\n self.sentence_encoder.inc_build_vocab(question, seen=split==\"train\")\n self.query_encoder.inc_build_vocab(query, seen=split==\"train\")\n for word, wordid in self.sentence_encoder.vocab.D.items():\n self.query_encoder.vocab.add_token(word, seen=False)\n self.sentence_encoder.finalize_vocab(min_freq=min_freq)\n self.query_encoder.finalize_vocab(min_freq=min_freq)\n\n 
self.build_data(questions, queries, splits)\n\n def build_data(self, inputs:Iterable[str], outputs:Iterable[str], splits:Iterable[str]):\n maxlen_in, maxlen_out = 0, 0\n eid = 0\n\n gold_map = torch.arange(0, self.query_encoder.vocab.number_of_ids(last_nonrare=False))\n rare_tokens = self.query_encoder.vocab.rare_tokens - set(self.sentence_encoder.vocab.D.keys())\n for rare_token in rare_tokens:\n gold_map[self.query_encoder.vocab[rare_token]] = \\\n self.query_encoder.vocab[self.query_encoder.vocab.unktoken]\n\n for inp, out, split in zip(inputs, outputs, splits):\n inp_tensor, inp_tokens = self.sentence_encoder.convert(inp, return_what=\"tensor,tokens\")\n out_tensor, out_tokens = self.query_encoder.convert(out, return_what=\"tensor,tokens\")\n out_tensor = gold_map[out_tensor]\n\n state = TreeDecoderState([inp], [out],\n inp_tensor[None, :], out_tensor[None, :],\n [inp_tokens], [out_tokens],\n self.sentence_encoder.vocab, self.query_encoder.vocab)\n state.eids = np.asarray([eid], dtype=\"int64\")\n maxlen_in, maxlen_out = max(maxlen_in, len(state.inp_tokens[0])), max(maxlen_out, len(state.gold_tokens[0]))\n if split not in self.data:\n self.data[split] = []\n self.data[split].append(state)\n eid += 1\n self.maxlen_input, self.maxlen_output = maxlen_in, maxlen_out\n\n def get_split(self, split:str):\n splits = split.split(\"+\")\n data = []\n for split in splits:\n data += self.data[split]\n return DatasetSplitProxy(data)\n\n @staticmethod\n def collate_fn(data:Iterable):\n goldmaxlen = 0\n inpmaxlen = 0\n data = [state.make_copy(detach=True, deep=True) for state in data]\n for state in data:\n goldmaxlen = max(goldmaxlen, state.gold_tensor.size(1))\n inpmaxlen = max(inpmaxlen, state.inp_tensor.size(1))\n for state in data:\n state.gold_tensor = torch.cat([\n state.gold_tensor,\n state.gold_tensor.new_zeros(1, goldmaxlen - state.gold_tensor.size(1))], 1)\n state.inp_tensor = torch.cat([\n state.inp_tensor,\n state.inp_tensor.new_zeros(1, inpmaxlen - state.inp_tensor.size(1))], 1)\n ret = data[0].merge(data)\n return ret\n\n def dataloader(self, split:str=None, batsize:int=5):\n if split is None: # return all splits\n ret = {}\n for split in self.data.keys():\n ret[split] = self.dataloader(batsize=batsize, split=split)\n return ret\n else:\n dl = DataLoader(self.get_split(split), batch_size=batsize, shuffle=split in (\"train\", \"train+valid\"),\n collate_fn=type(self).collate_fn)\n return dl\n\n\nclass DatasetSplitProxy(object):\n def __init__(self, data, **kw):\n super(DatasetSplitProxy, self).__init__(**kw)\n self.data = data\n\n def __getitem__(self, item):\n return self.data[item].make_copy()\n\n def __len__(self):\n return len(self.data)\n\n\ndef try_dataset():\n tt = q.ticktock(\"dataset\")\n tt.tick(\"building dataset\")\n ds = LCQuaDnoENTDataset(sentence_encoder=SequenceEncoder(tokenizer=lambda x: x.split()), splits=(80, 10, 10))\n train_dl = ds.dataloader(\"train+valid\", batsize=20)\n test_dl = ds.dataloader(\"test\", batsize=20)\n examples = set()\n examples_list = []\n duplicates = []\n testexamples = set()\n testexamples_list = []\n testduplicates = []\n for b in train_dl:\n for i in range(len(b)):\n example = b.inp_strings[i] + \" --> \" + str(b.gold_trees[i])\n if example in examples:\n duplicates.append(example)\n examples.add(example)\n examples_list.append(example)\n # print(example)\n for b in test_dl:\n for i in range(len(b)):\n example = b.inp_strings[i] + \" --> \" + str(b.gold_trees[i])\n if example in examples:\n testduplicates.append(example)\n 
testexamples.add(example)\n testexamples_list.append(example)\n\n print(f\"duplicates within train: {len(duplicates)} from {len(examples_list)} total\")\n print(f\"duplicates from test to train: {len(testduplicates)} from {len(testexamples_list)} total:\")\n for x in testduplicates:\n print(x)\n tt.tock(\"dataset built\")\n\n\nclass BasicGenModel_VIB(TransitionModel):\n def __init__(self, embdim, hdim, numlayers:int=1, dropout=0.,\n sentence_encoder:SequenceEncoder=None,\n query_encoder:SequenceEncoder=None,\n feedatt=False, store_attn=True,\n vib_init=False, vib_enc=False,\n **kw):\n super(BasicGenModel_VIB, self).__init__(**kw)\n\n inpemb = torch.nn.Embedding(sentence_encoder.vocab.number_of_ids(), embdim, padding_idx=0)\n\n # _, covered_word_ids = load_pretrained_embeddings(inpemb.emb, sentence_encoder.vocab.D,\n # p=\"../../data/glove/glove300uncased\") # load glove embeddings where possible into the inner embedding class\n # inpemb._do_rare(inpemb.rare_token_ids - covered_word_ids)\n self.inp_emb = inpemb\n\n encoder_dim = hdim * 2\n encoder = GRUEncoder(embdim, hdim, num_layers=numlayers, dropout=dropout, bidirectional=True)\n # encoder = q.LSTMEncoder(embdim, *([encoder_dim // 2] * numlayers), bidir=True, dropout_in=dropout)\n self.inp_enc = encoder\n\n decoder_emb = torch.nn.Embedding(query_encoder.vocab.number_of_ids(), embdim, padding_idx=0)\n self.out_emb = decoder_emb\n\n dec_rnn_in_dim = embdim + (encoder_dim if feedatt else 0)\n decoder_rnn = GRUTransition(dec_rnn_in_dim, hdim, numlayers, dropout=dropout)\n self.out_rnn = decoder_rnn\n\n decoder_out = BasicGenOutput(hdim + encoder_dim, vocab=query_encoder.vocab)\n # decoder_out.build_copy_maps(inp_vocab=sentence_encoder.vocab)\n self.out_lin = decoder_out\n\n self.att = q.Attention(q.SimpleFwdAttComp(hdim, encoder_dim, hdim), dropout=min(0.1, dropout))\n\n self.enc_to_dec = torch.nn.ModuleList([torch.nn.Sequential(\n torch.nn.Linear(encoder_dim, hdim),\n torch.nn.Tanh()\n ) for _ in range(numlayers)])\n\n self.feedatt = feedatt\n self.nocopy = True\n\n self.store_attn = store_attn\n\n # VIBs\n self.vib_init = torch.nn.ModuleList([VIB(encoder_dim) for _ in range(numlayers)]) if vib_init else None\n self.vib_enc = VIB_seq(encoder_dim) if vib_enc else None\n\n self.reset_parameters()\n\n def reset_parameters(self):\n def _param_reset(m):\n if type(m) == torch.nn.Linear:\n torch.nn.init.uniform_(m.weight, -0.1, 0.1)\n if m.bias is not None:\n torch.nn.init.uniform_(m.bias, -0.1, 0.1)\n elif type(m) in (torch.nn.LSTM, torch.nn.GRU):\n for name, param in m.named_parameters():\n if \"weight\" in name or \"bias\" in name:\n torch.nn.init.uniform(param, -0.1, 0.1)\n elif type(m) == torch.nn.Embedding:\n torch.nn.init.uniform_(m.weight, -0.1, 0.1)\n torch.nn.init.constant_(m.weight[0], 0)\n # self.apply(_param_reset)\n\n def forward(self, x:State):\n if not \"mstate\" in x:\n x.mstate = State()\n x.mstate.vib = State()\n mstate = x.mstate\n init_states = []\n final_encs_kls = torch.tensor([0])\n if not \"ctx\" in mstate:\n # encode input\n inptensor = x.inp_tensor\n mask = inptensor != 0\n inpembs = self.inp_emb(inptensor)\n # inpembs = self.dropout(inpembs)\n inpenc, final_encs = self.inp_enc(inpembs, mask)\n final_encs = [final_enc[0] for final_enc in final_encs]\n\n if self.vib_init is not None:\n final_encs, final_enc_kls = zip(*[vib(final_enc) for vib, final_enc in zip(self.vib_init, final_encs)])\n mstate.vib.init = final_enc_kls\n if self.vib_enc is not None:\n inpenc, inpenc_kl = self.vib_enc(inpenc, mask)\n mstate.vib.enc = 
inpenc_kl\n init_states = [enctodec(final_enc) for enctodec, final_enc in zip(self.enc_to_dec, final_encs)]\n mstate.ctx = inpenc\n mstate.ctx_mask = mask\n\n ctx = mstate.ctx\n ctx_mask = mstate.ctx_mask\n\n emb = self.out_emb(x.prev_actions)\n\n if not \"rnnstate\" in mstate:\n init_rnn_state = self.out_rnn.get_init_state(emb.size(0), emb.device)\n # uncomment next line to initialize decoder state with last state of encoder\n # init_rnn_state[f\"{len(init_rnn_state)-1}\"][\"c\"] = final_enc\n if len(init_states) == init_rnn_state.h.size(1):\n init_rnn_state.h = torch.stack(init_states, 1).contiguous()\n mstate.rnnstate = init_rnn_state\n\n if \"prev_summ\" not in mstate:\n # mstate.prev_summ = torch.zeros_like(ctx[:, 0])\n mstate.prev_summ = final_encs[-1]\n _emb = emb\n if self.feedatt == True:\n _emb = torch.cat([_emb, mstate.prev_summ], 1)\n enc, new_rnnstate = self.out_rnn(_emb, mstate.rnnstate)\n mstate.rnnstate = new_rnnstate\n\n alphas, summ, scores = self.att(enc, ctx, ctx_mask)\n mstate.prev_summ = summ\n enc = torch.cat([enc, summ], -1)\n\n if self.nocopy is True:\n outs = self.out_lin(enc)\n else:\n outs = self.out_lin(enc, x.inp_tensor, scores)\n outs = (outs,) if not q.issequence(outs) else outs\n # _, preds = outs.max(-1)\n\n if self.store_attn:\n if \"stored_attentions\" not in x:\n x.stored_attentions = torch.zeros(alphas.size(0), 0, alphas.size(1), device=alphas.device)\n x.stored_attentions = torch.cat([x.stored_attentions, alphas.detach()[:, None, :]], 1)\n\n return outs[0], x\n\n\ndef do_rare_stats(ds, sentence_rare_tokens=None, query_rare_tokens=None):\n # how many examples contain rare words, in input and output, in both train and test\n def get_rare_portions(examples:List[State]):\n total = 0\n rare_in_question = 0\n rare_in_query = 0\n rare_in_both = 0\n rare_in_either = 0\n for example in examples:\n total += 1\n question_tokens = example.inp_tokens[0]\n query_tokens = example.gold_tokens[0]\n both = True\n either = False\n _sentence_rare_tokens = example.sentence_vocab.rare_tokens if sentence_rare_tokens is None else sentence_rare_tokens\n if len(set(question_tokens) & _sentence_rare_tokens) > 0:\n rare_in_question += 1\n either = True\n else:\n both = False\n _query_rare_tokens = example.query_vocab.rare_tokens if query_rare_tokens is None else query_rare_tokens\n if len(set(query_tokens) & _query_rare_tokens) > 0:\n either = True\n rare_in_query += 1\n else:\n both = False\n if both:\n rare_in_both += 1\n if either:\n rare_in_either += 1\n return rare_in_question / total, rare_in_query/total, rare_in_both/total, rare_in_either/total\n print(\"RARE STATS:::\")\n print(\"training data:\")\n ris, riq, rib, rie = get_rare_portions(ds.data[\"train\"])\n print(f\"\\t In question: {ris} \\n\\t In query: {riq} \\n\\t In both: {rib} \\n\\t In either: {rie}\")\n print(\"test data:\")\n ris, riq, rib, rie = get_rare_portions(ds.data[\"test\"])\n print(f\"\\t In question: {ris} \\n\\t In query: {riq} \\n\\t In both: {rib} \\n\\t In either: {rie}\")\n return\n\n\ndef tensor2tree(x, D:Vocab=None):\n # x: 1D int tensor\n x = list(x.detach().cpu().numpy())\n x = [D(xe) for xe in x]\n x = [xe for xe in x if xe != D.padtoken]\n\n # find first @END@ and cut off\n parentheses_balance = 0\n for i in range(len(x)):\n if x[i] == D.endtoken:\n x = x[:i]\n break\n elif x[i] == \"(\" or x[i][-1] == \"(\":\n parentheses_balance += 1\n elif x[i] == \")\":\n parentheses_balance -= 1\n else:\n pass\n\n # balance parentheses\n while parentheses_balance > 0:\n x.append(\")\")\n 
parentheses_balance -= 1\n i = len(x) - 1\n while parentheses_balance < 0 and i > 0:\n if x[i] == \")\":\n x.pop(i)\n parentheses_balance += 1\n i -= 1\n\n # convert to nltk.Tree\n try:\n tree, parsestate = lisp_to_tree(\" \".join(x), None)\n except Exception as e:\n tree = None\n return tree\n\n\ndef split_tokenizer(x):\n x = x.replace(\"?\", \" ?\"). \\\n replace(\".\", \" .\"). \\\n replace(\",\", \" ,\"). \\\n replace(\"'\", \" '\")\n x = re.sub(\"\\s+\", \" \", x)\n return x.lower().split()\n\n\ndef run(lr=0.001,\n batsize=50,\n epochs=50,\n embdim=100,\n encdim=100,\n numlayers=1,\n beamsize=1,\n dropout=.2,\n wreg=1e-10,\n cuda=False,\n gpu=0,\n minfreq=3,\n gradnorm=3.,\n cosine_restarts=1.,\n beta=0.001,\n vib_init=True,\n vib_enc=True,\n ):\n localargs = locals().copy()\n print(locals())\n tt = q.ticktock(\"script\")\n device = torch.device(\"cpu\") if not cuda else torch.device(\"cuda\", gpu)\n tt.tick(\"loading data\")\n ds = LCQuaDnoENTDataset(sentence_encoder=SequenceEncoder(tokenizer=split_tokenizer), min_freq=minfreq)\n print(f\"max lens: {ds.maxlen_input} (input) and {ds.maxlen_output} (output)\")\n tt.tock(\"data loaded\")\n\n do_rare_stats(ds)\n # batch = next(iter(train_dl))\n # print(batch)\n # print(\"input graph\")\n # print(batch.batched_states)\n\n model = BasicGenModel_VIB(embdim=embdim, hdim=encdim, dropout=dropout, numlayers=numlayers,\n sentence_encoder=ds.sentence_encoder, query_encoder=ds.query_encoder, feedatt=True,\n vib_init=vib_init, vib_enc=vib_enc)\n\n # sentence_rare_tokens = set([ds.sentence_encoder.vocab(i) for i in model.inp_emb.rare_token_ids])\n # do_rare_stats(ds, sentence_rare_tokens=sentence_rare_tokens)\n losses = [CELoss(ignore_index=0, mode=\"logprobs\")]\n if vib_init:\n losses.append(StatePenalty(lambda state: sum(state.mstate.vib.init), weight=beta))\n if vib_enc:\n losses.append(StatePenalty(\"mstate.vib.enc\", weight=beta))\n\n tfdecoder = SeqDecoder(model, tf_ratio=1.,\n eval=losses + [SeqAccuracies(), TreeAccuracy(tensor2tree=partial(tensor2tree, D=ds.query_encoder.vocab),\n orderless={\"select\", \"count\", \"ask\"})])\n # beamdecoder = BeamActionSeqDecoder(tfdecoder.model, beamsize=beamsize, maxsteps=50)\n if beamsize == 1:\n freedecoder = SeqDecoder(model, maxtime=40, tf_ratio=0.,\n eval=[SeqAccuracies(),\n TreeAccuracy(tensor2tree=partial(tensor2tree, D=ds.query_encoder.vocab),\n orderless={\"select\", \"count\", \"ask\"})])\n else:\n\n freedecoder = BeamDecoder(model, maxtime=30, beamsize=beamsize,\n eval=[SeqAccuracies(),\n TreeAccuracy(tensor2tree=partial(tensor2tree, D=ds.query_encoder.vocab),\n orderless={\"select\", \"count\", \"ask\"})])\n\n # # test\n # tt.tick(\"doing one epoch\")\n # for batch in iter(train_dl):\n # batch = batch.to(device)\n # ttt.tick(\"start batch\")\n # # with torch.no_grad():\n # out = tfdecoder(batch)\n # ttt.tock(\"end batch\")\n # tt.tock(\"done one epoch\")\n # print(out)\n # sys.exit()\n\n # beamdecoder(next(iter(train_dl)))\n\n # print(dict(tfdecoder.named_parameters()).keys())\n\n losses = make_array_of_metrics(\"loss\", \"elem_acc\", \"seq_acc\", \"tree_acc\")\n vlosses = make_array_of_metrics(\"seq_acc\", \"tree_acc\")\n # if beamsize >= 3:\n # vlosses = make_loss_array(\"seq_acc\", \"tree_acc\", \"tree_acc_at3\", \"tree_acc_at_last\")\n # else:\n # vlosses = make_loss_array(\"seq_acc\", \"tree_acc\", \"tree_acc_at_last\")\n\n # trainable_params = tfdecoder.named_parameters()\n # exclude_params = set()\n # exclude_params.add(\"model.model.inp_emb.emb.weight\") # don't train input embeddings 
if doing glove\n # trainable_params = [v for k, v in trainable_params if k not in exclude_params]\n\n # 4. define optim\n # optim = torch.optim.Adam(trainable_params, lr=lr, weight_decay=wreg)\n optim = torch.optim.Adam(tfdecoder.parameters(), lr=lr, weight_decay=wreg)\n\n # lr schedule\n if cosine_restarts >= 0:\n # t_max = epochs * len(train_dl)\n t_max = epochs\n print(f\"Total number of updates: {t_max}\")\n lr_schedule = q.WarmupCosineWithHardRestartsSchedule(optim, 0, t_max, cycles=cosine_restarts)\n reduce_lr = [lambda: lr_schedule.step()]\n else:\n reduce_lr = []\n\n # 6. define training function\n clipgradnorm = lambda: torch.nn.utils.clip_grad_norm_(tfdecoder.parameters(), gradnorm)\n # clipgradnorm = lambda: None\n trainbatch = partial(q.train_batch, on_before_optim_step=[clipgradnorm])\n trainepoch = partial(q.train_epoch, model=tfdecoder, dataloader=ds.dataloader(\"train\", batsize), optim=optim, losses=losses,\n _train_batch=trainbatch, device=device, on_end=reduce_lr)\n\n # 7. define validation function (using partial)\n validepoch = partial(q.test_epoch, model=freedecoder, dataloader=ds.dataloader(\"test\", batsize), losses=vlosses, device=device)\n # validepoch = partial(q.test_epoch, model=freedecoder, dataloader=valid_dl, losses=vlosses, device=device)\n\n # p = q.save_run(freedecoder, localargs, filepath=__file__)\n # q.save_dataset(ds, p)\n # _freedecoder, _localargs = q.load_run(p)\n # _ds = q.load_dataset(p)\n # sys.exit()\n\n # 7. run training\n tt.tick(\"training\")\n q.run_training(run_train_epoch=trainepoch, run_valid_epoch=validepoch, max_epochs=epochs)\n tt.tock(\"done training\")\n\n # testing\n tt.tick(\"testing\")\n testresults = q.test_epoch(model=freedecoder, dataloader=ds.dataloader(\"valid\", batsize), losses=vlosses, device=device)\n print(\"validation test results: \", testresults)\n tt.tock(\"tested\")\n tt.tick(\"testing\")\n testresults = q.test_epoch(model=freedecoder, dataloader=ds.dataloader(\"test\", batsize), losses=vlosses, device=device)\n print(\"test results: \", testresults)\n tt.tock(\"tested\")\n\n # save model?\n tosave = input(\"Save this model? 'y(es)'=Yes, <int>=overwrite previous, otherwise=No) \\n>\")\n if tosave.lower() == \"y\" or tosave.lower() == \"yes\" or re.match(\"\\d+\", tosave.lower()):\n overwrite = int(tosave) if re.match(\"\\d+\", tosave) else None\n p = q.save_run(model, localargs, filepath=__file__, overwrite=overwrite)\n q.save_dataset(ds, p)\n _model, _localargs = q.load_run(p)\n _ds = q.load_dataset(p)\n\n _freedecoder = BeamDecoder(_model, maxtime=50, beamsize=beamsize,\n eval_beam=[TreeAccuracy(tensor2tree=partial(tensor2tree, D=ds.query_encoder.vocab),\n orderless={\"op:and\", \"SW:concat\"})])\n\n # testing\n tt.tick(\"testing reloaded\")\n _testresults = q.test_epoch(model=_freedecoder, dataloader=_ds.dataloader(\"test\", batsize),\n losses=vlosses, device=device)\n print(_testresults)\n assert(testresults == _testresults)\n tt.tock(\"tested\")\n\n\nif __name__ == '__main__':\n # try_basic_query_tokenizer()\n # try_build_grammar()\n # try_dataset()\n q.argprun(run)\n # q.argprun(run_rerank)" ]
[ [ "torch.nn.init.uniform_", "torch.nn.init.uniform", "torch.cat", "numpy.asarray", "torch.nn.init.constant_", "torch.tensor", "torch.nn.Tanh", "torch.nn.Linear", "torch.device", "torch.stack" ] ]
SaiNikhileshReddy/Top_5_Projects
[ "1e754c80af8f5402fa828aad747d79d4edc62fb4" ]
[ "1) ASL Static Hand Gesture Recognition - Deep Learning/HandGesturev2.py" ]
[ "print(\"================================\")\n\nimport tensorflow as tf \nprint(f\"tensorflow : {tf.__version__}\")\nimport numpy as np\nprint(f\"numpy : {np.__version__}\")\nimport cv2\nprint(f\"cv2 : {cv2.__version__}\")\nimport time\n\ncam = cv2.VideoCapture(0)\nprint(f\"Web Camera Initialized\")\nmodel = tf.keras.models.load_model('asl_colab_model')\nprint(f\"Model Loaded\")\nkeys = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'space', 'del', 'nothing']\nprint(f\"Keys Loaded\")\n\nprint(\"================================\")\n\nold_predicted_value = \"\"\n\nwhile(True):\n\n # get frame from the camera\n check , frame = cam.read()\n frame = cv2.flip(frame , 1) # flip vertically to get correct output\n \n scan = cv2.resize(frame[200:400,100:300], (64,64))\n\n images = []\n images.append(scan)\n images = np.array(images)\n images = images.astype('float32')/255\n predicted_answer = keys[int(model.predict_classes(images))]\n\n if old_predicted_value != 'nothing' and predicted_answer == 'nothing':\n \n print(f\"predicted value : {old_predicted_value}\")\n\n cv2.rectangle(frame, (100, 100), (300, 300), (0,255,0), 5)\n\n cv2.putText(frame,'Prediction : '+str(predicted_answer),(75,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),1)\n\n cv2.imshow('frame' , frame)\n \n if predicted_answer != \"nothing\":\n old_predicted_value = predicted_answer\n\n time.sleep(0.1)\n \n key = cv2.waitKey(33)\n if key == 27:\n break\n\nprint(f\"Cam Dimenstion : {frame.shape}\")\nprint(\"================================\")\ncam.release()\ncv2.destroyAllWindows()\nprint(f\"Web Camera Suspended\")\nprint(\"================================\")" ]
[ [ "tensorflow.keras.models.load_model", "numpy.array" ] ]
Totorino02/Numerical-algorithms
[ "2898386f52541612a6a21ceb413d0e55498152f1" ]
[ "eqDiff/runge-kutta-4.py" ]
[ "\"\"\"\n name: runge-kutta-4.py\n goal: numeric solve of differential equations\n author: Dr HOUNSI Madouvi antoine-sebastien\n date: 28/03/2022\n\"\"\"\n\nfrom math import exp, pow\nimport numpy as np\nimport matplotlib.pyplot as pt\nfrom interpolation.lagrange import Lagrange\n\n\nclass RungeKutta4:\n\n def __init__(self):\n self.a = None\n self.b = None\n self.pas = 0.1\n self._getValues()\n result = self._dev(self.a, self.b, self.initial, self.pas)\n vals = np.arange(self.a, self.b+self.pas, self.pas)\n # print(result)\n # print(Lagrange(\"runge-kutta-2.py\").funcLagrange(vals, result, len(result) - 1))\n # Lagrange(\"runge-kutta-2.py\").showC(vals, result)\n pt.scatter(vals, result, label='Courbe Obtenue', c='coral')\n # pt.plot(vals, [-pow(x, 2)+x+2 for x in vals], label='Courbe')\n pt.legend()\n pt.show()\n\n def _getValues(self):\n print(\"Entrez les valeurs des intervalles [a,b]: \")\n try:\n self.a = float(input(\"a:\"))\n self.b = float(input(\"b: \"))\n if self.a >= self.b:\n print(\"votre intervalle n'est pas valide\")\n self._getValues()\n self.initial = float(input(\"Valeur initial X0: \"))\n except ValueError:\n print(\"Données incorrecte\")\n self._getValues()\n\n def func(self, X, Y) -> int:\n # return -0.3 * Y + 2 * exp(X)\n # return -2 * X + 1\n return 2*Y - 3*X\n\n def _dev(self, a, b, X0, pas):\n K1 = 0\n K2 = 0\n K3 = 0\n K4 = 0\n f = X0\n values = list()\n values.append(f)\n val = np.arange(a, b, pas)\n for i in val:\n K1 = self.func(i, f)\n K2 = self.func(i+(pas/2), f+(pas/2)*K1)\n K3 = self.func(i+(pas/2), f+(pas/2)*K2)\n K4 = self.func(i+pas, f + pas*K3)\n f = f + pas/6 * (K1 + 2*K2 + 2*K3 + K4)\n values.append(f)\n return values\n\n\nRungeKutta4()\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter", "matplotlib.pyplot.legend" ] ]
Kage18/Split_Learning
[ "757e45e18797aa790ff7fd3438f2f69fcdc1c599" ]
[ "plotting.py" ]
[ "\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom typing import List\n\n\ndef generate_simple_plot(x: List, y: List, title: str=\"\", x_label: str=\"\",\n y_label: str=\"\", y_lim: List[float]=[0.0, 1.0], save: bool=True,\n fname: str=\"\"):\n fig, ax = plt.subplots()\n ax.plot(x, y)\n\n ax.set(xlabel=x_label, ylabel=y_label, ylim=y_lim, title=title)\n\n if save:\n fig.savefig(\"~/Split_Learning/plots/\" + fname)\n else:\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
AI4SIM/model-collection
[ "4e69558300e78d134d97d5a9665c5d0b717391eb" ]
[ "combustion/unets/utils.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom numpy.random import randint\nfrom typing import Union\n\n\nclass RandomCropper3D():\n \"\"\"Randomly crop a sub-block out of a 3D tensor.\n\n Args:\n out_shape (tuple or int): desired output shape.\n \"\"\"\n\n def __init__(self, out_shape: Union[int, tuple]):\n \"\"\"\n Args:\n out_shape (int | tuple): desired shape (after cropping), expanded to 3D if int.\n \"\"\"\n assert isinstance(out_shape, (int, tuple))\n if isinstance(out_shape, int):\n self.out_shape = (out_shape, out_shape, out_shape)\n else:\n assert len(out_shape) == 3\n self.out_shape = out_shape\n\n def __call__(self, x, y):\n \"\"\"Apply the random cropping to a (x,y) pair.\"\"\"\n h, w, d = x.shape[0], x.shape[1], x.shape[2]\n bh, bw, bd = self.out_shape\n tx = randint(0, h - bh)\n ty = randint(0, w - bw)\n tz = randint(0, d - bd)\n x_cropped = x[tx:tx + bh, ty:ty + bw, tz:tz + bd]\n y_cropped = y[tx:tx + bh, ty:ty + bw, tz:tz + bd]\n return x_cropped, y_cropped\n" ]
[ [ "numpy.random.randint" ] ]
haakonrob/AI-Feynman
[ "445b68e9a260dcea67a94eed6e0aeb267f25d2ef" ]
[ "aifeynman/S_NN_eval.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom torch.utils import data\nimport pickle\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom matplotlib import pyplot as plt\nimport time\n\nis_cuda = torch.cuda.is_available()\n\nbs = 2048\n\nclass MultDataset(data.Dataset):\n def __init__(self, factors, product):\n 'Initialization'\n self.factors = factors\n self.product = product\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.product)\n\n def __getitem__(self, index):\n # Load data and get label\n x = self.factors[index]\n y = self.product[index]\n\n return x, y\n\ndef rmse_loss(pred, targ):\n denom = targ**2\n denom = torch.sqrt(denom.sum()/len(denom))\n\n return torch.sqrt(F.mse_loss(pred, targ))/denom\n\n\ndef NN_eval(pathdir,filename, cuda=False):\n try:\n n_variables = np.loadtxt(pathdir+filename, dtype='str').shape[1]-1\n variables = np.loadtxt(pathdir+filename, usecols=(0,))\n\n if n_variables==0:\n return 0\n elif n_variables==1:\n variables = np.reshape(variables,(len(variables),1))\n else:\n for j in range(1,n_variables):\n v = np.loadtxt(pathdir+filename, usecols=(j,))\n variables = np.column_stack((variables,v))\n\n f_dependent = np.loadtxt(pathdir+filename, usecols=(n_variables,))\n f_dependent = np.reshape(f_dependent,(len(f_dependent),1))\n\n factors = torch.from_numpy(variables[0:int(5*len(variables)/6)])\n if cuda and is_cuda:\n factors = factors.cuda()\n else:\n factors = factors\n factors = factors.float()\n product = torch.from_numpy(f_dependent[0:int(5*len(f_dependent)/6)])\n if cuda and is_cuda:\n product = product.cuda()\n else:\n product = product\n product = product.float()\n\n factors_val = torch.from_numpy(variables[int(5*len(variables)/6):int(len(variables))])\n if cuda and is_cuda:\n factors_val = factors_val.cuda()\n else:\n factors_val = factors_val\n factors_val = factors_val.float()\n product_val = torch.from_numpy(f_dependent[int(5*len(variables)/6):int(len(variables))]) \n if cuda and is_cuda:\n product_val = product_val.cuda()\n else:\n product_val = product_val\n product_val = product_val.float()\n\n class SimpleNet(nn.Module):\n def __init__(self, ni):\n super().__init__()\n self.linear1 = nn.Linear(ni, 128)\n self.linear2 = nn.Linear(128, 128)\n self.linear3 = nn.Linear(128, 64)\n self.linear4 = nn.Linear(64,64)\n self.linear5 = nn.Linear(64,1)\n\n def forward(self, x):\n x = F.tanh(self.linear1(x))\n x = F.tanh(self.linear2(x))\n x = F.tanh(self.linear3(x))\n x = F.tanh(self.linear4(x))\n x = self.linear5(x)\n return x\n\n if cuda and is_cuda:\n model = SimpleNet(n_variables).cuda()\n else:\n model = SimpleNet(n_variables)\n \n model.load_state_dict(torch.load(\"results/NN_trained_models/models/\"+filename+\".h5\"))\n model.eval()\n return(rmse_loss(model(factors_val),product_val),model)\n\n except Exception as e:\n print(e)\n return (100,0)\n\n\n\n\n\n" ]
[ [ "torch.load", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.cuda.is_available", "numpy.column_stack", "numpy.loadtxt" ] ]
orashi/PaintsPytorch
[ "41cf321722a035101758c0717f082d71c12c6cf4", "41cf321722a035101758c0717f082d71c12c6cf4" ]
[ "models/cp_model2.py", "old_train.py" ]
[ "import numpy as np\nimport torch\nimport os\nimport sys\nimport functools\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import init\nimport torch.nn.functional as F\nimport torchvision.models as M\n\n\nclass ResNeXtBottleneck(nn.Module):\n def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):\n super(ResNeXtBottleneck, self).__init__()\n D = out_channels // 2\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=2 + stride, stride=stride, padding=dilate, dilation=dilate,\n groups=cardinality,\n bias=False)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.shortcut = nn.Sequential()\n if stride != 1:\n self.shortcut.add_module('shortcut',\n nn.AvgPool2d(2, stride=2))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n bottleneck = self.conv_expand.forward(bottleneck)\n x = self.shortcut.forward(x)\n return x + bottleneck\n\n\nclass Tunnel(nn.Module):\n def __init__(self, len=1, *args):\n super(Tunnel, self).__init__()\n\n tunnel = [ResNeXtBottleneck(*args) for _ in range(len)]\n self.tunnel = nn.Sequential(*tunnel)\n\n def forward(self, x):\n return self.tunnel(x)\n\n\nclass DilateTunnel(nn.Module):\n def __init__(self, depth=4):\n super(DilateTunnel, self).__init__()\n\n tunnel = [ResNeXtBottleneck(dilate=1) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(dilate=2) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(dilate=4) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(dilate=8) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(dilate=1) for _ in range(14)]\n\n self.tunnel = nn.Sequential(*tunnel)\n\n def forward(self, x):\n return self.tunnel(x)\n\n\nclass def_netG(nn.Module):\n def __init__(self, ngf=32):\n super(def_netG, self).__init__()\n\n self.toH = nn.Sequential(nn.Conv2d(4, ngf, kernel_size=7, stride=1, padding=3), nn.LeakyReLU(0.2, True))\n\n self.to0 = nn.Sequential(nn.Conv2d(1, ngf // 2, kernel_size=3, stride=1, padding=1), # 512\n nn.InstanceNorm2d(ngf // 2),\n nn.LeakyReLU(0.2, True))\n self.to1 = nn.Sequential(nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=1), # 256\n nn.InstanceNorm2d(ngf),\n nn.LeakyReLU(0.2, True))\n self.to2 = nn.Sequential(nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1), # 128\n nn.InstanceNorm2d(ngf * 2),\n nn.LeakyReLU(0.2, True))\n self.to3 = nn.Sequential(nn.Conv2d(ngf * 3, ngf * 4, kernel_size=4, stride=2, padding=1), # 64\n nn.LeakyReLU(0.2, True))\n self.to4 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1), # 32\n nn.LeakyReLU(0.2, True))\n\n tunnel4 = [ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1) for _ in range(20)]\n tunnel4 += [nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)]\n self.tunnel4 = nn.Sequential(*tunnel4)\n\n depth = 2\n tunnel = [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=1) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=2) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=4) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=2),\n 
ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=1)]\n tunnel3 = nn.Sequential(*tunnel)\n\n self.tunnel3 = nn.Sequential(nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, True),\n tunnel3,\n nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)\n )\n\n tunnel = [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=1) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=2) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=4) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=2),\n ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=1)]\n tunnel2 = nn.Sequential(*tunnel)\n\n self.tunnel2 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, True),\n tunnel2,\n nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)\n )\n\n tunnel = [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=1)]\n tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=2)]\n tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=4)]\n tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=2),\n ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=1)]\n tunnel1 = nn.Sequential(*tunnel)\n\n self.tunnel1 = nn.Sequential(nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, True),\n tunnel1,\n nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)\n )\n\n self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x, hint):\n v = self.toH(hint)\n\n x0 = self.to0(x)\n x1 = self.to1(x0)\n x2 = self.to2(x1)\n x3 = self.to3(torch.cat([x2, v], 1))\n x4 = self.to4(x3)\n\n x = self.tunnel4(x4)\n\n x = self.tunnel3(torch.cat([x, x3.detach()], 1))\n x = self.tunnel2(torch.cat([x, x2.detach()], 1))\n x = self.tunnel1(torch.cat([x, x1.detach()], 1))\n x = F.tanh(self.exit(torch.cat([x, x0.detach()], 1)))\n return x\n\n\nclass def_netD(nn.Module):\n def __init__(self, ndf=64):\n super(def_netD, self).__init__()\n\n sequence = [\n nn.Conv2d(4, ndf, kernel_size=7, stride=1, padding=3, bias=False), # 512\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf, kernel_size=4, stride=2, padding=1, bias=False), # 256\n nn.LeakyReLU(0.2, True),\n\n ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False), # 128\n nn.LeakyReLU(0.2, True)\n\n ]\n\n self.feat = nn.Sequential(*sequence)\n\n sequence = [\n ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2),\n nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False), # 64\n nn.LeakyReLU(0.2, True),\n\n ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2),\n nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=1, bias=False), # 32\n nn.LeakyReLU(0.2, True),\n\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 16\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, 
dilate=1, stride=2), # 8\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 4\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=0, bias=False), # 1\n nn.LeakyReLU(0.2, True),\n\n ]\n\n self.post = nn.Sequential(*sequence)\n\n self.out = nn.Linear(512, 1)\n\n def forward(self, color, sketch):\n feat = self.feat(torch.cat([color, sketch], 1))\n out = self.post(feat)\n return self.out(out.view(color.size(0), -1)), feat\n\n\ndef def_netF():\n vgg16 = M.vgg16()\n vgg16.load_state_dict(torch.load('vgg16-397923af.pth'))\n vgg16.features = nn.Sequential(\n *list(vgg16.features.children())[:9]\n )\n for param in vgg16.parameters():\n param.requires_grad = False\n return vgg16.features\n", "import argparse\nimport random\n\nimport scipy.stats as stats\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom tensorboardX import SummaryWriter\nfrom torch.autograd import grad\n\nfrom data.proData import CreateDataLoader\nfrom models.pro_model import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--datarootC', required=True, help='path to colored dataset')\nparser.add_argument('--datarootS', required=True, help='path to sketch dataset')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--batchSize', type=int, default=16, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=512, help='the height / width of the input image to network')\nparser.add_argument('--cut', type=int, default=1, help='cut backup frequency')\nparser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')\nparser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--optim', action='store_true', help='load optimizer\\'s checkpoint')\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')\nparser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. 
Default=2345')\nparser.add_argument('--baseGeni', type=int, default=2500, help='start base of pure pair L1 loss')\nparser.add_argument('--geni', type=int, default=0, help='continue gen image num')\nparser.add_argument('--epoi', type=int, default=0, help='continue epoch num')\nparser.add_argument('--env', type=str, default=None, help='tensorboard env')\nparser.add_argument('--advW', type=float, default=0.01, help='adversarial weight, default=0.01')\nparser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')\nparser.add_argument('--drift', type=float, default=0.001, help='wasserstein drift weight')\nparser.add_argument('--mseW', type=float, default=0.01, help='MSE loss weight')\nparser.add_argument('--MSE', action='store_true', help='enables pure MSE')\nparser.add_argument('--feat', action='store_true', help='enables feat test')\n\nopt = parser.parse_args()\nprint(opt)\n\n####### regular set up\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\ngen_iterations = opt.geni\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n# random seed setup # !!!!!\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\nif opt.cuda:\n torch.cuda.manual_seed(opt.manualSeed)\ncudnn.benchmark = True\n####### regular set up end\n\nwriter = SummaryWriter(log_dir=opt.env, comment='this is great')\n\ndataloader = CreateDataLoader(opt)\n\nnetG = def_netG(ngf=opt.ngf)\nif opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\nprint(netG)\n\nnetD = def_netD(ndf=opt.ndf)\nif opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\nprint(netD)\n\nnetF = def_netF()\nprint(netF)\n\ncriterion_L1 = nn.L1Loss()\ncriterion_MSE = nn.MSELoss()\nL2_dist = nn.PairwiseDistance(2)\none = torch.FloatTensor([1])\nmone = one * -1\n\nfixed_sketch = torch.FloatTensor()\nfixed_hint = torch.FloatTensor()\nsaber = torch.FloatTensor([0.485 - 0.5, 0.456 - 0.5, 0.406 - 0.5]).view(1, 3, 1, 1)\ndiver = torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n\nif opt.cuda:\n netD.cuda()\n netG.cuda()\n netF.cuda()\n fixed_sketch, fixed_hint = fixed_sketch.cuda(), fixed_hint.cuda()\n saber, diver = saber.cuda(), diver.cuda()\n criterion_L1.cuda()\n criterion_MSE.cuda()\n one, mone = one.cuda(), mone.cuda()\n\nif opt.feat:\n netF2 = def_netF2().cuda()\n\n# setup optimizer\noptimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))\noptimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))\n\nif opt.optim:\n optimizerG.load_state_dict(torch.load('%s/optimG_checkpoint.pth' % opt.outf))\n optimizerD.load_state_dict(torch.load('%s/optimD_checkpoint.pth' % opt.outf))\n\n\n# schedulerG = lr_scheduler.ReduceLROnPlateau(optimizerG, mode='max', verbose=True, min_lr=0.0000005,\n# patience=8) # 1.5*10^5 iter\n# schedulerD = lr_scheduler.ReduceLROnPlateau(optimizerD, mode='max', verbose=True, min_lr=0.0000005,\n# patience=8) # 1.5*10^5 iter\n\n\n# schedulerG = lr_scheduler.MultiStepLR(optimizerG, milestones=[60, 120], gamma=0.1) # 1.5*10^5 iter\n# schedulerD = lr_scheduler.MultiStepLR(optimizerD, milestones=[60, 120], gamma=0.1)\n\n\ndef calc_gradient_penalty(netD, real_data, fake_data, sketch):\n alpha = torch.rand(opt.batchSize, 1, 1, 1)\n alpha = alpha.cuda() if opt.cuda else alpha\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n if opt.cuda:\n interpolates = interpolates.cuda()\n interpolates = Variable(interpolates, 
requires_grad=True)\n\n disc_interpolates = netD(interpolates, Variable(sketch))\n\n gradients = grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size()).cuda() if opt.cuda else torch.ones(\n disc_interpolates.size()),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW\n return gradient_penalty\n\n\nflag = 1\nlower, upper = 0, 1\nmu, sigma = 1, 0.005\nmaskS = opt.imageSize // 4\nX = stats.truncnorm(\n (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)\n\nfor epoch in range(opt.niter):\n data_iter = iter(dataloader)\n i = 0\n while i < len(dataloader) - 4:\n ############################\n # (1) Update D network\n ###########################\n for p in netD.parameters(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n for p in netG.parameters():\n p.requires_grad = False # to avoid computation\n\n # train the discriminator Diters times\n Diters = opt.Diters\n\n if gen_iterations < opt.baseGeni: # L2 stage\n Diters = 0\n\n j = 0\n while j < Diters and i < len(dataloader) - 4:\n\n j += 1\n netD.zero_grad()\n\n data = data_iter.next()\n real_cim, real_vim, real_sim = data\n i += 1\n ###############################\n\n if opt.cuda:\n real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()\n\n mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],\n 0).cuda()\n hint = torch.cat((real_vim * mask, mask), 1)\n\n # train with fake\n\n fake_cim = netG(Variable(real_sim, volatile=True), Variable(hint, volatile=True)).data\n errD_fake = netD(Variable(fake_cim), Variable(real_sim)).mean(0).view(1)\n errD_fake.backward(one, retain_graph=True) # backward on score on real\n\n errD_real = netD(Variable(real_cim), Variable(real_sim)).mean(0).view(1)\n errD = errD_real - errD_fake\n\n errD_realer = -1 * errD_real + errD_real.pow(2) * opt.drift\n # additional penalty term to keep the scores from drifting too far from zero\n errD_realer.backward(one, retain_graph=True) # backward on score on real\n\n gradient_penalty = calc_gradient_penalty(netD, real_cim, fake_cim, real_sim)\n gradient_penalty.backward()\n\n #\n # dist = L2_dist(Variable(real_cim).view(opt.batchSize, -1),\n # Variable(fake_cim).view(opt.batchSize, -1)).mean()\n # lip_est = (errD_real - errD_fake).abs() / (dist + 1e-8)\n # lip_loss = opt.gpW * ((1.0 - lip_est) ** 2).mean(0).view(1)\n # lip_loss.backward(one)\n # gradient_penalty = lip_loss\n # above is approximation \n\n optimizerD.step()\n\n ############################\n # (2) Update G network\n ############################\n if i < len(dataloader) - 4:\n if flag: # fix samples\n data = zip(*[data_iter.next() for _ in range(4)])\n real_cim, real_vim, real_sim = [torch.cat(dat, 0) for dat in data]\n i += 1\n\n if opt.cuda:\n real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()\n\n mask = torch.cat(\n [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize * 4)],\n 0).cuda()\n hint = torch.cat((real_vim * mask, mask), 1)\n\n writer.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=4))\n writer.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=4))\n writer.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=4))\n vutils.save_image(real_cim.mul(0.5).add(0.5),\n '%s/color_samples' % opt.outf + '.png')\n 
vutils.save_image(real_sim.mul(0.5).add(0.5),\n '%s/blur_samples' % opt.outf + '.png')\n fixed_sketch.resize_as_(real_sim).copy_(real_sim)\n fixed_hint.resize_as_(hint).copy_(hint)\n\n flag -= 1\n\n for p in netD.parameters():\n p.requires_grad = False # to avoid computation\n for p in netG.parameters():\n p.requires_grad = True # to avoid computation\n netG.zero_grad()\n\n data = data_iter.next()\n real_cim, real_vim, real_sim = data\n i += 1\n\n if opt.cuda:\n real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()\n\n mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],\n 0).cuda()\n hint = torch.cat((real_vim * mask, mask), 1)\n\n fake = netG(Variable(real_sim), Variable(hint))\n\n if opt.MSE:\n MSELoss = criterion_MSE(fake, Variable(real_cim))\n\n errG = MSELoss\n errG.backward()\n contentLoss = MSELoss\n elif opt.feat:\n contentLoss = criterion_MSE(netF2((fake.mul(0.5) - Variable(saber)) / Variable(diver)),\n netF2(Variable((real_cim.mul(0.5) - saber) / diver)))\n MSELoss = criterion_MSE(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),\n netF(Variable((real_cim.mul(0.5) - saber) / diver)))\n\n errG = (contentLoss + MSELoss) * 0.5\n errG.backward()\n elif gen_iterations < opt.baseGeni:\n contentLoss = criterion_MSE(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),\n netF(Variable((real_cim.mul(0.5) - saber) / diver)))\n MSELoss = criterion_MSE(fake, Variable(real_cim))\n\n errG = contentLoss + MSELoss * opt.mseW\n errG.backward()\n\n else:\n errG = netD(fake, Variable(real_sim)).mean(0).view(1) * opt.advW\n errG.backward(mone, retain_graph=True)\n\n contentLoss = criterion_MSE(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),\n netF(Variable((real_cim.mul(0.5) - saber) / diver)))\n MSELoss = criterion_MSE(fake, Variable(real_cim))\n errg = contentLoss + MSELoss * opt.mseW\n errg.backward()\n\n optimizerG.step()\n\n ############################\n # (3) Report & 100 Batch checkpoint\n ############################\n if gen_iterations < opt.baseGeni:\n writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)\n writer.add_scalar('MSE Loss', MSELoss.data[0], gen_iterations)\n print('[%d/%d][%d/%d][%d] content %f '\n % (epoch, opt.niter, i, len(dataloader), gen_iterations, contentLoss.data[0]))\n else:\n writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)\n writer.add_scalar('MSE Loss', MSELoss.data[0], gen_iterations)\n writer.add_scalar('wasserstein distance', errD.data[0], gen_iterations)\n writer.add_scalar('errD_real', errD_real.data[0], gen_iterations)\n writer.add_scalar('errD_fake', errD_fake.data[0], gen_iterations)\n writer.add_scalar('Gnet loss toward real', errG.data[0], gen_iterations)\n writer.add_scalar('gradient_penalty', gradient_penalty.data[0], gen_iterations)\n print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'\n % (epoch, opt.niter, i, len(dataloader), gen_iterations,\n errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], contentLoss.data[0]))\n\n if gen_iterations % 500 == 0:\n fake = netG(Variable(fixed_sketch, volatile=True), Variable(fixed_hint, volatile=True))\n writer.add_image('deblur imgs', vutils.make_grid(fake.data.mul(0.5).add(0.5), nrow=4),\n gen_iterations)\n\n # if gen_iterations % 2000 == 0:\n # for name, param in netG.named_parameters():\n # writer.add_histogram('netG ' + name, param.clone().cpu().data.numpy(), gen_iterations)\n # for name, param in 
netD.named_parameters():\n # writer.add_histogram('netD ' + name, param.clone().cpu().data.numpy(), gen_iterations)\n # vutils.save_image(fake.data.mul(0.5).add(0.5),\n # '%s/fake_samples_gen_iter_%08d.png' % (opt.outf, gen_iterations))\n gen_iterations += 1\n\n # do checkpointing\n if opt.cut == 0:\n torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)\n torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)\n elif epoch % opt.cut == 0:\n torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))\n torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)\n torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)\n" ]
[ [ "torch.nn.Sequential", "torch.load", "torch.cat", "torch.nn.Conv2d", "torch.nn.PixelShuffle", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.InstanceNorm2d", "torch.nn.LeakyReLU", "torch.nn.functional.leaky_relu" ], [ "scipy.stats.truncnorm" ] ]
Yustira/Zelibe-Ugwuanyi-And-Different-Standard-Deviation-Algorithm
[ "a50dff4fa413fc23bc507a2206cdf8468efac379" ]
[ "Zelibe Ugwuanyi.py" ]
[ "import pandas as pd\nimport numpy as np\n\ndef is_balanced(data):\n sum_a = data['Supply'].sum()\n sum_b = data.loc['Demand'].sum()\n if sum_a == sum_b: \n print('Balanced data: ', sum_a)\n pass\n elif sum_a < sum_b:\n print('Unbalanced data: total supply < total demand ({:d} < {:d})'.format(sum_a, sum_b))\n data_T = data.T\n dm = data_T.pop('Demand')\n data_T['Dum'] = np.zeros(len(dm))\n data_T['Demand'] = dm\n data = data_T.T\n data.loc['Dum', 'Supply'] = sum_b - sum_a\n else:\n print('Unbalanced data: total supply > total demand ({:d} > {:d})'.format(sum_a, sum_b))\n Supply = data.pop('Supply').reset_index()\n data['Dum'] = np.zeros(len(data.index))\n data['Supply'] = Supply.Supply.to_numpy()\n data.loc['Demand', 'Dum'] = sum_a - sum_b\n return data\n\ndef penalty(data):\n data['Penalty'] = np.zeros(data.shape[0])\n data.loc['Penalty'] = np.zeros(data.shape[1])\n \n m = data.index[:-2] \n n = data.columns[:-2]\n \n # Row penalty\n if len(n) == 1 and len(m) <= 2:\n for i in m:\n data.loc[i, 'Penalty'] = data.loc[i][:-2].max()\n elif len(n) == 1 and len(m) > 2:\n for i in m:\n data.loc[i, 'Penalty'] = data.loc[i][:-2].max() + len(m)\n elif len(n) == 2 and len(m) <= 2:\n for i in m:\n xj_min = data.loc[i][:-2].min()\n data.loc[i, 'Penalty'] = (data.loc[i][:-2] - xj_min).sum() \n else:\n for i in m:\n xj_min = data.loc[i][:-2].min()\n data.loc[i, 'Penalty'] = (data.loc[i][:-2] - xj_min).sum() + len(m)\n \n # Column penalty\n if len(m) == 1 and len(n) <= 2:\n for j in n:\n data.loc['Penalty', j] = data[j][:-2].max()\n elif len(m) == 1 and len(n) > 2:\n for j in n:\n data.loc['Penalty', j] = data[j][:-2].max() + len(n)\n elif len(m) == 2 and len(n) <= 2:\n for j in n:\n xi_min = data[j][:-2].min()\n data.loc['Penalty', j] = (data[j][:-2] - xi_min).sum()\n else:\n for j in n:\n xi_min = data[j][:-2].min()\n data.loc['Penalty', j] = (data[j][:-2] - xi_min).sum() + len(n)\n \n return data\n\ndef cell_allocation(data):\n rp_max = data['Penalty'][:-2].max()\n cp_max = data.loc['Penalty'][:-2].max()\n \n m = data['Penalty'][:-2][data['Penalty'][:-2] == rp_max].index\n n = data.loc['Penalty'][:-2][data.loc['Penalty'][:-2] == cp_max].index\n \n glc_r = {}\n glc_c = {}\n \n if rp_max == cp_max: \n for i in m:\n glc_r[data.loc[i][:-2].min()] = i\n for j in n:\n glc_c[data[j][:-2].min()] = j \n if max(glc_r) > max(glc_c):\n Ri = glc_r[max(glc_r)]\n Cj = data.loc[Ri][:-2][data.loc[Ri][:-2] == max(glc_r)].index[0]\n else:\n Cj = glc_c[max(glc_c)]\n Ri = data[Cj][:-2][data[Cj][:-2] == max(glc_c)].index[0]\n elif rp_max > cp_max:\n for i in m:\n glc_r[data.loc[i][:-2].min()] = i\n Ri = glc_r[max(glc_r)]\n Cj = data.loc[Ri][:-2][data.loc[Ri][:-2] == max(glc_r)].index[0]\n else:\n for j in n:\n glc_c[data[j][:-2].min()] = j\n Cj = glc_c[max(glc_c)]\n Ri = data[Cj][:-2][data[Cj][:-2] == max(glc_c)].index[0] \n print('Alocation to {:s} and {:s}'.format(Ri, Cj))\n return Ri, Cj\n\ndef cost_allocation(data, Ri, Cj):\n ai = data.loc[Ri, 'Supply']\n bi = data.loc['Demand', Cj]\n if ai > bi:\n cost_val = data.loc['Demand', Cj] * data.loc[Ri, Cj]\n cost.append(cost_val)\n data.loc[Ri, 'Supply'] = data.loc[Ri, 'Supply'] - data.loc['Demand', Cj]\n data.drop(Cj, axis=1, inplace=True)\n else:\n cost_val = data.loc[Ri, 'Supply'] * data.loc[Ri, Cj]\n cost.append(cost_val)\n data.loc['Demand', Cj] = data.loc['Demand', Cj] - data.loc[Ri, 'Supply']\n data.drop(Ri, axis=0, inplace=True)\n print('Cost: {:0.1f}'.format(cost_val))\n return data\n\nprint()\nfn = str(input('Enter file name: '))\ndata = 
pd.read_excel(\"Data/\"+fn+\".xlsx\")\ndata.set_index('Index', inplace=True)\nprint('\\n', data, '\\n')\ndata = is_balanced(data)\nprint()\ncost = []\ni = 0\nwhile True:\n i += 1\n m, n = data.index[:-2], data.columns[:-2]\n print(\"-\"*15)\n print(' ITERATION', i,)\n print(\"-\"*15)\n if len(m) == 1 and len(n) == 1:\n if data['Supply'][:-2][0] == data.loc['Demand'][:-2][0]:\n penalty(data)\n print(data, '\\n')\n cost_val = data.loc['Demand'][:-2][0] * data.loc[m[0]][:-2][0]\n cost.append(cost_val)\n print('Alocation to {:s} and {:s}'.format(m[0], n[0]))\n print('Cost: {:0.1f}'.format(cost_val))\n else:\n print('Error, demand not equal with supply')\n break\n else: \n penalty(data)\n print(data, '\\n')\n Ri, Cj = cell_allocation(data)\n cost_allocation(data, Ri, Cj)\n print()\nprint()\nprint(\"-\"*23)\nprint(' Total cost:', sum(cost))\nprint(\"-\"*23)\n\n" ]
[ [ "pandas.read_excel", "numpy.zeros" ] ]
JasonChu1313/KagglePipeline
[ "c4e2ab1150276dc2f1bffa30e0be8d7f5314b53a" ]
[ "preprocess/PIMP.py" ]
[ "from preprocess.FeatureSelection import FeatureSelection\n\nimport lightgbm as lgb\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score, mean_squared_error\nfrom preprocess.Dataset import Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport seaborn as sns\nimport os\n\nclass PIMP(FeatureSelection):\n def __init__(self, feature_score_name, corr_score_name = 'corr_score.csv', nb_runs=80):\n super(PIMP, self).__init__(feature_score_name)\n self.nb_runs = nb_runs\n self.corr_score_name=corr_score_name\n\n def load_data(self):\n dataset = Dataset('train_agg_id1.csv', 'test_agg_id1.csv')\n self.train_X, self.target, self.test, self.features, self.cate_features = dataset.preprocess(reload=True)\n\n def get_feature_importances(self, data, shuffle, target, seed=2016, train_features = None, categorical_feats = None):\n if train_features == None:\n # Gather real features\n train_features = [f for f in data.columns.values if f not in ['target', 'card_id']]\n # Go over fold and keep track of CV score (train and valid) and feature importances\n\n # Shuffle target if required\n y = target.copy()\n if shuffle:\n # Here you could as well use a binomial distribution\n y = target.copy().sample(frac=1.0)\n\n # Fit LightGBM in RF mode, yes it's quicker than sklearn RandomForest\n dtrain = lgb.Dataset(data[train_features], y, free_raw_data=False, silent=True)\n\n lgb_params = {'num_leaves': 32,\n 'min_data_in_leaf': 20,\n 'objective': 'regression',\n 'max_depth': 4,\n 'learning_rate': 0.005,\n \"min_child_samples\": 20,\n \"boosting\": \"rf\",\n \"feature_fraction\": 0.9,\n \"bagging_freq\": 1,\n \"bagging_fraction\": 0.9,\n \"bagging_seed\": 11,\n \"metric\": 'rmse',\n \"lambda_l1\": 0.1,\n \"nthread\": 4,\n \"verbosity\":-1}\n\n # Fit the model\n clf = lgb.train(params=lgb_params, train_set=dtrain, num_boost_round=200, categorical_feature=categorical_feats)\n\n # Get feature importances\n imp_df = pd.DataFrame()\n imp_df[\"feature\"] = list(train_features)\n imp_df[\"importance_gain\"] = clf.feature_importance(importance_type='gain')\n imp_df[\"importance_split\"] = clf.feature_importance(importance_type='split')\n predictions = clf.predict(data[train_features])\n imp_df['trn_score'] = mean_squared_error(predictions, y) ** 0.5\n\n return imp_df\n\n def permutation_importance(self):\n self.load_data()\n # Seed the unexpected randomness of this world\n np.random.seed(123)\n # Get the actual importance, i.e. 
without shuffling\n self.actual_imp_df = self.get_feature_importances(self.train_X, False, self.target,\n train_features=self.features, categorical_feats= self.cate_features)\n self.null_imp_df = self.get_null_importance()\n score_df,corr_scores_df = self.get_feature_score()\n\n print('successfully calculate feature score, saving feature score df and corr score df ...')\n score_df.to_csv(os.path.join('./feature_score',self.feature_score_name), index = False)\n corr_scores_df.to_csv(os.path.join('./feature_score',self.corr_score_name), index = False)\n print('saving successfully ...')\n\n\n print('saving actual and null importance ...')\n self.save_feature_distribution()\n print('saving successfully ...')\n\n\n def get_null_importance(self):\n null_imp_df = pd.DataFrame()\n\n import time\n start = time.time()\n dsp = ''\n for i in range(self.nb_runs):\n # Get current run importances\n imp_df = self.get_feature_importances(self.train_X, True, self.target,\n train_features=self.features, categorical_feats= self.cate_features)\n imp_df['run'] = i + 1\n # Concat the latest importances with the old ones\n null_imp_df = pd.concat([null_imp_df, imp_df], axis=0)\n # Erase previous message\n for l in range(len(dsp)):\n print('\\b', end='', flush=True)\n # Display current run and time used\n spent = (time.time() - start) / 60\n dsp = 'Done with %4d of %4d (Spent %5.1f min)' % (i + 1, self.nb_runs, spent)\n print(dsp, end='', flush=True)\n return null_imp_df\n\n def get_feature_score(self):\n print(\"calculate feature_score ...\")\n feature_scores = []\n for _f in self.actual_imp_df['feature'].unique():\n f_null_imps_gain = self.null_imp_df.loc[self.null_imp_df['feature'] == _f, 'importance_gain'].values\n f_act_imps_gain = self.actual_imp_df.loc[self.actual_imp_df['feature'] == _f, 'importance_gain'].mean()\n gain_score = np.log(\n 1e-10 + f_act_imps_gain / (1 + np.percentile(f_null_imps_gain, 75))) # Avoid didvide by zero\n f_null_imps_split = self.null_imp_df.loc[self.null_imp_df['feature'] == _f, 'importance_split'].values\n f_act_imps_split = self.actual_imp_df.loc[self.actual_imp_df['feature'] == _f, 'importance_split'].mean()\n split_score = np.log(\n 1e-10 + f_act_imps_split / (1 + np.percentile(f_null_imps_split, 75))) # Avoid didvide by zero\n feature_scores.append((_f, split_score, gain_score))\n\n scores_df = pd.DataFrame(feature_scores, columns=['feature', 'split_score', 'gain_score'])\n\n print(\"calculate correlation score\")\n correlation_scores = []\n for _f in self.actual_imp_df['feature'].unique():\n f_null_imps = self.null_imp_df.loc[self.null_imp_df['feature'] == _f, 'importance_gain'].values\n f_act_imps = self.actual_imp_df.loc[self.actual_imp_df['feature'] == _f, 'importance_gain'].values\n gain_score = 100 * (f_null_imps < np.percentile(f_act_imps, 25)).sum() / f_null_imps.size\n f_null_imps = self.null_imp_df.loc[self.null_imp_df['feature'] == _f, 'importance_split'].values\n f_act_imps = self.actual_imp_df.loc[self.actual_imp_df['feature'] == _f, 'importance_split'].values\n split_score = 100 * (f_null_imps < np.percentile(f_act_imps, 25)).sum() / f_null_imps.size\n correlation_scores.append((_f, split_score, gain_score))\n\n corr_scores_df = pd.DataFrame(correlation_scores, columns=['feature', 'split_score', 'gain_score'])\n\n return scores_df, corr_scores_df\n\n def show_score_df(self):\n if os.path.isfile(os.path.join('./feature_score',self.feature_score_name)):\n score = pd.read_csv(os.path.join('./feature_score',self.feature_score_name))\n\n score_split = 
score.sort_values(by = ['split_score'], ascending=False).reset_index(drop=True)\n score_gain = score.sort_values(by = ['gain_score'], ascending=False).reset_index(drop=True)\n score_both = score.sort_values(by = ['split_score','gain_score'], ascending=False).reset_index(drop=True)\n\n\n corr_score = pd.read_csv(os.path.join('./feature_score',self.corr_score_name))\n\n return score_split, score_gain, score_both, corr_score\n else:\n print(\"no score file exists ...\")\n\n def save_feature_distribution(self):\n self.actual_imp_df.to_csv(os.path.join('./feature_score/null_and_actual_importance','actual_imp_df.csv'))\n self.null_imp_df.to_csv(os.path.join('./feature_score/null_and_actual_importance', 'null_imp_df.csv'))\n\n def plot_feature_score(self, df):\n fig = plt.figure(figsize=(16,16))\n gs = gridspec.GridSpec(1,2)\n ax = plt.subplot(gs[0,0])\n sns.barplot(x='split_score', y = 'feature', data = df.sort_values(by=['split_score'], ascending=False).iloc[0:70], ax= ax)\n ax.set_title('Feature scores wrt split importances', fontweight='bold', fontsize=14)\n ax = plt.subplot(gs[0,1])\n sns.barplot(x='gain_score', y='feature', data=df.sort_values(by=['gain_score'],ascending=False).iloc[0:70],ax=ax)\n ax.set_title('Feature scores wrt gain importances', fontweight='bold', fontsize=14)\n plt.tight_layout()\n plt.suptitle(\"Features' split and gain scores\", fontweight='bold', fontsize=16)\n fig.subplots_adjust(top=0.93)\n\n\n\nif __name__ == \"__main__\":\n pimp = PIMP(\"pimp_score.csv\")\n pimp.permutation_importance()\n score_split, score_gain, score_both, corr_score = pimp.show_score_df()\n print(\"score_split\")\n print(score_split.head())\n print(\"score_gain\")\n print(score_gain.head())\n print(\"score_both\")\n print(score_both.head())\n print(\"corr_score\")\n print(corr_score.head())" ]
[ [ "pandas.concat", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "numpy.percentile", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.figure" ] ]
loedata/OC-DS-P5-Segmentez_Clients_site_e-commerce
[ "fa3ccf6e284ed510867e90a25677bd1fc5c3b572" ]
[ "POLIST_07_outils_test.py" ]
[ "\"\"\" Librairie personnelle pour exécuter des tests de normalité,\n homostéradiscité, ANOVA, Kruskall-Wallis\n\"\"\"\n\n#! /usr/bin/env python3\n# coding: utf-8\n\n# ====================================================================\n# Outil visualisation - projet 3 Openclassrooms\n# Version : 0.0.0 - CRE LR 13/03/2021\n# ====================================================================\n\nfrom scipy.stats import chi2_contingency\nfrom scipy.stats import chi2\nfrom scipy.stats import shapiro, normaltest, anderson\nimport pandas as pd\nfrom IPython.display import display\n\n# --------------------------------------------------------------------\n# -- VERSION\n# --------------------------------------------------------------------\n__version__ = '0.0.0'\n\n# --------------------------------------------------------------------\n# -- TESTS DE NORMALITE\n# --------------------------------------------------------------------\n\n\ndef test_normalite(data):\n \"\"\"\n Test de la normalité d'une distribution.\n Parameters\n ----------\n data : dataframe ou dataframe restreint (une seule variable) obligatoire\n Returns\n -------\n None.\n \"\"\"\n # H0 : la distribution des données est normale (P>0,05)\n # H1 : la distribution des données n'est pas normale (P<0,05)\n\n df_resultat = pd.DataFrame([])\n # Shapiro-Wilk - D'Agostino's K^2\n for f_name, func in zip(\n ['Shapiro-Wilks', \"D'Agostino K^2\"], [shapiro, normaltest]):\n stat, p_val = func(data)\n df_resultat.loc[f_name, 'stat'] = stat\n df_resultat.loc[f_name, 'p_value'] = p_val\n df_resultat.loc[f_name, 'res'] = [p_val > 0.05]\n bool = df_resultat.loc[f_name, 'res']\n print\n if bool:\n df_resultat.loc[f_name,\n 'bilan'] = 'H0 aceptée - distribution normale'\n else:\n df_resultat.loc[f_name,\n 'bilan'] = 'H0 rejetée - distribution non normale'\n\n # Anderson-Darling\n result = anderson(data, dist='norm')\n df_resultat.loc['Anderson-Darling', 'stat'] = result.statistic\n res_and = [(int(result.significance_level[i]), result.statistic < res)\n for i, res in enumerate(result.critical_values)]\n df_resultat.loc['Anderson-Darling', 'res'] = str(res_and)\n display(df_resultat)\n\n\n# --------------------------------------------------------------------\n# -- TESTS D'INDEPENDANCE DE 2 VARIABLES QUALITATIVES\n# --------------------------------------------------------------------\n\n\ndef test_chi2(serie1, serie2):\n \"\"\"\n Test de dépendances de 2 variables qualitatives\n Parameters\n ----------\n serie1 : variable qualitative 1 obligatoire\n serie2 : variable qualitative 2 obligatoire\n Returns\n -------\n None.\n \"\"\"\n alpha = 0.03\n\n # H0 : les variables sont indépendantes\n\n #print('tableau de contingence :\\n', pd.crosstab(serie1.array, serie2.array))\n tab_contingence = pd.crosstab(serie1.array, serie2.array)\n stat_chi2, p_value, dof, expected_table = chi2_contingency(\n tab_contingence.values)\n print('chi2 : {0:.5f}'.format(stat_chi2))\n print('\\np_value : {0:.5f}'.format(p_value))\n print('\\ndof : {0:.5f}\\n'.format(dof))\n critical = chi2.ppf(1 - alpha, dof)\n print('critical : ', critical)\n\n if p_value <= alpha:\n print(\n '\\nVariables non indépendantes (H0 rejetée) car p_value = {} <= alpha = {}'.format(\n p_value,\n alpha))\n else:\n print('\\nH0 non rejetée car p = {} >= alpha = {}'.format(p_value, alpha))\n\n\n# -----------------------------------------------------------------------\n# -- TESTS D'INDEPENDANCE ENTRE 1 VARIABLE QUANTITATIVE ET 1 QUALITATIVE\n# 
-----------------------------------------------------------------------\n\n\ndef test_eta_squared(serie_qualitative, serie_quantitative):\n \"\"\"\n Test de dépendances de 1 variable qualitative et 1 quantitative\n Parameters\n ----------\n serie_qualitative : variable qualitative, obligatoire\n serie_quantitative : variable quantitative, obligatoire\n Returns\n -------\n eta_squared.\n \"\"\"\n moyenne_y = serie_quantitative.mean()\n classes = []\n for classe in serie_qualitative.unique():\n yi_classe = serie_quantitative[serie_qualitative == classe]\n classes.append({'ni': len(yi_classe),\n 'moyenne_classe': yi_classe.mean()})\n SCT = sum([(yj - moyenne_y)**2 for yj in serie_quantitative])\n SCE = sum([c['ni'] * (c['moyenne_classe'] - moyenne_y)**2 for c in classes])\n\n return SCE / SCT\n\n# X = \"categ\" # qualitative\n# Y = \"montant\" # quantitative\n\n# def eta_squared(x,y):\n\n# eta_squared(sous_echantillon[X],sous_echantillon[Y])\n" ]
[ [ "scipy.stats.chi2.ppf", "pandas.crosstab", "scipy.stats.chi2_contingency", "scipy.stats.anderson", "pandas.DataFrame" ] ]
SR42-dev/color-detection-against-white-background
[ "f3db16fc615259dfe3f3a189ad99488944ad0fe3" ]
[ "source/HSVLimitFinder.py" ]
[ "# HSV limit finding from webcam feed\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\ndef empty(a): # argument required\r\n pass\r\n\r\ndef stackImages(scale,imgArray):\r\n rows = len(imgArray)\r\n cols = len(imgArray[0])\r\n rowsAvailable = isinstance(imgArray[0], list)\r\n width = imgArray[0][0].shape[1]\r\n height = imgArray[0][0].shape[0]\r\n if rowsAvailable:\r\n for x in range ( 0, rows):\r\n for y in range(0, cols):\r\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\r\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)\r\n else:\r\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\r\n if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)\r\n imageBlank = np.zeros((height, width, 3), np.uint8)\r\n hor = [imageBlank]*rows\r\n hor_con = [imageBlank]*rows\r\n for x in range(0, rows):\r\n hor[x] = np.hstack(imgArray[x])\r\n ver = np.vstack(hor)\r\n else:\r\n for x in range(0, rows):\r\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\r\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)\r\n else:\r\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\r\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\r\n hor= np.hstack(imgArray)\r\n ver = hor\r\n return ver\r\n\r\n# cap = cv2.VideoCapture(0) # 0 - default webcam\r\n# cap.set(3, 640) # width\r\n# cap.set(4, 480) # height\r\n# cap.set(10, 100) # brightness\r\n\r\ncv2.namedWindow('Trackbars') # Creating trackbars to isolate required color\r\ncv2.resizeWindow('Trackbars', 640, 240)\r\n\r\n# cv2.createTrackbar('H minimum', 'Trackbars', 0, 179, empty) # 180 hues available in opencv (lower and upper limits for trackbars), empty is a function called each time the trackbar is changed\r\n# cv2.createTrackbar('H maximum', 'Trackbars', 179, 179, empty) # initial trackbars for color detection and limit identification\r\n# cv2.createTrackbar('S minimum', 'Trackbars', 0, 255, empty)\r\n# cv2.createTrackbar('S maximum', 'Trackbars', 255, 255, empty)\r\n# cv2.createTrackbar('V minimum', 'Trackbars', 0, 255, empty)\r\n# cv2.createTrackbar('V maximum', 'Trackbars', 255, 255, empty)\r\n\r\ncv2.createTrackbar('H minimum', 'Trackbars', 29, 179, empty) # trackbars for specific colour\r\ncv2.createTrackbar('H maximum', 'Trackbars', 146, 179, empty)\r\ncv2.createTrackbar('S minimum', 'Trackbars', 13, 255, empty)\r\ncv2.createTrackbar('S maximum', 'Trackbars', 93, 255, empty)\r\ncv2.createTrackbar('V minimum', 'Trackbars', 66, 255, empty)\r\ncv2.createTrackbar('V maximum', 'Trackbars', 127, 255, empty)\r\n\r\nwhile True:\r\n\r\n # success, img = cap.read() # <successful execution (boolean)>, <image variable>\r\n img = cv2.imread('testImage.jpg')\r\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # conversion to HSV from BGR\r\n\r\n hMin = cv2.getTrackbarPos('H minimum', 'Trackbars')\r\n hMax = cv2.getTrackbarPos('H maximum', 'Trackbars')\r\n sMin = cv2.getTrackbarPos('S minimum', 'Trackbars')\r\n sMax = cv2.getTrackbarPos('S maximum', 'Trackbars')\r\n vMin = cv2.getTrackbarPos('V minimum', 'Trackbars')\r\n vMax = cv2.getTrackbarPos('V maximum', 'Trackbars')\r\n # print(hMin, hMax, sMin, sMax, vMin, vMax)\r\n\r\n lower = np.array([hMin, sMin, vMin]) # minimum range array\r\n upper = np.array([hMax, sMax, vMax]) # maximum range array\r\n mask = cv2.inRange(imgHSV, lower, upper) # filtering out colours 
from HSV image\r\n imgResult = cv2.bitwise_and(img, img,mask=mask) # adds two images and creates a new one where non black colours on the mask are given colour from the original\r\n\r\n imgStacked = stackImages(0.5, ([img, mask, imgResult]))\r\n\r\n cv2.imshow('Test window', imgStacked)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\nprint()\r\nprint('Required values : ')\r\nprint('hMin, sMin, vMin, hMax, sMax, vMax = ', hMin, ',', sMin, ',', vMin, ',', hMax, ',', sMax, ',', vMax)\r\n" ]
[ [ "numpy.hstack", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
nlvargas/capstone
[ "a74a073468fb1060e4f472722af620c17f887faf" ]
[ "simulation.py" ]
[ "import random\nimport copy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom schedueling import cargar_calendario\n\ndef sigma_dispersion(tabla):\n sigma = np.mean([(tabla[x].puntaje - tabla[x+1].puntaje) for x in range(1,len(tabla)-2)])\n #maxs = np.mean([((tabla[x].puntaje - tabla[x+1].puntaje)-mean)**2 for x in range(1,len(tabla)-2)])\n return sigma\n #return 1/ sum([(tabla[x].puntaje - tabla[x+1].puntaje)/maxs for x in range(1,len(tabla)-2)])\n \n\ndef potencialmente_interesante(a):\n interesantes = []\n for j in range(1, len(a)):\n if 1 < a[0].puntaje - a[j].puntaje <= 3:\n interesantes.append((a[j],\"campeonato\"))\n #print(\"El partido de {} es interesante porque si gana puede quedar primero\".format(a[j].nombre))\n\n elif a[0].puntaje - a[j].puntaje == 1:\n interesantes.append((a[j],\"campeonato\"))\n #print(\"El partido de {} es interesante porque si empata o gana puede quedar primero\".format(a[j].nombre))\n\n elif 1 < a[5].puntaje - a[j].puntaje <= 3:\n interesantes.append((a[j],\"internacional\"))\n #print(\"El partido de {} es interesante porque si gana puede quedar en zona de clasificacion a torneo internacional\".format(a[j].nombre))\n\n elif a[5].puntaje - a[j].puntaje == 1:\n interesantes.append((a[j],\"internacional\"))\n #print(\"El partido de {} es interesante porque si empata o gana puede quedar en zona de clasificacion a torneo internacional\".format(a[j].nombre))\n\n elif 1 < a[13].puntaje - a[j].puntaje <= 3:\n interesantes.append((a[j],\"descenso\"))\n #print(\"El partido de {} es interesante porque si gana puede salir de posicion de descenso\".format(a[j].nombre))\n\n elif a[13].puntaje - a[j].puntaje == 1:\n interesantes.append((a[j],\"descenso\"))\n #print(\"El partido de {} es interesante porque si empata o gana puede salir de posicion de descenso\".format(a[j].nombre))\n return interesantes\n\n\nclass Simulacion:\n def __init__(self, calendario,equipos,montecarlo=False):\n self.calendario = calendario\n self.tabla = []\n self.equipos = copy.deepcopy(equipos)\n self.fecha = 0\n self._results = {}\n self.first_leg = True\n self.montecarlo = montecarlo\n self._pdraw = 0.26 #Dato historico de empates | analisis sensibilidad\n self._localwin = 0.4396 #Probabilidad de que un equipo local gane\n self.epsilon = 1 #Factor que le da mas chances de ganar a A\n self.plot = []\n self.suma_atractividad = 0\n self.atractividad_por_fecha ={}\n\n def guardar_fechas(self):\n with open('calendario.csv','w') as archivo:\n archivo.write('FECHA;LOCAL;VISITA;RESULTADO\\n')\n for fecha in self.calendario:\n for partidos in fecha.partidos:\n with open('calendario.csv','a') as archivo:\n archivo.write('{};{};{};{}\\n'.format(fecha,*partidos.split(\",\"),self._results[int(fecha.numero)][partidos]))\n with open('tabla_final.csv','w') as file:\n file.write('EQUIPO;PUNTAJE;VICTORIAS;EMPATES;DERROTAS\\n')\n for x in self.equipos:\n with open('tabla_final.csv','a') as archivo:\n archivo.write('{};{};{};{};{}\\n'.format(x.nombre,x.puntaje,len(x.victorias),len(x.empates),len(x.derrotas)))\n\n \n def attr_funcion(self,tipo,num):\n data_dic = {\"descenso\":2/15,\"internacional\":1/15,\"campeonato\":0.2}\n if tipo in data_dic:\n if self.fecha > 15:\n return data_dic[tipo]*num\n else:\n return 0\n @property\n def atractividad(self):\n self.equipos.sort(key=lambda x: x.puntaje, reverse = True)\n interesantes = potencialmente_interesante(self.equipos)\n #n =sum(self.attr_funcion(x[1],self.fecha) for x in interesantes)\n a = 0\n for x in interesantes:\n a += 
self.attr_funcion(x[1],self.fecha)\n return a\n \n def agregar_fechas(self,fechas):\n for x in fechas:\n self.calendario.append(x)\n \n def buscar_equipo(self,name): \n for x in self.equipos:\n if x.nombre == name:\n return x\n \n def no_jugados(self,team):\n e_local = self.equipos[:]\n e_visita = self.equipos[:]\n for x in team.partidos_local:\n for y in e_local:\n if x.nombre == y.nombre or team.nombre == y.nombre:\n e_local.remove(y)\n for x in team.partidos_visita:\n for y in e_visita:\n if x.nombre == y.nombre or team.nombre == y.nombre:\n e_visita.remove(y)\n return e_local, e_visita\n\n def add_victoria(self,a,b):\n a.victorias.append(b)\n b.derrotas.append(a)\n\n def p_empate(self):\n if self.first_leg:\n return self._pdraw\n n_empates_actuales = sum([len(x.empates) for x in self.equipos])/2\n n_partidos = len([keys for keys in self._results])*8\n return max(self._pdraw - 0.05 , min(self._pdraw + 0.05, \n n_empates_actuales / n_partidos ))\n\n def p_alpha (self,A):\n if self.first_leg:\n return 0.5\n n_partidos = len([keys for keys in self._results])\n #Tengo que arreglar aca ya que es el numero de partidos ganados como local\n #Esto se debe arreglar\n return max(0.5, len(A.victorias)/n_partidos)\n\n def p_betha (self,A,B):\n if self.first_leg:\n return 0.5\n return (A.rendimiento /(A.rendimiento + B.rendimiento))\n \n def p_delta(self,A,B):\n return (A.presupuesto /(A.presupuesto + B.presupuesto))\n\n def p_gamma (self,A,B):\n return (A.ranking/(A.ranking + B.ranking))\n\n def p_local(self,A,B):\n if self.first_leg:\n factor = self.p_gamma(A,B)+ self.p_delta(A,B)\n p = factor / 2\n\n else:\n factor = 2*(self.p_alpha(A) + self.p_betha(A,B))\n factor += (self.p_gamma(A,B)+ self.p_delta(A,B))\n p = factor / 6\n\n return (1-self.p_empate())* p * self.epsilon\n\n\n def match_ending(self, victory, draw):\n x = random.uniform(0,1)\n if x <= victory:\n return \"LW\"\n elif x > victory and x < (draw + victory):\n return \"D\"\n else:\n return \"AW\" \n\n def evento(self,local,visita):\n empate = self.p_empate()\n victoria_local = self.p_local(local,visita)\n r = self.match_ending(victoria_local,empate)\n \n if r == \"LW\":\n self.add_victoria(local,visita)\n return \"LW\"\n elif r == \"AW\":\n self.add_victoria(visita,local)\n return \"AW\"\n else:\n local.empates.append(visita)\n visita.empates.append(local)\n return \"D\"\n \n def publico(self,local,visita):\n return round(min(local.capacidad*1000\n ,(local.espectadores + min(0.3*local.capacidad*1000\n ,visita.espectadores))))\n \n \n def results(self,match):\n local, away = match.split(\",\")\n \n eqlocal = [x for x in self.equipos if x.nombre == local.strip()]\n eqvis = [x for x in self.equipos if x.nombre == away.strip()]\n resultado = self.evento(*(eqlocal + eqvis))\n eqlocal[0].partidos_local.append(*eqvis)\n eqvis[0].partidos_visita.append(*eqlocal)\n return resultado\n \n def run(self):\n n = int(self.fecha)\n for fechas in self.calendario[n:]:\n self.fecha += 1\n if self.fecha == 16:\n self.first_leg = False\n self._results[fechas.numero]= {}\n for x in fechas.partidos:\n self._results[fechas.numero][x]=self.results(x)\n self.equipos.sort(key=lambda x: x.puntaje, reverse = True)\n self.suma_atractividad += int(self.atractividad)\n self.atractividad_por_fecha[str(self.fecha)] = self.suma_atractividad\n if not self.montecarlo:\n y = np.float(self.suma_atractividad)\n x = np.int(self.fecha)\n self.plot.append((x, y))\n #If simulation finishes then we plot a nice graph\n if self.fecha == 30 and not self.montecarlo:\n for elems in 
self.plot:\n plt.scatter(*elems,color = 'blue')\n plt.ylabel('Atractividad acumulada')\n plt.title('Instancia')\n \n plt.show()\n for fecha in self.calendario:\n print (\"\\n\")\n print (fecha)\n for partido in fecha.partidos:\n print (partido)\n print (\"\\n\")\n self.guardar_fechas()\n \n def show_results(self):\n for x in self.equipos:\n print(x)\n #print x\n print(\"-\"*50)\n #print \"-\"*50 \n #potencialmente_interesante(self.equipos)\n return self.equipos\n \ndef crear_simulacion(calendario,equipos):\n simulation = Simulacion(calendario,equipos)\n simulation.run()\n simulation.show_results()\n result_dict = {}\n for teams in simulation.equipos:\n result_dict[teams.nombre] = teams.puntaje\n return result_dict, simulation\n\ndef simulacion_unica(simulation):\n simulation.run()\n simulation.show_results()\n result_dict = {}\n for teams in simulation.equipos:\n result_dict[teams.nombre] = teams.puntaje\n return result_dict, simulation\n \n\n\n\n" ]
[ [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "numpy.int", "matplotlib.pyplot.show", "numpy.float", "matplotlib.pyplot.ylabel" ] ]
ShichenLiu/CondensedNet
[ "833a91d5f859df25579f70a2439dfd62f7fefb29" ]
[ "main.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport argparse\nimport os\nimport shutil\nimport time\nimport math\nimport warnings\nimport models\nfrom utils import convert_model, measure_model\n\nparser = argparse.ArgumentParser(description='PyTorch Condensed Convolutional Networks')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--model', default='condensenet', type=str, metavar='M',\n help='model to train the dataset')\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=120, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate (default: 0.1)')\nparser.add_argument('--lr-type', default='cosine', type=str, metavar='T',\n help='learning rate strategy (default: cosine)',\n choices=['cosine', 'multistep'])\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum (default: 0.9)')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model (default: false)')\nparser.add_argument('--no-save-model', dest='no_save_model', action='store_true',\n help='only save best model (default: false)')\nparser.add_argument('--manual-seed', default=0, type=int, metavar='N',\n help='manual seed (default: 0)')\nparser.add_argument('--gpu',\n help='gpu available')\n\nparser.add_argument('--savedir', type=str, metavar='PATH', default='results/savedir',\n help='path to save result and checkpoint (default: results/savedir)')\nparser.add_argument('--resume', action='store_true',\n help='use latest checkpoint if have any (default: none)')\n\nparser.add_argument('--stages', type=str, metavar='STAGE DEPTH',\n help='per layer depth')\nparser.add_argument('--bottleneck', default=4, type=int, metavar='B',\n help='bottleneck (default: 4)')\nparser.add_argument('--group-1x1', type=int, metavar='G', default=4,\n help='1x1 group convolution (default: 4)')\nparser.add_argument('--group-3x3', type=int, metavar='G', default=4,\n help='3x3 group convolution (default: 4)')\nparser.add_argument('--condense-factor', type=int, metavar='C', default=4,\n help='condense factor (default: 4)')\nparser.add_argument('--growth', type=str, metavar='GROWTH RATE',\n help='per layer growth')\nparser.add_argument('--reduction', default=0.5, type=float, metavar='R',\n help='transition reduction (default: 0.5)')\nparser.add_argument('--dropout-rate', default=0, type=float,\n help='drop out (default: 0)')\nparser.add_argument('--group-lasso-lambda', default=0., type=float, metavar='LASSO',\n help='group lasso loss weight (default: 0)')\n\nparser.add_argument('--evaluate', action='store_true',\n help='evaluate model on validation set (default: false)')\nparser.add_argument('--convert-from', 
default=None, type=str, metavar='PATH',\n help='path to saved checkpoint (default: none)')\nparser.add_argument('--evaluate-from', default=None, type=str, metavar='PATH',\n help='path to saved checkpoint (default: none)')\n\nargs = parser.parse_args()\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\nargs.stages = list(map(int, args.stages.split('-')))\nargs.growth = list(map(int, args.growth.split('-')))\nif args.condense_factor is None:\n args.condense_factor = args.group_1x1\n\nif args.data == 'cifar10':\n args.num_classes = 10\nelif args.data == 'cifar100':\n args.num_classes = 100\nelse:\n args.num_classes = 1000\n\nwarnings.filterwarnings(\"ignore\")\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\ntorch.manual_seed(args.manual_seed)\ntorch.cuda.manual_seed_all(args.manual_seed)\n\nbest_prec1 = 0\n\n\ndef main():\n global args, best_prec1\n\n ### Calculate FLOPs & Param\n model = getattr(models, args.model)(args)\n print(model)\n if args.data in ['cifar10', 'cifar100']:\n IMAGE_SIZE = 32\n else:\n IMAGE_SIZE = 224\n n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)\n print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))\n args.filename = \"%s_%s_%s.txt\" % \\\n (args.model, int(n_params), int(n_flops))\n del(model)\n print(args)\n\n ### Create model\n model = getattr(models, args.model)(args)\n\n if args.model.startswith('alexnet') or args.model.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n ### Define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n nesterov=True)\n\n ### Optionally resume from a checkpoint\n if args.resume:\n checkpoint = load_checkpoint(args)\n if checkpoint is not None:\n args.start_epoch = checkpoint['epoch'] + 1\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n ### Optionally convert from a model\n if args.convert_from is not None:\n args.evaluate = True\n state_dict = torch.load(args.convert_from)['state_dict']\n model.load_state_dict(state_dict)\n model = model.cpu().module\n convert_model(model, args)\n model = nn.DataParallel(model).cuda()\n head, tail = os.path.split(args.convert_from)\n tail = \"converted_\" + tail\n torch.save({'state_dict': model.state_dict()}, os.path.join(head, tail))\n\n ### Optionally evaluate from a model\n if args.evaluate_from is not None:\n args.evaluate = True\n state_dict = torch.load(args.evaluate_from)['state_dict']\n model.load_state_dict(state_dict)\n\n cudnn.benchmark = True\n\n ### Data loading \n if args.data == \"cifar10\":\n normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],\n std=[0.2471, 0.2435, 0.2616])\n train_set = datasets.CIFAR10('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n val_set = datasets.CIFAR10('../data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ]))\n elif args.data == \"cifar100\":\n normalize = transforms.Normalize(mean=[0.5071, 0.4867, 
0.4408],\n std=[0.2675, 0.2565, 0.2761])\n train_set = datasets.CIFAR100('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n val_set = datasets.CIFAR100('../data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ]))\n else:\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_set = datasets.ImageFolder(traindir, transforms.Compose([\n transforms.RandomSizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n val_set = datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ]))\n\n train_loader = torch.utils.data.DataLoader(\n train_set,\n batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n val_set,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n ### Train for one epoch\n tr_prec1, tr_prec5, loss, lr = \\\n train(train_loader, model, criterion, optimizer, epoch)\n\n ### Evaluate on validation set\n val_prec1, val_prec5 = validate(val_loader, model, criterion)\n\n ### Remember best prec@1 and save checkpoint\n is_best = val_prec1 < best_prec1\n best_prec1 = max(val_prec1, best_prec1)\n model_filename = 'checkpoint_%03d.pth.tar' % epoch\n save_checkpoint({\n 'epoch': epoch,\n 'model': args.model,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer': optimizer.state_dict(),\n }, args, is_best, model_filename, \"%.4f %.4f %.4f %.4f %.4f %.4f\\n\" %\n (val_prec1, val_prec5, tr_prec1, tr_prec5, loss, lr))\n\n ### Convert model and test\n model = model.cpu().module\n convert_model(model, args)\n model = nn.DataParallel(model).cuda()\n print(model)\n validate(val_loader, model, criterion)\n n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)\n print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))\n return\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n learned_module_list = []\n\n ### Switch to train mode\n model.train()\n ### Find all learned convs to prepare for group lasso loss\n for m in model.modules():\n if m.__str__().startswith('LearnedGroupConv'):\n learned_module_list.append(m)\n running_lr = None\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n progress = float(epoch * len(train_loader) + i) / \\\n (args.epochs * len(train_loader))\n args.progress = progress\n ### Adjust learning rate\n lr = adjust_learning_rate(optimizer, epoch, args, batch=i,\n nBatch=len(train_loader), method=args.lr_type)\n if running_lr is None:\n running_lr = lr\n\n ### Measure data loading time\n data_time.update(time.time() - end)\n\n target = target.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n ### Compute output\n output = model(input_var, progress)\n loss = criterion(output, target_var)\n\n ### Add group lasso loss\n 
if args.group_lasso_lambda > 0:\n lasso_loss = 0\n for m in learned_module_list:\n lasso_loss = lasso_loss + m.lasso_loss\n loss = loss + args.group_lasso_lambda * lasso_loss\n\n ### Measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n ### Compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n ### Measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f}\\t' # ({batch_time.avg:.3f}) '\n 'Data {data_time.val:.3f}\\t' # ({data_time.avg:.3f}) '\n 'Loss {loss.val:.4f}\\t' # ({loss.avg:.4f}) '\n 'Prec@1 {top1.val:.3f}\\t' # ({top1.avg:.3f}) '\n 'Prec@5 {top5.val:.3f}\\t' # ({top5.avg:.3f})'\n 'lr {lr: .4f}'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5, lr=lr))\n return 100. - top1.avg, 100. - top5.avg, losses.avg, running_lr\n\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n ### Switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input, volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n\n ### Compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n ### Measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n ### Measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return 100. - top1.avg, 100. 
- top5.avg\n\n\ndef load_checkpoint(args):\n model_dir = os.path.join(args.savedir, 'save_models')\n latest_filename = os.path.join(model_dir, 'latest.txt')\n if os.path.exists(latest_filename):\n with open(latest_filename, 'r') as fin:\n model_filename = fin.readlines()[0]\n else:\n return None\n print(\"=> loading checkpoint '{}'\".format(model_filename))\n state = torch.load(model_filename)\n print(\"=> loaded checkpoint '{}'\".format(model_filename))\n return state\n\n\ndef save_checkpoint(state, args, is_best, filename, result):\n print(args)\n result_filename = os.path.join(args.savedir, args.filename)\n model_dir = os.path.join(args.savedir, 'save_models')\n model_filename = os.path.join(model_dir, filename)\n latest_filename = os.path.join(model_dir, 'latest.txt')\n best_filename = os.path.join(model_dir, 'model_best.pth.tar')\n os.makedirs(args.savedir, exist_ok=True)\n os.makedirs(model_dir, exist_ok=True)\n print(\"=> saving checkpoint '{}'\".format(model_filename))\n with open(result_filename, 'a') as fout:\n fout.write(result)\n torch.save(state, model_filename)\n with open(latest_filename, 'w') as fout:\n fout.write(model_filename)\n if args.no_save_model:\n shutil.move(model_filename, best_filename)\n elif is_best:\n shutil.copyfile(model_filename, best_filename)\n\n print(\"=> saved checkpoint '{}'\".format(model_filename))\n return\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch, args, batch=None,\n nBatch=None, method='cosine'):\n if method == 'cosine':\n T_total = args.epochs * nBatch\n T_cur = (epoch % args.epochs) * nBatch + batch\n lr = 0.5 * args.lr * (1 + math.cos(math.pi * T_cur / T_total))\n elif method == 'multistep':\n if args.data in ['cifar10', 'cifar100']:\n lr, decay_rate = args.lr, 0.1\n if epoch >= args.epochs * 0.75:\n lr *= decay_rate**2\n elif epoch >= args.epochs * 0.5:\n lr *= decay_rate\n else:\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.autograd.Variable", "torch.cuda.manual_seed_all", "torch.nn.DataParallel", "torch.save" ] ]
s-andrews/LipidFinder
[ "c91d6caa8008e0a67188914e48f30913deff888d" ]
[ "LipidFinder/update_params.py" ]
[ "#!/usr/bin/env python\n\n# Copyright (c) 2019 J. Alvarez-Jarreta and C.J. Brasher\n#\n# This file is part of the LipidFinder software tool and governed by the\n# 'MIT License'. Please see the LICENSE file that should have been\n# included as part of this software.\n\"\"\"Transform the old parameters CSV file for PeakFilter and Amalgamator\n(LipidFinder v1.0) to the new parameters JSON files for the same\nmodules. Files \"peakfilter.json\" and \"amalgamator.json\" will be created\nin the given folder or in the current working directory (default).\n\nThe resultant JSON files will be incomplete (some new parameters have\nbeen introduced in the latest release) and will raise an error when used\nas argument for their corresponding module. They must be used as\nargument for \"config_params.py\" first to fill in the missing parameters\nand generate a complete version.\n\"\"\"\n\nimport argparse\nimport ast\nimport json\nimport os\n\nimport pandas\n\nfrom LipidFinder._utils import normalise_path\nfrom LipidFinder._py3k import viewitems\n\n\n# New-to-old parameter name equivalence dict\nPARAMS_DICT = {'firstSampleIndex': 'firstRepOffset',\n 'numSamples': 'numberOfSamples',\n 'numTechReps': 'numberOfTechReps',\n 'numQCReps': 'numberOfQCReps',\n 'numSolventReps': 'numberOfSolventReps',\n 'polarity': 'filePolarityMode',\n 'QCRSD': ['QCLowRSD', 'QCHighRSD'],\n 'removeSolvents': 'removeSolvent',\n 'solventMinFoldDiff': 'solventFoldCutOff',\n 'intenSignifCutOff': 'intensitySignificanceCutOff',\n 'mzFixedError': 'mzFixedError',\n 'mzPPMError': 'mzSizeErrorPPM',\n 'peakMaxRTWidth': 'peakMaxRTWidth',\n 'peakMinFoldDiff': 'peakMinFoldCutOff',\n 'maxRTDiffAdjFrame': 'peakAdjacentFrameMaxRT',\n 'concatAllFrames': 'peakConcatenateAllFrames',\n 'removeContaminants': 'removeContaminant',\n 'removeAdducts': 'removeAdduct',\n 'adductAddition': 'adductAddition',\n 'removeStacks': 'removeStack',\n 'maxStackGap': 'maxStackGap',\n 'lipidStackAddition': 'lipidStackAddition',\n 'intenOutlierCutOff': 'outlierHighIntensityValue',\n 'intensityRSD': ['outlierLowIntensityRSD',\n 'outlierHighIntensityRSD'],\n 'featMassAssignment': 'featureLevelMassAssignment',\n 'negAdductsPairs': 'negativeModeAdductPairs',\n 'posAdductsPairs': 'positiveModeAdductPairs',\n 'outlierMinDiff': 'broadContsdMult',\n 'minNonZeroPoints': 'broadContminPoints',\n 'intenRSDCutOff': 'broadContRSDCutOff',\n 'rtSDCutOff': 'broadContrtSDCutOff',\n 'rtRange': ['retentionTimeLowCutOff',\n 'retentionTimeHighCutOff'],\n 'intensityStDev': 'rtCorrectStDev',\n 'correctRTMeans': 'rtCorrectMeans'\n }\n\n\ndef _adduct_rename(adducts, index):\n # type: (pandas.DataFrame, int) -> str\n \"\"\"Return LipidFinder v2.0 name for the selected adduct: some\n adducts have been renamed to follow the standard nomenclature.\n\n Keyword parameters:\n adducts -- LipidFinder v1.0 adducts dataframe\n index -- row index\n \"\"\"\n name = adducts.iloc[index, 1]\n if (name == 'M+AcO-'):\n name = 'M+OAc'\n elif (name == 'M+Cl-'):\n name = 'M+Cl'\n return name\n\n\ndef main():\n # Create the argument parser and parse the arguments\n parser = argparse.ArgumentParser(\n description=(\n \"Convert LipidFinder's old parameters CSV file to the new \"\n \"JSON format: 'peakfilter.json' and 'amalgamator.json' \"\n \"files will be created.\"))\n parser.add_argument('-p', '--params', required=True, type=str,\n help=\"LipidFinder v1.0 parameters CSV file\")\n parser.add_argument('-a', '--adducts', metavar='FILE', required=True,\n type=str, help=\"LipidFinder v1.0 adducts CSV file\")\n 
parser.add_argument('-o', '--output', metavar='DIR', type=str,\n help=\"folder where the parameter files will be saved\")\n parser.add_argument('--version', action='version',\n version=\"LipidFinder 2.0\")\n args = parser.parse_args()\n # Check if the output directory exists. If not, create it.\n dst = args.output if (args.output) else '.'\n dst = normalise_path(dst)\n if (not os.path.isdir(dst)):\n os.makedirs(dst)\n # Read old parameters CSV file and generate a dict\n oldParams = pandas.read_csv(args.params, index_col='Parameter name',\n usecols=['Parameter name', 'Current value'])\n oldParams = oldParams.to_dict()['Current value']\n # Use the name equivalence dict to create the new parameters dict\n pfParams = {}\n for key, value in viewitems(PARAMS_DICT):\n if (isinstance(value, list)):\n pfParams[key] = [ast.literal_eval(oldParams[x]) for x in value]\n else:\n # literal_eval() raises an exception when parsing strings\n try:\n pfParams[key] = ast.literal_eval(oldParams[value])\n except ValueError:\n if (oldParams[value] == 'N'):\n pfParams[key] = 'Negative'\n elif (oldParams[value] == 'P'):\n pfParams[key] = 'Positive'\n else:\n # String containing a boolean value\n pfParams[key] = oldParams[value] == 'TRUE'\n # Correct starting index in \"firstSampleIndex\" from 0 to 1\n pfParams['firstSampleIndex'] += 1\n # Ensure float type parameters have float values\n floatParams = ['solventMinFoldDiff', 'mzFixedError', 'mzPPMError',\n 'peakMaxRTWidth', 'peakMinFoldDiff',\n 'maxRTDiffAdjFrame', 'intensityStDev']\n for key in floatParams:\n pfParams[key] = float(pfParams[key])\n pfParams['rtRange'][0] = float(pfParams['rtRange'][0])\n pfParams['rtRange'][1] = float(pfParams['rtRange'][1])\n # Replace indexing by actual adduct names\n adducts = pandas.read_csv(args.adducts, index_col='index_do_not_alter')\n for pair in pfParams['negAdductsPairs']:\n pair[0] = _adduct_rename(adducts, pair[0])\n pair[1] = _adduct_rename(adducts, pair[1])\n for pair in pfParams['posAdductsPairs']:\n pair[0] = _adduct_rename(adducts, pair[0])\n pair[1] = _adduct_rename(adducts, pair[1])\n # \"rtTolMultipler\" parameter removed in version 2.0, so apply it\n # directly to \"maxRTDiffAdjFrame\" parameter as it was done\n pfParams['maxRTDiffAdjFrame'] = \\\n (pfParams['maxRTDiffAdjFrame']\n * ast.literal_eval(oldParams['rtTolMultipler']))\n # Create the new PeakFilter parameters JSON file\n with open(os.path.join(dst, 'peakfilter.json'), 'w') as paramsFile:\n json.dump(pfParams, paramsFile, indent=4)\n # Set Amalgamator parameters based on PeakFilter's ones\n amalParams = {'numSamples': pfParams['numSamples'],\n 'firstSampleIndex': pfParams['firstSampleIndex'],\n 'mzFixedError': pfParams['mzFixedError'],\n 'mzPPMError': pfParams['mzPPMError'],\n 'maxRTDiffAdjFrame': pfParams['maxRTDiffAdjFrame']}\n # Create the new Amalgamator parameters JSON file\n with open(os.path.join(dst, 'amalgamator.json'), 'w') as paramsFile:\n json.dump(amalParams, paramsFile, indent=4)\n\nif (__name__ == \"__main__\"):\n main()\n" ]
[ [ "pandas.read_csv" ] ]
LiuFG/UpdatingHDmapByMonoCamera
[ "68e549661f6e583d09448bd0a0b122a6dc2e9fc9" ]
[ "detection/mmdetection/mmdet/apis/inference.py" ]
[ "import warnings\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nimport torch\nfrom mmcv.runner import load_checkpoint\n\nfrom mmdet.core import get_classes\nfrom mmdet.datasets import to_tensor\nfrom mmdet.datasets.transforms import ImageTransform\nfrom mmdet.models import build_detector\n\n\ndef init_detector(config, checkpoint=None, device='cuda:0'):\n \"\"\"Initialize a detector from config file.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n\n Returns:\n nn.Module: The constructed detector.\n \"\"\"\n if isinstance(config, str):\n config = mmcv.Config.fromfile(config)\n elif not isinstance(config, mmcv.Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n config.model.pretrained = None\n model = build_detector(config.model, test_cfg=config.test_cfg)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint)\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n warnings.warn('Class names are not saved in the checkpoint\\'s '\n 'meta data, use COCO classes by default.')\n model.CLASSES = get_classes('coco')\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model\n\n\ndef inference_detector(model, imgs):\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str/ndarray or list[str/ndarray]): Either image files or loaded\n images.\n\n Returns:\n If imgs is a str, a generator will be returned, otherwise return the\n detection results directly.\n \"\"\"\n cfg = model.cfg\n img_transform = ImageTransform(\n size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)\n\n device = next(model.parameters()).device # model device\n if not isinstance(imgs, list):\n return _inference_single(model, imgs, img_transform, device)\n else:\n return _inference_generator(model, imgs, img_transform, device)\n\n\ndef _prepare_data(img, img_transform, cfg, device):\n ori_shape = img.shape\n img, img_shape, pad_shape, scale_factor = img_transform(\n img,\n scale=cfg.data.test.img_scale,\n keep_ratio=cfg.data.test.get('resize_keep_ratio', True))\n img = to_tensor(img).to(device).unsqueeze(0)\n img_meta = [\n dict(\n ori_shape=ori_shape,\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flip=False)\n ]\n return dict(img=[img], img_meta=[img_meta])\n\n\ndef _inference_single(model, img, img_transform, device):\n img = mmcv.imread(img)\n data = _prepare_data(img, img_transform, model.cfg, device)\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n return result\n\n\ndef _inference_generator(model, imgs, img_transform, device):\n for img in imgs:\n yield _inference_single(model, img, img_transform, device)\n\n\n# TODO: merge this method with the one in BaseDetector\ndef show_result(img,\n result,\n class_names,\n score_thr=0.3,\n wait_time=0,\n out_file=None):\n \"\"\"Visualize the detection results on the image.\n\n Args:\n img (str or np.ndarray): Image filename or loaded image.\n result (tuple[list] or list): The detection result, can be either\n (bbox, segm) or just bbox.\n class_names (list[str] or tuple[str]): A list of class names.\n score_thr (float): The threshold to visualize the bboxes and masks.\n wait_time (int): Value of waitKey param.\n 
out_file (str, optional): If specified, the visualization result will\n be written to the out file instead of shown in a window.\n \"\"\"\n assert isinstance(class_names, (tuple, list))\n img = mmcv.imread(img)\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n bboxes = np.vstack(bbox_result)\n # draw segmentation masks\n if segm_result is not None:\n segms = mmcv.concat_list(segm_result)\n inds = np.where(bboxes[:, -1] > score_thr)[0]\n for i in inds:\n # color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)\n color_mask = np.array([[123, 72, 208]], dtype=np.uint8)\n print(type(color_mask), color_mask)\n mask = maskUtils.decode(segms[i]).astype(np.bool)\n img[mask] = img[mask] * 0.5 + color_mask * 0.5\n # draw bounding boxes\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n mmcv.imshow_det_bboxes(\n img.copy(),\n bboxes,\n labels,\n class_names=class_names,\n score_thr=score_thr,\n show=out_file is None,\n wait_time=wait_time,\n out_file=out_file)\n" ]
[ [ "numpy.full", "numpy.concatenate", "torch.no_grad", "numpy.array", "numpy.where", "numpy.vstack" ] ]
TheChief/charly25lc_test
[ "ee64e9c7568eda5d2f030eba24c17954c345d52f" ]
[ "scripts/setup.py" ]
[ "from setuptools import setup\nimport os\nimport py2exe\nimport matplotlib\n\nincludes = [\n 'sip',\n 'PyQt5',\n 'PyQt5.uic',\n 'PyQt5.QtCore',\n 'PyQt5.QtDesigner',\n 'PyQt5.QtGui',\n 'PyQt5.QtNetwork',\n 'PyQt5.QtMultimedia',\n 'PyQt5.QtPrintSupport',\n 'PyQt5.QtWebSockets',\n 'PyQt5.QtWidgets',\n 'PyQt5.QtXml',\n 'numpy',\n 'matplotlib.backends.backend_qt5agg',\n 'smithplot'\n]\n\nsetup(\n windows = [{'script': 'exec.py'}],\n data_files = matplotlib.get_py2exe_datafiles() + [\n ('', ['c:\\\\Python34\\\\Lib\\\\site-packages\\\\PyQt5\\\\Qt5DesignerComponents.dll', 'c:\\\\Python34\\\\Lib\\\\site-packages\\\\PyQt5\\\\designer.exe']),\n ('platforms', ['c:\\\\Python34\\\\Lib\\\\site-packages\\\\PyQt5\\\\plugins\\\\platforms\\\\qwindows.dll'])\n ],\n options = {\n 'py2exe':{\n 'includes': includes,\n 'bundle_files': 3,\n 'compressed': True\n }\n }\n)\n" ]
[ [ "matplotlib.get_py2exe_datafiles" ] ]
rizwandel/finetuner
[ "7fef9df6b5101d19a4fd710084d54b5be45dc5d5" ]
[ "tests/unit/tailor/test_keras.py" ]
[ "import pytest\nimport tensorflow as tf\nimport numpy as np\n\nfrom finetuner.tailor.keras import KerasTailor\n\n\n@pytest.fixture\ndef dense_model():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=(128,))) # (None, 128)\n model.add(tf.keras.layers.Dense(128, activation='relu')) # (None, 128)\n model.add(tf.keras.layers.Dense(64, activation='relu')) # (None, 64)\n model.add(tf.keras.layers.Dense(32, activation='relu')) # (None, 32)\n model.add(tf.keras.layers.Dense(10, activation='softmax')) # (None, 10)\n return model\n\n\n@pytest.fixture\ndef simple_cnn_model():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=(28, 28, 1)))\n model.add(tf.keras.layers.Conv2D(32, 3, (1, 1), activation='relu'))\n model.add(tf.keras.layers.Conv2D(64, 3, (1, 1), activation='relu'))\n model.add(tf.keras.layers.MaxPool2D(2))\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128))\n model.add(tf.keras.layers.Dropout(0.25))\n model.add(tf.keras.layers.Dense(10, activation='softmax'))\n return model\n\n\n@pytest.fixture\ndef vgg16_cnn_model():\n return tf.keras.applications.vgg16.VGG16(weights=None)\n\n\n@pytest.fixture\ndef stacked_lstm():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Embedding(1000, 1024, input_length=128))\n model.add(\n tf.keras.layers.LSTM(256, return_sequences=True)\n ) # this layer will not considered as candidate layer\n model.add(tf.keras.layers.LSTM(256, return_sequences=True))\n model.add(\n tf.keras.layers.LSTM(256, return_sequences=False)\n ) # this layer will be considered as candidate layer\n model.add(tf.keras.layers.Dense(256, activation='relu'))\n model.add(tf.keras.layers.Dense(5, activation='softmax'))\n return model\n\n\n@pytest.fixture\ndef bidirectional_lstm():\n return tf.keras.Sequential(\n [\n tf.keras.layers.Embedding(input_dim=5000, output_dim=64),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),\n tf.keras.layers.Dense(32),\n ]\n )\n\n\n@pytest.fixture(\n params=[\n 'dense_model',\n 'simple_cnn_model',\n 'vgg16_cnn_model',\n 'stacked_lstm',\n 'bidirectional_lstm',\n ]\n)\ndef model(request):\n return request.getfixturevalue(request.param)\n\n\n@pytest.mark.parametrize(\n 'model, layer_name',\n [\n ('dense_model', 'random_name'),\n ('simple_cnn_model', 'random_name'),\n ('vgg16_cnn_model', 'random_name'),\n ('stacked_lstm', 'random_name'),\n ('bidirectional_lstm', 'random_name'),\n ],\n indirect=['model'],\n)\ndef test_trim_fail_given_unexpected_layer_name(model, layer_name):\n with pytest.raises(KeyError):\n keras_tailor = KerasTailor(model)\n keras_tailor.to_embedding_model(layer_name=layer_name)\n\n\n@pytest.mark.parametrize(\n 'model, layer_name, expected_output_shape',\n [\n ('dense_model', 'dense_3', (None, 10)),\n ('simple_cnn_model', 'dropout_1', (None, 128)),\n ('vgg16_cnn_model', 'fc2', (None, 4096)),\n ('stacked_lstm', 'dense', (None, 256)),\n ('bidirectional_lstm', 'dense', (None, 32)),\n ('dense_model', None, (None, 10)),\n ('simple_cnn_model', None, (None, 10)),\n ('vgg16_cnn_model', None, (None, 1000)),\n ('stacked_lstm', None, (None, 5)),\n ('bidirectional_lstm', None, (None, 32)),\n ],\n indirect=['model'],\n)\ndef test_to_embedding_model(model, layer_name, expected_output_shape):\n keras_tailor = KerasTailor(model)\n model = keras_tailor.to_embedding_model(layer_name=layer_name)\n assert model.output_shape == expected_output_shape\n\n\ndef 
test_weights_preserved_given_pretrained_model(vgg16_cnn_model):\n weights = vgg16_cnn_model.layers[0].get_weights()\n keras_tailor = KerasTailor(vgg16_cnn_model)\n vgg16_cnn_model = keras_tailor.to_embedding_model(layer_name='fc2')\n weights_after_convert = vgg16_cnn_model.layers[0].get_weights()\n np.testing.assert_array_equal(weights, weights_after_convert)\n\n\n@pytest.mark.parametrize(\n 'model',\n [\n 'dense_model',\n 'simple_cnn_model',\n 'vgg16_cnn_model',\n 'stacked_lstm',\n 'bidirectional_lstm',\n ],\n indirect=['model'],\n)\n@pytest.mark.parametrize('freeze', [True, False])\ndef test_freeze(model, freeze):\n keras_tailor = KerasTailor(model)\n for layer in model.layers:\n assert layer.trainable\n model = keras_tailor.to_embedding_model(freeze=freeze)\n for idx, layer in enumerate(model.layers):\n if freeze:\n assert not layer.trainable\n else:\n assert layer.trainable\n\n\ndef test_freeze_given_bottleneck_model_and_freeze_is_true(simple_cnn_model):\n def _create_bottleneck_model():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=(128,)))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n return model\n\n paddle_tailor = KerasTailor(\n model=simple_cnn_model,\n input_size=(28, 28, 1),\n input_dtype='float32',\n )\n\n model = paddle_tailor.to_embedding_model(\n freeze=True, layer_name='dropout_1', bottleneck_net=_create_bottleneck_model()\n )\n # assert bottleneck model is not freezed\n for layer in model.layers:\n if layer.name == 'dense_2':\n assert layer.trainable == True\n else:\n assert layer.trainable == False\n\n\n@pytest.mark.parametrize(\n 'model, layer_name, input_size, input_dtype, freeze_layers',\n [\n ('dense_model', 10, (128,), 'float32', ['linear_1', 'linear_5']),\n ('simple_cnn_model', 2, (1, 28, 28), 'float32', ['conv2d_1', 'maxpool2d_5']),\n (\n 'vgg16_cnn_model',\n 4,\n (3, 224, 224),\n 'float32',\n ['conv2d_27', 'maxpool2d_31', 'adaptiveavgpool2d_32'],\n ),\n ('stacked_lstm', 10, (128,), 'int64', ['linear_layer_1', 'linear_layer_2']),\n ('bidirectional_lstm', 5, (128,), 'int64', ['lastcell_3', 'linear_4']),\n ],\n indirect=['model'],\n)\ndef test_freeze_given_freeze_layers(\n model, layer_name, input_size, input_dtype, freeze_layers\n):\n pytorch_tailor = KerasTailor(\n model=model,\n input_size=input_size,\n input_dtype=input_dtype,\n )\n model = pytorch_tailor.to_embedding_model(\n freeze=freeze_layers,\n )\n for layer, param in zip(pytorch_tailor.embedding_layers, model.layers):\n layer_name = layer['name']\n if layer_name in freeze_layers:\n assert param.trainable == False\n else:\n assert param.trainable == True\n\n\ndef test_keras_model_parser():\n user_model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28), name='l1'),\n tf.keras.layers.Dense(128, activation='relu', name='l2'),\n tf.keras.layers.Dense(32, name='l3'),\n ]\n )\n\n keras_tailor = KerasTailor(user_model)\n\n r = keras_tailor.embedding_layers\n assert len(r) == 3\n assert r[0]['name'] == 'l1'\n assert r[1]['name'] == 'l2'\n assert r[2]['name'] == 'l3'\n\n assert r[0]['output_features'] == 784\n assert r[0]['nb_params'] == 0\n\n assert r[1]['output_features'] == 128\n assert r[1]['nb_params'] == 100480\n\n assert r[2]['output_features'] == 32\n assert r[2]['nb_params'] == 4128\n\n\ndef test_attach_bottleneck_layer(vgg16_cnn_model):\n def _create_bottleneck_model():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=(4096,)))\n model.add(tf.keras.layers.Dense(1024, 
activation='relu'))\n model.add(tf.keras.layers.Dense(512, activation='softmax'))\n return model\n\n keras_tailor = KerasTailor(\n model=vgg16_cnn_model,\n input_size=(224, 224, 3),\n input_dtype='float32',\n )\n tailed_model = keras_tailor.to_embedding_model(\n layer_name='fc1', freeze=False, bottleneck_net=_create_bottleneck_model()\n )\n assert list(tailed_model.output.shape) == ([None, 512])\n" ]
[ [ "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.keras.applications.vgg16.VGG16", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.InputLayer", "tensorflow.keras.layers.MaxPool2D", "numpy.testing.assert_array_equal", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten" ] ]