repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
zyjia/tensorlayer-chinese
|
[
"f55986e9ef3aaf0f59dae8e4e7a84812868bce33"
] |
[
"tensorlayer/models/squeezenetv1.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nSqueezeNet for ImageNet.\n\"\"\"\n\nimport os\n# import numpy as np\nimport tensorflow as tf\nfrom .. import _logging as logging\nfrom ..layers import (Layer, Conv2d, InputLayer, MaxPool2d, ConcatLayer, DropoutLayer, GlobalMeanPool2d)\nfrom ..files import maybe_download_and_extract, assign_params, load_npz\n\n__all__ = [\n 'SqueezeNetV1',\n]\n\n\nclass SqueezeNetV1(Layer):\n \"\"\"Pre-trained SqueezeNetV1 model.\n\n Parameters\n ------------\n x : placeholder\n shape [None, 224, 224, 3], value range [0, 255].\n end_with : str\n The end point of the model [input, fire2, fire3 ... fire9, output]. Default ``output`` i.e. the whole model.\n is_train : boolean\n Whether the model is used for training i.e. enable dropout.\n reuse : boolean\n Whether to reuse the model.\n\n Examples\n ---------\n Classify ImageNet classes, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_squeezenetv1.py>`__\n\n >>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])\n >>> # get the whole model\n >>> net = tl.models.SqueezeNetV1(x)\n >>> # restore pre-trained parameters\n >>> sess = tf.InteractiveSession()\n >>> net.restore_params(sess)\n >>> # use for inferencing\n >>> probs = tf.nn.softmax(net.outputs)\n\n Extract features and Train a classifier with 100 classes\n\n >>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])\n >>> # get model without the last layer\n >>> cnn = tl.models.SqueezeNetV1(x, end_with='fire9')\n >>> # add one more layer\n >>> net = Conv2d(cnn, 100, (1, 1), (1, 1), padding='VALID', name='output')\n >>> net = GlobalMeanPool2d(net)\n >>> # initialize all parameters\n >>> sess = tf.InteractiveSession()\n >>> tl.layers.initialize_global_variables(sess)\n >>> # restore pre-trained parameters\n >>> cnn.restore_params(sess)\n >>> # train your own classifier (only update the last layer)\n >>> train_params = tl.layers.get_variables_with_name('output')\n\n Reuse model\n\n >>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])\n >>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])\n >>> # get model without the last layer\n >>> net1 = tl.models.SqueezeNetV1(x1, end_with='fire9')\n >>> # reuse the parameters with different input\n >>> net2 = tl.models.SqueezeNetV1(x2, end_with='fire9', reuse=True)\n >>> # restore pre-trained parameters (as they share parameters, we don’t need to restore net2)\n >>> sess = tf.InteractiveSession()\n >>> net1.restore_params(sess)\n\n \"\"\"\n\n def __init__(self, x, end_with='output', is_train=False, reuse=None):\n\n self.net = self.squeezenetv1(x, end_with, is_train, reuse)\n self.outputs = self.net.outputs\n self.all_params = self.net.all_params\n self.all_layers = self.net.all_layers\n self.all_drop = self.net.all_drop\n self.print_layers = self.net.print_layers\n self.print_params = self.net.print_params\n\n @classmethod\n def squeezenetv1(cls, x, end_with='output', is_train=False, reuse=None):\n with tf.variable_scope(\"squeezenetv1\", reuse=reuse):\n with tf.variable_scope(\"input\"):\n n = InputLayer(x)\n # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')\n n = Conv2d(n, 64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')\n n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire2\"):\n n = Conv2d(n, 16, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 64, (3, 3), (1, 1), 
tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire3\"):\n n = Conv2d(n, 16, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire4\"):\n n = Conv2d(n, 32, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 128, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire5\"):\n n = Conv2d(n, 32, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 128, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire6\"):\n n = Conv2d(n, 48, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 192, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 192, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire7\"):\n n = Conv2d(n, 48, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 192, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 192, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire8\"):\n n = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 256, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"fire9\"):\n n = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')\n n1 = Conv2d(n, 256, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')\n n2 = Conv2d(n, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')\n n = ConcatLayer([n1, n2], -1, name='concat')\n if end_with in n.outputs.name: return n\n\n with tf.variable_scope(\"output\"):\n n = DropoutLayer(n, keep=0.5, is_fix=True, is_train=is_train, name='drop1')\n n = Conv2d(n, 1000, (1, 1), (1, 1), padding='VALID', name='conv10') # 13, 13, 1000\n n = GlobalMeanPool2d(n)\n if end_with in n.outputs.name: return n\n\n raise Exception(\"end_with : input, fire2, fire3 ... fire9, output\")\n\n def restore_params(self, sess, path='models'):\n logging.info(\"Restore pre-trained parameters\")\n maybe_download_and_extract(\n 'squeezenet.npz', path, 'https://github.com/tensorlayer/pretrained-models/raw/master/models/',\n expected_bytes=7405613\n ) # ls -al\n params = load_npz(name=os.path.join(path, 'squeezenet.npz'))\n assign_params(sess, params[:len(self.net.all_params)], self.net)\n del params\n"
] |
[
[
"tensorflow.variable_scope"
]
] |
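
The only API recorded for this file is `tensorflow.variable_scope`, which is what makes the `reuse=True` example in the docstring work: both model instances build their layers inside the same `squeezenetv1` scope, so the second instance picks up the first one's weights instead of creating new ones. A minimal sketch of that sharing pattern, assuming TensorFlow 1.x graph mode (the `tiny_net` function and its variable are illustrative, not the SqueezeNet layers):

```python
import tensorflow as tf  # TensorFlow 1.x graph-mode API


def tiny_net(x, reuse=None):
    # Variables are named "tiny_net/w"; with reuse=True the second call
    # looks up the existing variable instead of creating a new one.
    with tf.variable_scope("tiny_net", reuse=reuse):
        w = tf.get_variable("w", shape=[3, 1])
        return tf.matmul(x, w)


x1 = tf.placeholder(tf.float32, [None, 3])
x2 = tf.placeholder(tf.float32, [None, 3])
y1 = tiny_net(x1)              # creates tiny_net/w
y2 = tiny_net(x2, reuse=True)  # reuses tiny_net/w; no new parameters
print([v.name for v in tf.trainable_variables()])  # ['tiny_net/w:0']
```
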
connahKendrickMMU/KerasLandmarkingToAndroid
|
[
"64de938e5b2c0aefdf6510ddf095358b147da082"
] |
[
"TrainBuildTo android/ConvertKerasModelToTensorGraphAndroid.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 31 11:39:31 2017\r\n\r\n@author: 12102083\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy \r\nimport pandas \r\nfrom sklearn.utils import shuffle \r\nfrom keras.wrappers.scikit_learn import KerasRegressor \r\nfrom sklearn.model_selection import cross_val_score \r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport matplotlib.pyplot as plt \r\nimport time \r\nfrom sklearn.pipeline import Pipeline \r\n\r\nfrom keras.models import Sequential \r\nfrom keras.layers import Dense, Activation \r\nfrom keras.layers import Convolution2D, MaxPooling2D, Flatten\r\nfrom keras.optimizers import SGD \r\nfrom keras.models import model_from_json \r\nfrom keras import backend as K \r\nfrom keras.models import model_from_config \r\nfrom keras.utils.np_utils import convert_kernel\r\nimport tensorflow as tf\r\n#from tensorflow_serving.session_bundle import exporter \r\n# In[ ]: \r\nmodel = model_from_json(open(r'pathToKeraModelStructure',).read()) \r\nmodel.load_weights(r'pathToKerasModelWeights') \r\noutput_tensor = model.output \r\ntensor = k.get_\r\ntf.train.write_graph(output_tensor, 'TensorGraphs', 'train.pb')\r\n\r\n# In[ ]:\r\n\r\nimport keras\r\nimport tensorflow\r\nfrom keras import backend as K\r\nfrom tensorflow.contrib.session_bundle import exporter\r\nfrom keras.models import model_from_config, Sequential\r\nfrom tensorflow.python.framework.graph_util import convert_variables_to_constants \r\n\r\nprint(\"Loading model for exporting to Protocol Buffer format...\")\r\nmodel_path = 'temp.json'\r\nmodel = model_from_json(open('temp.json',).read())\r\nmodel.load_weights('temp.h5') \r\n\r\nK.set_learning_phase(0) # all new operations will be in test mode from now on\r\nops = []\r\nfor layer in model.layers:\r\n print(\"layer name = \" + layer.name)\r\n if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Convolution3D', 'AtrousConvolution2D']:\r\n original_w = K.get_value(layer.W)\r\n converted_w = convert_kernel(original_w)\r\n ops.append(tf.assign(layer.W, converted_w).op) \r\n print(\"Hi\")\r\n \r\nK.get_session().run(ops)\r\n\r\nsess = K.get_session()\r\n\r\nexport_path = \"TensorGraphs\\\\simple.pb\" # where to save the exported graph\r\nexport_version = 1 # version number (integer)\r\ntf.global_variables_initializer()#initialize_all_variables(); \r\nminimal_graph = convert_variables_to_constants(sess, sess.graph_def, [\"dense_3_b/Assign\"]) # output\r\ntf.train.write_graph(sess.graph_def, './tmp/beginner-export','beginner-graph.pb', as_text=False)\r\n\r\n \r\n# In[ ]: \r\n \r\nimport keras\r\nimport tensorflow\r\nfrom keras import backend as K\r\nfrom tensorflow.contrib.session_bundle import exporter\r\nfrom keras.models import model_from_config, Sequential\r\n\r\nprint(\"Loading model for exporting to Protocol Buffer format...\")\r\nmodel_path = 'temp.h5'\r\nmodel = keras.models.load_model(model_path)\r\n\r\nK.set_learning_phase(0) # all new operations will be in test mode from now on\r\nsess = K.get_session()\r\n\r\n# serialize the model and get its weights, for quick re-building\r\nconfig = model.get_config()\r\nweights = model.get_weights()\r\n\r\n# re-build a model where the learning phase is now hard-coded to 0\r\nnew_model = Sequential.from_config(config)\r\nnew_model.set_weights(weights)\r\n\r\nexport_path = \"TensorGraphs//simple.pb\" # where to save the exported graph\r\nexport_version = 1 # version number (integer)\r\n\r\nsaver = 
tensorflow.train.Saver(sharded=True)\r\nmodel_exporter = exporter.Exporter(saver)\r\nsignature = exporter.classification_signature(input_tensor=model.input, scores_tensor=model.output)\r\nmodel_exporter.init(sess.graph.as_graph_def(), default_graph_signature=signature)\r\nmodel_exporter.export(export_path, tensorflow.constant(export_version), sess)"
] |
[
[
"tensorflow.constant",
"tensorflow.assign",
"tensorflow.contrib.session_bundle.exporter.classification_signature",
"tensorflow.global_variables_initializer",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.contrib.session_bundle.exporter.Exporter",
"tensorflow.train.Saver",
"tensorflow.train.write_graph"
]
] |
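
Most of the APIs listed for this script belong to its freeze-and-export step: fold the Keras model's variables into constants in the current session, then write the resulting GraphDef to disk. A minimal sketch of that step, assuming TensorFlow 1.x with standalone Keras; the one-layer `Sequential` model is a toy stand-in for the loaded landmarking model (the script itself computes `minimal_graph` but then writes the unfrozen `sess.graph_def`, which the sketch avoids):

```python
import tensorflow as tf  # TensorFlow 1.x
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from tensorflow.python.framework.graph_util import convert_variables_to_constants

K.set_learning_phase(0)                                       # hard-code inference mode
model = Sequential([Dense(1, input_shape=(4,), name="out")])  # toy stand-in model

sess = K.get_session()              # session holding the Keras weights
output_name = model.output.op.name  # real models: use their actual output op name(s)

# Fold variables into constants so the graph can be shipped without a checkpoint.
frozen_graph = convert_variables_to_constants(sess, sess.graph_def, [output_name])
tf.train.write_graph(frozen_graph, "TensorGraphs", "frozen.pb", as_text=False)
```
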
mfa/wandb-allennlp
|
[
"29ebba81cdbd83653350d00911c4a54d8da9def1"
] |
[
"tests/models/dummy.py"
] |
[
"from typing import List, Tuple, Union, Dict, Any, Optional\nimport torch\nimport allennlp\nfrom allennlp.models import Model\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.data.dataset_readers import DatasetReader\nfrom allennlp.data.fields import TensorField\nfrom allennlp.data.instance import Instance\n\n\n@Model.register(\"dummy\")\nclass DummyModel(Model):\n def __init__(self, vocab: Vocabulary, a: float, b: float = 0):\n super().__init__(vocab=vocab)\n self.a = a\n self.b = b\n self.param = torch.nn.Parameter(torch.tensor(10.0))\n self.x = 0\n\n def forward(self, *args, **kwargs):\n self.x += 1\n\n return {\"loss\": self.a * self.x + self.b + self.param}\n\n\n@Model.register(\"parameter-tying\")\nclass DummyModel(Model):\n def __init__(\n self,\n vocab: Vocabulary,\n a: float,\n b: float,\n d: float,\n bool_value: bool,\n bool_value_not: bool,\n int_value: int,\n int_value_10: int,\n ):\n super().__init__(vocab=vocab)\n self.a = a\n self.b = b\n self.d = d\n self.param = torch.nn.Parameter(torch.tensor(10.0))\n assert a == b\n assert isinstance(bool_value, bool)\n assert isinstance(bool_value_not, bool)\n assert bool_value == (not bool_value_not)\n assert isinstance(int_value, int)\n assert isinstance(int_value_10, int)\n assert int_value + 10 == int_value_10\n assert d == 1\n self.x = 0\n\n def forward(self, *args: Any, **kwargs: Any):\n self.x += 1\n\n return {\"loss\": self.a * self.x + self.b + self.param}\n\n\n@DatasetReader.register(\"dummy\")\nclass Dummy(DatasetReader):\n def _read(self, file_path: str):\n for i in range(10):\n yield Instance({\"x\": TensorField(torch.tensor([i]))})\n"
] |
[
[
"torch.tensor"
]
] |
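
The only API recorded here is `torch.tensor`, used to seed a `torch.nn.Parameter` so that the otherwise trivial "loss" still has a trainable leaf to backpropagate into. A minimal sketch of that trick outside AllenNLP, in plain PyTorch (the `Dummy` class and its constants are illustrative):

```python
import torch


class Dummy(torch.nn.Module):
    def __init__(self, a: float, b: float = 0.0):
        super().__init__()
        self.a, self.b = a, b
        # A scalar Parameter gives the optimizer something to update,
        # even though the "loss" is otherwise just arithmetic.
        self.param = torch.nn.Parameter(torch.tensor(10.0))
        self.x = 0

    def forward(self) -> torch.Tensor:
        self.x += 1
        return self.a * self.x + self.b + self.param


model = Dummy(a=2.0, b=1.0)
loss = model()
loss.backward()          # gradient flows into model.param
print(model.param.grad)  # tensor(1.)
```
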
songqiang/autogluon
|
[
"529d7cc65fad411622072aa0349215a15e1e901c"
] |
[
"core/src/autogluon/core/metrics/__init__.py"
] |
[
"import copy\nfrom abc import ABCMeta, abstractmethod\nfrom functools import partial\n\nimport scipy\nimport scipy.stats\nimport sklearn.metrics\n\nfrom . import classification_metrics\nfrom .util import sanitize_array\nfrom ..constants import PROBLEM_TYPES_REGRESSION, PROBLEM_TYPES_CLASSIFICATION, QUANTILE\nfrom ..utils.miscs import warning_filter\nfrom .classification_metrics import *\nfrom . import quantile_metrics\n\n\nclass Scorer(object, metaclass=ABCMeta):\n def __init__(self, name, score_func, optimum, sign, kwargs):\n self.name = name\n self._kwargs = kwargs\n self._score_func = score_func\n self._optimum = optimum\n self._sign = sign\n self.alias = set()\n\n def add_alias(self, alias):\n if alias == self.name:\n raise ValueError(f'The alias \"{alias}\" is the same as the original name \"{self.name}\". '\n f'This is not allowed.')\n self.alias.add(alias)\n\n @property\n def greater_is_better(self) -> bool:\n \"\"\"Return whether the score is greater the better.\n\n We use the stored `sign` object to decide the property.\n\n Returns\n -------\n flag\n The \"greater_is_better\" flag.\n \"\"\"\n return self._sign > 0\n\n def convert_score_to_sklearn_val(self, score):\n \"\"\"Scores are always greater_is_better, this flips the sign of metrics who were originally lower_is_better.\"\"\"\n return self._sign * score\n\n @abstractmethod\n def __call__(self, y_true, y_pred, sample_weight=None):\n pass\n\n def __repr__(self):\n return self.name\n\n def sklearn_scorer(self):\n with warning_filter():\n ret = sklearn.metrics.scorer.make_scorer(score_func=self, greater_is_better=True, needs_proba=self.needs_proba, needs_threshold=self.needs_threshold)\n return ret\n\n @property\n @abstractmethod\n def needs_pred(self):\n raise NotImplementedError\n\n @property\n @abstractmethod\n def needs_proba(self) -> bool:\n raise NotImplementedError\n\n @property\n @abstractmethod\n def needs_threshold(self) -> bool:\n raise NotImplementedError\n\n @property\n @abstractmethod\n def needs_quantile(self) -> bool:\n raise NotImplementedError\n\n\nclass _PredictScorer(Scorer):\n def __call__(self, y_true, y_pred, sample_weight=None):\n \"\"\"Evaluate predicted target values for X relative to y_true.\n\n Parameters\n ----------\n y_true : array-like\n Gold standard target values for X.\n\n y_pred : array-like, [n_samples x n_classes]\n Model predictions\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n\n if isinstance(y_true, list):\n y_true = np.array(y_true)\n if isinstance(y_pred, list):\n y_pred = np.array(y_pred)\n type_true = type_of_target(y_true)\n\n if len(y_pred.shape) == 1 or y_pred.shape[1] == 1 or type_true == 'continuous':\n pass # must be regression, all other task types would return at least two probabilities\n elif type_true in ['binary', 'multiclass']:\n y_pred = np.argmax(y_pred, axis=1)\n elif type_true == 'multilabel-indicator':\n y_pred[y_pred > 0.5] = 1.0\n y_pred[y_pred <= 0.5] = 0.0\n else:\n raise ValueError(type_true)\n\n if sample_weight is not None:\n return self._sign * self._score_func(y_true, y_pred,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y_true, y_pred,\n **self._kwargs)\n\n @property\n def needs_pred(self):\n return True\n\n @property\n def needs_proba(self):\n return False\n\n @property\n def needs_threshold(self):\n return False\n\n @property\n def needs_quantile(self):\n return 
False\n\n\nclass _ProbaScorer(Scorer):\n def __call__(self, y_true, y_pred, sample_weight=None):\n \"\"\"Evaluate predicted probabilities for X relative to y_true.\n Parameters\n ----------\n y_true : array-like\n Gold standard target values for X. These must be class labels,\n not probabilities.\n\n y_pred : array-like, [n_samples x n_classes]\n Model predictions\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n if sample_weight is not None:\n return self._sign * self._score_func(y_true, y_pred,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y_true, y_pred, **self._kwargs)\n\n @property\n def needs_pred(self):\n return False\n\n @property\n def needs_proba(self):\n return True\n\n @property\n def needs_threshold(self):\n return False\n\n @property\n def needs_quantile(self):\n return False\n\n\nclass _ThresholdScorer(Scorer):\n def __call__(self, y_true, y_pred, sample_weight=None):\n \"\"\"Evaluate decision function output for X relative to y_true.\n\n Parameters\n ----------\n y_true : array-like\n Gold standard target values for X. These must be class labels,\n not probabilities.\n\n y_pred : array-like, [n_samples x n_classes]\n Model predictions\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n if isinstance(y_true, list):\n y_true = np.array(y_true)\n if isinstance(y_pred, list):\n y_pred = np.array(y_pred)\n y_type = type_of_target(y_true)\n if y_type not in (\"binary\", \"multilabel-indicator\"):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_type == \"binary\":\n pass\n # y_pred = y_pred[:, 1]\n elif isinstance(y_pred, list):\n y_pred = np.vstack([p[:, -1] for p in y_pred]).T\n\n if sample_weight is not None:\n return self._sign * self._score_func(y_true, y_pred,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y_true, y_pred, **self._kwargs)\n\n @property\n def needs_pred(self):\n return False\n\n @property\n def needs_proba(self):\n return False\n\n @property\n def needs_threshold(self):\n return True\n\n @property\n def needs_quantile(self):\n return False\n\n\nclass _QuantileScorer(Scorer):\n def __call__(self, y_true, y_pred, quantile_levels, sample_weight=None):\n \"\"\"Evaluate predicted quantile target values for X relative to y_true.\n\n Parameters\n ----------\n y_true : array-like\n Gold standard target values for X.\n\n y_pred : array-like, [n_samples x n_quantiles]\n Model quantile predictions\n\n quantile_levels : array-like\n List of quantile levels\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n\n if isinstance(y_true, list):\n y_true = np.array(y_true)\n if isinstance(y_pred, list):\n y_pred = np.array(y_pred)\n if isinstance(quantile_levels, list):\n quantile_levels = np.array(quantile_levels)\n type_true = type_of_target(y_true)\n\n if len(y_pred.shape) == 2 or y_pred.shape[1] >= 1 or type_true == 'continuous':\n pass # must be quantile regression, all other task types would return at least two probabilities\n else:\n raise ValueError(type_true)\n\n if sample_weight is not None:\n return self._sign * self._score_func(y_true, y_pred,\n 
quantile_levels,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y_true, y_pred,\n quantile_levels,\n **self._kwargs)\n\n @property\n def needs_pred(self):\n return False\n\n @property\n def needs_proba(self):\n return False\n\n @property\n def needs_threshold(self):\n return False\n\n @property\n def needs_quantile(self):\n return True\n\n\ndef make_scorer(name, score_func, optimum=1, greater_is_better=True,\n needs_proba=False, needs_threshold=False, needs_quantile=False, **kwargs) -> Scorer:\n \"\"\"Make a scorer from a performance metric or loss function.\n\n Factory inspired by scikit-learn which wraps scikit-learn scoring functions\n to be used in auto-sklearn.\n\n Parameters\n ----------\n score_func : callable\n Score function (or loss function) with signature\n ``score_func(y, y_pred, **kwargs)``.\n\n optimum : int or float, default=1\n The best score achievable by the score function, i.e. maximum in case of\n scorer function and minimum in case of loss function.\n\n greater_is_better : boolean, default=True\n Whether score_func is a score function (default), meaning high is good,\n or a loss function, meaning low is good. In the latter case, the\n scorer object will sign-flip the outcome of the score_func.\n\n needs_proba : boolean, default=False\n Whether score_func requires predict_proba to get probability estimates\n out of a classifier.\n\n needs_threshold : boolean, default=False\n Whether score_func takes a continuous decision certainty.\n This only works for binary classification.\n\n needs_quantile : boolean, default=False\n Whether score_func is based on quantile predictions.\n This only works for quantile regression.\n\n **kwargs : additional arguments\n Additional parameters to be passed to score_func.\n\n Returns\n -------\n scorer\n Callable object that returns a scalar score; greater is better.\n \"\"\"\n sign = 1 if greater_is_better else -1\n if needs_proba:\n cls = _ProbaScorer\n elif needs_threshold:\n cls = _ThresholdScorer\n elif needs_quantile:\n cls = _QuantileScorer\n else:\n cls = _PredictScorer\n return cls(name, score_func, optimum, sign, kwargs)\n\n\n# Standard regression scores\nr2 = make_scorer('r2',\n sklearn.metrics.r2_score)\nmean_squared_error = make_scorer('mean_squared_error',\n sklearn.metrics.mean_squared_error,\n optimum=0,\n greater_is_better=False)\nmean_squared_error.add_alias('mse')\n\nmean_absolute_error = make_scorer('mean_absolute_error',\n sklearn.metrics.mean_absolute_error,\n optimum=0,\n greater_is_better=False)\nmean_absolute_error.add_alias('mae')\n\nmedian_absolute_error = make_scorer('median_absolute_error',\n sklearn.metrics.median_absolute_error,\n optimum=0,\n greater_is_better=False)\n\n\ndef local_spearmanr(y_true, y_pred):\n return float(scipy.stats.spearmanr(y_true, y_pred)[0])\n\n\nspearmanr = make_scorer('spearmanr',\n local_spearmanr,\n optimum=1.0,\n greater_is_better=True)\n\n\ndef local_pearsonr(y_true, y_pred):\n return float(scipy.stats.pearsonr(y_true, y_pred)[0])\n\n\npearsonr = make_scorer('pearsonr',\n local_pearsonr,\n optimum=1.0,\n greater_is_better=True)\n\n\ndef rmse_func(y_true, y_pred):\n return np.sqrt(((y_true - y_pred) ** 2).mean())\n\n\nroot_mean_squared_error = make_scorer('root_mean_squared_error',\n rmse_func,\n optimum=0,\n greater_is_better=False)\nroot_mean_squared_error.add_alias('rmse')\n\n# Quantile pinball loss\npinball_loss = make_scorer('pinball_loss',\n quantile_metrics.pinball_loss,\n needs_quantile=True,\n optimum=0.0,\n 
greater_is_better=False)\npinball_loss.add_alias('pinball')\n\n\n# Standard Classification Scores\naccuracy = make_scorer('accuracy',\n sklearn.metrics.accuracy_score)\naccuracy.add_alias('acc')\n\nbalanced_accuracy = make_scorer('balanced_accuracy',\n classification_metrics.balanced_accuracy)\nf1 = make_scorer('f1',\n sklearn.metrics.f1_score)\nmcc = make_scorer('mcc', sklearn.metrics.matthews_corrcoef)\n\n\n# Score functions that need decision values\nroc_auc = make_scorer('roc_auc',\n sklearn.metrics.roc_auc_score,\n greater_is_better=True,\n needs_threshold=True)\n\nroc_auc_ovo_macro = make_scorer('roc_auc_ovo_macro',\n sklearn.metrics.roc_auc_score,\n multi_class='ovo',\n average='macro',\n greater_is_better=True,\n needs_proba=True,\n needs_threshold=False)\n\naverage_precision = make_scorer('average_precision',\n sklearn.metrics.average_precision_score,\n needs_threshold=True)\nprecision = make_scorer('precision',\n sklearn.metrics.precision_score)\nrecall = make_scorer('recall',\n sklearn.metrics.recall_score)\n\n# Register other metrics\nquadratic_kappa = make_scorer('quadratic_kappa', quadratic_kappa, needs_proba=False)\n\n\ndef customized_log_loss(y_true, y_pred, eps=1e-15):\n \"\"\"\n\n Parameters\n ----------\n y_true : array-like or label indicator matrix\n Ground truth (correct) labels for n_samples samples.\n\n y_pred : array-like of float\n The predictions. shape = (n_samples, n_classes) or (n_samples,)\n\n eps : float\n The epsilon\n\n Returns\n -------\n loss\n The negative log-likelihood\n \"\"\"\n assert y_true.ndim == 1\n if y_pred.ndim == 1:\n # First clip the y_pred which is also used in sklearn\n y_pred = np.clip(y_pred, eps, 1 - eps)\n return - (y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)).mean()\n else:\n assert y_pred.ndim == 2, 'Only ndim=2 is supported'\n labels = np.arange(y_pred.shape[1], dtype=np.int32)\n return sklearn.metrics.log_loss(y_true.astype(np.int32), y_pred,\n labels=labels,\n eps=eps)\n\n\n# Score function for probabilistic classification\nlog_loss = make_scorer('log_loss',\n customized_log_loss,\n optimum=0,\n greater_is_better=False,\n needs_proba=True)\nlog_loss.add_alias('nll')\n\npac_score = make_scorer('pac_score',\n classification_metrics.pac_score,\n greater_is_better=True,\n needs_proba=True)\n\nREGRESSION_METRICS = dict()\nfor scorer in [r2, mean_squared_error, root_mean_squared_error, mean_absolute_error,\n median_absolute_error, spearmanr, pearsonr]:\n if scorer.name in REGRESSION_METRICS:\n raise ValueError(f'Duplicated score name found! scorer={scorer}, name={scorer.name}. '\n f'Consider to register with a different name.')\n REGRESSION_METRICS[scorer.name] = scorer\n for alias in scorer.alias:\n if alias in REGRESSION_METRICS:\n raise ValueError(f'Duplicated alias found! scorer={scorer}, alias={alias}. '\n f'Consider to use a different alias.')\n REGRESSION_METRICS[alias] = scorer\n\nQUANTILE_METRICS = dict()\nfor scorer in [pinball_loss]:\n if scorer.name in QUANTILE_METRICS:\n raise ValueError(f'Duplicated score name found! scorer={scorer}, name={scorer.name}. '\n f'Consider to register with a different name.')\n QUANTILE_METRICS[scorer.name] = scorer\n for alias in scorer.alias:\n if alias in QUANTILE_METRICS:\n raise ValueError(f'Duplicated alias found! scorer={scorer}, alias={alias}. 
'\n f'Consider to use a different alias.')\n QUANTILE_METRICS[alias] = scorer\n\nCLASSIFICATION_METRICS = dict()\nfor scorer in [accuracy, balanced_accuracy, mcc, roc_auc, roc_auc_ovo_macro, average_precision,\n log_loss, pac_score, quadratic_kappa]:\n CLASSIFICATION_METRICS[scorer.name] = scorer\n for alias in scorer.alias:\n CLASSIFICATION_METRICS[alias] = scorer\n\n\nfor name, metric in [('precision', sklearn.metrics.precision_score),\n ('recall', sklearn.metrics.recall_score),\n ('f1', sklearn.metrics.f1_score)]:\n globals()[name] = make_scorer(name, metric)\n CLASSIFICATION_METRICS[name] = globals()[name]\n for average in ['macro', 'micro', 'samples', 'weighted']:\n qualified_name = '{0}_{1}'.format(name, average)\n globals()[qualified_name] = make_scorer(qualified_name,\n partial(metric, pos_label=None, average=average))\n CLASSIFICATION_METRICS[qualified_name] = globals()[qualified_name]\n\n\ndef get_metric(metric, problem_type=None, metric_type=None) -> Scorer:\n \"\"\"Returns metric function by using its name if the metric is str.\n Performs basic check for metric compatibility with given problem type.\"\"\"\n all_available_metric_names = list(CLASSIFICATION_METRICS.keys()) + list(REGRESSION_METRICS.keys()) + list(QUANTILE_METRICS.keys()) + ['soft_log_loss']\n\n if metric is not None and isinstance(metric, str):\n if metric in CLASSIFICATION_METRICS:\n if problem_type is not None and problem_type not in PROBLEM_TYPES_CLASSIFICATION:\n raise ValueError(f\"{metric_type}={metric} can only be used for classification problems\")\n return CLASSIFICATION_METRICS[metric]\n elif metric in REGRESSION_METRICS:\n if problem_type is not None and problem_type not in PROBLEM_TYPES_REGRESSION:\n raise ValueError(f\"{metric_type}={metric} can only be used for regression problems\")\n return REGRESSION_METRICS[metric]\n elif metric in QUANTILE_METRICS:\n if problem_type is not None and problem_type != QUANTILE:\n raise ValueError(f\"{metric_type}={metric} can only be used for quantile problems\")\n return QUANTILE_METRICS[metric]\n elif metric == 'soft_log_loss':\n if problem_type == QUANTILE:\n raise ValueError(f\"{metric_type}={metric} can not be used for quantile problems\")\n # Requires mxnet\n from .softclass_metrics import soft_log_loss\n return soft_log_loss\n else:\n raise ValueError(\n f\"{metric} is an unknown metric, all available metrics are \"\n f\"'{all_available_metric_names}'. You can also refer to \"\n f\"autogluon.core.metrics to see how to define your own {metric_type} function\"\n )\n else:\n return metric\n"
] |
[
[
"scipy.stats.spearmanr",
"scipy.stats.pearsonr"
]
] |
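
Only the two `scipy.stats` correlation calls are extracted from this module; in the file they are wrapped in small helpers that keep just the correlation coefficient (dropping the p-value) before being handed to `make_scorer`. A minimal standalone sketch of that wrapping, without the AutoGluon `Scorer` machinery (the toy arrays are illustrative):

```python
import numpy as np
import scipy.stats


def local_spearmanr(y_true, y_pred):
    # scipy.stats.spearmanr returns (correlation, p-value); keep the correlation.
    return float(scipy.stats.spearmanr(y_true, y_pred)[0])


def local_pearsonr(y_true, y_pred):
    return float(scipy.stats.pearsonr(y_true, y_pred)[0])


y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])
print(local_spearmanr(y_true, y_pred))  # 1.0: the rank orderings agree exactly
print(local_pearsonr(y_true, y_pred))   # close to 1.0
```
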
aokellermann/gym-minesweeper
|
[
"d071ab103c912f2dd08b2a83129b4ca6df3b3617"
] |
[
"gym_minesweeper/tests/minesweeper_test.py"
] |
[
"\"\"\"Tests for minesweeper env implementation.\"\"\"\nfrom unittest.mock import patch\n\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nfrom PIL import Image\n\nfrom gym_minesweeper import MinesweeperEnv, SPACE_UNKNOWN, \\\n DEFAULT_REWARD_WIN, DEFAULT_REWARD_LOSE, DEFAULT_REWARD_CLEAR, DEFAULT_REWARD_FAIL_CLEAR\n\nTEST_BOARD_SIZE = (4, 5)\nTEST_NUM_MINES = 3\nTEST_SEED = 42069\n\n\ndef test_no_mines_init():\n \"\"\"Asserts that initializing with no mines works properly\"\"\"\n\n size = (30, 50)\n ms_game = MinesweeperEnv(size, 0)\n assert size == ms_game.board_size\n assert ms_game.num_mines == 0\n npt.assert_array_equal([], ms_game.hist)\n npt.assert_array_equal([[SPACE_UNKNOWN] * size[1]] * size[0], ms_game.board)\n\n\ndef test_no_mines_step():\n \"\"\"Asserts that taking one step with no mines wins\"\"\"\n\n size = (30, 50)\n ms_game = MinesweeperEnv(size, 0)\n action = (21, 5)\n board, reward, done, info = ms_game.step(action)\n\n expected_board = [[0] * size[1]] * size[0]\n npt.assert_array_equal(ms_game.board, expected_board)\n npt.assert_array_equal(ms_game.hist, [action])\n\n npt.assert_array_equal(board, expected_board)\n assert reward == DEFAULT_REWARD_WIN\n assert done\n assert info == dict()\n\n\ndef create_game():\n \"\"\"Creates a deterministic 4x5 game\"\"\"\n size = TEST_BOARD_SIZE\n ms_game = MinesweeperEnv(size, TEST_NUM_MINES)\n ms_game.seed(TEST_SEED)\n return ms_game\n\n\ndef assert_game(ms_game, actions, expected_boards, expected_rewards, expected_dones):\n \"\"\"Given a full list of game steps, plays through the game and asserts all states are correct.\"\"\"\n\n expected_hist = []\n\n def err_msg(idx):\n return \"idx: {}\".format(idx)\n\n for i, action in enumerate(actions):\n board, reward, done, info = ms_game.step(action)\n\n npt.assert_array_equal(ms_game.board, expected_boards[i], err_msg(i))\n npt.assert_array_equal(board, expected_boards[i], err_msg(i))\n\n expected_hist.append(action)\n npt.assert_array_equal(ms_game.hist, expected_hist, err_msg(i))\n\n assert reward == expected_rewards[i], err_msg(i)\n assert done == expected_dones[i], err_msg(i)\n assert info == dict(), err_msg(i)\n\n\ndef test_win(ms_game=create_game()):\n \"\"\"Asserts that a winning playthrough works.\"\"\"\n\n actions = [(0, 0), (3, 3), (0, 3), (1, 2), (0, 4), (1, 4)]\n expected_boards = [\n [[0, 1, -1, -1, -1], [0, 1, -1, -1, -1], [1, 1, -1, -1, -1], [-1, -1, -1, -1, -1]],\n [[0, 1, -1, -1, -1], [0, 1, -1, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, -1], [0, 1, -1, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, -1], [0, 1, 2, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, 1], [0, 1, 2, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, 1], [0, 1, 2, -1, 1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n ]\n\n expected_rewards = [DEFAULT_REWARD_CLEAR] * (len(expected_boards) - 1) + [DEFAULT_REWARD_WIN]\n expected_dones = [False] * (len(expected_boards) - 1) + [True]\n\n assert_game(ms_game, actions, expected_boards, expected_rewards, expected_dones)\n\n\ndef test_lose(ms_game=create_game()):\n \"\"\"Asserts that a losing playthrough works.\"\"\"\n\n actions = [(0, 0), (3, 3), (0, 3), (1, 2), (0, 4), (0, 2)]\n expected_boards = [\n [[0, 1, -1, -1, -1], [0, 1, -1, -1, -1], [1, 1, -1, -1, -1], [-1, -1, -1, -1, -1]],\n [[0, 1, -1, -1, -1], [0, 1, -1, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, -1], [0, 1, -1, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, -1], [0, 1, 2, -1, -1], [1, 1, 1, 
1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -1, 2, 1], [0, 1, 2, -1, -1], [1, 1, 1, 1, 1], [-1, 1, 0, 0, 0]],\n [[0, 1, -2, 2, 1], [0, 1, 2, -2, 1], [1, 1, 1, 1, 1], [-2, 1, 0, 0, 0]],\n ]\n\n expected_rewards = [DEFAULT_REWARD_CLEAR] * (len(expected_boards) - 1) + [DEFAULT_REWARD_LOSE]\n expected_dones = [False] * (len(expected_boards) - 1) + [True]\n\n assert_game(ms_game, actions, expected_boards, expected_rewards, expected_dones)\n\n\ndef test_clear_cleared_space():\n \"\"\"Asserts that clearing the same space twice yields the correct values.\"\"\"\n\n ms_game = create_game()\n action = (0, 0)\n board, _, done, info = ms_game.step(action)\n board_2, reward_2, done_2, info_2 = ms_game.step(action)\n npt.assert_array_equal(board, board_2)\n assert reward_2 == DEFAULT_REWARD_FAIL_CLEAR\n assert done == done_2 and not done\n assert info == info_2 and info == dict()\n\n\ndef test_reset_and_reseed():\n \"\"\"Tests resetting the game and re-seeding.\"\"\"\n\n size = TEST_BOARD_SIZE\n ms_game = create_game()\n\n test_win(ms_game)\n ms_game.reset()\n ms_game.seed(TEST_SEED) # need to re-seed so it's deterministic\n\n test_lose(ms_game)\n ms_game.reset()\n\n assert ms_game.get_status() is None\n assert ms_game.hist == []\n npt.assert_array_equal(ms_game.board_size, (4, 5))\n assert ms_game.num_mines == TEST_NUM_MINES\n\n expected_board = [[SPACE_UNKNOWN] * size[1]] * size[0]\n npt.assert_array_equal(ms_game.board, expected_board)\n\n\ndef test_render():\n \"\"\"Tests game rendering\"\"\"\n\n # get losing board\n ms_game = create_game()\n test_lose(ms_game)\n\n class WriteSideEffect:\n \"\"\"Mock class for writable classes.\"\"\"\n out = \"\"\n\n def write(self, text):\n \"\"\"Appends text to internal buffer.\"\"\"\n self.out += str(text)\n\n def get(self):\n \"\"\"Gets the internal buffer.\"\"\"\n return self.out\n\n expected_board = \"0 1 X 2 1\\n\" \\\n \"0 1 2 X 1\\n\" \\\n \"1 1 1 1 1\\n\" \\\n \"X 1 0 0 0\"\n\n human_se = WriteSideEffect()\n with patch(\"sys.stdout.write\", side_effect=human_se.write):\n ms_game.render('human')\n assert human_se.get() == expected_board\n\n string_io = ms_game.render('ansi')\n string_io.seek(0)\n assert string_io.read() == expected_board\n\n img = ms_game.render('rgb_array')\n expected_img = np.array(Image.open(\"images/test/render.bmp\"))[:, :, :3]\n npt.assert_array_equal(img, expected_img)\n\n pytest.raises(NotImplementedError, ms_game.render, 'other')\n\n\ndef test_get_possible_moves():\n \"\"\"Asserts that get_possible_moves returns only unknown spaces, or None if the game is over\"\"\"\n\n ms_game = create_game()\n npt.assert_array_equal(\n np.sort(ms_game.get_possible_moves(), axis=0),\n np.sort(np.transpose([\n np.tile(range(TEST_BOARD_SIZE[0]), TEST_BOARD_SIZE[1]),\n np.repeat(range(TEST_BOARD_SIZE[1]), TEST_BOARD_SIZE[0])\n ]),\n axis=0))\n\n ms_game.step((0, 0))\n npt.assert_array_equal(\n np.sort(ms_game.get_possible_moves(), axis=0),\n np.sort([(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 2), (2, 3), (2, 4), (3, 0), (3, 1), (3, 2), (3, 3),\n (3, 4)],\n axis=0))\n\n ms_game.reset()\n ms_game.seed(TEST_SEED)\n test_win(ms_game)\n assert ms_game.get_possible_moves() is None\n\n ms_game.reset()\n ms_game.seed(TEST_SEED)\n test_lose(ms_game)\n assert ms_game.get_possible_moves() is None\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.sort"
]
] |
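
The recorded APIs are `numpy.testing.assert_array_equal` and `numpy.sort`; `test_get_possible_moves` combines them by sorting both move arrays along `axis=0` before comparing, so the assertion does not depend on the order in which the environment returns moves. A minimal sketch of that comparison on toy coordinate lists (the arrays here are illustrative, not real env output):

```python
import numpy as np
import numpy.testing as npt

# Two lists of (row, col) moves containing the same pairs in different order.
moves_a = np.array([(0, 1), (2, 3), (1, 0)])
moves_b = np.array([(1, 0), (0, 1), (2, 3)])

# np.sort along axis=0 sorts each column independently, so equal multisets of
# moves compare equal regardless of the order they were returned in.
npt.assert_array_equal(np.sort(moves_a, axis=0), np.sort(moves_b, axis=0))
```
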
esamuel1/pymapd
|
[
"8a4c5093b9d864d3356880bab846cbb6f50d2127"
] |
[
"tests/test_data_no_nulls_cpu.py"
] |
[
"\"\"\"\nThe intent of this file is to be a full integration test. Whenever possible,\nadd a datatype to the main _tests_table_no_nulls function, so that the tests\nwill evaluate not only that a data type works, but that it works in the\npresence of the other data types as well in the same dataframe/database table\n\"\"\"\nimport pytest\nimport pandas as pd\nimport numpy as np\nfrom shapely import wkt\n\nfrom .conftest import _tests_table_no_nulls\n\n\n@pytest.mark.usefixtures(\"mapd_server\")\nclass TestCPUDataNoNulls:\n\n @pytest.mark.parametrize('method', [\"rows\", \"columnar\", \"arrow\", \"infer\"])\n def test_create_load_table_no_nulls_sql_execute(self, con, method):\n \"\"\"\n Demonstrate that regardless of how data loaded, roundtrip answers\n are the same when con.execute()/pd.read_sql called for row-wise\n data retrieval\n \"\"\"\n df_in = _tests_table_no_nulls(10000)\n df_in.drop(columns=[\"point_\",\n \"line_\",\n \"mpoly_\",\n \"poly_\"], inplace=True)\n con.execute(\"drop table if exists test_data_no_nulls;\")\n con.load_table(\"test_data_no_nulls\", df_in, method=method)\n\n # read_sql() uses execute() under the hood\n df_out = pd.read_sql(\"select * from test_data_no_nulls\", con)\n\n # test size and table definition\n assert df_in.shape == df_out.shape\n gtd = con.get_table_details(\"test_data_no_nulls\")\n name_types = [(x.name, x.type) for x in gtd]\n assert name_types == [('tinyint_', 'TINYINT'),\n ('smallint_', 'SMALLINT'),\n ('int_', 'INT'),\n ('bigint_', 'BIGINT'),\n ('float_', 'FLOAT'),\n ('double_', 'DOUBLE'),\n ('bool_', 'BOOL'),\n ('date_', 'DATE'),\n ('datetime_', 'TIMESTAMP'),\n ('time_', 'TIME'),\n ('text_', 'STR'),\n ]\n\n # sort tables to ensure data in same order before compare\n # need to sort by all the columns in case of ties\n df_in.sort_values(by=['tinyint_',\n 'smallint_',\n 'int_',\n 'bigint_'], inplace=True)\n df_in.reset_index(drop=True, inplace=True)\n\n df_out.sort_values(by=['tinyint_',\n 'smallint_',\n 'int_',\n 'bigint_'], inplace=True)\n df_out.reset_index(drop=True, inplace=True)\n\n # pymapd won't necessarily return exact dtype as input using execute()\n # and pd.read_sql() since transport is rows of tuples\n # test that results are the same when dtypes aligned\n assert pd.DataFrame.equals(df_in[\"tinyint_\"],\n df_out[\"tinyint_\"].astype('int8'))\n\n assert pd.DataFrame.equals(df_in[\"smallint_\"],\n df_out[\"smallint_\"].astype('int16'))\n\n assert pd.DataFrame.equals(df_in[\"int_\"],\n df_out[\"int_\"].astype('int32'))\n\n assert pd.DataFrame.equals(df_in[\"bigint_\"], df_out[\"bigint_\"])\n assert all(np.isclose(df_in[\"float_\"], df_out[\"float_\"]))\n assert all(np.isclose(df_in[\"double_\"], df_out[\"double_\"]))\n assert pd.DataFrame.equals(df_in[\"bool_\"], df_out[\"bool_\"].astype('bool')) # noqa\n assert pd.DataFrame.equals(df_in[\"date_\"], df_out[\"date_\"])\n assert pd.DataFrame.equals(df_in[\"datetime_\"], df_out[\"datetime_\"])\n assert pd.DataFrame.equals(df_in[\"time_\"], df_out[\"time_\"])\n assert pd.DataFrame.equals(df_in[\"text_\"], df_out[\"text_\"])\n\n con.execute(\"drop table if exists test_data_no_nulls;\")\n\n @pytest.mark.parametrize('method', [\"rows\", \"columnar\", \"arrow\", \"infer\"])\n def test_create_load_table_no_nulls_select_ipc(self, con, method):\n \"\"\"\n Demonstrate that regardless of how data loaded, roundtrip answer\n is same when con.select_ipc() called to retrieve data using Arrow\n \"\"\"\n # need to manually specify columns since some don't currently work\n # need to drop unsupported 
columns from df_in\n df_in = _tests_table_no_nulls(10000)\n df_in.drop(columns=[\"point_\",\n \"line_\",\n \"mpoly_\",\n \"poly_\"], inplace=True)\n\n con.execute(\"drop table if exists test_data_no_nulls_ipc;\")\n con.load_table(\"test_data_no_nulls_ipc\", df_in, method=method)\n\n df_out = con.select_ipc(\"\"\"select\n tinyint_,\n smallint_,\n int_,\n bigint_,\n float_,\n double_,\n bool_,\n date_,\n datetime_,\n time_,\n text_\n from test_data_no_nulls_ipc\"\"\")\n\n # test size and table definition\n assert df_in.shape == df_out.shape\n\n # sort tables to ensure data in same order before compare\n # need to sort by all the columns in case of ties\n df_in.sort_values(by=['tinyint_',\n 'smallint_',\n 'int_',\n 'bigint_'], inplace=True)\n df_in.reset_index(drop=True, inplace=True)\n\n df_out.sort_values(by=['tinyint_',\n 'smallint_',\n 'int_',\n 'bigint_'], inplace=True)\n df_out.reset_index(drop=True, inplace=True)\n\n # When Arrow result converted to pandas, dict comes back as category\n # This providies extra functionality above base 'object' type\n df_out[\"text_\"] = df_out[\"text_\"].astype('object')\n\n # select_ipc uses Arrow, so expect exact df dtypes back\n assert pd.DataFrame.equals(df_in, df_out)\n\n con.execute(\"drop table if exists test_data_no_nulls_ipc;\")\n\n @pytest.mark.parametrize('method', [\"rows\", \"columnar\"])\n def test_load_table_text_no_encoding_no_nulls(self, con, method):\n \"\"\"\n Demonstrate that data can be loaded as text encoding none,\n assuming that user creates the table beforehand/inserting to\n pre-existing table\n \"\"\"\n\n con.execute(\"drop table if exists test_text_no_encoding\")\n\n con.execute(\"\"\"create table test_text_no_encoding (\n idx integer,\n text_ text encoding none\n )\"\"\")\n\n # reset_index adds a column to sort by, since results not guaranteed\n # to return in sorted order from OmniSci\n df_in = _tests_table_no_nulls(10000)\n df_test = df_in[\"text_\"].reset_index()\n df_test.columns = [\"idx\", \"text_\"]\n\n con.load_table(\"test_text_no_encoding\", df_test, method=method)\n\n df_out = pd.read_sql(\"\"\"select\n *\n from test_text_no_encoding\n order by idx\"\"\",\n con)\n\n assert pd.DataFrame.equals(df_test, df_out)\n\n con.execute(\"drop table if exists test_text_no_encoding\")\n\n @pytest.mark.parametrize('method', [\"rows\"])\n def test_load_table_geospatial_no_nulls(self, con, method):\n \"\"\"\n Demonstrate that geospatial data can be loaded,\n assuming that user creates the table beforehand/inserting to\n pre-existing table\n \"\"\"\n con.execute(\"drop table if exists test_geospatial_no_nulls\")\n\n con.execute(\"\"\"create table test_geospatial_no_nulls (\n tinyint_ tinyint,\n smallint_ smallint,\n int_ integer,\n bigint_ bigint,\n float_ float,\n double_ double,\n bool_ boolean,\n date_ date,\n datetime_ timestamp,\n time_ time,\n text_ text encoding dict(32),\n point_ point,\n line_ linestring,\n mpoly_ multipolygon,\n poly_ polygon\n )\"\"\")\n\n df_in = _tests_table_no_nulls(10000)\n con.load_table(\"test_geospatial_no_nulls\", df_in, method='rows')\n\n df_out = pd.read_sql(\"\"\"select\n *\n from test_geospatial_no_nulls\"\"\", con)\n\n # sort tables to ensure data in same order before compare\n # need to sort by all the columns in case of ties\n df_in.sort_values(by=['tinyint_',\n 'smallint_',\n 'int_',\n 'bigint_'], inplace=True)\n df_in.reset_index(drop=True, inplace=True)\n\n df_out.sort_values(by=['tinyint_',\n 'smallint_',\n 'int_',\n 'bigint_'], inplace=True)\n df_out.reset_index(drop=True, 
inplace=True)\n\n # pymapd won't necessarily return exact dtype as input using execute()\n # and pd.read_sql() since transport is rows of tuples\n # test that results are the same when dtypes aligned\n assert pd.DataFrame.equals(df_in[\"tinyint_\"],\n df_out[\"tinyint_\"].astype('int8'))\n\n assert pd.DataFrame.equals(df_in[\"smallint_\"],\n df_out[\"smallint_\"].astype('int16'))\n\n assert pd.DataFrame.equals(df_in[\"int_\"],\n df_out[\"int_\"].astype('int32'))\n\n assert pd.DataFrame.equals(df_in[\"bigint_\"], df_out[\"bigint_\"])\n assert all(np.isclose(df_in[\"float_\"], df_out[\"float_\"]))\n assert all(np.isclose(df_in[\"double_\"], df_out[\"double_\"]))\n assert pd.DataFrame.equals(df_in[\"bool_\"], df_out[\"bool_\"].astype('bool')) # noqa\n assert pd.DataFrame.equals(df_in[\"date_\"], df_out[\"date_\"])\n assert pd.DataFrame.equals(df_in[\"datetime_\"], df_out[\"datetime_\"])\n assert pd.DataFrame.equals(df_in[\"time_\"], df_out[\"time_\"])\n assert pd.DataFrame.equals(df_in[\"text_\"], df_out[\"text_\"])\n\n # convert geospatial data to Shapely objects to prove their equality\n point_in = [wkt.loads(x) for x in df_in[\"point_\"]]\n point_out = [wkt.loads(x) for x in df_out[\"point_\"]]\n assert all([x.equals_exact(y, 0.000001) for x, y\n in zip(point_in, point_out)])\n\n line_in = [wkt.loads(x) for x in df_in[\"line_\"]]\n line_out = [wkt.loads(x) for x in df_out[\"line_\"]]\n assert all([x.equals_exact(y, 0.000001) for x, y\n in zip(line_in, line_out)])\n\n mpoly_in = [wkt.loads(x) for x in df_in[\"mpoly_\"]]\n mpoly_out = [wkt.loads(x) for x in df_out[\"mpoly_\"]]\n assert all([x.equals_exact(y, 0.000001) for x, y\n in zip(mpoly_in, mpoly_out)])\n\n # TODO: tol only passes at 0.011, whereas others pass at much tighter\n # Figure out why\n poly_in = [wkt.loads(x) for x in df_in[\"poly_\"]]\n poly_out = [wkt.loads(x) for x in df_out[\"poly_\"]]\n assert all([x.equals_exact(y, 0.011) for x, y\n in zip(poly_in, poly_out)])\n\n con.execute(\"drop table if exists test_geospatial_no_nulls\")\n"
] |
[
[
"pandas.DataFrame.equals",
"pandas.read_sql",
"numpy.isclose"
]
] |
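
These tests compare round-tripped frames column by column: `pd.DataFrame.equals` for columns that should match exactly once the dtype is cast back, and `np.isclose` for float columns where transport may introduce tiny differences. A minimal sketch of that split comparison on toy frames, with no database connection involved (the simulated round trip is an assumption for illustration):

```python
import numpy as np
import pandas as pd

df_in = pd.DataFrame({"int_": np.array([1, 2, 3], dtype="int32"),
                      "float_": [0.1, 0.2, 0.3]})
# Simulate a round trip that widens the ints and perturbs the floats slightly.
df_out = pd.DataFrame({"int_": [1, 2, 3],
                       "float_": [0.1, 0.2, 0.30000000001]})

# Exact match after casting the dtype back to what was sent.
assert pd.DataFrame.equals(df_in["int_"], df_out["int_"].astype("int32"))
# Tolerant match for floating point columns.
assert all(np.isclose(df_in["float_"], df_out["float_"]))
```
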
bbidong/enas
|
[
"759d081ae73ac0a971aa69f51b67a4d78fec6b03"
] |
[
"src/cifar10/main.py"
] |
[
"#-*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport cPickle as pickle\nimport shutil\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom src import utils\nfrom src.utils import Logger\nfrom src.utils import DEFINE_boolean\nfrom src.utils import DEFINE_float\nfrom src.utils import DEFINE_integer\nfrom src.utils import DEFINE_string\nfrom src.utils import print_user_flags\n\nfrom src.cifar10.data_utils import read_data\nfrom src.cifar10.general_controller import GeneralController\nfrom src.cifar10.general_child import GeneralChild\nfrom src.cifar10.micro_controller import MicroController\nfrom src.cifar10.micro_child import MicroChild\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nDEFINE_boolean(\"reset_output_dir\", False, \"Delete output_dir if exists.\")\nDEFINE_string(\"data_path\", \"\", \"\")\nDEFINE_string(\"output_dir\", \"\", \"\")\nDEFINE_string(\"data_format\", \"NHWC\", \"'NHWC' or 'NCWH'\")\nDEFINE_string(\"search_for\", None, \"Must be [macro|micro]\")\n\nDEFINE_integer(\"batch_size\", 32, \"\")\n\nDEFINE_integer(\"num_epochs\", 300, \"\")\nDEFINE_integer(\"child_lr_dec_every\", 100, \"\")\nDEFINE_integer(\"child_num_layers\", 5, \"\")\nDEFINE_integer(\"child_num_cells\", 5, \"\")\nDEFINE_integer(\"child_filter_size\", 5, \"\")\nDEFINE_integer(\"child_out_filters\", 48, \"\")\nDEFINE_integer(\"child_out_filters_scale\", 1, \"\")\nDEFINE_integer(\"child_num_branches\", 4, \"\")\nDEFINE_integer(\"child_num_aggregate\", None, \"\")\nDEFINE_integer(\"child_num_replicas\", 1, \"\")\nDEFINE_integer(\"child_block_size\", 3, \"\")\nDEFINE_integer(\"child_lr_T_0\", None, \"for lr schedule\")\nDEFINE_integer(\"child_lr_T_mul\", None, \"for lr schedule\")\nDEFINE_integer(\"child_cutout_size\", None, \"CutOut size\")\nDEFINE_float(\"child_grad_bound\", 5.0, \"Gradient clipping\")\nDEFINE_float(\"child_lr\", 0.1, \"\")\nDEFINE_float(\"child_lr_dec_rate\", 0.1, \"\")\nDEFINE_float(\"child_keep_prob\", 0.5, \"\")\nDEFINE_float(\"child_drop_path_keep_prob\", 1.0, \"minimum drop_path_keep_prob\")\nDEFINE_float(\"child_l2_reg\", 1e-4, \"\")\nDEFINE_float(\"child_lr_max\", None, \"for lr schedule\")\nDEFINE_float(\"child_lr_min\", None, \"for lr schedule\")\nDEFINE_string(\"child_skip_pattern\", None, \"Must be ['dense', None]\")\nDEFINE_string(\"child_fixed_arc\", None, \"\")\nDEFINE_boolean(\"child_use_aux_heads\", False, \"Should we use an aux head\")\nDEFINE_boolean(\"child_sync_replicas\", False, \"To sync or not to sync.\")\nDEFINE_boolean(\"child_lr_cosine\", False, \"Use cosine lr schedule\")\n\nDEFINE_float(\"controller_lr\", 1e-3, \"\")\nDEFINE_float(\"controller_lr_dec_rate\", 1.0, \"\")\nDEFINE_float(\"controller_keep_prob\", 0.5, \"\")\nDEFINE_float(\"controller_l2_reg\", 0.0, \"\")\nDEFINE_float(\"controller_bl_dec\", 0.99, \"\")\nDEFINE_float(\"controller_tanh_constant\", None, \"\")\nDEFINE_float(\"controller_op_tanh_reduce\", 1.0, \"\")\nDEFINE_float(\"controller_temperature\", None, \"\")\nDEFINE_float(\"controller_entropy_weight\", None, \"\")\nDEFINE_float(\"controller_skip_target\", 0.8, \"\")\nDEFINE_float(\"controller_skip_weight\", 0.0, \"\")\nDEFINE_integer(\"controller_num_aggregate\", 1, \"\")\nDEFINE_integer(\"controller_num_replicas\", 1, \"\")\nDEFINE_integer(\"controller_train_steps\", 50, \"\")\nDEFINE_integer(\"controller_forwards_limit\", 2, \"\")\nDEFINE_integer(\"controller_train_every\", 2,\n \"train the controller after this 
number of epochs\")\nDEFINE_boolean(\"controller_search_whole_channels\", False, \"\")\nDEFINE_boolean(\"controller_sync_replicas\", False, \"To sync or not to sync.\")\nDEFINE_boolean(\"controller_training\", True, \"\")\nDEFINE_boolean(\"controller_use_critic\", False, \"\")\n\nDEFINE_integer(\"log_every\", 50, \"How many steps to log\")\nDEFINE_integer(\"eval_every_epochs\", 1, \"How many epochs to eval\")\n\ndef get_ops(images, labels):\n \"\"\"\n Args:\n images: dict with keys {\"train\", \"valid\", \"test\"}.\n labels: dict with keys {\"train\", \"valid\", \"test\"}.\n \"\"\"\n\n assert FLAGS.search_for is not None, \"Please specify --search_for\"\n\n if FLAGS.search_for == \"micro\":\n ControllerClass = MicroController\n ChildClass = MicroChild\n else:\n ControllerClass = GeneralController\n ChildClass = GeneralChild\n\n child_model = ChildClass(\n images,\n labels,\n use_aux_heads=FLAGS.child_use_aux_heads,\n cutout_size=FLAGS.child_cutout_size,\n whole_channels=FLAGS.controller_search_whole_channels,\n num_layers=FLAGS.child_num_layers,\n num_cells=FLAGS.child_num_cells,\n num_branches=FLAGS.child_num_branches,\n fixed_arc=FLAGS.child_fixed_arc,\n out_filters_scale=FLAGS.child_out_filters_scale,\n out_filters=FLAGS.child_out_filters,\n keep_prob=FLAGS.child_keep_prob,\n drop_path_keep_prob=FLAGS.child_drop_path_keep_prob,\n num_epochs=FLAGS.num_epochs,\n l2_reg=FLAGS.child_l2_reg,\n data_format=FLAGS.data_format,\n batch_size=FLAGS.batch_size,\n clip_mode=\"norm\",\n grad_bound=FLAGS.child_grad_bound,\n lr_init=FLAGS.child_lr,\n lr_dec_every=FLAGS.child_lr_dec_every,\n lr_dec_rate=FLAGS.child_lr_dec_rate,\n lr_cosine=FLAGS.child_lr_cosine,\n lr_max=FLAGS.child_lr_max,\n lr_min=FLAGS.child_lr_min,\n lr_T_0=FLAGS.child_lr_T_0,\n lr_T_mul=FLAGS.child_lr_T_mul,\n optim_algo=\"momentum\",\n sync_replicas=FLAGS.child_sync_replicas,\n num_aggregate=FLAGS.child_num_aggregate,\n num_replicas=FLAGS.child_num_replicas,\n )\n\n if FLAGS.child_fixed_arc is None:\n controller_model = ControllerClass(\n search_for=FLAGS.search_for,\n search_whole_channels=FLAGS.controller_search_whole_channels,\n skip_target=FLAGS.controller_skip_target,\n skip_weight=FLAGS.controller_skip_weight,\n num_cells=FLAGS.child_num_cells,\n num_layers=FLAGS.child_num_layers,\n num_branches=FLAGS.child_num_branches,\n out_filters=FLAGS.child_out_filters,\n lstm_size=64,\n lstm_num_layers=1,\n lstm_keep_prob=1.0,\n tanh_constant=FLAGS.controller_tanh_constant,\n op_tanh_reduce=FLAGS.controller_op_tanh_reduce,\n temperature=FLAGS.controller_temperature,\n lr_init=FLAGS.controller_lr,\n lr_dec_start=0,\n lr_dec_every=1000000, # never decrease learning rate\n l2_reg=FLAGS.controller_l2_reg,\n entropy_weight=FLAGS.controller_entropy_weight,\n bl_dec=FLAGS.controller_bl_dec,\n use_critic=FLAGS.controller_use_critic,\n optim_algo=\"adam\",\n sync_replicas=FLAGS.controller_sync_replicas,\n num_aggregate=FLAGS.controller_num_aggregate,\n num_replicas=FLAGS.controller_num_replicas)\n\n child_model.connect_controller(controller_model) # 建立MicroChild模型, 计算loss\n controller_model.build_trainer(child_model) # 计算MicroController模型的loss\n\n controller_ops = {\n \"train_step\": controller_model.train_step,\n \"loss\": controller_model.loss,\n \"train_op\": controller_model.train_op,\n \"lr\": controller_model.lr,\n \"grad_norm\": controller_model.grad_norm,\n \"valid_acc\": controller_model.valid_acc,\n \"optimizer\": controller_model.optimizer,\n \"baseline\": controller_model.baseline,\n \"entropy\": controller_model.sample_entropy,\n 
\"sample_arc\": controller_model.sample_arc,\n \"skip_rate\": controller_model.skip_rate,\n }\n else:\n assert not FLAGS.controller_training, (\n \"--child_fixed_arc is given, cannot train controller\")\n child_model.connect_controller(None)\n controller_ops = None\n\n child_ops = {\n \"global_step\": child_model.global_step,\n \"loss\": child_model.loss,\n \"train_op\": child_model.train_op,\n \"lr\": child_model.lr,\n \"grad_norm\": child_model.grad_norm,\n \"train_acc\": child_model.train_acc,\n \"optimizer\": child_model.optimizer,\n \"num_train_batches\": child_model.num_train_batches,\n }\n\n ops = {\n \"child\": child_ops,\n \"controller\": controller_ops,\n \"eval_every\": child_model.num_train_batches * FLAGS.eval_every_epochs,\n \"eval_func\": child_model.eval_once,\n \"num_train_batches\": child_model.num_train_batches,\n }\n\n return ops\n\n\ndef train():\n if FLAGS.child_fixed_arc is None:\n images, labels = read_data(FLAGS.data_path)\n else:\n images, labels = read_data(FLAGS.data_path, num_valids=0)\n\n g = tf.Graph()\n with g.as_default():\n ops = get_ops(images, labels)\n child_ops = ops[\"child\"]\n controller_ops = ops[\"controller\"]\n\n saver = tf.train.Saver(max_to_keep=2)\n checkpoint_saver_hook = tf.train.CheckpointSaverHook(\n FLAGS.output_dir, save_steps=child_ops[\"num_train_batches\"], saver=saver)\n\n hooks = [checkpoint_saver_hook]\n if FLAGS.child_sync_replicas:\n sync_replicas_hook = child_ops[\"optimizer\"].make_session_run_hook(True)\n hooks.append(sync_replicas_hook)\n if FLAGS.controller_training and FLAGS.controller_sync_replicas:\n sync_replicas_hook = controller_ops[\"optimizer\"].make_session_run_hook(True)\n hooks.append(sync_replicas_hook)\n\n print(\"-\" * 80)\n print(\"Starting session\")\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.train.SingularMonitoredSession(\n config=config, hooks=hooks, checkpoint_dir=FLAGS.output_dir) as sess:\n start_time = time.time()\n while True:\n run_ops = [\n child_ops[\"loss\"],\n child_ops[\"lr\"],\n child_ops[\"grad_norm\"],\n child_ops[\"train_acc\"],\n child_ops[\"train_op\"],\n ]\n loss, lr, gn, tr_acc, _ = sess.run(run_ops)\n global_step = sess.run(child_ops[\"global_step\"])\n\n if FLAGS.child_sync_replicas:\n actual_step = global_step * FLAGS.num_aggregate\n else:\n actual_step = global_step\n epoch = actual_step // ops[\"num_train_batches\"]\n curr_time = time.time()\n if global_step % FLAGS.log_every == 0:\n log_string = \"\"\n log_string += \"epoch={:<6d}\".format(epoch)\n log_string += \"ch_step={:<6d}\".format(global_step)\n log_string += \" loss={:<8.6f}\".format(loss)\n log_string += \" lr={:<8.4f}\".format(lr)\n log_string += \" |g|={:<8.4f}\".format(gn)\n log_string += \" tr_acc={:<3d}/{:>3d}\".format(\n tr_acc, FLAGS.batch_size)\n log_string += \" mins={:<10.2f}\".format(\n float(curr_time - start_time) / 60)\n print(log_string)\n \n if actual_step % ops[\"eval_every\"] == 0:\n if (FLAGS.controller_training and\n epoch % FLAGS.controller_train_every == 0):\n print(\"Epoch {}: Training controller\".format(epoch))\n for ct_step in range(FLAGS.controller_train_steps *\n FLAGS.controller_num_aggregate):\n run_ops = [\n controller_ops[\"loss\"],\n controller_ops[\"entropy\"],\n controller_ops[\"lr\"],\n controller_ops[\"grad_norm\"],\n controller_ops[\"valid_acc\"],\n controller_ops[\"baseline\"],\n controller_ops[\"skip_rate\"],\n controller_ops[\"train_op\"],\n ]\n loss, entropy, lr, gn, val_acc, bl, skip, _ = sess.run(run_ops)\n controller_step = 
sess.run(controller_ops[\"train_step\"])\n\n if ct_step % FLAGS.log_every == 0:\n curr_time = time.time()\n log_string = \"\"\n log_string += \"ctrl_step={:<6d}\".format(controller_step)\n log_string += \" loss={:<7.3f}\".format(loss)\n log_string += \" ent={:<5.2f}\".format(entropy)\n log_string += \" lr={:<6.4f}\".format(lr)\n log_string += \" |g|={:<8.4f}\".format(gn)\n log_string += \" acc={:<6.4f}\".format(val_acc)\n log_string += \" bl={:<5.2f}\".format(bl)\n log_string += \" mins={:<.2f}\".format(\n float(curr_time - start_time) / 60)\n print(log_string)\n\n print(\"Here are 10 architectures\")\n for _ in range(10):\n arc, acc = sess.run([\n controller_ops[\"sample_arc\"],\n controller_ops[\"valid_acc\"],\n ])\n if FLAGS.search_for == \"micro\":\n normal_arc, reduce_arc = arc\n print(np.reshape(normal_arc, [-1]))\n print(np.reshape(reduce_arc, [-1]))\n else:\n start = 0\n for layer_id in range(FLAGS.child_num_layers):\n if FLAGS.controller_search_whole_channels:\n end = start + 1 + layer_id\n else:\n end = start + 2 * FLAGS.child_num_branches + layer_id\n print(np.reshape(arc[start: end], [-1]))\n start = end\n print(\"val_acc={:<6.4f}\".format(acc))\n print(\"-\" * 80)\n\n print(\"Epoch {}: Eval\".format(epoch))\n if FLAGS.child_fixed_arc is None:\n ops[\"eval_func\"](sess, \"valid\")\n ops[\"eval_func\"](sess, \"test\")\n\n if epoch >= FLAGS.num_epochs:\n break\n\n\ndef main(_):\n print(\"-\" * 80)\n if not os.path.isdir(FLAGS.output_dir):\n print(\"Path {} does not exist. Creating.\".format(FLAGS.output_dir))\n os.makedirs(FLAGS.output_dir)\n elif FLAGS.reset_output_dir:\n print(\"Path {} exists. Remove and remake.\".format(FLAGS.output_dir))\n shutil.rmtree(FLAGS.output_dir)\n os.makedirs(FLAGS.output_dir)\n\n print(\"-\" * 80)\n log_file = os.path.join(FLAGS.output_dir, \"stdout\")\n print(\"Logging to {}\".format(log_file))\n sys.stdout = Logger(log_file)\n\n utils.print_user_flags()\n train()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.train.SingularMonitoredSession",
"numpy.reshape",
"tensorflow.train.CheckpointSaverHook",
"tensorflow.ConfigProto",
"tensorflow.train.Saver",
"tensorflow.app.run"
]
] |
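The training script embedded in the row above alternates between updating the shared child network and the architecture controller. A minimal sketch of that schedule, assuming hypothetical train_child_step / train_controller_step / evaluate callables in place of the original sess.run(...) calls:

    # Minimal sketch of the alternating child/controller schedule from the
    # train() loop above. The callables passed in are hypothetical stand-ins
    # for the sess.run(...) calls in the original script.
    def train_loop(num_epochs, num_train_batches, eval_every_epochs,
                   controller_train_every, controller_train_steps,
                   controller_num_aggregate,
                   train_child_step, train_controller_step, evaluate):
        eval_every = num_train_batches * eval_every_epochs
        step = 0
        while True:
            train_child_step()                      # one shared-weights (child) update
            step += 1
            epoch = step // num_train_batches
            if step % eval_every == 0:
                if epoch % controller_train_every == 0:
                    # the controller is only updated between child epochs
                    for _ in range(controller_train_steps * controller_num_aggregate):
                        train_controller_step()     # one policy-gradient update
                evaluate(epoch)                     # valid/test accuracy of the current child
            if epoch >= num_epochs:
                break

This mirrors the guards in the original loop (actual_step % eval_every and epoch % controller_train_every); it is a sketch of the control flow only, not of the TensorFlow graph construction.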
ian-double-u/solar
|
[
"a455c901947df11202d70235aca6968f2e764fcb"
] |
[
"surfrad_data_prep.py"
] |
[
"import pandas as pd\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\n\npath_list = Path('C:\\\\Users\\\\Admin\\\\Desktop\\\\').glob('**/*.dat')\npaths = []\n\nfor path in path_list:\n paths.append(str(path))\n \nframes = [] # hold df for each .dat file\n\ncolumn_names = ['year', 'jday', 'month', 'day', 'hour', 'min', 'dt', 'zen', \n 'dw_solar', 'dw_solar_QC', \n 'uw_solar', 'uw_solar_QC',\n 'direct_n', 'direct_n_QC',\n 'diffuse', 'diffuse_QC',\n 'dw_ir', 'dw_ir_QC',\n 'dw_casetemp', 'dw_casetemp_QC',\n 'dw_dometemp', 'dw_dometemp_QC',\n 'uw_ir', 'uw_ir_QC',\n 'uw_casetemp', 'uw_casetemp_QC',\n 'uw_dometemp', 'uw_dometemp_QC',\n 'uvb', 'uvb_QC',\n 'par', 'par_QC',\n 'netsolar', 'netsolar_QC',\n 'netir', 'netir_QC',\n 'totalnet', 'totalnet_QC',\n 'temp', 'temp_QC',\n 'rh', 'rh_QC',\n 'windspd', 'windspd_QC',\n 'winddir', 'winddir_QC',\n 'pressure', 'pressure_QC']\n\nfor path in paths:\n print(f'Start: load file {(paths.index(path)) + 1}/{len(paths)}')\n\n with open(path,'r') as f:\n df = pd.DataFrame(l.rstrip().split() for l in f)\n \n station_name = df[0][0]\n lat = df[0][1]\n lng = df[1][1]\n alt = df[2][1]\n \n s = df.shape[0]-2\n df = df.tail(s).reset_index(drop=True)\n df.columns = column_names\n \n df['station_name'] = station_name\n df['lat'] = lat\n df['lng'] = lng\n df['alt'] = alt\n \n df.drop(columns=['dw_solar', 'dw_solar_QC', \n 'uw_solar', 'uw_solar_QC',\n 'diffuse', 'diffuse_QC',\n 'dw_ir', 'dw_ir_QC',\n 'dw_casetemp', 'dw_casetemp_QC',\n 'dw_dometemp', 'dw_dometemp_QC',\n 'uw_ir', 'uw_ir_QC',\n 'uw_casetemp', 'uw_casetemp_QC',\n 'uw_dometemp', 'uw_dometemp_QC',\n 'uvb', 'uvb_QC',\n 'par', 'par_QC',\n 'netsolar', 'netsolar_QC',\n 'netir', 'netir_QC',\n 'totalnet', 'totalnet_QC',\n 'temp', 'temp_QC',\n 'rh', 'rh_QC',\n 'windspd', 'windspd_QC',\n 'winddir', 'winddir_QC',\n 'pressure', 'pressure_QC'], inplace=True)\n \n frames.append(df)\n print(f'End: load file {(paths.index(path)) + 1}/{len(paths)}')\n \nprint('Start: combine files.')\nresult = pd.concat(frames)\nresult.drop(result[result['direct_n_QC'] != 0].index, inplace=True) # data cleaning step\nresult.to_csv('surfrad_data.csv', index=False)\nprint('End: combine files. 
All .dat files now stored in surfrad_data.csv')\n\ndef return_geojson(lat,lng,increment):\n \"\"\"returns geojson box around lat and lng\"\"\"\n\n geojson_geometry = { # (lng,lat)\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n lng+increment,\n lat+increment\n ],\n [\n lng+increment,\n lat-increment\n ],\n [\n lng-increment,\n lat-increment\n ],\n [\n lng-increment,\n lat+increment\n ],\n [\n lng+increment,\n lat+increment\n ]\n ]\n ]\n }\n \n return geojson_geometry\n\nstations = ['Bondville', # station names\n 'Boulder', \n 'Desert Rock', \n 'Fort Peek', \n 'Goodwin Creek', \n 'Sioux Falls']\n\nlat_lng_s = [(40.5,-88.37), # station lat/lng in order of station names\n (40.13,-105.24), \n (36.624,-116.019), \n (48.31,-105.1), \n (34.25,-89.87), \n (43.73,-96.62)]\n\ndef clouds(geojson):\n \"\"\"gets cloud data from Planet API for daterange\"\"\"\n\n geojson_geometry = geojson # takes lng/lat\n \n geometry_filter = {\n \"type\": \"GeometryFilter\",\n \"field_name\": \"geometry\",\n \"config\": geojson_geometry\n }\n \n date_range_filter = { \n \"type\": \"DateRangeFilter\",\n \"field_name\": \"acquired\",\n \"config\": {\n \"gte\": \"2019-01-01T00:00:00.000Z\", # start date of image capture\n \"lte\": \"2019-01-02T00:00:00.000Z\" # end date of image capture\n }\n }\n \n combined_filter = {\n \"type\": \"AndFilter\",\n \"config\": [geometry_filter, date_range_filter]\n }\n \n os.environ['PL_API_KEY']='' # insert planet API key\n PLANET_API_KEY = os.getenv('PL_API_KEY')\n \n search_request = {\n \"item_types\": [\"PSScene4Band\"],\n \"filter\": combined_filter\n }\n \n search_result = \\\n requests.post(\n 'https://api.planet.com/data/v1/quick-search',\n auth=HTTPBasicAuth(PLANET_API_KEY, ''),\n json=search_request)\n \n cloud_date = [(feature['properties']['cloud_cover'],feature['properties']['acquired']) for feature in search_result.json()['features']]\n \n return cloud_date\n\ninformation = [{'station': stations[i], \n 'lat': lat_lng_s[i][0], \n 'lng': lat_lng_s[i][1], \n 'geojson': return_geojson(lat_lng_s[i][0],lat_lng_s[i][1],0.03663),\n 'clouds' : clouds(return_geojson(lat_lng_s[i][0],lat_lng_s[i][1],0.03663))} for i in range(0,len(stations))]\n\n\nrows = []\nfor i in information:\n for sat in i['clouds']:\n rows.append([i['station'],sat[0],sat[1]]) # [station, clouds, time]\n \ndf = pd.DataFrame(rows, columns=['Station', 'Clouds (0-1)', 'Time'])\ndf.to_csv('clouds.csv', index=False)\nprint('NOAA Station cloud data stored in clouds.csv')\n\nold_stations = ['Bondville', # station names\n 'Table', # Boulder\n 'Desert', # Desert Rock\n 'Fort', # Fort Peek\n 'Goodwin', # Goodwin Creek\n 'Sioux'] # Sioux Falls\n\nnew_stations = ['Bondville', # station names\n 'Boulder', \n 'Desert Rock', \n 'Fort Peek', \n 'Goodwin Creek', \n 'Sioux Falls'] \n\ndf1 = pd.read_csv('surfrad_data.csv')\ndf2 = pd.read_csv('clouds.csv')\n\ndf1['clouds'] = [np.nan]*df1.shape[0] # initalize column\n\n# replace old stations names in df1 with new ones\nfor i in old_stations:\n z = list(df1.loc[df1['station_name'] == i].index)\n old_i = old_stations.index(i)\n \n for j in z:\n df1.at[j, 'station_name'] = new_stations[old_i]\n\n\n# create jday column for cloud data\ndef get_month_days(month_number):\n \"\"\"month_number = 1 in January month_number = 12 in December\"\"\"\n month_days = [31,28,31,30,31,30,31,31,30,31,30,31]\n return month_days[month_number-1]\n \ndf2['jday'] = [i for i in range(0,df2.shape[0])] # initalize column\n\nfor i in range(0,df2.shape[0]): \n month = int(df2.loc[i]['Time'][6:7])\n day = 
int(df2.loc[i]['Time'][8:10])\n \n k = 0\n for j in range(1,month):\n k += get_month_days(j)\n \n df2.at[i, 'jday'] = (day + k)\n \n# add all same day photos to surfrad\nstation_dicts = [] # list of dictionaries for each station with cloud data\n \nfor i in new_stations: \n cloud_list = [] # list of dicts, one for each jday\n\n jj2 = list(df2.loc[df2['Station'] == i].index)\n\n for j in range(df2['jday'].min(),df2['jday'].max()):\n \n jj1 = list(df2.loc[df2['jday'] == j].index)\n \n jj3 = list(set(jj1) & set(jj2))\n \n try:\n day_dict = {str(j): [df2.loc[k]['Clouds (0-1)'] for k in jj3]}\n cloud_list.append(day_dict)\n \n except:\n pass\n \n cloud_dict = {'station': i, 'clouds': cloud_list}\n\n station_dicts.append(cloud_dict)\n\nmiss = []\nfor i in range(0,df1.shape[0]):\n station_index = new_stations.index(df1.loc[i]['station_name'])\n _jday = df1.loc[i]['jday']\n \n cloud_list = station_dicts[station_index]['clouds'][_jday-df2['jday'].min()][str(_jday)] \n \n if len(cloud_list) != 0:\n df1.at[i, 'clouds'] = np.mean(cloud_list)\n \n else:\n miss.append(i)\n df1.at[i, 'clouds'] = -9999.9\n\n# deal with missing values\ndf3 = df1[df1['clouds'] != -9999.9]\nmean_c = np.mean(df3.clouds)\nfor i in miss:\n df1.at[i, 'clouds'] = mean_c\n\n# save final file\ndf1.to_csv('surfrad_data.csv', index=False)\nprint('Final file prepared. Find surfrad_data.csv')\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"numpy.mean",
"pandas.DataFrame"
]
] |
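The data-prep script in the row above joins Planet cloud-cover observations to SURFRAD rows by computing a day-of-year ("jday") key from each acquisition timestamp using a hard-coded month-length table. A minimal sketch of the same computation with a hypothetical jday_from_iso helper built on the standard library (leap years ignored, as in the original table):

    from datetime import date

    # Sketch of the jday (day-of-year) key used to align cloud observations
    # with SURFRAD rows; timetuple().tm_yday reproduces the original
    # month-length sum for non-leap years.
    def jday_from_iso(timestamp):
        """timestamp like '2019-01-02T00:00:00.000Z' -> day of year."""
        year = int(timestamp[0:4])
        month = int(timestamp[5:7])
        day = int(timestamp[8:10])
        return date(year, month, day).timetuple().tm_yday

    assert jday_from_iso('2019-01-02T00:00:00.000Z') == 2
    assert jday_from_iso('2019-03-01T00:00:00.000Z') == 60  # 31 + 28 + 1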
gorilux/incubator-mxnet
|
[
"8ca4f5088072a9b0c50562a476d9892c83d0af48"
] |
[
"python/mxnet/ndarray/numpy/_op.py"
] |
[
"# pylint: disable=C0302\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=unused-argument\n\"\"\"Namespace for numpy operators used in Gluon dispatched by F=ndarray.\"\"\"\n\nimport numpy as _np\nfrom ...base import numeric_types, integer_types\nfrom ...util import _sanity_check_params, set_module\nfrom ...util import wrap_np_unary_func, wrap_np_binary_func\nfrom ...util import is_np_default_dtype\nfrom ...context import current_context\nfrom . import _internal as _npi\nfrom . import _api_internal\nfrom ..ndarray import NDArray\n\n\n__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',\n 'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod',\n 'power', 'bitwise_not', 'trace', 'transpose', 'copy', 'moveaxis', 'reshape', 'dot',\n 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert', 'fabs',\n 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',\n 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',\n 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'all', 'any', 'sort',\n 'tensordot', 'eye', 'linspace', 'median', 'tril_indices', 'triu_indices_from', 'triu_indices',\n 'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',\n 'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',\n 'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'around', 'round', 'round_', 'flatnonzero',\n 'max', 'min', 'amax', 'amin', 'logical_and', 'logical_or', 'logical_xor',\n 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',\n 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr',\n 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'gcd',\n 'tril', 'triu', 'tri', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'cross', 'kron',\n 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum',\n 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'interp',\n 'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',\n 'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',\n 'where', 'bincount', 'rollaxis', 'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'diag', 'diagonal',\n 'positive', 'logaddexp', 'floor_divide', 'bitwise_left_shift', 'bitwise_right_shift']\n\n\n@set_module('mxnet.ndarray.numpy')\ndef shape(a):\n \"\"\"\n Return the shape of an array.\n\n 
Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n shape : tuple of ints\n The elements of the shape tuple give the lengths of the\n corresponding array dimensions.\n\n See Also\n --------\n ndarray.shape : Equivalent array method.\n\n Examples\n --------\n >>> np.shape(np.eye(3))\n (3, 3)\n >>> np.shape([[1, 2]])\n (1, 2)\n >>> np.shape([0])\n (1,)\n >>> np.shape(0)\n ()\n \"\"\"\n return a.shape\n\n\n@set_module('mxnet.ndarray.numpy')\ndef zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with zeros.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that this behavior is different from NumPy's `zeros` function where `float64`\n is the default value, here we can set 'float32' or 'float64' as your default dtype,\n because `float32` is considered as the default data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of zeros with the given shape, dtype, and ctx.\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n # If the following code (4 lines) regarding ctx is removed\n # np.zeros((3, 4)) can be as fast as 4.96 us\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.zeros(shape, dtype, ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ones(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with ones.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that this behavior is different from NumPy's `ones` function where\n `float64` is the default value.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of ones with the given shape, dtype, and ctx.\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.ones(shape, dtype, ctx)\n\n\n# pylint: disable=too-many-arguments, redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef zeros_like(a, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n 
----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.zeros_like(x)\n array([[0., 0., 0.],\n [0., 0., 0.]])\n >>> np.zeros_like(x, int)\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.zeros_like(y)\n array([0., 0., 0.], dtype=float64)\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n return full_like(a, 0, dtype=dtype, order=order, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ones_like(a, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n ones : Return a new array setting values to one.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.ones_like(x)\n array([[1., 1., 1.],\n [1., 1., 1.]])\n >>> np.ones_like(x, int)\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.ones_like(y)\n array([1., 1., 1.], dtype=float64)\n \"\"\"\n return full_like(a, 1, dtype=dtype, order=order, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef broadcast_to(array, shape):\n \"\"\"\n Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : ndarray or scalar\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n MXNetError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n \"\"\"\n if _np.isscalar(array):\n return full(shape, array)\n return _api_internal.broadcast_to(array, shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar or ndarray\n Fill value.\n dtype : data-type, optional\n The desired data-type for the array. The default, `None`, means\n `np.array(fill_value).dtype`.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n If `fill_value` is an ndarray, out will have the same context as `fill_value`\n regardless of the provided `ctx`.\n\n Notes\n -----\n This function differs from the original `numpy.full\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in\n the following way(s):\n - Have an additional `ctx` argument to specify the device\n - Have an additional `out` argument\n - Currently does not support `order` selection\n\n See Also\n --------\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> np.full((2, 2), 10)\n array([[10., 10.],\n [10., 10.]])\n >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))\n array([[2, 2],\n [2, 2]], dtype=int32)\n\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if isinstance(fill_value, NDArray):\n if dtype is None:\n ret = broadcast_to(fill_value, shape)\n else:\n ret = broadcast_to(fill_value, shape).astype(dtype)\n return ret\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if isinstance(fill_value, bool):\n fill_value = int(fill_value)\n dtype = _np.bool if dtype is None else dtype\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.full(shape, dtype, fill_value, ctx, out)\n# pylint: enable=too-many-arguments, redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6, dtype=int)\n >>> np.full_like(x, 1)\n array([1, 1, 1, 1, 1, 1], dtype=int64)\n >>> np.full_like(x, 0.1)\n array([0, 0, 0, 0, 0, 0], dtype=int64)\n >>> np.full_like(x, 0.1, dtype=np.float64)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)\n >>> np.full_like(x, np.nan, dtype=np.double)\n array([nan, nan, nan, nan, nan, nan], dtype=float64)\n >>> y = np.arange(6, dtype=np.float32)\n >>> np.full_like(y, 0.1)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if isinstance(fill_value, bool):\n fill_value = int(fill_value)\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.full_like(a, fill_value, dtype, ctx, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621\n \"\"\"\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n prototype : ndarray\n The shape and data-type of `prototype` define these same attributes\n of the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n subok : {False}, optional\n If True, then the newly created array will use the sub-class\n type of 'a', otherwise it will be a base-class array. Defaults\n to False.\n (Only support False at this moment)\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n (Not supported at this moment)\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `prototype`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. 
It may be marginally faster than\n the functions that do set the array values.\n\n Examples\n --------\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> np.empty_like(a)\n array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized\n [ 4567052944, -5764607523034234880, 844424930131968]])\n >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])\n >>> np.empty_like(a)\n array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized\n [2.0e-323, 2.5e-323, 3.0e-323]])\n \"\"\"\n dtype_list = {_np.float16: 'float16', _np.float32: 'float32', _np.float64: 'float64',\n float: 'float64', _np.int8: 'int8', _np.int16: 'int16', _np.int32: 'int32',\n _np.int64: 'int64', int:'int64', _np.uint8: 'uint8', _np.uint16: 'uint16',\n _np.uint32: 'uint32', _np.uint64: 'uint64', _np.bool: 'bool',\n _np.bool_: 'bool_', bool: 'bool', None: 'None'}\n if order != 'C':\n raise NotImplementedError(\"Only support C-order at this moment\")\n if subok:\n raise NotImplementedError(\"Creating array by using sub-class is not supported at this moment\")\n if shape is not None:\n raise NotImplementedError(\"Assigning new shape is not supported at this moment\")\n try:\n dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]\n except:\n raise NotImplementedError(\"Do not support this dtype at this moment\")\n return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef arange(start, stop=None, step=1, dtype=None, ctx=None):\n \"\"\"Return evenly spaced values within a given interval.\n\n Values are generated within the half-open interval ``[start, stop)``\n (in other words, the interval including `start` but excluding `stop`).\n For integer arguments the function is equivalent to the Python built-in\n `range` function, but returns an ndarray rather than a list.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default\n start value is 0.\n stop : number\n End of interval. The interval does not include this value, except\n in some cases where `step` is not an integer and floating point\n round-off affects the length of `out`.\n step : number, optional\n Spacing between values. For any output `out`, this is the distance\n between two adjacent values, ``out[i+1] - out[i]``. The default\n step size is 1. If `step` is specified as a position argument,\n `start` must also be given.\n dtype : dtype\n The type of the output array.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n\n Returns\n -------\n arange : ndarray\n Array of evenly spaced values.\n\n For floating point arguments, the length of the result is\n ``ceil((stop - start)/step)``. 
Because of floating point overflow,\n this rule may result in the last element of `out` being greater\n than `stop`.\n \"\"\"\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if stop is None:\n stop = start\n start = 0\n if step is None:\n step = 1\n if start is None and stop is None:\n raise ValueError('start and stop cannot be both None')\n if step == 0:\n raise ZeroDivisionError('step cannot be 0')\n return _api_internal.arange(start, stop, step, dtype, ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef identity(n, dtype=None, ctx=None):\n \"\"\"\n Return the identity array.\n\n The identity array is a square array with ones on\n the main diagonal.\n\n Parameters\n ----------\n n : int\n Number of rows (and columns) in `n` x `n` output.\n dtype : data-type, optional\n Data-type of the output.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n `n` x `n` array with its main diagonal set to one,\n and all other elements 0.\n\n Examples\n --------\n >>> np.identity(3)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n \"\"\"\n if not isinstance(n, int):\n raise TypeError(\"Input 'n' should be an integer\")\n if n < 0:\n raise ValueError(\"Input 'n' cannot be negative\")\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n shape = (n, n) # pylint: disable=redefined-outer-name\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.identity(shape, dtype, ctx)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef take(a, indices, axis=None, mode='raise', out=None):\n r\"\"\"\n Take elements from an array along an axis.\n\n When axis is not None, this function does the same thing as \"fancy\"\n indexing (indexing arrays using arrays); however, it can be easier to use\n if you need elements along a given axis. A call such as\n ``np.take(arr, indices, axis=3)`` is equivalent to\n ``arr[:,:,:,indices,...]``.\n\n Explained without fancy indexing, this is equivalent to the following use\n of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of\n indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n Nj = indices.shape\n for ii in ndindex(Ni):\n for jj in ndindex(Nj):\n for kk in ndindex(Nk):\n out[ii + jj + kk] = a[ii + (indices[jj],) + kk]\n\n Parameters\n ----------\n a : ndarray\n The source array.\n indices : ndarray\n The indices of the values to extract. Also allow scalars for indices.\n axis : int, optional\n The axis over which to select values. By default, the flattened\n input array is used.\n out : ndarray, optional\n If provided, the result will be placed in this array. It should\n be of the appropriate shape and dtype.\n mode : {'clip', 'wrap'}, optional\n Specifies how out-of-bounds indices will behave.\n\n * 'clip' -- clip to the range (default)\n * 'wrap' -- wrap around\n\n 'clip' mode means that all indices that are too large are replaced\n by the index that addresses the last element along that axis. 
Note\n that this disables indexing with negative numbers.\n\n Returns\n -------\n out : ndarray\n The returned array has the same type as `a`.\n\n Notes\n -----\n\n This function differs from the original `numpy.take\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in\n the following way(s):\n\n - Only ndarray or scalar ndarray is accepted as valid input.\n\n Examples\n --------\n >>> a = np.array([4, 3, 5, 7, 6, 8])\n >>> indices = np.array([0, 1, 4])\n >>> np.take(a, indices)\n array([4., 3., 6.])\n\n In this example for `a` is an ndarray, \"fancy\" indexing can be used.\n\n >>> a[indices]\n array([4., 3., 6.])\n\n If `indices` is not one dimensional, the output also has these dimensions.\n\n >>> np.take(a, np.array([[0, 1], [2, 3]]))\n array([[4., 3.],\n [5., 7.]])\n \"\"\"\n if mode not in ('wrap', 'clip', 'raise'):\n raise NotImplementedError(\n \"function take does not support mode '{}'\".format(mode))\n if axis is None:\n return _api_internal.take(reshape(a, -1), indices, 0, mode, out)\n else:\n return _api_internal.take(a, indices, axis, mode, out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef insert(arr, obj, values, axis=None):\n \"\"\"\n Insert values along the given axis before the given indices.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : int, slice or ndarray of int64\n Object that defines the index or indices before which `values` is\n inserted.\n Support for multiple insertions when `obj` is a single scalar or a\n sequence with one element (only support int32 and int64 element).\n values : ndarray\n Values to insert into `arr`.\n If the type of values is different from that of arr, values is converted\n to the type of arr.\n axis : int, optional\n Axis along which to insert `values`. If `axis` is None then `arr`\n is flattened first.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with `values` inserted. Note that `insert`\n does not occur in-place: a new array is returned. 
If\n `axis` is None, `out` is a flattened array.\n\n Notes\n -----\n - Note that for higher dimensional inserts `obj=0` behaves very different\n from `obj=[0]` just like `arr[:,0,:] = values` is different from\n `arr[:,[0],:] = values`.\n - If obj is a ndarray, it's dtype only supports int64\n\n Examples\n --------\n >>> a = np.array([[1, 1], [2, 2], [3, 3]])\n >>> a\n array([[1., 1.],\n [2., 2.],\n [3., 3.]])\n >>> np.insert(a, 1, np.array(5))\n array([1., 5., 1., 2., 2., 3., 3.])\n >>> np.insert(a, 1, np.array(5), axis=1)\n array([[1., 5., 1.],\n [2., 5., 2.],\n [3., 5., 3.]])\n\n Difference between sequence and scalars:\n\n >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n\n >>> b = a.flatten()\n >>> b\n array([1., 1., 2., 2., 3., 3.])\n >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))\n array([1., 1., 5., 6., 2., 2., 3., 3.])\n\n >>> np.insert(b, slice(2, 4), np.array([5, 6]))\n array([1., 1., 5., 2., 6., 2., 3., 3.])\n\n # type casting\n >>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))\n array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)\n\n >>> x = np.arange(8).reshape(2, 4)\n >>> idx = np.array([1, 3], dtype=np.int64)\n >>> np.insert(x, idx, np.array([999]), axis=1)\n array([[ 0., 999., 1., 2., 999., 3.],\n [ 4., 999., 5., 6., 999., 7.]])\n \"\"\"\n if isinstance(values, numeric_types):\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _api_internal.insert_slice(arr, values, start, stop, step, axis)\n elif isinstance(obj, integer_types):\n return _api_internal.insert_scalar(arr, values, obj, axis)\n elif isinstance(obj, NDArray):\n return _api_internal.insert_tensor(arr, obj, values, axis)\n\n if not isinstance(arr, NDArray):\n raise TypeError(\"'arr' can not support type {}\".format(str(type(arr))))\n if not isinstance(values, NDArray):\n raise TypeError(\"'values' can not support type {}\".format(str(type(values))))\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _api_internal.insert_slice(arr, values, start, stop, step, axis)\n elif isinstance(obj, integer_types):\n return _api_internal.insert_scalar(arr, values, obj, axis)\n elif isinstance(obj, NDArray):\n return _api_internal.insert_tensor(arr, values, obj, axis)\n else:\n raise TypeError(\"'obj' can not support type {}\".format(str(type(obj))))\n\n\n#pylint: disable= too-many-arguments, no-member, protected-access\ndef _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):\n \"\"\" Helper function for element-wise operation.\n The function will perform numpy-like broadcasting if needed and call different functions.\n\n Parameters\n --------\n lhs : ndarray or numeric value\n Left-hand side operand.\n\n rhs : ndarray or numeric value\n Right-hand operand,\n\n fn_array : function\n Function to be called if both lhs and rhs are of ``ndarray`` type.\n\n fn_scalar : function\n Function to be called if both lhs and rhs are numeric values.\n\n lfn_scalar : function\n Function to be called if lhs is ``ndarray`` while rhs is numeric value\n\n rfn_scalar : function\n Function to be called if lhs is numeric value while rhs is ``ndarray``;\n if none is provided, then the function is commutative, so rfn_scalar is equal 
to lfn_scalar\n\n Returns\n --------\n mxnet.numpy.ndarray or scalar\n result array or scalar\n \"\"\"\n from ...numpy import ndarray\n from ...numpy_extension import from_numpy # pylint: disable=unused-import\n if isinstance(lhs, numeric_types):\n if isinstance(rhs, numeric_types):\n return fn_scalar(lhs, rhs, out=out)\n else:\n if rfn_scalar is None:\n # commutative function\n return lfn_scalar(rhs, float(lhs), out=out)\n else:\n return rfn_scalar(rhs, float(lhs), out=out)\n elif isinstance(rhs, numeric_types):\n return lfn_scalar(lhs, float(rhs), out=out)\n elif isinstance(lhs, ndarray) and isinstance(rhs, ndarray):\n return fn_array(lhs, rhs, out=out)\n else:\n raise TypeError('type {} not supported'.format(str(type(rhs))))\n#pylint: enable= too-many-arguments, no-member, protected-access\n\n\n@set_module('mxnet.ndarray.numpy')\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):\n \"\"\"\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements:\n\n * the indices of the input array that give the unique values\n * the indices of the unique array that reconstruct the input array\n * the number of times each unique value comes up in the input array\n\n Parameters\n ----------\n ar : ndarray\n Input array. Unless `axis` is specified, this will be flattened if it\n is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` (along the specified axis,\n if provided, or in the flattened array) that result in the unique array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array (for the specified\n axis, if provided) that can be used to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique item appears\n in `ar`.\n axis : int or None, optional\n The axis to operate on. If None, `ar` will be flattened. If an integer,\n the subarrays indexed by the given axis will be flattened and treated\n as the elements of a 1-D array with the dimension of the given axis,\n see the notes for more details. The default is None.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n Notes\n -----\n When an axis is specified the subarrays indexed by the axis are sorted.\n This is done by making the specified axis the first dimension of the array\n and then flattening the subarrays in C order. The flattened subarrays are\n then viewed as a structured type with each element given a label, with the\n effect that we end up with a 1-D array of structured types that can be\n treated in the same way as any other 1-D array. 
The result is that the\n flattened subarrays are sorted in lexicographic order starting with the\n first element.\n\n This function differs from the original `numpy.unique\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in\n the following aspects:\n\n - Only support ndarray as input.\n - Object arrays or structured arrays are not supported.\n\n Examples\n --------\n >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))\n array([1., 2., 3.])\n >>> a = np.array([[1, 1], [2, 3]])\n >>> np.unique(a)\n array([1., 2., 3.])\n\n Return the unique rows of a 2D array\n\n >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])\n >>> np.unique(a, axis=0)\n array([[1., 0., 0.],\n [2., 3., 4.]])\n\n Return the indices of the original array that give the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_index=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 5, 3, 2], dtype=int64)\n >>> a[indices]\n array([1., 2., 3., 4., 6.])\n\n Reconstruct the input array from the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_inverse=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 4, 3, 1, 2, 1], dtype=int64)\n >>> u[indices]\n array([1., 2., 6., 4., 2., 3., 2.])\n \"\"\"\n ret = list(_api_internal.unique(ar, return_index, return_inverse, return_counts, axis))\n return ret[0] if len(ret) == 1 else tuple(ret)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef add(x1, x2, out=None, **kwargs):\n \"\"\"\n Add arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n add : ndarray or scalar\n The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.add(x1, x2, out=out)\n return _api_internal.add(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef subtract(x1, x2, out=None, **kwargs):\n \"\"\"\n Subtract arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be subtracted from each other. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape\n of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n subtract : ndarray or scalar\n The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.subtract(x1, x2, out=out)\n return _api_internal.subtract(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef multiply(x1, x2, out=None, **kwargs):\n \"\"\"\n Multiply arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The multiplication of x1 and x2, element-wise. This is a scalar if both x1 and x2\n are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.multiply(x1, x2, out=out)\n return _api_internal.multiply(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef divide(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns a true division of the inputs, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of default dtype.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.divide(x1, x2, out=out)\n return _api_internal.true_divide(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef true_divide(x1, x2, out=None):\n \"\"\"Returns a true division of the inputs, element-wise.\n\n Instead of the Python traditional 'floor division', this returns a true\n division. True division adjusts the output type to present the best\n answer, regardless of input types.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. 
If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of default dtype.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.true_divide(x1, x2, out=out)\n return _api_internal.true_divide(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef floor_divide(x1, x2, out=None):\n \"\"\"Return the largest integer smaller or equal to the division of the inputs.\n It is equivalent to the Python // operator and pairs with the Python % (remainder),\n function so that a = a % b + b * (a // b) up to roundoff.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n x2 : ndarray or scalar\n Divisor array.\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n .. note::\n\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is the more\n precise type\n\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.floor_divide(x1, x2, out=out)\n return _api_internal.floor_divide(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef mod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.mod(x1, x2, out=out)\n return _api_internal.mod(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef fmod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n _np.fmod(x1, x2, out=out)\n return _api_internal.fmod(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef delete(arr, obj, axis=None):\n \"\"\"\n Return a new array with sub-arrays along an axis deleted. For a one\n dimensional array, this returns those entries not returned by\n `arr[obj]`.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : slice, int or ndarray of ints\n Indicate indices of sub-arrays to remove along the specified axis.\n axis : int, optional\n The axis along which to delete the subarray defined by `obj`.\n If `axis` is None, `obj` is applied to the flattened array.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with the elements specified by `obj` removed. Note\n that `delete` does not occur in-place. If `axis` is None, `out` is\n a flattened array.\n\n Examples\n --------\n >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n >>> arr\n array([[ 1., 2., 3., 4.],\n [ 5., 6., 7., 8.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, 1, 0)\n array([[ 1., 2., 3., 4.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, slice(None, None, 2), 1)\n array([[ 2., 4.],\n [ 6., 8.],\n [10., 12.]])\n\n >>> np.delete(arr, np.array([1,3,5]), None)\n array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])\n >>> np.delete(arr, np.array([1,1,5]), None)\n array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])\n \"\"\"\n if not isinstance(arr, NDArray):\n raise TypeError(\"'arr' can not support type {}\".format(str(type(arr))))\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _api_internal.delete(arr, start, stop, step, axis)\n elif isinstance(obj, integer_types):\n return _api_internal.delete(arr, obj, axis)\n elif isinstance(obj, NDArray):\n return _api_internal.delete(arr, obj, axis)\n else:\n raise TypeError(\"'obj' can not support type {}\".format(str(type(obj))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef matmul(a, b, out=None):\n \"\"\"\n Matrix product of two arrays.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays, scalars not allowed.\n out : ndarray, optional\n A location into which the result is stored.\n If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The matrix product of the inputs.\n This is a scalar only when both x1, x2 are 1-d vectors.\n\n Raises\n ------\n MXNetError\n If the last dimension of a is not the same size as the second-to-last dimension of b.\n If a scalar value is passed in.\n\n See Also\n --------\n tensordot :\n Sum products over arbitrary axes.\n dot :\n alternative matrix product with different broadcasting rules.\n einsum :\n Einstein summation convention.\n\n Notes\n -----\n The behavior depends on the arguments in the following way.\n\n - If both arguments are 2-D they are multiplied like conventional matrices.\n - If either argument is N-D, N > 2, it is treated as a stack of matrices\n residing in the last two indexes and broadcast accordingly.\n - If the first argument is 1-D, it is promoted to a matrix by prepending\n a 1 to its dimensions. 
After matrix multiplication the prepended 1 is removed.\n - If the second argument is 1-D, it is promoted to a matrix by appending a 1\n to its dimensions. After matrix multiplication the appended 1 is removed.\n\n matmul differs from dot in two important ways:\n\n - Multiplication by scalars is not allowed, use multiply instead.\n - Stacks of matrices are broadcast together as if the matrices were elements,\n respecting the signature (n,k),(k,m)->(n,m):\n >>> a = np.ones([9, 5, 7, 4])\n >>> c = np.ones([9, 5, 4, 3])\n >>> np.dot(a, c).shape\n (9, 5, 7, 9, 5, 3)\n >>> np.matmul(a, c).shape\n (9, 5, 7, 3)\n >>> # n is 7, k is 4, m is 3\n\n Examples\n --------\n For 2-D arrays it is the matrix product:\n >>> a = np.array([[1, 0],\n ... [0, 1]])\n >>> b = np.array([[4, 1],\n ... [2, 2]])\n >>> np.matmul(a, b)\n array([[4., 1.],\n [2., 2.]])\n\n For 2-D mixed with 1-D, the result is the usual.\n >>> a = np.array([[1, 0],\n ... [0, 1]])\n >>> b = np.array([1, 2])\n >>> np.matmul(a, b)\n array([1., 2.])\n >>> np.matmul(b, a)\n array([1., 2.])\n\n Broadcasting is conventional for stacks of arrays\n >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))\n >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))\n >>> np.matmul(a, b).shape\n (2, 2, 2)\n >>> np.matmul(a, b)[0, 1, 1]\n array(98.)\n >>> sum(a[0, 1, :] * b[0, :, 1])\n array(98.)\n\n Scalar multiplication raises an error.\n >>> np.matmul([1, 2], 3)\n Traceback (most recent call last):\n ...\n mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.\n \"\"\"\n return _api_internal.matmul(a, b, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef remainder(x1, x2, out=None):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n _np.mod(x1, x2, out=out)\n return _api_internal.mod(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef power(x1, x2, out=None, **kwargs):\n \"\"\"\n First array elements raised to powers from second array, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n The bases.\n\n x2 : ndarray or scalar\n The exponent.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The bases in x1 raised to the exponents in x2.\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.power(x1, x2, out=out)\n return _api_internal.power(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef all(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Test whether all array elements along a given axis evaluate to True.\n\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a logical AND reduction is performed.\n The default (axis = None) is to perform a logical AND over\n all the dimensions of the input array.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n out : ndarray, optional\n Alternate output array in which to place the result. It must have\n the same shape as the expected output and its type is preserved\n\n Returns\n --------\n all : ndarray, bool\n A new boolean or array is returned unless out is specified,\n in which case a reference to out is returned.\n\n Examples:\n ---------\n >>> np.all([[True,False],[True,True]])\n False\n\n >>> np.all([[True,False],[True,True]], axis=0)\n array([ True, False])\n\n >>> np.all([-1, 4, 5])\n True\n\n >>> np.all([1.0, np.nan])\n True\n\n >>> o=np.array(False)\n >>> z=np.all([-1, 4, 5], out=o)\n >>> id(z), id(o), z\n (28293632, 28293632, array(True)) # may vary\n \"\"\"\n return _api_internal.all(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef any(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Test whether any array element along a given axis evaluates to True.\n Returns single boolean unless axis is not None\n\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a logical AND reduction is performed.\n The default (axis = None) is to perform a logical AND over\n all the dimensions of the input array.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n out : ndarray, optional\n Alternate output array in which to place the result. 
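To make the reduction semantics of ``all`` concrete next to an element-wise operator such as ``power``, a minimal sketch (assuming ``from mxnet import np`` provides the operators documented in this module; values are illustrative):

>>> from mxnet import np
>>> x = np.array([1., 2., 3.])
>>> y = np.power(x, 2)          # element-wise squares -> [1., 4., 9.]
>>> np.all(y >= x)              # logical AND over every element of the comparison
>>> np.all(y >= x, axis=0)      # the same reduction, written with an explicit axis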
It must have\n the same shape as the expected output and its type is preserved\n\n Returns\n --------\n any : bool or ndarray\n A new boolean or ndarray is returned unless out is specified,\n in which case a reference to out is returned.\n\n Examples:\n ---------\n >>> np.any([[True, False], [True, True]])\n True\n\n >>> np.any([[True, False], [False, False]], axis=0)\n array([ True, False])\n\n >>> np.any([-1, 0, 5])\n True\n\n >>> np.any(np.nan)\n True\n\n >>> o=np.array(False)\n >>> z=np.any([-1, 4, 5], out=o)\n >>> z, o\n (array(True), array(True))\n >>> # Check now that z is a reference to o\n >>> z is o\n True\n >>> id(z), id(o) # identity of z and o # doctest: +SKIP\n (191614240, 191614240)\n \"\"\"\n return _api_internal.any(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argsort(a, axis=-1, descending=False, stable=True):\n \"\"\"\n Returns the indices that sort an array `x` along a specified axis.\n\n Notes\n -----\n `argsort` is a standard API in\n https://data-apis.org/array-api/latest/API_specification/sorting_functions.html#argsort-x-axis-1-descending-false-stable-true\n instead of an official NumPy operator.\n\n Parameters\n ----------\n a : ndarray\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n descending : bool, optional\n sort order. If `True`, the returned indices sort x in descending order (by value).\n If `False`, the returned indices sort x in ascending order (by value).Default: False.\n stable : bool, optional\n sort stability. If `True`, the returned indices must maintain the relative order\n of x values which compare as equal. If `False`, the returned indices may or may not\n maintain the relative order of x values which compare as equal. 
Default: True.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified `axis`.\n If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.\n More generally, ``np.take_along_axis(a, index_array, axis=axis)``\n always yields the sorted `a`, irrespective of dimensionality.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 1, 2])\n >>> np.argsort(x)\n array([1, 2, 0])\n\n Two-dimensional array:\n\n >>> x = np.array([[0, 3], [2, 2]])\n >>> x\n array([[0, 3],\n [2, 2]])\n >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)\n >>> ind\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)\n array([[0, 2],\n [2, 3]])\n >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)\n >>> ind\n array([[0, 1],\n [0, 1]])\n >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)\n array([[0, 3],\n [2, 2]])\n\n Indices of the sorted elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)\n >>> ind\n (array([0, 1, 1, 0]), array([0, 0, 1, 1]))\n >>> x[ind] # same as np.sort(x, axis=None)\n array([0, 2, 2, 3])\n \"\"\"\n return _api_internal.argsort(a, axis, not descending, 'int64')\n\n\n@set_module('mxnet.ndarray.numpy')\ndef sort(a, axis=-1, descending=False, stable=True):\n \"\"\"\n Return a sorted copy of an array.\n\n Notes\n -----\n `sort` is a standard API in\n https://data-apis.org/array-api/latest/API_specification/sorting_functions.html#sort-x-axis-1-descending-false-stable-true\n instead of an official NumPy operator.\n\n Parameters\n ----------\n a : ndarray\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n descending : bool, optional\n sort order. If `True`, the returned indices sort x in descending order (by value).\n If `False`, the returned indices sort x in ascending order (by value).Default: False.\n stable : bool, optional\n sort stability. If `True`, the returned indices must maintain the relative order\n of x values which compare as equal. If `False`, the returned indices may or may not\n maintain the relative order of x values which compare as equal. Default: True.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n >>> a = np.array([[1,4],[3,1]])\n >>> np.sort(a) # sort along the last axis\n array([[1, 4],\n [1, 3]])\n >>> np.sort(a, axis=None) # sort the flattened array\n array([1, 1, 3, 4])\n >>> np.sort(a, axis=0) # sort along the first axis\n array([[1, 1],\n [3, 4]])\n \"\"\"\n return _api_internal.sort(a, axis, not descending)\n\n@set_module('mxnet.ndarray.numpy')\ndef dot(a, b, out=None):\n \"\"\"\n Dot product of two arrays. 
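A short sketch of the Array-API style ``argsort``/``sort`` signatures defined above (assuming ``from mxnet import np``; the values are illustrative):

>>> from mxnet import np
>>> x = np.array([[3., 1., 2.], [0., 5., 4.]])
>>> idx = np.argsort(x, axis=1)                 # per-row indices that sort ascending
>>> np.take_along_axis(x, idx, axis=1)          # gathers the same values as np.sort(x, axis=1)
>>> np.sort(x, axis=1, descending=True)         # sorted copy of each row, largest first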
Specifically,\n\n - If both `a` and `b` are 1-D arrays, it is inner product of vectors\n\n - If both `a` and `b` are 2-D arrays, it is matrix multiplication,\n\n - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`\n and using ``np.multiply(a, b)`` or ``a * b`` is preferred.\n\n - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over\n the last axis of `a` and `b`.\n\n - If `a` is an N-D array and `b` is a 2-D array, it is a\n sum product over the last axis of `a` and the second-to-last axis of `b`::\n\n dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k])\n\n Parameters\n ----------\n a : ndarray\n First argument.\n b : ndarray\n Second argument.\n\n out : ndarray, optional\n Output argument. It must have the same shape and type as the expected output.\n\n Returns\n -------\n output : ndarray\n Returns the dot product of `a` and `b`. If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n If `out` is given, then it is returned\n\n Examples\n --------\n >>> a = np.array(3)\n >>> b = np.array(4)\n >>> np.dot(a, b)\n array(12.)\n\n For 2-D arrays it is the matrix product:\n\n >>> a = np.array([[1, 0], [0, 1]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.dot(a, b)\n array([[4., 1.],\n [2., 2.]])\n\n >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))\n >>> b = np.arange(5*6)[::-1].reshape((6,5))\n >>> np.dot(a, b)[2,3,2,2]\n array(29884.)\n >>> np.sum(a[2,3,2,:] * b[:,2])\n array(29884.)\n \"\"\"\n return _api_internal.dot(a, b, out)\n\n@set_module('mxnet.ndarray.numpy')\ndef tensordot(a, b, axes=2):\n r\"\"\"\n tensordot(a, b, axes=2)\n Compute tensor dot product along specified axes for arrays >= 1-D.\n Given two tensors (arrays of dimension greater than or equal to one),\n `a` and `b`, and an ndarray object containing two ndarray\n objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s\n elements (components) over the axes specified by ``a_axes`` and\n ``b_axes``. The third argument can be a single non-negative\n integer_like scalar, ``N``; if it is such, then the last ``N``\n dimensions of `a` and the first ``N`` dimensions of `b` are summed\n over.\n Parameters\n ----------\n a, b : ndarray, len(shape) >= 1\n Tensors to \"dot\".\n axes : int or (2,) ndarray\n * integer_like\n If an int N, sum over the last N axes of `a` and the first N axes\n of `b` in order. The sizes of the corresponding axes must match.\n * (2,) ndarray\n Or, a list of axes to be summed over, first sequence applying to `a`,\n second to `b`. 
Both elements ndarray must be of the same length.\n See Also\n --------\n dot, einsum\n Notes\n -----\n Three common use cases are:\n * ``axes = 0`` : tensor product :math:`a\\otimes b`\n * ``axes = 1`` : tensor dot product :math:`a\\cdot b`\n * ``axes = 2`` : (default) tensor double contraction :math:`a:b`\n When `axes` is integer_like, the sequence for evaluation will be: first\n the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and\n Nth axis in `b` last.\n When there is more than one axis to sum over - and they are not the last\n (first) axes of `a` (`b`) - the argument `axes` should consist of\n two sequences of the same length, with the first axis to sum over given\n first in both sequences, the second axis second, and so forth.\n Examples\n --------\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))\n >>> c.shape\n (5, 2)\n >>> c\n array([[ 4400., 4730.],\n [ 4532., 4874.],\n [ 4664., 5018.],\n [ 4796., 5162.],\n [ 4928., 5306.]])\n \"\"\"\n return _api_internal.tensordot(a, b, axes)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : ndarray\n Input data. The histogram is computed over the flattened array.\n bins : int or NDArray\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n .. versionadded:: 1.11.0\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n range : (float, float)\n The lower and upper range of the bins. Required when `bins` is an integer.\n Values outside the range are ignored. The first element of the range must\n be less than or equal to the second.\n normed : bool, optional\n Not supported yet, coming soon.\n weights : array_like, optional\n Not supported yet, coming soon.\n density : bool, optional\n Not supported yet, coming soon.\n \"\"\"\n if normed is True:\n raise NotImplementedError(\"normed is not supported yet...\")\n if weights is not None:\n raise NotImplementedError(\"weights is not supported yet...\")\n if density is True:\n raise NotImplementedError(\"density is not supported yet...\")\n if isinstance(bins, numeric_types):\n if range is None:\n raise NotImplementedError(\"automatic range is not supported yet...\")\n return tuple(_api_internal.histogram(a, None, bins, range))\n if isinstance(bins, (list, tuple)):\n raise NotImplementedError(\"array_like bins is not supported yet...\")\n if isinstance(bins, str):\n raise NotImplementedError(\"string bins is not supported yet...\")\n if isinstance(bins, NDArray):\n return tuple(_api_internal.histogram(a, bins, None, None))\n raise ValueError(\"np.histogram fails with\", locals())\n\n\n@set_module('mxnet.ndarray.numpy')\ndef eye(N, M=None, k=0, dtype=float, **kwargs):\n \"\"\"\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. 
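A compact sketch of ``tensordot`` axis pairing and the ``histogram`` calling convention described above (assuming ``from mxnet import np``; shapes and data are illustrative):

>>> from mxnet import np
>>> a = np.arange(24.).reshape((2, 3, 4))
>>> b = np.arange(24.).reshape((4, 3, 2))
>>> np.tensordot(a, b, axes=([2, 1], [0, 1])).shape   # contract the size-4 and size-3 axes
(2, 2)
>>> data = np.array([0.5, 1.5, 1.7, 2.2, 3.9])
>>> counts, edges = np.histogram(data, bins=4, range=(0., 4.))  # range is required for integer bins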
If None, defaults to N.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n - When npx.is_np_default_dtype() returns False, default dtype is float32;\n - When npx.is_np_default_dtype() returns True, default dtype is float64.\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero,\n except for the k-th diagonal, whose values are equal to one.\n \"\"\"\n _sanity_check_params('eye', ['order'], kwargs)\n ctx = kwargs.pop('ctx', current_context())\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is None or dtype is float:\n dtype = _np.float64 if is_np_default_dtype() else _np.float32\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.eye(N, M, k, ctx, dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"\n Return evenly spaced numbers over a specified interval.\n Returns num evenly spaced samples, calculated over the interval [start, stop].\n The endpoint of the interval can optionally be excluded.\n\n Parameters\n ----------\n start : real number\n The starting value of the sequence.\n stop : real number\n The end value of the sequence, unless endpoint is set to False. In\n that case, the sequence consists of all but the last of num + 1\n evenly spaced samples, so that stop is excluded. Note that the step\n size changes when endpoint is False.\n num : int, optional\n Number of samples to generate. Default is 50. Must be non-negative.\n endpoint : bool, optional\n If True, stop is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between samples.\n dtype : dtype, optional\n The type of the output array. If dtype is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start or\n stop are array-like. By default (0), the samples will be along a new\n axis inserted at the beginning. Use -1 to get an axis at the end.\n\n Returns\n -------\n samples : ndarray\n There are num equally spaced samples in the closed interval\n `[start, stop]` or the half-open interval `[start, stop)`\n (depending on whether endpoint is True or False).\n step : float, optional\n Only returned if retstep is True\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similar to `linspace`, but uses a step size (instead of the\n number of samples).\n\n Examples\n --------\n >>> np.linspace(2.0, 3.0, num=5)\n array([2. , 2.25, 2.5 , 2.75, 3. ])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([2. , 2.25, 2.5 , 2.75, 3. 
]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n Notes\n -----\n\n This function differs from the original `numpy.linspace\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in\n the following aspects:\n\n - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray\n - axis could only be 0\n - There could be an additional `ctx` argument to specify the device, e.g. the i-th\n GPU.\n \"\"\"\n if isinstance(start, (list, _np.ndarray, NDArray)) or \\\n isinstance(stop, (list, _np.ndarray, NDArray)):\n raise NotImplementedError('start and stop only support int')\n if axis != 0:\n raise NotImplementedError(\"the function only support axis 0\")\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n if retstep:\n step = (stop - start) / (num - 1)\n return _api_internal.linspace(start, stop, num, endpoint, ctx, dtype), step\n else:\n return _api_internal.linspace(start, stop, num, endpoint, ctx, dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n Non-scalar `start` and `stop` are now supported.\n\n Parameters\n ----------\n start : int or float\n ``base ** start`` is the starting value of the sequence.\n stop : int or float\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : float, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n dtype : dtype\n The type of the output array. If `dtype` is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start\n or stop are array-like. By default (0), the samples will be along a\n new axis inserted at the beginning. Now, axis only support axis = 0.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similar to linspace, with the step size specified instead of the\n number of samples. 
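The ``retstep`` return convention of ``linspace`` and the ``k`` offset of ``eye`` in one short sketch (assuming ``from mxnet import np``):

>>> from mxnet import np
>>> samples, step = np.linspace(0., 1., num=5, retstep=True)   # 5 points from 0 to 1, step == 0.25
>>> np.eye(3, k=1)                                             # ones on the first super-diagonal
>>> np.eye(3, 4, k=-1)                                         # rectangular, ones one step below the diagonal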
Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n\n Notes\n -----\n Logspace is equivalent to the code. Now wo only support axis = 0.\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ...\n >>> power(base, y).astype(dtype)\n ...\n\n Examples\n --------\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.44347, 464.15887, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([100. , 177.82794, 316.22775, 562.3413 ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([4. , 5.0396843, 6.349604 , 8. ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)\n array([4, 5, 6, 8], dtype=int32)\n >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))\n array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))\n \"\"\"\n if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \\\n isinstance(stop, (list, tuple, _np.ndarray, NDArray)):\n raise NotImplementedError('start and stop only support int and float')\n if axis != 0:\n raise NotImplementedError(\"the function only support axis 0\")\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.logspace(start, stop, num, endpoint, base, ctx, dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef expand_dims(a, axis):\n \"\"\"Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n \"\"\"\n return _api_internal.expand_dims(a, axis)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef gcd(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the greatest common divisor of ``|x1|`` and ``|x2|``\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing greatest common divisor. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The greatest common divisor of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n lcm : The lowest common multiple\n\n Examples\n --------\n >>> np.gcd(12, 20)\n 4\n >>> np.gcd(np.arange(6, dtype=int), 20)\n array([20, 1, 2, 1, 4, 5], dtype=int64)\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.gcd(x1, x2, out=out)\n return _api_internal.gcd(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef lcm(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the lowest common multiple of ``|x1|`` and ``|x2|``\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing lowest common multiple. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. 
If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The lowest common multiple of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n gcd : The greatest common divisor\n\n Examples\n --------\n >>> np.lcm(12, 20)\n 60\n >>> np.lcm(np.arange(6, dtype=int), 20)\n array([ 0, 20, 20, 60, 20, 20], dtype=int64)\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.lcm(x1, x2, out=out)\n return _api_internal.lcm(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tril(m, k=0):\n r\"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : ndarray, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : same thing, only for the upper triangle\n\n Examples\n --------\n >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\n >>> np.tril(a, -1)\n array([[ 0., 0., 0.],\n [ 4., 0., 0.],\n [ 7., 8., 0.],\n [10., 11., 12.]])\n \"\"\"\n return _api_internal.tril(m, k)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef triu(m, k=0):\n r\"\"\"\n Upper triangle of an array.\n\n Return a copy of a matrix with the elements below the `k`-th diagonal\n zeroed.\n\n Please refer to the documentation for `tril` for further details.\n\n See Also\n --------\n tril : lower triangle of an array\n\n Examples\n --------\n >>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1)\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 0, 8, 9],\n [ 0, 0, 12]])\n \"\"\"\n return _api_internal.triu(m, k)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef trace(a, offset=0, axis1=0, axis2=1, out=None):\n \"\"\"\n Return the sum along diagonals of the array.\n If `a` is 2-D, the sum along its diagonal with the given offset\n is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.\n If `a` has more than two dimensions, then the axes specified by axis1 and\n axis2 are used to determine the 2-D sub-arrays whose traces are returned.\n The shape of the resulting array is the same as that of `a` with `axis1`\n and `axis2` removed.\n\n Parameters\n ----------\n a : ndarray\n Input array, from which the diagonals are taken.\n offset : int, optional\n Offset of the diagonal from the main diagonal. Can be both positive\n and negative. Defaults to 0.\n axis1, axis2 : int, optional\n Axes to be used as the first and second axis of the 2-D sub-arrays\n from which the diagonals should be taken. Defaults are the first two\n axes of `a`.\n out : ndarray, optional\n Array into which the output is placed. It must be of the right shape\n and right type to hold the output.\n\n Returns\n -------\n sum_along_diagonals : ndarray\n If `a` is 2-D, the sum along the diagonal is returned. 
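To see how ``tril`` and ``triu`` split a matrix, and how the integer-only ``lcm`` broadcasts against a scalar, a brief sketch (assuming ``from mxnet import np``; the matrix is illustrative):

>>> from mxnet import np
>>> m = np.arange(1., 10.).reshape((3, 3))
>>> lower = np.tril(m)            # zeros everything above the main diagonal
>>> upper = np.triu(m, k=1)       # strictly upper-triangular part
>>> # lower + upper reproduces m, since the two parts never overlap
>>> np.lcm(np.array([4, 6, 10], dtype=int), 12)   # element-wise lowest common multiple with a scalar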
If `a` has\n larger dimensions, then an array of sums along diagonals is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n >>> np.trace(a)\n array(3.)\n >>> a = np.arange(8).reshape((2, 2, 2))\n >>> np.trace(a)\n array([6., 8.])\n >>> a = np.arange(24).reshape((2, 2, 2, 3))\n >>> np.trace(a).shape\n (2, 3)\n \"\"\"\n return _api_internal.trace(a, offset, axis1, axis2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tri(N, M=None, k=0, dtype=None, ctx=None):\n r\"\"\"\n An array with ones at and below the given diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the array.\n M : int, optional\n Number of columns in the array.\n By default, `M` is taken equal to `N`.\n k : int, optional\n The sub-diagonal at and below which the array is filled.\n `k` = 0 is the main diagonal, while `k` < 0 is below it,\n and `k` > 0 is above. The default is 0.\n dtype : dtype, optional\n Data type of the returned array. The default is float.\n\n Returns\n -------\n tri : ndarray of shape (N, M)\n Array with its lower triangle filled with ones and zero elsewhere;\n in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.\n\n Examples\n --------\n >>> np.tri(3, 5, 2, dtype=int)\n array([[1, 1, 1, 0, 0],\n [1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1]])\n\n >>> np.tri(3, 5, -1)\n array([[0., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0.],\n [1., 1., 0., 0., 0.]])\n \"\"\"\n if ctx is None:\n ctx = str(current_context())\n return _api_internal.tri(N, M, k, dtype, ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef triu_indices(n, k=0, m=None, ctx=None):\n r\"\"\"\n Return the indices for the upper-triangle of an (n, m) array.\n\n Parameters\n ----------\n n : int\n The size of the arrays for which the returned indices will\n be valid.\n k : int, optional\n Diagonal offset (see `triu` for details).\n m : int, optional\n .. versionadded:: 1.9.0\n\n The column dimension of the arrays for which the returned\n arrays will be valid.\n By default `m` is taken equal to `n`.\n\n\n Returns\n -------\n inds : tuple, shape(2) of ndarrays, shape(`n`)\n The indices for the triangle. The returned tuple contains two arrays,\n each with the indices along one dimension of the array. 
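A small numeric sketch of ``trace`` offsets and the ``tri`` mask builder described above (assuming ``from mxnet import np``):

>>> from mxnet import np
>>> a = np.arange(9.).reshape((3, 3))
>>> np.trace(a)            # 0 + 4 + 8, the main diagonal
>>> np.trace(a, offset=1)  # 1 + 5, the first super-diagonal
>>> np.tri(3, k=-1)        # ones strictly below the main diagonal, zeros elsewhere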
Can be used\n to slice a ndarray of shape(`n`, `n`).\n\n See also\n --------\n tril_indices : similar function, for lower-triangular.\n mask_indices : generic function accepting an arbitrary mask function.\n triu, tril\n\n Examples\n --------\n Compute two different sets of indices to access 4x4 arrays, one for the\n upper triangular part starting at the main diagonal, and one starting two\n diagonals further right:\n\n >>> iu1 = np.triu_indices(4)\n >>> iu2 = np.triu_indices(4, 2)\n\n Here is how they can be used with a sample array:\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Both for indexing:\n\n >>> a[iu1]\n array([ 0, 1, 2, ..., 10, 11, 15])\n\n And for assigning values:\n\n >>> a[iu1] = -1\n >>> a\n array([[-1, -1, -1, -1],\n [ 4, -1, -1, -1],\n [ 8, 9, -1, -1],\n [12, 13, 14, -1]])\n\n These cover only a small part of the whole array (two diagonals right\n of the main one):\n\n >>> a[iu2] = -10\n >>> a\n array([[ -1, -1, -10, -10],\n [ 4, -1, -1, -10],\n [ 8, 9, -1, -1],\n [ 12, 13, 14, -1]])\n \"\"\"\n return nonzero(~tri(N=n, M=m, k=k-1, dtype=bool, ctx=ctx))\n\n\n\n@set_module('mxnet.ndarray.numpy')\ndef triu_indices_from(arr, k=0):\n \"\"\"\n Return the indices for the upper-triangle of arr.\n See `triu_indices` for full details.\n Parameters\n ----------\n arr : ndarray, shape(N, N)\n The indices will be valid for square arrays.\n k : int, optional\n Diagonal offset (see `triu` for details).\n Returns\n -------\n triu_indices_from : tuple, shape(2) of ndarray, shape(N)\n Indices for the upper-triangle of `arr`.\n See Also\n --------\n triu_indices, triu\n \"\"\"\n if arr.ndim != 2:\n raise ValueError(\"input array must be 2-d\")\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\ndef _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):\n \"\"\"Helper function for unary operators with kwargs.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input of the unary operator.\n fn_array : function\n Function to be called if x is of ``ndarray`` type.\n fn_scalar : function\n Function to be called if x is a Python scalar.\n out : ndarray\n The buffer ndarray for storing the result of the unary function.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n Result array or scalar.\n \"\"\"\n if isinstance(x, numeric_types):\n return fn_scalar(x, **kwargs)\n elif isinstance(x, NDArray):\n return fn_array(x, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\ndef _pure_unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):\n \"\"\"Helper function for unary operators without support for kwargs.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input of the unary operator.\n fn_array : function\n Function to be called if x is of ``ndarray`` type.\n fn_scalar : function\n Function to be called if x is a Python scalar.\n out : ndarray\n The buffer ndarray for storing the result of the unary function.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n Result array or scalar.\n \"\"\"\n if isinstance(x, numeric_types):\n return fn_scalar(x, **kwargs)\n elif isinstance(x, NDArray):\n return fn_array(x, out)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sin(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad 
equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The sine of each element of x. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sin(np.pi/2.)\n 1.0\n >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)\n array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.sin, _np.sin, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cos(x, out=None, **kwargs):\n r\"\"\"\n Cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding cosine values. This is a scalar if x is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cos(np.array([0, np.pi/2, np.pi]))\n array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.cos(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.cos, _np.cos, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sinh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic sine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sinh(0)\n 0.0\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.sinh(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.sinh, _np.sinh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cosh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic cosine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. 
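Combining the index helpers with the element-wise trigonometric functions, a sketch (assuming ``from mxnet import np``; the in-place assignment mirrors the ``triu_indices`` example above):

>>> from mxnet import np
>>> a = np.arange(16.).reshape((4, 4))
>>> idx = np.triu_indices_from(a, k=1)   # row/column indices of the strictly upper triangle
>>> a[idx] = 0.                          # zero that triangle in place
>>> theta = np.array([0., np.pi / 6, np.pi / 2])
>>> np.sin(theta)                        # element-wise sine of the angles in radians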
The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cosh(0)\n 1.0\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.cosh, _np.cosh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef tanh(x, out=None, **kwargs):\n \"\"\"\n Compute hyperbolic tangent element-wise.\n Equivalent to ``np.sinh(x)/np.cosh(x)``.\n\n Parameters\n ----------\n x : ndarray or scalar.\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic tangent values.\n\n Notes\n -----\n If `out` is provided, the function writes the result into it,\n and returns a reference to `out`. (See Examples)\n - input x does not support complex computation (like imaginary number)\n >>> np.tanh(np.pi*1j)\n TypeError: type <type 'complex'> not supported\n\n Examples\n --------\n >>> np.tanh(np.array[0, np.pi]))\n array([0. , 0.9962721])\n >>> np.tanh(np.pi)\n 0.99627207622075\n >>> # Example of providing the optional output parameter illustrating\n >>> # that what is returned is a reference to said parameter\n >>> out1 = np.array(1)\n >>> out2 = np.tanh(np.array(0.1), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.tanh, _np.tanh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log10(x, out=None, **kwargs):\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which t'absolute', he result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The logarithm to the base 10 of `x`, element-wise. NaNs are\n returned where x is negative. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.log10(np.array([1e-15, -3.]))\n array([-15., nan])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.log10, _np.log10, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sqrt(x, out=None, **kwargs):\n \"\"\"\n Return the non-negative square-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose square-roots are required.\n out : ndarray, or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n An array of the same shape as `x`, containing the positive\n square-root of each element in `x`. 
This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sqrt(np.array([1,4,9]))\n array([1., 2., 3.])\n >>> np.sqrt(np.array([4, -1, _np.inf]))\n array([ 2., nan, inf])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.sqrt, _np.sqrt, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cbrt(x, out=None, **kwargs):\n r\"\"\"\n Return the cube-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray\n The values whose cube-roots are required.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n y : ndarray\n An array of the same shape as x, containing the cube cube-root of each element in x.\n If out was provided, y is a reference to it. This is a scalar if x is a scalar.\n\n Examples\n ----------\n >>> np.cbrt([1,8,27])\n array([ 1., 2., 3.])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.cbrt, _np.cbrt, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef abs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> x = np.array([-1.2, 1.2])\n >>> np.abs(x)\n array([1.2, 1.2])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.abs, _np.abs, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef fabs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n This function returns the absolute values (positive magnitude) of the\n data in `x`. Complex values are not handled, use `absolute` to find the\n absolute values of complex data.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.fabs(-1)\n 1.0\n >>> np.fabs(np.array([-1.2, 1.2]))s\n array([ 1.2, 1.2])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.abs, _np.abs, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef absolute(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n np.abs is a shorthand for this function.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
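The root and magnitude functions above compose naturally; a short sketch (assuming ``from mxnet import np``; the values are illustrative):

>>> from mxnet import np
>>> x = np.array([-8., -1., 0., 27.])
>>> np.abs(x)             # magnitudes: [8., 1., 0., 27.]
>>> np.cbrt(x)            # real cube roots, defined for negatives: [-2., -1., 0., 3.]
>>> np.sqrt(np.abs(x))    # square roots of the magnitudes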
If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n absolute : ndarray\n An ndarray containing the absolute value of each element in x.\n\n Examples\n ----------\n >>> x = np.array([-1.2, 1.2])\n >>> np.absolute(x)\n array([ 1.2, 1.2])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.abs, _np.abs, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sign(x, out=None, **kwargs):\n r\"\"\"\n Returns an element-wise indication of the sign of a number.\n The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.\n\n Parameters\n ----------\n x : ndarray or a scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The sign of `x`.\n This is a scalar if `x` is a scalar.\n\n Note\n -------\n - Only supports real number as input elements.\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([-5., 4.5])\n >>> np.sign(a)\n array([-1., 1.])\n >>> # Use scalars as inputs:\n >>> np.sign(4.0)\n 1.0\n >>> np.sign(0)\n 0\n >>> # Use ``out`` parameter:\n >>> b = np.zeros((2, ))\n >>> np.sign(a, out=b)\n array([-1., 1.])\n >>> b\n array([-1., 1.])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.sign, _np.sign, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef exp(x, out=None, **kwargs):\n r\"\"\"\n Calculate the exponential of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential of `x`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.exp(1)\n 2.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.exp(x)\n array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.exp, _np.exp, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef expm1(x, out=None, **kwargs):\n r\"\"\"\n Calculate `exp(x) - 1` of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential minus one: `out = exp(x) - 1`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.expm1(1)\n 1.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.expm1(x)\n array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.expm1, _np.expm1, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arcsin(x, out=None, **kwargs):\n r\"\"\"\n Inverse sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n `y`-coordinate on the unit circle.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n angle : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n The inverse sine of each element in `x`, in radians and in the\n closed interval ``[-pi/2, pi/2]``.\n\n Examples\n --------\n >>> np.arcsin(1) # pi/2\n 1.5707963267948966\n >>> np.arcsin(-1) # -pi/2\n -1.5707963267948966\n >>> np.arcsin(0)\n 0.0\n\n Notes\n -----\n `arcsin` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that :math:`sin(z) = x`. The convention is to\n return the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, *arcsin* always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n The inverse sine is also known as `asin` or sin^{-1}.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.arcsin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in\n the following aspects:\n - Only support ndarray or scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,\n 10th printing, New York: Dover, 1964, pp. 79ff.\n http://www.math.sfu.ca/~cbm/aands/\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.arcsin, _np.arcsin, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arccos(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse cosine, element-wise.\n The inverse of cos so that, if y = cos(x), then x = arccos(y).\n\n Parameters\n ----------\n x : ndarray\n x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that\n the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n angle : ndarray\n The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].\n This is a scalar if x is a scalar.\n\n See also\n ----------\n cos, arctan, arcsin\n\n Notes\n ----------\n arccos is a multivalued function: for each x there are infinitely many numbers z such that\n cos(z) = x. 
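To illustrate why ``expm1`` exists alongside ``exp``, and the principal-value behaviour of ``arcsin``, a sketch (assuming ``from mxnet import np``):

>>> from mxnet import np
>>> tiny = np.array([1e-8, 1e-4, 1.])
>>> np.expm1(tiny)            # accurate exp(x) - 1 even for very small x
>>> np.exp(tiny) - 1          # same quantity, but the smallest entry loses precision in float32
>>> np.arcsin(np.sin(np.array([0., 0.5, 1.])))   # recovers inputs lying in [-pi/2, pi/2]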
The convention is to return the angle z whose real part lies in [0, pi].\n For real-valued input data types, arccos always returns real output.\n For each value that cannot be expressed as a real number or infinity, it yields nan and sets\n the invalid floating point error flag.\n The inverse cos is also known as acos or cos^-1.\n\n Examples\n ----------\n >>> np.arccos([1, -1])\n array([ 0. , 3.14159265])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.arccos, _np.arccos, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arctan(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse tangent, element-wise.\n The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Out has the same shape as `x`. It lies is in\n ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctan` is a multi-valued function: for each `x` there are infinitely\n many numbers `z` such that tan(`z`) = `x`. The convention is to return\n the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, `arctan` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n For complex-valued input, we do not have support for them yet.\n The inverse tangent is also known as `atan` or tan^{-1}.\n\n Examples\n --------\n >>> x = np.array([0, 1])\n >>> np.arctan(x)\n array([0. , 0.7853982])\n >>> np.pi/4\n 0.7853981633974483\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.arctan, _np.arctan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log(x, out=None, **kwargs):\n \"\"\"\n Natural logarithm, element-wise.\n The natural logarithm `log` is the inverse of the exponential function,\n so that `log(exp(x)) = x`. The natural logarithm is logarithm in base\n `e`.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The natural logarithm of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and\n ``nan`` according to the input.\n This function differs from the original `numpy.log\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)\n >>> np.log(a)\n array([ 0., 1., 2., -inf], dtype=float64)\n >>> # Using default float32 dtype may lead to slightly different behavior:\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)\n >>> np.log(a)\n array([ 0., 0.99999994, 2., -inf])\n >>> np.log(1)\n 0.0\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.log, _np.log, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef degrees(x, out=None, **kwargs):\n \"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding degree values; if `out` was supplied this is a\n reference to it.\n This is a scalar if `x` is a scalar.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> rad = np.arange(12.) * np.pi / 6\n >>> np.degrees(rad)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> # Use specified ``out`` ndarray:\n >>> out = np.zeros((rad.shape))\n >>> np.degrees(rad, out)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> out\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.degrees, _np.degrees, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef rad2deg(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. 
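A short sketch tying ``log`` to ``exp`` and showing ``degrees`` on a radian grid (assuming ``from mxnet import np``; values are illustrative):

>>> from mxnet import np
>>> angles = np.array([0., np.pi / 6, np.pi / 2])
>>> np.degrees(angles)                          # approximately [0., 30., 90.]
>>> np.log(np.exp(np.array([1., 2., 3.])))      # log undoes exp, up to float32 rounding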
If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"rad2deg(x)\" is \"x *180 / pi\".\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.rad2deg(np.pi/2)\n 90.0\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.rad2deg, _np.rad2deg, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef rint(x, out=None, **kwargs):\n \"\"\"\n Round elements of the array to the nearest integer.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.rint\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.rint(a)\n array([-2., -2., -0., 0., 1., 2., 2.])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.rint, _np.rint, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log2(x, out=None, **kwargs):\n \"\"\"\n Base-2 logarithm of x.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The logarithm base two of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.log2\n <https://www.google.com/search?q=numpy+log2>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> x = np.array([0, 1, 2, 2**4])\n >>> np.log2(x)\n array([-inf, 0., 1., 4.])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.log2, _np.log2, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log1p(x, out=None, **kwargs):\n \"\"\"\n Return the natural logarithm of one plus the input array, element-wise.\n Calculates ``log(1 + x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n Natural logarithm of 1 + x, element-wise. 
This is a scalar\n if x is a scalar.\n\n Notes\n -----\n For real-valued input, `log1p` is accurate also for `x` so small\n that `1 + x == 1` in floating-point accuracy.\n Logarithm is a multivalued function: for each `x` there is an infinite\n number of `z` such that `exp(z) = 1 + x`. The convention is to return\n the `z` whose imaginary part lies in `[-pi, pi]`.\n For real-valued input data types, `log1p` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n cannot support complex-valued input.\n\n Examples\n --------\n >>> np.log1p(1e-99)\n 1e-99\n >>> a = np.array([3, 4, 5])\n >>> np.log1p(a)\n array([1.3862944, 1.609438 , 1.7917595])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.log1p, _np.log1p, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef radians(x, out=None, **kwargs):\n \"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array in degrees.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding radian values. This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.radians\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> deg = np.arange(12.) * 30.\n >>> np.radians(deg)\n array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,\n 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],\n dtype=float32)\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.radians, _np.radians, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef deg2rad(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. 
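# --- Editor's illustration (not part of the library source) -----------------
# The log1p accuracy note above, shown with plain NumPy; the mxnet.numpy
# operator documents the same behaviour. For tiny x, log(1 + x) loses
# precision because 1 + x rounds, while log1p(x) stays accurate.
import numpy as np_demo

x = 1e-10
print(np_demo.log(1.0 + x))   # slightly off: rounding already happened in 1.0 + x
print(np_demo.log1p(x))       # accurate, essentially 1e-10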
If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"deg2rad(x)\" is \"x * pi / 180\".\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.deg2rad(180)\n 3.1415927\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.deg2rad, _np.deg2rad, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef reciprocal(x, out=None, **kwargs):\n r\"\"\"\n Return the reciprocal of the argument, element-wise.\n Calculates ``1/x``.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose reciprocals are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.reciprocal(2.)\n 0.5\n >>> x = np.array([1, 2., 3.33])\n >>> np.reciprocal(x)\n array([1. , 0.5 , 0.3003003])\n\n Notes\n -----\n .. note::\n This function is not designed to work with integers.\n For integer arguments with absolute value larger than 1 the result is\n always zero because of the way Python handles integer division. For\n integer zero the result is an overflow.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.reciprocal\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.reciprocal, _np.reciprocal, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef square(x, out=None, **kwargs):\n r\"\"\"\n Return the element-wise square of the input.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose squares are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. 
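# --- Editor's illustration (not part of the library source) -----------------
# The integer caveat from the reciprocal notes above, demonstrated with plain
# NumPy: integer inputs truncate, floating-point inputs behave as expected.
import numpy as np_demo

print(np_demo.reciprocal(np_demo.array([1, 2, 4])))      # [1 0 0]
print(np_demo.reciprocal(np_demo.array([1., 2., 4.])))   # [1.   0.5  0.25]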
This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.square(2.)\n 4.0\n >>> x = np.array([1, 2., -1])\n >>> np.square(x)\n array([1., 4., 1.])\n\n Notes\n -----\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.square\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.square, _np.square, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef negative(x, out=None, **kwargs):\n r\"\"\"\n Numerical negative, element-wise.\n\n Parameters:\n ------------\n x : ndarray or scalar\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored.\n\n Returns:\n ---------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.\n\n Examples:\n ---------\n >>> np.negative(1)\n -1\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.negative, _np.negative, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef positive(x, out=None, **kwargs):\n r\"\"\"\n Computes the numerical positive of each element `x_i` (i.e.,`y_i = +x_i`)\n of the input array x .\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n\n Returns\n -------\n y : ndarray or scalar\n Returned array or scalar: y = +x. This is a scalar if x is a scalar.\n\n Notes\n -----\n Equivalent to `x.copy()`, but only defined for types that support arithmetic.\n\n Examples\n --------\n >>> x1 = np.array(([1., -1.]))\n >>> np.positive(x1)\n array([ 1., -1.])\n >>> +x1\n array([ 1., -1.])\n \"\"\"\n if out is x:\n return x\n return _pure_unary_func_helper(x, _api_internal.copy, _np.positive, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef fix(x, out=None, **kwargs):\n r\"\"\"\n Round an array of floats element-wise to nearest integer towards zero.\n The rounded values are returned as floats.\n\n Parameters:\n ----------\n x : ndarray\n An array of floats to be rounded\n out : ndarray, optional\n Output array\n\n Returns:\n -------\n y : ndarray of floats\n\n Examples\n ---------\n >>> np.fix(3.14)\n 3\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.fix, _np.fix, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef tan(x, out=None, **kwargs):\n r\"\"\"\n Compute tangent element-wise.\n Equivalent to np.sin(x)/np.cos(x) element-wise.\n\n Parameters:\n ----------\n x : ndarray\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided,\n it must have a shape that the inputs broadcast to. If not provided or None,\n a freshly-allocated array is returned. A tuple (possible only as a keyword argument)\n must have length equal to the number of outputs.\n where : ndarray, optional\n Values of True indicate to calculate the ufunc at that position,\n values of False indicate to leave the value in the output alone.\n\n Returns:\n -------\n y : ndarray\n The corresponding tangent values. 
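# --- Editor's illustration (not part of the library source) -----------------
# fix rounds toward zero, unlike floor, which rounds toward -inf. Plain NumPy
# is used here; the docstrings above describe the same behaviour.
import numpy as np_demo

a = np_demo.array([-1.7, -0.2, 0.2, 1.7])
print(np_demo.fix(a))    # [-1. -0.  0.  1.]
print(np_demo.floor(a))  # [-2. -1.  0.  1.]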
This is a scalar if x is a scalar.\n\n Examples:\n ---------\n >>> np.tan(0.5)\n 0.5463024898437905\n \"\"\"\n\n return _pure_unary_func_helper(x, _api_internal.tan, _np.tan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef ceil(x, out=None, **kwargs):\n r\"\"\"\n Return the ceiling of the input, element-wise.\n The ceil of the ndarray `x` is the smallest integer `i`, such that\n `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a same shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The ceiling of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.ceil(a)\n array([-1., -1., -0., 1., 2., 2., 2.])\n >>> #if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.ceil(np.array(3.5), a)\n array(4.)\n >>> a\n array(4.)\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.ceil, _np.ceil, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef floor(x, out=None, **kwargs):\n r\"\"\"\n Return the floor of the input, element-wise.\n The floor of the ndarray `x` is the largest integer `i`, such that\n `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a same shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The floor of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.floor(a)\n array([-2., -2., -1., 0., 1., 1., 2.])\n >>> #if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.floor(np.array(3.5), a)\n array(3.)\n >>> a\n array(3.)\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.floor, _np.floor, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef bitwise_not(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.bitwise_not, _np.bitwise_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef invert(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.bitwise_not, _np.bitwise_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef trunc(x, out=None, **kwargs):\n r\"\"\"\n Return the truncated value of the input, element-wise.\n The truncated value of the scalar `x` is the nearest integer `i` which\n is closer to zero than `x` is. In short, the fractional part of the\n signed number `x` is discarded.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : ndarray or scalar\n The truncated value of each element in `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.trunc in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
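# --- Editor's illustration (not part of the library source) -----------------
# bitwise_not/invert on a *signed* integer follows two's complement
# (~x == -x - 1); the docstring above shows only the unsigned case.
# Plain NumPy is used for illustration.
import numpy as np_demo

x = np_demo.array([13], dtype=np_demo.int8)
print(np_demo.invert(x))       # [-14]
print(np_demo.bitwise_not(x))  # [-14], bitwise_not is an alias for invert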
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.trunc(a)\n array([-1., -1., -0., 0., 1., 1., 2.])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.trunc, _np.trunc, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef logical_not(x, out=None, **kwargs):\n r\"\"\"\n Compute the truth value of NOT x element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Logical NOT is applied to the elements of `x`.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : bool or ndarray of bool\n Boolean result with the same shape as `x` of the NOT operation\n on elements of `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.logical_not in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> x= np.array([True, False, 0, 1])\n >>> np.logical_not(x)\n array([False, True, True, False])\n\n >>> x = np.arange(5)\n >>> np.logical_not(x<3)\n array([False, False, False, True, True])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.logical_not, _np.logical_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arcsinh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arcsinh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arcsinh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `sinh(z) = x`.\n\n For real-valued input data types, `arcsinh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arcsinh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. DType of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arcsinh(a)\n array([1.8309381, 2.2924316])\n >>> np.arcsinh(1)\n 0.0\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.arcsinh, _np.arcsinh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arccosh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arccosh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arccosh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `cosh(z) = x`.\n\n For real-valued input data types, `arccosh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arccosh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arccosh(a)\n array([1.8309381, 2.2924316])\n >>> np.arccosh(1)\n 0.0\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.arccosh, _np.arccosh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arctanh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic tangent, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arctanh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctanh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `tanh(z) = x`.\n\n For real-valued input data types, `arctanh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arctanh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
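# --- Editor's illustration (not part of the library source) -----------------
# Round-trip sanity check for the inverse hyperbolic functions documented
# above, using plain NumPy.
import numpy as np_demo

x = np_demo.array([0.5, 1.0, 2.0])
print(np_demo.arcsinh(np_demo.sinh(x)))  # ~[0.5 1.  2. ]
print(np_demo.arccosh(np_demo.cosh(x)))  # ~[0.5 1.  2. ]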
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([0.0, -0.5])\n >>> np.arctanh(a)\n array([0., -0.54930615])\n >>> np.arctanh(0.0)\n 0.0\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.arctanh, _np.arctanh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tile(A, reps):\n r\"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Parameters\n ----------\n A : ndarray or scalar\n An input array or a scalar to repeat.\n reps : a single integer or tuple of integers\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0., 1., 2., 0., 1., 2.])\n >>> np.tile(a, (2, 2))\n array([[0., 1., 2., 0., 1., 2.],\n [0., 1., 2., 0., 1., 2.]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0., 1., 2., 0., 1., 2.]],\n [[0., 1., 2., 0., 1., 2.]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1., 2., 1., 2.],\n [3., 4., 3., 4.]])\n >>> np.tile(b, (2, 1))\n array([[1., 2.],\n [3., 4.],\n [1., 2.],\n [3., 4.]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.]])\n\n Scalar as input:\n\n >>> np.tile(2, 3)\n array([2, 2, 2]) # repeating integer `2`\n\n \"\"\"\n if isinstance(A, numeric_types):\n return _np.tile(A, reps)\n elif isinstance(A, NDArray):\n return _api_internal.tile(A, reps)\n else:\n raise TypeError('type {} not supported'.format(str(type(A))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef transpose(a, axes=None):\n \"\"\"\n Permute the dimensions of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axes : list of ints, optional\n By default, reverse the dimensions,\n otherwise permute the axes according to the values given.\n\n Returns\n -------\n p : ndarray\n a with its axes permuted.\n\n Notes\n -----\n This function differs from the original `numpy.transpose\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in\n the following way(s):\n\n - only ndarray is accepted as valid input, python iterables are not supported\n - the operator always returns an `ndarray` that does not share the memory with the input\n\n Examples\n --------\n >>> x = np.arange(4).reshape((2,2))\n >>> x\n array([[0., 1.],\n [2., 3.]])\n >>> np.transpose(x)\n array([[0., 2.],\n [1., 3.]])\n >>> x = np.ones((1, 2, 3))\n >>> np.transpose(x, (1, 0, 2)).shape\n (2, 1, 3)\n \"\"\"\n return _api_internal.transpose(a, axes)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef repeat(a, repeats, axis=None):\n \"\"\"\n Repeat elements of an array.\n\n Parameters\n ----------\n a : array_like\n Input array.\n repeats : int\n The number of repetitions for each element.\n axis : int, optional\n The axis along which to repeat values. 
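# --- Editor's illustration (not part of the library source) -----------------
# The reps-promotion rule for tile described above: for A of shape
# (2, 3, 4, 5), reps (2, 2) is treated as (1, 1, 2, 2), so only the last two
# axes are repeated. Plain NumPy is used for illustration.
import numpy as np_demo

A = np_demo.zeros((2, 3, 4, 5))
print(np_demo.tile(A, (2, 2)).shape)   # (2, 3, 8, 10)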
By default, use the\n flattened input array, and return a flat output array.\n\n Returns\n -------\n repeated_array : ndarray\n Output array which has the same shape as `a`, except along\n the given axis.\n\n See Also\n --------\n tile : Tile an array.\n\n Examples\n --------\n >>> np.repeat(3, 4)\n array([3, 3, 3, 3])\n >>> x = np.array([[1,2],[3,4]])\n >>> np.repeat(x, 2)\n array([1, 1, 2, 2, 3, 3, 4, 4])\n >>> np.repeat(x, 3, axis=1)\n array([[1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4]])\n >>> np.repeat(x, [1, 2], axis=0)\n array([[1, 2],\n [3, 4],\n [3, 4]])\n \"\"\"\n if isinstance(repeats, numeric_types):\n repeats = [repeats]\n if axis is not None:\n tmp = swapaxes(a, 0, axis)\n res = _api_internal.repeats(tmp, repeats, 0)\n return swapaxes(res, 0, axis)\n return _api_internal.repeats(a, repeats, axis)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n \"\"\"\n if isinstance(indices_or_sections, set):\n indices_or_sections = list(indices_or_sections)\n return list(_api_internal.split(ary, indices_or_sections, axis))\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Split an array into multiple sub-arrays.\n\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an array of length l that should be split into n sections, it returns\n l % n sub-arrays of size l//n + 1 and the rest of size l//n.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. 
    For example,
    ``[2, 3]`` would, for ``axis=0``, result in
    - ary[:2]
    - ary[2:3]
    - ary[3:]
    If an index exceeds the dimension of the array along `axis`,
    an empty sub-array is returned correspondingly.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D Python tuple, list or set.
        Param used to determine the number and size of the subarray.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]

    >>> np.array_split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]

    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]

    >>> x = np.arange(7.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
    """
    if isinstance(indices_or_sections, set):
        indices_or_sections = list(indices_or_sections)
    return list(_api_internal.array_split(ary, indices_or_sections, axis))
# pylint: enable=redefined-outer-name


# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def hsplit(ary, indices_or_sections):
    """Split an array into multiple sub-arrays horizontally (column-wise).

    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and to ``split`` with ``axis=1`` otherwise.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int, list of ints or tuple of ints.
        If `indices_or_sections` is an integer, N, the array will be divided
        into N equal arrays along `axis`. If such a split is not possible,
        an error is raised.

        If `indices_or_sections` is a list of sorted integers, the entries
        indicate where along `axis` the array is split.

        If an index exceeds the dimension of the array along `axis`,
        an error is raised, so each index must be less than or equal to
        the dimension of the array along `axis`.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Notes
    ------
    - If `indices_or_sections` is given as an integer, but a split
      does not result in equal division, a ValueError is raised.

    - If `indices_or_sections` is an integer, and the number is 1, it will
      raise an error.
Because single output from split is not supported yet...\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, [3, 6])\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float32)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n\n If ``ary`` has one dimension, 'axis' = 0.\n >>> x = np.arange(4)\n array([0., 1., 2., 3.])\n >>> np.hsplit(x, 2)\n [array([0., 1.]), array([2., 3.])]\n\n If you want to produce an empty sub-array, you can see an example.\n >>> np.hsplit(x, [2, 2])\n [array([0., 1.]), array([], dtype=float32), array([2., 3.])]\n \"\"\"\n if isinstance(indices_or_sections, set):\n indices_or_sections = list(indices_or_sections)\n return list(_api_internal.hsplit(ary, indices_or_sections))\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vsplit(ary, indices_or_sections):\n r\"\"\"\n vsplit(ary, indices_or_sections)\n\n Split an array into multiple sub-arrays vertically (row-wise).\n\n ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split\n along the first axis regardless of the array dimension.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 0. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 0 the array is split. 
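# --- Editor's illustration (not part of the library source) -----------------
# The array_split size rule quoted earlier: an array of length l split into n
# sections yields l % n pieces of size l // n + 1 and the rest of size l // n.
# With l = 11 and n = 4: three pieces of size 3 and one of size 2 (plain NumPy).
import numpy as np_demo

pieces = np_demo.array_split(np_demo.arange(11), 4)
print([len(p) for p in pieces])   # [3, 3, 3, 2]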
For example, ``[2, 3]`` would result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along axis 0, an error will be thrown.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n\n - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,\n tuple and list.\n - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,\n an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n if isinstance(indices_or_sections, set):\n indices_or_sections = list(indices_or_sections)\n return list(_api_internal.vsplit(ary, indices_or_sections))\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. `dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 2. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 2 the array is split. For example, ``[2, 3]`` would result in\n\n - ary[:, :, :2]\n - ary[:, :, 2:3]\n - ary[:, :, 3:]\n\n If an index exceeds the dimension of the array along axis 2, an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n \"\"\"\n if isinstance(indices_or_sections, set):\n indices_or_sections = list(indices_or_sections)\n return list(_api_internal.dsplit(ary, indices_or_sections))\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef concatenate(seq, axis=0, out=None):\n \"\"\"\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... 
: sequence of ndarray\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n\n >>> np.concatenate((a, b), axis=None)\n array([1., 2., 3., 4., 5., 6.])\n\n >>> np.concatenate((a, b.T), axis=1)\n array([[1., 2., 5.],\n [3., 4., 6.]])\n \"\"\"\n return _api_internal.concatenate(*seq, axis, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef append(arr, values, axis=None): # pylint: disable=redefined-outer-name\n \"\"\"\n Append values to the end of an array.\n\n Parameters\n ----------\n arr : ndarray\n Values are appended to a copy of this array.\n values : ndarray\n These values are appended to a copy of `arr`. It must be of the\n correct shape (the same shape as `arr`, excluding `axis`). If\n `axis` is not specified, `values` can be any shape and will be\n flattened before use.\n axis : int, optional\n The axis along which `values` are appended. If `axis` is not\n given, both `arr` and `values` are flattened before use.\n\n Returns\n -------\n append : ndarray\n A copy of `arr` with `values` appended to `axis`. Note that\n `append` does not occur in-place: a new array is allocated and\n filled. If `axis` is None, `out` is a flattened array.\n\n Examples\n --------\n >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))\n array([1., 2., 3., 4., 5., 6., 7., 8., 9.])\n\n When `axis` is specified, `values` must have the correct shape.\n\n >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)\n array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n \"\"\"\n out = None\n return _api_internal.concatenate(arr, values, axis, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef stack(arrays, axis=0, out=None):\n \"\"\"Join a sequence of arrays along a new axis.\n The axis parameter specifies the index of the new axis in the dimensions of the result.\n For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.\n\n Parameters\n ----------\n arrays : sequence of ndarray\n Each array must have the same shape.\n axis : int, optional\n The axis in the result array along which the input arrays are stacked.\n out : ndarray, optional\n If provided, the destination to place the result. 
The shape must be correct,\n matching that of what stack would have returned if no out argument were specified.\n\n Returns\n -------\n stacked : ndarray\n The stacked array has one more dimension than the input arrays.\"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _api_internal.stack(*arrays, axis, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vstack(arrays, out=None):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _api_internal.vstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef row_stack(arrays):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
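# --- Editor's illustration (not part of the library source) -----------------
# stack inserts a brand-new axis at the requested position, whereas
# concatenate joins along an existing axis. Plain NumPy is used here.
import numpy as np_demo

a = np_demo.ones((2, 3))
b = np_demo.zeros((2, 3))
print(np_demo.stack((a, b), axis=0).shape)        # (2, 2, 3)
print(np_demo.stack((a, b), axis=-1).shape)       # (2, 3, 2)
print(np_demo.concatenate((a, b), axis=0).shape)  # (4, 3)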
The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _api_internal.vstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Returns\n --------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _api_internal.column_stack(*tup)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hstack(arrays):\n \"\"\"\n Stack arrays in sequence horizontally (column wise).\n This is equivalent to concatenation along the second axis,\n except for 1-D arrays where it concatenates along the first axis.\n Rebuilds arrays divided by hsplit.\n This function makes most sense for arrays with up to 3 dimensions.\n For instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions concatenate,\n stack and block provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n Examples\n --------\n >>> from mxnet import np,npx\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.hstack((a,b))\n array([1., 2., 3., 2., 3., 4.])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.hstack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _api_internal.hstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef dstack(arrays):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
    The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of arrays
        The arrays must have the same shape along all but the third axis.
        1-D or 2-D arrays must have the same shape.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 3-D.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.dstack((a,b))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.dstack((a,b))
    array([[[1, 2]],
           [[2, 3]],
           [[3, 4]]])
    """
    return _api_internal.dstack(*arrays)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
    """
    Returns element-wise maximum of the input arrays with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
        return _np.maximum(x1, x2, out=out)
    return _api_internal.maximum(x1, x2, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
    """
    Returns element-wise maximum of the input arrays with broadcasting. (Ignores NaNs)

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
        # both inputs are Python scalars: delegate to NumPy directly
        return _np.fmax(x1, x2, out=out)
    return _api_internal.fmax(x1, x2, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    """
    Returns element-wise minimum of the input arrays with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
        return _np.minimum(x1, x2, out=out)
    return _api_internal.minimum(x1, x2, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
    """
    Returns element-wise minimum of the input arrays with broadcasting. (Ignores NaNs)

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
        # both inputs are Python scalars: delegate to NumPy directly
        return _np.fmin(x1, x2, out=out)
    return _api_internal.fmin(x1, x2, out)
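# --- Editor's illustration (not part of the library source) -----------------
# maximum/minimum propagate NaN while fmax/fmin ignore it, as the docstrings
# above note. Plain NumPy is used; mxnet.numpy documents matching semantics.
import numpy as np_demo

a = np_demo.array([1.0, np_demo.nan])
b = np_demo.array([2.0, 2.0])
print(np_demo.maximum(a, b))  # [ 2. nan]
print(np_demo.fmax(a, b))     # [2. 2.]
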
@set_module('mxnet.ndarray.numpy')
def max(a, axis=None, out=None, keepdims=False):
    """
    Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    a : ndarray
        Input data.
    axis : int, optional
        Axis along which to operate. By default, flattened input is used.
    out : ndarray, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    max : ndarray
        Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
        If `axis` is given, the result is an array of dimension
        ``a.ndim - 1``.

    See Also
    --------
    min :
        The minimum value of an array along a given axis, ignoring any nan.
    maximum :
        Element-wise maximum of two arrays, ignoring any nan.
    argmax :
        Return the indices of the maximum values.

    Notes
    -----
    NaN in the original `numpy` is denoted as nan and will be ignored.

    Don't use `max` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
    ``max(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> a
    array([[0., 1.],
           [2., 3.]])
    >>> np.max(a) # Maximum of the flattened array
    array(3.)
    >>> np.max(a, axis=0) # Maxima along the first axis
    array([2., 3.])
    >>> np.max(a, axis=1) # Maxima along the second axis
    array([1., 3.])

    >>> b = np.arange(5, dtype=np.float32)
    >>> b[2] = np.nan
    >>> np.max(b)
    array(4.)
    """
    return _api_internal.max(a, axis, keepdims, out)


@set_module('mxnet.ndarray.numpy')
def min(a, axis=None, out=None, keepdims=False):
    """
    Return the minimum of an array or minimum along an axis.

    Parameters
    ----------
    a : ndarray
        Input data.
    axis : int, optional
        Axis along which to operate. By default, flattened input is used.
    out : ndarray, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    min : ndarray
        Minimum of `a`.
If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n max :\n The maximum value of an array along a given axis, ignoring any nan.\n minimum :\n Element-wise minimum of two arrays, ignoring any nan.\n\n Notes\n -----\n NaN in the orginal `numpy` is denoted as nan and will be ignored.\n\n Don't use `min` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than\n ``min(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.min(a) # Minimum of the flattened array\n array(0.)\n >>> np.min(a, axis=0) # Minima along the first axis\n array([0., 1.])\n >>> np.min(a, axis=1) # Minima along the second axis\n array([0., 2.])\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.min(b)\n array(0.) # nan will be ignored\n \"\"\"\n return _api_internal.min(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef amax(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Return the maximum of an array or maximum along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : int, optional\n Axis along which to operate. By default, flattened input is used.\n out : ndarray, optional\n Alternative output array in which to place the result. Must\n be of the same shape and buffer length as the expected output.\n See `doc.ufuncs` (Section \"Output arguments\") for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n max : ndarray\n Maximum of `a`. If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n min :\n The minimum value of an array along a given axis, ignoring any nan.\n maximum :\n Element-wise maximum of two arrays, ignoring any nan.\n argmax :\n Return the indices of the maximum values.\n\n Notes\n -----\n NaN in the orginal `numpy` is denoted as nan and will be ignored.\n\n Don't use `max` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than\n ``max(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.max(a) # Maximum of the flattened array\n array(3.)\n >>> np.max(a, axis=0) # Maxima along the first axis\n array([2., 3.])\n >>> np.max(a, axis=1) # Maxima along the second axis\n array([1., 3.])\n\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.max(b)\n array(4.)\n \"\"\"\n return _api_internal.amax(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef amin(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Return the minimum of an array or minimum along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : int, optional\n Axis along which to operate. By default, flattened input is used.\n out : ndarray, optional\n Alternative output array in which to place the result. Must\n be of the same shape and buffer length as the expected output.\n See `doc.ufuncs` (Section \"Output arguments\") for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
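# --- Editor's illustration (not part of the library source) -----------------
# Effect of keepdims on the reduction shape for the min/max/amin/amax family
# documented above (plain NumPy shown).
import numpy as np_demo

a = np_demo.arange(6.0).reshape(2, 3)
print(np_demo.min(a, axis=0).shape)                 # (3,)
print(np_demo.min(a, axis=0, keepdims=True).shape)  # (1, 3)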
With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n min : ndarray\n Minimum of `a`. If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n max :\n The maximum value of an array along a given axis, ignoring any nan.\n minimum :\n Element-wise minimum of two arrays, ignoring any nan.\n\n Notes\n -----\n NaN in the orginal `numpy` is denoted as nan and will be ignored.\n\n Don't use `min` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than\n ``min(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.min(a) # Minimum of the flattened array\n array(0.)\n >>> np.min(a, axis=0) # Minima along the first axis\n array([0., 1.])\n >>> np.min(a, axis=1) # Minima along the second axis\n array([0., 2.])\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.min(b)\n array(0.) # nan will be ignored\n \"\"\"\n return _api_internal.amin(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef swapaxes(a, axis1, axis2):\n \"\"\"Interchange two axes of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis1 : int\n First axis.\n axis2 : int\n Second axis.\n\n Returns\n -------\n a_swapped : ndarray\n Swapped array. This is always a copy of the input array.\n \"\"\"\n return _npi.swapaxes(a, dim1=axis1, dim2=axis2)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef clip(a, a_min, a_max, out=None):\n \"\"\"clip(a, a_min, a_max, out=None)\n\n Clip (limit) the values in an array.\n Given an interval, values outside the interval are clipped to\n the interval edges. For example, if an interval of ``[0, 1]``\n is specified, values smaller than 0 become 0, and values larger\n than 1 become 1.\n\n Parameters\n ----------\n a : ndarray\n Array containing elements to clip.\n a_min : scalar or `None`\n Minimum value. If `None`, clipping is not performed on lower\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n a_max : scalar or `None`\n Maximum value. If `None`, clipping is not performed on upper\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n out : ndarray, optional\n The results will be placed in this array. It may be the input\n array for in-place clipping. `out` must be of the right shape\n to hold the output. Its type is preserved.\n\n Returns\n -------\n clipped_array : ndarray\n An array with the elements of `a`, but where values\n < `a_min` are replaced with `a_min`, and those > `a_max`\n with `a_max`.\n\n Notes\n -----\n ndarray `a_min` and `a_max` are not supported.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> np.clip(a, 1, 8)\n array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.clip(a, 3, 6, out=a)\n array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])\n \"\"\"\n if a_min is None and a_max is None:\n raise ValueError('array_clip: must set either max or min')\n return _api_internal.clip(a, a_min, a_max, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tril_indices(n, k=0, m=None):\n \"\"\"\n Return the indices for the lower-triangle of an (n, m) array.\n\n Parameters\n ----------\n n : int\n The row dimension of the arrays for which the returned\n indices will be valid.\n k : int, optional\n Diagonal offset (see `tril` for details).\n m : int, optional\n .. 
versionadded:: 1.9.0\n\n The column dimension of the arrays for which the returned\n arrays will be valid.\n By default `m` is taken equal to `n`.\n\n Returns\n -------\n inds : tuple of arrays\n The indices for the triangle. The returned tuple contains two arrays,\n each with the indices along one dimension of the array.\n\n See also\n --------\n triu_indices : similar function, for upper-triangular.\n mask_indices : generic function accepting an arbitrary mask function.\n tril, triu\n\n Notes\n -----\n .. versionadded:: 1.4.0\n\n Examples\n --------\n Compute two different sets of indices to access 4x4 arrays, one for the\n lower triangular part starting at the main diagonal, and one starting two\n diagonals further right:\n\n >>> il1 = np.tril_indices(4)\n >>> il2 = np.tril_indices(4, 2)\n\n Here is how they can be used with a sample array:\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Both for indexing:\n\n >>> a[il1]\n array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])\n\n And for assigning values:\n\n >>> a[il1] = -1\n >>> a\n array([[-1, 1, 2, 3],\n [-1, -1, 6, 7],\n [-1, -1, -1, 11],\n [-1, -1, -1, -1]])\n\n These cover almost the whole array (two diagonals right of the main one):\n\n >>> a[il2] = -10\n >>> a\n array([[-10, -10, -10, 3],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10]])\n\n \"\"\"\n if m is None:\n m = n\n return tuple(_api_internal.tril_indices(n, k, m))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argmax(a, axis=None, out=None, keepdims=False):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n keepdims : bool\n If True, the reduced axes (dimensions) must be included in the result as\n singleton dimensions, and, accordingly, the result must be compatible with\n the input array. Otherwise, if False, the reduced axes (dimensions) must\n not be included in the result. Default: False .\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n ``keepdims`` param is part of request in data-api-standard\n <https://data-apis.org/array-api/latest/API_specification/searching_functions.html#argmax-x-axis-none-keepdims-false>`_,\n which is not the parameter in official NumPy\n\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmax\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmax(a)\n array(5.)\n >>> np.argmax(a, axis=0)\n array([1., 1., 1.])\n >>> np.argmax(a, axis=1)\n array([2., 2.])\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0., 5., 2., 3., 4., 5.])\n >>> np.argmax(b) # Only the first occurrence is returned.\n array(1.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmax(a, axis=1, out=b)\n array([2., 2.])\n >>> b\n array([2., 2.])\n \"\"\"\n return _api_internal.argmax(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argmin(a, axis=None, out=None, keepdims=False):\n r\"\"\"\n Returns the indices of the minimum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n keepdims : bool\n If True, the reduced axes (dimensions) must be included in the result as\n singleton dimensions, and, accordingly, the result must be compatible with\n the input array. Otherwise, if False, the reduced axes (dimensions) must\n not be included in the result. Default: False .\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n ``keepdims`` param is part of request in `data-api-standard\n <https://data-apis.org/array-api/latest/API_specification/searching_functions.html#argmin-x-axis-none-keepdims-false>`_,\n which is not a parameter in official NumPy\n\n In case of multiple occurrences of the minimum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables (list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmin(a)\n array(0.)\n >>> np.argmin(a, axis=0)\n array([0., 0., 0.])\n >>> np.argmin(a, axis=1)\n array([0., 0.])\n\n >>> b = np.arange(6)\n >>> b[2] = 0\n >>> b\n array([0., 1., 0., 3., 4., 5.])\n >>> np.argmin(b) # Only the first occurrence is returned.\n array(0.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmin(a, axis=1, out=b)\n array([0., 0.])\n >>> b\n array([0., 0.])\n \"\"\"\n return _api_internal.argmin(a, axis, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef average(a, axis=None, weights=None, returned=False, out=None):\n \"\"\"\n Compute the weighted average along the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Array containing data to be averaged.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to average a.\n The default, axis=None, will average over\n all of the elements of the input array.\n If axis is negative it counts from the last to the first axis.\n New in version 1.7.0.\n If axis is a tuple of ints, averaging is\n performed on all of the axes specified in the tuple\n instead of a single axis or all the axes as before.\n weights : ndarray, optional\n An array of weights associated with the values in a; it must have the same dtype as a.\n Each value in a contributes to the average according to its associated weight.\n The weights array can either be 1-D (in which case its length must be\n the size of a along the given axis) or of the same shape as a.\n If weights=None, then all data in a are assumed to have a weight equal to one.\n The 1-D calculation is: avg = sum(a * weights) / sum(weights)\n The only constraint on weights is that sum(weights) must not be 0.\n returned : bool, optional\n Default is False.\n If True, the tuple (average, sum_of_weights) is returned,\n otherwise only the average is returned.\n If weights=None, sum_of_weights is equivalent to\n the number of elements over which the average is taken.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n\n Returns\n -------\n retval, [sum_of_weights] : ndarray\n Return the average along the specified axis.\n When returned is True, return a tuple with the average as the first element\n and the sum of the weights as the second element. sum_of_weights is of the same type as retval.\n If a is integral, the result dtype will be the current default dtype, otherwise it will be the same\n as dtype of a. (i.e. 
When npx.is_np_default_dtype() returns False, default dtype is float32; When\n npx.is_np_default_dtype() returns True, default dtype is float64.)\n\n Raises\n --------\n MXNetError\n - When all weights along axis sum to zero.\n - When the length of 1D weights is not the same as the shape of a along axis.\n - When given 1D weights, the axis is not specified or is not int.\n - When the shape of weights and a differ, but weights are not 1D.\n\n See also\n --------\n mean\n\n Notes\n --------\n This function differs from the original `numpy.average`\n <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in\n the following way(s):\n\n - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens\n - Does not support complex dtype\n - The dtypes of a and weights must be the same\n - Integral a results in default dtype.\n i.e. When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n\n Examples\n --------\n >>> data = np.arange(1, 5)\n >>> data\n array([1., 2., 3., 4.])\n >>> np.average(data)\n array(2.5)\n >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))\n array(4.)\n >>> data = np.arange(6).reshape((3,2))\n >>> data\n array([[0., 1.],\n [2., 3.],\n [4., 5.]])\n >>> weights = np.array([0.25, 0.75])\n array([0.25, 0.75])\n >>> np.average(data, axis=1, weights=weights)\n array([0.75, 2.75, 4.75])\n \"\"\"\n out = _api_internal.average(a, weights, axis, returned, weights is not None, out)\n if isinstance(out, NDArray):\n return out\n else:\n return list(out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"\n mean(a, axis=None, dtype=None, out=None, keepdims=None)\n Compute the arithmetic mean along the specified axis.\n Returns the average of the array elements.\n The average is taken over the flattened array by default, otherwise over the specified axis.\n Parameters\n ----------\n a : ndarray\n ndarray containing numbers whose mean is desired.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.\n If this is a tuple of ints, a mean is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the mean.\n For integer inputs, the default is your current default dtype (i.e. When npx.is_np_default_dtype() returns\n False, default dtype is float32; When npx.is_np_default_dtype() returns True, default dtype is float64.);\n For floating point inputs, it is the same as the input dtype.\n out : ndarray, optional\n Alternate output array in which to place the result. The default is None; if provided,\n it must have the same shape and type as the expected output\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result\n as dimensions with size one. With this option, the result will broadcast correctly\n against the input array.\n If the default value is passed, then keepdims will not be passed through to the mean\n method of sub-classes of ndarray, however any non-default value will be. 
If the sub-class\n method does not implement keepdims any exceptions will be raised.\n Returns\n -------\n m : ndarray, see dtype parameter above\n If out=None, returns a new array containing the mean values,\n otherwise a reference to the output array is returned.\n Notes\n -----\n This function differs from the original `numpy.mean\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in\n the following way(s):\n - only ndarray is accepted as valid input, python iterables or scalar is not supported\n - default data type for integer input is float32 or float64, which depends on your current default dtype.\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.mean(a)\n array(2.5)\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0,:] = 1.0\n >>> a[1,:] = 0.1\n >>> np.mean(a)\n array(0.55)\n >>> np.mean(a, dtype=np.float64)\n array(0.55)\n \"\"\"\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.mean(a, axis, dtype, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the standard deviation along the specified axis.\n Returns the standard deviation, a measure of the spread of a distribution,\n of the array elements. The standard deviation is computed for the\n flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Calculate the standard deviation of these values.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed. The\n default is to compute the standard deviation of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a standard deviation is performed over\n multiple axes, instead of a single axis or all the axes as before.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the calculated\n values) will be cast if necessary.\n ddof : int, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n By default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `std` method of sub-classes of\n `ndarray`, however any non-default value will be. 
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard deviation,\n otherwise return a reference to the output array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.std(a)\n 1.1180339887498949 # may vary\n >>> np.std(a, axis=0)\n array([1., 1.])\n >>> np.std(a, axis=1)\n array([0.5, 0.5])\n In single precision, std() can be inaccurate:\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.std(a)\n array(0.45)\n >>> np.std(a, dtype=np.float64)\n array(0.45, dtype=float64)\n \"\"\"\n return _api_internal.std(a, axis, dtype, ddof, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the variance along the specified axis.\n Returns the variance of the array elements, a measure of the spread of a\n distribution. The variance is computed for the flattened array by\n default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Array containing numbers whose variance is desired. If `a` is not an\n array, a conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the variance is computed. The default is to\n compute the variance of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a variance is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the variance.\n For arrays of integer type the default is `float32` or 'float64',\n When npx.is_np_default_dtype() returns False, default dtype is float32,\n When npx.is_np_default_dtype() returns True, default dtype is float64;\n For arrays of float types it is the same as the array type.\n out : ndarray, optional\n Alternate output array in which to place the result. It must have\n the same shape as the expected output, but the type is cast if\n necessary.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n ``N - ddof``, where ``N`` represents the number of elements. By\n default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `var` method of sub-classes of\n `ndarray`, however any non-default value will be. 
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n variance : ndarray, see dtype parameter above\n If ``out=None``, returns a new array containing the variance;\n otherwise, a reference to the output array is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.var(a)\n array(1.25)\n >>> np.var(a, axis=0)\n array([1., 1.])\n >>> np.var(a, axis=1)\n array([0.25, 0.25])\n\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.var(a)\n array(0.2025)\n >>> np.var(a, dtype=np.float64)\n array(0.2025, dtype=float64)\n >>> ((1-0.55)**2 + (0.1-0.55)**2)/2\n 0.2025\n \"\"\"\n return _api_internal.var(a, axis, dtype, ddof, keepdims, out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef indices(dimensions, dtype=None, ctx=None):\n \"\"\"Return an array representing the indices of a grid.\n\n Compute an array where the subarrays contain index values 0,1,...\n varying only along the corresponding axis.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the grid.\n dtype : data-type, optional\n The desired data-type for the array. Default is `int64`.\n ctx : device context, optional\n Device context on which the memory is allocated. Default is\n `mxnet.context.current_context()`.\n\n Returns\n -------\n grid : ndarray\n The array of grid indices,\n ``grid.shape = (len(dimensions),) + tuple(dimensions)``.\n\n Notes\n -----\n The output shape is obtained by prepending the number of dimensions\n in front of the tuple of dimensions, i.e. if `dimensions` is a tuple\n ``(r0, ..., rN-1)`` of length ``N``, the output shape is\n ``(N,r0,...,rN-1)``.\n\n The subarrays ``grid[k]`` contains the N-D array of indices along the\n ``k-th`` axis. Explicitly::\n\n grid[k,i0,i1,...,iN-1] = ik\n\n Examples\n --------\n >>> grid = np.indices((2, 3))\n >>> grid.shape\n (2, 2, 3)\n >>> grid[0] # row indices\n array([[0, 0, 0],\n [1, 1, 1]], dtype=int64)\n >>> grid[1] # column indices\n array([[0, 0, 0],\n [1, 1, 1]], dtype=int64)\n\n The indices can be used as an index into an array.\n\n >>> x = np.arange(20).reshape(5, 4)\n >>> row, col = np.indices((2, 3))\n >>> x[row, col]\n array([[0., 1., 2.],\n [4., 5., 6.]])\n\n Note that it would be more straightforward in the above example to\n extract the required elements directly with ``x[:2, :3]``.\n \"\"\"\n if isinstance(dimensions, (tuple, list)):\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.indices(dimensions, dtype, ctx)\n else:\n raise ValueError(\"The dimensions must be sequence of ints\")\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef copysign(x1, x2, out=None, **kwargs):\n r\"\"\"\n Change the sign of x1 to that of x2, element-wise.\n\n If `x2` is a scalar, its sign will be copied to all elements of `x1`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Values to change the sign of.\n x2 : ndarray or scalar\n The sign of `x2` is copied to `x1`.\n out : ndarray or None, optional\n A location into which the result is stored. It must be of the\n right shape and right type to hold the output. 
If not provided\n or `None`,a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The values of `x1` with the sign of `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -------\n This function differs from the original `numpy.copysign\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in\n the following aspects:\n\n - ``where`` param is not supported.\n\n Examples\n --------\n >>> np.copysign(1.3, -1)\n -1.3\n >>> 1/np.copysign(0, 1)\n inf\n >>> 1/np.copysign(0, -1)\n -inf\n\n >>> a = np.array([-1, 0, 1])\n >>> np.copysign(a, -1.1)\n array([-1., -0., -1.])\n >>> np.copysign(a, np.arange(3)-1)\n array([-1., 0., 1.])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.copysign(x1, x2, out=out)\n return _api_internal.copysign(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ravel(x, order='C'):\n r\"\"\"\n ravel(x)\n\n Return a contiguous flattened array.\n A 1-D array, containing the elements of the input, is returned. A copy is\n made only if needed.\n\n Parameters\n ----------\n x : ndarray\n Input array. The elements in `x` are read in row-major, C-style order and\n packed as a 1-D array.\n order : `C`, optional\n Only support row-major, C-style order.\n\n Returns\n -------\n y : ndarray\n y is an array of the same subtype as `x`, with shape ``(x.size,)``.\n Note that matrices are special cased for backward compatibility, if `x`\n is a matrix, then y is a 1-D ndarray.\n\n Notes\n -----\n This function differs from the original numpy.arange in the following aspects:\n - Only support row-major, C-style order.\n\n Examples\n --------\n It is equivalent to ``reshape(x, -1)``.\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6]])\n >>> print(np.ravel(x))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(x.reshape(-1))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(np.ravel(x.T))\n [1. 4. 2. 5. 3. 6.]\n \"\"\"\n if order == 'F':\n raise NotImplementedError('order {} is not supported'.format(order))\n if isinstance(x, numeric_types):\n return _np.reshape(x, -1)\n elif isinstance(x, NDArray):\n return reshape(x, -1)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name\n \"\"\"\n Converts a flat index or array of flat indices into a tuple of coordinate arrays.\n\n Parameters:\n -------------\n indices : array_like\n An integer array whose elements are indices into the flattened version of an array of dimensions shape.\n Before version 1.6.0, this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling indices.\n\n Returns:\n -------------\n unraveled_coords : ndarray\n Each row in the ndarray has the same shape as the indices array.\n Each column in the ndarray represents the unravelled index\n\n Examples:\n -------------\n >>> np.unravel_index([22, 41, 37], (7,6))\n ([3. 6. 6.]\n [4. 5. 
1.])\n >>> np.unravel_index(1621, (6,7,8,9))\n (3, 1, 4, 1)\n \"\"\"\n if order == 'C':\n if isinstance(indices, numeric_types):\n return _np.unravel_index(indices, shape)\n return tuple(_npi.unravel_index_fallback(indices, shape=shape))\n else:\n raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')\n\n\ndef flatnonzero(a):\n r\"\"\"\n Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to np.nonzero(np.ravel(a))[0].\n\n Parameters\n ----------\n a : array_like\n Input data.\n\n Returns\n -------\n res : ndarray\n Output array, containing the indices of the elements of `a.ravel()`\n that are non-zero.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n ravel : Return a 1-D array containing the elements of the input array.\n\n Examples\n --------\n >>> x = np.arange(-2, 3)\n >>> x\n array([-2, -1, 0, 1, 2])\n >>> np.flatnonzero(x)\n array([0, 1, 3, 4])\n\n Use the indices of the non-zero elements as an index array to extract\n these elements:\n\n >>> x.ravel()[np.flatnonzero(x)]\n array([-2, -1, 1, 2])\n \"\"\"\n return nonzero(ravel(a))[0]\n\n\n@set_module('mxnet.ndarray.numpy')\ndef diag_indices_from(arr):\n \"\"\"\n This returns a tuple of indices that can be used to access the main diagonal of an array\n a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is\n the usual diagonal, for a.ndim > 2 this is the set of indices to access\n a[i, i, ..., i] for i = [0..n-1].\n\n Parameters:\n -------------\n arr : ndarray\n Input array for acessing the main diagonal. All dimensions\n should have equal length.\n\n Return:\n -------------\n diag: tuple of ndarray\n indices of the main diagonal.\n\n Examples:\n -------------\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> idx = np.diag_indices_from(a)\n >>> idx\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n >>> a[idx] = 100\n >>> a\n array([[100, 1, 2, 3],\n [ 4, 100, 6, 7],\n [ 8, 9, 100, 11],\n [ 12, 13, 14, 100]])\n \"\"\"\n return tuple(_api_internal.diag_indices_from(arr))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hanning(M, dtype=None, ctx=None):\n r\"\"\"Return the Hanning window.\n\n The Hanning window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that you need select numpy.float32 or float64 in this operator.\n\n See Also\n --------\n blackman, hamming\n\n Notes\n -----\n The Hanning window is defined as\n\n .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hanning was named for Julius von Hann, an Austrian meteorologist.\n It is also known as the Cosine Bell. 
Some authors prefer that it be\n called a Hann window, to help avoid confusion with the very similar\n Hamming window.\n\n Most references to the Hanning window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 106-108.\n .. [3] Wikipedia, \"Window function\",\n http://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hanning(12)\n array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,\n 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,\n 0.07937312, 0. ])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hanning(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hann window\")\n Text(0.5, 1.0, 'Hann window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.hanning(M, dtype, ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hamming(M, dtype=None, ctx=None):\n r\"\"\"Return the hamming window.\n\n The hamming window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that you need select numpy.float32 or float64 in this operator.\n\n See Also\n --------\n blackman, hanning\n\n Notes\n -----\n The Hamming window is defined as\n\n .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hamming was named for R. W. Hamming, an associate of J. W. Tukey\n and is described in Blackman and Tukey. It was recommended for\n smoothing the truncated autocovariance function in the time domain.\n Most references to the Hamming window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 109-110.\n .. 
[3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hamming(12)\n array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,\n 0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,\n 0.15302327, 0.08000001])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hamming(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"hamming window\")\n Text(0.5, 1.0, 'hamming window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.hamming(M, dtype, ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef blackman(M, dtype=None, ctx=None):\n r\"\"\"Return the Blackman window.\n\n The Blackman window is a taper formed by using the first three\n terms of a summation of cosines. It was designed to have close to the\n minimal leakage possible. It is close to optimal, only slightly worse\n than a Kaiser window.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value one\n appears only if the number of samples is odd).\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that you need select numpy.float32 or float64 in this operator.\n\n See Also\n --------\n hamming, hanning\n\n Notes\n -----\n The Blackman window is defined as\n\n .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/{M-1}) + 0.08 \\cos(4\\pi n/{M-1})\n\n Most references to the Blackman window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function. It is known as a\n \"near optimal\" tapering function, almost as good (by some measures)\n as the kaiser window.\n\n References\n ----------\n Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,\n Dover Publications, New York.\n\n Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.\n Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 
468-471.\n\n Examples\n --------\n >>> np.blackman(12)\n array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,\n 7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,\n 4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.blackman(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"blackman window\")\n Text(0.5, 1.0, 'blackman window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = str(current_context())\n else:\n ctx = str(ctx)\n if dtype is not None and not isinstance(dtype, str):\n dtype = _np.dtype(dtype).name\n return _api_internal.blackman(M, dtype, ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef flip(m, axis=None, out=None):\n r\"\"\"\n flip(m, axis=None, out=None)\n\n Reverse the order of elements in an array along the given axis.\n\n The shape of the array is preserved, but the elements are reordered.\n\n Parameters\n ----------\n m : ndarray or scalar\n Input array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to flip over. The default,\n axis=None, will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n out : ndarray or scalar, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n out : ndarray or scalar\n A view of `m` with the entries of axis reversed. Since a view is\n returned, this operation is done in constant time.\n\n Examples\n --------\n >>> A = np.arange(8).reshape((2,2,2))\n >>> A\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.flip(A, 0)\n array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n >>> np.flip(A, 1)\n array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n >>> np.flip(A)\n array([[[7, 6],\n [5, 4]],\n [[3, 2],\n [1, 0]]])\n >>> np.flip(A, (0, 2))\n array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(m, numeric_types):\n return _np.flip(m, axis)\n elif isinstance(m, ndarray):\n return _api_internal.flip(m, axis, out)\n else:\n raise TypeError('type {} not supported'.format(str(type(m))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef flipud(m):\n r\"\"\"\n flipud(*args, **kwargs)\n\n Flip array in the up/down direction.\n\n Flip the entries in each column in the up/down direction.\n Rows are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array.\n\n Returns\n -------\n out : array_like\n A view of `m` with the rows reversed. 
Since a view is\n returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n fliplr : Flip array in the left/right direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to ``m[::-1,...]``.\n Does not require the array to be two-dimensional.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.0, 2, 3]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.flipud(A)\n array([[0., 0., 3.],\n [0., 2., 0.],\n [1., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.flipud(A) == A[::-1,...])\n array(True)\n\n >>> np.flipud(np.array([1,2]))\n array([2., 1.])\n \"\"\"\n return flip(m, 0)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef fliplr(m):\n r\"\"\"\n fliplr(*args, **kwargs)\n\n Flip array in the left/right direction.\n\n Flip the entries in each row in the left/right direction.\n Columns are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array, must be at least 2-D.\n\n Returns\n -------\n f : ndarray\n A view of `m` with the columns reversed. Since a view\n is returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n flipud : Flip array in the up/down direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to m[:,::-1]. Requires the array to be at least 2-D.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.,2.,3.]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.fliplr(A)\n array([[0., 0., 1.],\n [0., 2., 0.],\n [3., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.fliplr(A) == A[:,::-1,...])\n array(True)\n \"\"\"\n return flip(m, 1)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef around(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n around(x, decimals=0, out=None)\n\n Evenly round to the given number of decimals.\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n decimals : int, optional\n Number of decimal places to round to (default: 0). If\n decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n rounded_array : ndarray or scalar\n An array of the same type as `x`, containing the rounded values.\n A reference to the result is returned.\n\n Notes\n -----\n For values exactly halfway between rounded decimal values, NumPy\n rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,\n -0.5 and 0.5 round to 0.0, etc.\n\n This function differs from the original numpy.prod in the following aspects:\n\n - Cannot cast type automatically. 
Dtype of `out` must be same as the expected one.\n - Cannot support complex-valued number.\n\n Examples\n --------\n >>> np.around([0.37, 1.64])\n array([ 0., 2.])\n >>> np.around([0.37, 1.64], decimals=1)\n array([ 0.4, 1.6])\n >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value\n array([ 0., 2., 2., 4., 4.])\n >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned\n array([ 1, 2, 3, 11])\n >>> np.around([1, 2, 3, 11], decimals=-1)\n array([ 0, 0, 0, 10])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _api_internal.around(x, decimals, out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef round(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n round(a, decimals=0, out=None)\n Round an array to the given number of decimals.\n\n See Also\n --------\n around : equivalent function; see for details.\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _api_internal.around(x, decimals, out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef round_(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n round_(a, decimals=0, out=None)\n Round an array to the given number of decimals.\n\n See Also\n --------\n around : equivalent function; see for details.\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _npi.around(x, decimals, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef arctan2(x1, x2, out=None, **kwargs):\n r\"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n `y`-coordinates.\n x2 : ndarray or scalar\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if\n `x1` and `x2` are scalars.\n\n Notes\n -----\n *arctan2* is identical to the `atan2` function of the underlying\n C library. 
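For example, inputs with the same ratio ``x1/x2`` but lying in opposite\n quadrants are distinguished (a small illustrative sketch; the sample values\n below are chosen only for demonstration)::\n\n np.arctan2(np.array([1.]), np.array([1.])) # pi/4\n np.arctan2(np.array([-1.]), np.array([-1.])) # -3*pi/4\n\n 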
The following special values are defined in the C\n standard: [1]_\n\n ====== ====== ================\n `x1` `x2` `arctan2(x1,x2)`\n ====== ====== ================\n +/- 0 +0 +/- 0\n +/- 0 -0 +/- pi\n > 0 +/-inf +0 / +pi\n < 0 +/-inf -0 / -pi\n +/-inf +inf +/- (pi/4)\n +/-inf -inf +/- (3*pi/4)\n ====== ====== ================\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n This function differs from the original numpy.arctan2 in the following aspects:\n - Only support float16, float32 and float64.\n\n References\n ----------\n .. [1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n\n >>> x = np.array([-1, +1, +1, -1])\n >>> y = np.array([-1, -1, +1, +1])\n >>> np.arctan2(y, x) * 180 / np.pi\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. `arctan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> x = np.array([1, -1])\n >>> y = np.array([0, 0])\n >>> np.arctan2(x, y)\n array([ 1.5707964, -1.5707964])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.arctan2(x1, x2, out=out)\n return _api_internal.arctan2(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef hypot(x1, x2, out=None, **kwargs):\n r\"\"\"\n Given the \"legs\" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n\n Parameters\n ----------\n x1, x2 : ndarray\n Leg of the triangle(s).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n z : ndarray\n The hypotenuse of the triangle(s).\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n This function differs from the original numpy.hypot in the following aspects:\n - Only support float16, float32 and float64.\n\n Examples\n --------\n >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> np.hypot(3*np.ones((3, 3)), [4])\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.hypot(x1, x2, out=out)\n return _api_internal.hypot(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_and(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise AND of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_and(13, 17)\n 1\n\n >>> np.bitwise_and(14, 13)\n 12\n >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)\n array([12, 1], dtype=int32)\n\n >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))\n array([0, 1], dtype=int32)\n >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))\n array([ 2, 4, 16], dtype=int32)\n >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([False, True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.bitwise_and(x1, x2, out=out)\n return _api_internal.bitwise_and(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_xor(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise XOR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_xor(13, 17)\n 28\n\n >>> np.bitwise_xor(31, 5)\n 26\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)\n array([26, 6])\n\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([26, 5])\n >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.bitwise_xor(x1, x2, out=out)\n return _api_internal.bitwise_xor(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_or(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise OR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_or(13, 17)\n 29\n\n >>> np.bitwise_or(31, 5)\n 31\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)\n array([31, 7])\n\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([31, 7])\n >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.bitwise_or(x1, x2, out=out)\n return _api_internal.bitwise_or(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef ldexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns x1 * 2**x2, element-wise.\n The mantissas `x1` and twos exponents `x2` are used to construct\n floating point numbers ``x1 * 2**x2``.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The result of ``x1 * 2**x2``.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n Complex dtypes are not supported, they will raise a TypeError.\n Different from numpy, we allow x2 to be float besides int.\n `ldexp` is useful as the inverse of `frexp`, if used by itself it is\n more clear to simply use the expression ``x1 * 2**x2``.\n\n Examples\n --------\n >>> np.ldexp(5, np.arange(4))\n array([ 5., 10., 20., 40.])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.ldexp(x1, x2, out=out)\n return _api_internal.ldexp(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef logaddexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Logarithm of the sum of exponentiations of the inputs.\n\n Calculates log(exp(x1) + exp(x2)). This function is useful in statistics where\n the calculated probabilities of events may be so small as to exceed the range of\n normal floating point numbers. In such cases the logarithm of the calculate\n probability is stored. This function allows adding probabilities stored\n in such a fashion.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Logarithm of exp(x1) + exp(x2). This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> prob1 = np.log(1e-50)\n >>> prob2 = np.log(2.5e-50)\n >>> prob12 = np.logaddexp(prob1, prob2)\n >>> prob12\n -113.87649168120691\n >>> np.exp(prob12)\n 3.5000000000000057e-50\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.logaddexp(x1, x2, out=out)\n return _api_internal.logaddexp(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vdot(a, b):\n r\"\"\"\n Return the dot product of two vectors.\n Note that `vdot` handles multidimensional arrays differently than `dot`:\n it does *not* perform a matrix product, but flattens input arguments\n to 1-D vectors first. 
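In other words, for multidimensional inputs the result matches flattening\n both arguments before taking an ordinary dot product (an illustrative\n equivalence, shown here with hypothetical arrays ``a`` and ``b``)::\n\n np.vdot(a, b) # same value as np.dot(a.reshape(-1), b.reshape(-1))\n\n 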
Consequently, it should only be used for vectors.\n\n Parameters\n ----------\n a : ndarray\n First argument to the dot product.\n b : ndarray\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n Note that higher-dimensional arrays are flattened!\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n 30\n >>> np.vdot(b, a)\n 30\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 1)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef inner(a, b):\n r\"\"\"\n Inner product of two arrays.\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : ndarray\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n `out.shape = a.shape[:-1] + b.shape[:-1]`\n\n Raises\n ------\n ValueError\n If the last dimension of `a` and `b` has different size.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n np.inner(a, b) = sum(a[:]*b[:])\n More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n or explicitly::\n np.inner(a, b)[i0,...,ir-1,j0,...,js-1]\n = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])\n In addition `a` or `b` may be scalars, in which case::\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n A multidimensional example:\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> np.inner(a, b)\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n \"\"\"\n return tensordot(a, b, [-1, -1])\n\n\n@set_module('mxnet.ndarray.numpy')\ndef outer(a, b):\n r\"\"\"\n Compute the outer product of two vectors.\n Given two vectors, ``a = [a0, a1, ..., aM]`` and\n ``b = [b0, b1, ..., bN]``,\n the outer product [1]_ is::\n [[a0*b0 a0*b1 ... a0*bN ]\n [a1*b0 .\n [ ... .\n [aM*b0 aM*bN ]]\n\n Parameters\n ----------\n a : (M,) ndarray\n First input vector. Input is flattened if\n not already 1-dimensional.\n b : (N,) ndarray\n Second input vector. Input is flattened if\n not already 1-dimensional.\n\n Returns\n -------\n out : (M, N) ndarray\n ``out[i, j] = a[i] * b[j]``\n See also\n --------\n inner\n einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.\n ufunc.outer : A generalization to N dimensions and other operations.\n ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.\n References\n ----------\n .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd\n ed., Baltimore, MD, Johns Hopkins University Press, 1996,\n pg. 
8.\n Examples\n --------\n Make a (*very* coarse) grid for computing a Mandelbrot set:\n >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))\n >>> rl\n array([[-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.]])\n \"\"\"\n return tensordot(a.reshape_view((-1, )), b.reshape_view((-1, )), 0)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return the cross product of two (arrays of) vectors.\n\n The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular\n to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors\n are defined by the last axis of `a` and `b` by default, and these axis\n can have dimensions 2 or 3. Where the dimension of either `a` or `b` is\n 2, the third component of the input vector is assumed to be zero and the\n cross product calculated accordingly. In cases where both input vectors\n have dimension 2, the z-component of the cross product is returned.\n\n Parameters\n ----------\n a : ndarray\n Components of the first vector(s).\n b : ndarray\n Components of the second vector(s).\n axisa : int, optional\n Axis of `a` that defines the vector(s). By default, the last axis.\n axisb : int, optional\n Axis of `b` that defines the vector(s). By default, the last axis.\n axisc : int, optional\n Axis of `c` containing the cross product vector(s). Ignored if\n both input vectors have dimension 2, as the return is scalar.\n By default, the last axis.\n axis : int, optional\n If defined, the axis of `a`, `b` and `c` that defines the vector(s)\n and cross product(s). Overrides `axisa`, `axisb` and `axisc`.\n\n Returns\n -------\n c : ndarray\n Vector cross product(s).\n\n Raises\n ------\n ValueError\n When the dimension of the vector(s) in `a` and/or `b` does not\n equal 2 or 3.\n\n Notes\n -----\n Supports full broadcasting of the inputs.\n\n Examples\n --------\n Vector cross-product.\n\n >>> x = np.array([1., 2., 3.])\n >>> y = np.array([4., 5., 6.])\n >>> np.cross(x, y)\n array([-3., 6., -3.])\n\n One vector with dimension 2.\n\n >>> x = np.array([1., 2.])\n >>> y = np.array([4., 5., 6.])\n >>> np.cross(x, y)\n array([12., -6., -3.])\n\n Equivalently:\n\n >>> x = np.array([1., 2., 0.])\n >>> y = np.array([4., 5., 6.])\n >>> np.cross(x, y)\n array([12., -6., -3.])\n\n Both vectors with dimension 2.\n\n >>> x = np.array([1., 2.])\n >>> y = np.array([4., 5.])\n >>> np.cross(x, y)\n array(-3.)\n\n Multiple vector cross-products. 
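The input vectors can simply be stacked row-wise, giving one cross product\n per row (a small illustrative sketch using unit vectors)::\n\n np.cross(np.array([[1., 0., 0.], [0., 1., 0.]]),\n np.array([[0., 1., 0.], [0., 0., 1.]]))\n # cross(x_hat, y_hat) = z_hat and cross(y_hat, z_hat) = x_hat\n\n 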
Note that the direction of the cross\n product vector is defined by the `right-hand rule`.\n\n >>> x = np.array([[1., 2., 3.], [4., 5., 6.]])\n >>> y = np.array([[4., 5., 6.], [1., 2., 3.]])\n >>> np.cross(x, y)\n array([[-3., 6., -3.],\n [ 3., -6., 3.]])\n\n The orientation of `c` can be changed using the `axisc` keyword.\n\n >>> np.cross(x, y, axisc=0)\n array([[-3., 3.],\n [ 6., -6.],\n [-3., 3.]])\n\n Change the vector definition of `x` and `y` using `axisa` and `axisb`.\n\n >>> x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])\n >>> y = np.array([[7., 8., 9.], [4., 5., 6.], [1., 2., 3.]])\n >>> np.cross(x, y)\n array([[ -6., 12., -6.],\n [ 0., 0., 0.],\n [ 6., -12., 6.]])\n >>> np.cross(x, y, axisa=0, axisb=0)\n array([[-24., 48., -24.],\n [-30., 60., -30.],\n [-36., 72., -36.]])\n \"\"\"\n if axis is not None:\n axisa, axisb, axisc = (axis,) * 3\n\n if isinstance(a, NDArray) and isinstance(b, NDArray):\n return _api_internal.cross(a, b, axisa, axisb, axisc)\n else:\n raise TypeError(\"Input data should be NDarray\")\n\n\n@set_module('mxnet.ndarray.numpy')\ndef kron(a, b):\n r\"\"\"\n Kronecker product of two arrays.\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n Parameters\n ----------\n a, b : ndarray\n Returns\n -------\n out : ndarray\n See Also\n --------\n outer : The outer product\n Notes\n -----\n The function assumes that the number of dimensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n where::\n kt = it * st + jt, t = 0,...,N\n In the common 2-D case (N=1), the block structure can be visualized::\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])\n \"\"\"\n return _api_internal.kron(a, b)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef equal(x1, x2, out=None):\n \"\"\"\n Return (x1 == x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n Examples\n --------\n >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.equal(1, np.ones(1))\n array([ True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.equal(x1, x2, out=out)\n return _api_internal.equal(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef not_equal(x1, x2, out=None):\n \"\"\"\n Return (x1 != x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.not_equal(1, np.ones(1))\n array([False])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.not_equal(x1, x2, out=out)\n return _api_internal.not_equal(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef greater(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 > x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater(1, np.ones(1))\n array([False])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.greater(x1, x2, out=out)\n return _api_internal.greater(x1, x2, out)
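\n\n\n# A small illustrative sketch for the comparison helpers above (not library code;\n# it assumes ``from mxnet import np`` and the printed values are indicative only):\n# Python-scalar inputs take the NumPy fallback and return a NumPy bool, while\n# ndarray inputs are dispatched to the backend and broadcast like any other\n# binary operator.\n#\n# >>> np.greater(2, 1) # both arguments are Python scalars -> NumPy bool\n# True\n# >>> np.greater(np.ones((2, 1)), np.zeros((1, 3))).shape # broadcast result\n# (2, 3)\n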
\n\n@set_module('mxnet.ndarray.numpy')\ndef less(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 < x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less(1, np.ones(1))\n array([False])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.less(x1, x2, out=out)\n return _api_internal.less(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef greater_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 >= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater_equal(1, np.ones(1))\n array([True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.greater_equal(x1, x2, out=out)\n return _api_internal.greater_equal(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef less_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 <= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less_equal(1, np.ones(1))\n array([True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.less_equal(x1, x2, out=out)\n return _api_internal.less_equal(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef roll(a, shift, axis=None):\n \"\"\"\n Roll array elements along a given axis.\n\n Elements that roll beyond the last position are re-introduced at\n the first.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n shift : int or tuple of ints\n The number of places by which elements are shifted. If a tuple,\n then `axis` must be a tuple of the same size, and each of the\n given axes is shifted by the corresponding number.
If an int\n while `axis` is a tuple of ints, then the same value is used for\n all given axes.\n axis : int or tuple of ints, optional\n Axis or axes along which elements are shifted. By default, the\n array is flattened before shifting, after which the original\n shape is restored.\n\n Returns\n -------\n res : ndarray\n Output array, with the same shape as `a`.\n\n Notes\n -----\n Supports rolling over multiple dimensions simultaneously.\n\n Examples\n --------\n >>> x = np.arange(10)\n >>> np.roll(x, 2)\n array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])\n >>> np.roll(x, -2)\n array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])\n\n >>> x2 = np.reshape(x, (2,5))\n >>> x2\n array([[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]])\n >>> np.roll(x2, 1)\n array([[9., 0., 1., 2., 3.],\n [4., 5., 6., 7., 8.]])\n >>> np.roll(x2, -1)\n array([[1., 2., 3., 4., 5.],\n [6., 7., 8., 9., 0.]])\n >>> np.roll(x2, 1, axis=0)\n array([[5., 6., 7., 8., 9.],\n [0., 1., 2., 3., 4.]])\n >>> np.roll(x2, -1, axis=0)\n array([[5., 6., 7., 8., 9.],\n [0., 1., 2., 3., 4.]])\n >>> np.roll(x2, 1, axis=1)\n array([[4., 0., 1., 2., 3.],\n [9., 5., 6., 7., 8.]])\n >>> np.roll(x2, -1, axis=1)\n array([[1., 2., 3., 4., 0.],\n [6., 7., 8., 9., 5.]])\n \"\"\"\n return _api_internal.roll(a, shift, axis)\n\n\n@wrap_np_binary_func\ndef logical_and(x1, x2, out=None):\n r\"\"\"\n Compute the truth value of x1 AND x2 element-wise.\n Parameters\n ----------\n x1, x2 : array_like\n Logical AND is applied to the elements of `x1` and `x2`.\n If ``x1.shape != x2.shape``, they must be broadcastable to a common\n shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n Returns\n -------\n y : ndarray or bool\n Boolean result of the logical AND operation applied to the elements\n of `x1` and `x2`; the shape is determined by broadcasting.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n logical_or, logical_not, logical_xor, bitwise_or\n Examples\n --------\n >>> np.logical_and(True, False)\n False\n >>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([False, True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.logical_and(x1, x2, out=out)\n return _api_internal.logical_and(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef logical_or(x1, x2, out=None):\n \"\"\"\n Compute the truth value of x1 OR x2 element-wise.\n Parameters\n ----------\n x1, x2 : array_like\n Logical OR is applied to the elements of `x1` and `x2`.\n If ``x1.shape != x2.shape``, they must be broadcastable to a common\n shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n Returns\n -------\n y : ndarray or bool\n Boolean result of the logical OR operation applied to the elements\n of `x1` and `x2`; the shape is determined by broadcasting.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n logical_and, logical_not, logical_xor, bitwise_or\n Examples\n --------\n >>> np.logical_or(True, False)\n True\n >>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([True, True])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.logical_or(x1, x2, out=out)\n return _api_internal.logical_or(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef logical_xor(x1, x2, out=None):\n \"\"\"\n Compute the truth value of x1 XOR x2 element-wise.\n Parameters\n ----------\n x1, x2 : array_like\n Logical XOR is applied to the elements of `x1` and `x2`.\n If ``x1.shape != x2.shape``, they must be broadcastable to a common\n shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n Returns\n -------\n y : ndarray or bool\n Boolean result of the logical XOR operation applied to the elements\n of `x1` and `x2`; the shape is determined by broadcasting.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n logical_and, logical_not, logical_or, bitwise_or\n Examples\n --------\n >>> np.logical_xor(True, False)\n True\n >>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.logical_xor(x1, x2, out=out)\n return _api_internal.logical_xor(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef rot90(m, k=1, axes=(0, 1)):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by axes.\n Rotation direction is from the first towards the second axis.\n Parameters\n ----------\n m : ndarray\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes: (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n Notes\n -----\n rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))\n rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], 'int')\n >>> m\n array([[1, 2],\n [3, 4]], dtype=int64)\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]], dtype=int64)\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]], dtype=int64)\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1., 3.],\n [0., 2.]],\n\n [[5., 7.],\n [4., 6.]]])\n \"\"\"\n return _api_internal.rot90(m, k, axes)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef einsum(*operands, **kwargs):\n r\"\"\"\n einsum(subscripts, *operands, out=None, optimize=False)\n\n Evaluates the Einstein summation convention on the operands.\n\n Using the Einstein summation convention, many common multi-dimensional,\n linear algebraic array operations can be represented in a
simple fashion.\n In *implicit* mode `einsum` computes these values.\n\n In *explicit* mode, `einsum` provides further flexibility to compute\n other array operations that might not be considered classical Einstein\n summation operations, by disabling, or forcing summation over specified\n subscript labels.\n\n See the notes and examples for clarification.\n\n Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation as comma separated list of\n subscript labels. An implicit (classical Einstein summation)\n calculation is performed unless the explicit indicator '->' is\n included as well as subscript labels of the precise output form.\n operands : list of ndarray\n These are the arrays for the operation.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n optimize : {False, True}, optional\n Controls if intermediate optimization should occur. No optimization\n will occur if False. Defaults to False.\n\n Returns\n -------\n output : ndarray\n The calculation based on the Einstein summation convention.\n\n Notes\n -----\n The Einstein summation convention can be used to compute\n many multi-dimensional, linear algebraic array operations. `einsum`\n provides a succinct way of representing these.\n\n A non-exhaustive list of these operations,\n which can be computed by `einsum`, is shown below along with examples:\n\n * Trace of an array, :py:func:`np.trace`.\n * Return a diagonal, :py:func:`np.diag`.\n * Array axis summations, :py:func:`np.sum`.\n * Transpositions and permutations, :py:func:`np.transpose`.\n * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.\n * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.\n * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.\n * Tensor contractions, :py:func:`np.tensordot`.\n\n The subscripts string is a comma-separated list of subscript labels,\n where each label refers to a dimension of the corresponding operand.\n Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``\n is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label\n appears only once, it is not summed, so ``np.einsum('i', a)`` produces a\n view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``\n describes traditional matrix multiplication and is equivalent to\n :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one\n operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent\n to :py:func:`np.trace(a) <np.trace>`.\n\n In *implicit mode*, the chosen subscripts are important\n since the axes of the output are reordered alphabetically. This\n means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while\n ``np.einsum('ji', a)`` takes its transpose. Additionally,\n ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,\n ``np.einsum('ij,jh', a, b)`` returns the transpose of the\n multiplication since subscript 'h' precedes subscript 'i'.\n\n In *explicit mode* the output can be directly controlled by\n specifying output subscript labels. This requires the\n identifier '->' as well as the list of output subscript labels.\n This feature increases the flexibility of the function since\n summing can be disabled or forced when required. 
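For instance, forcing a full reduction in explicit mode (an illustrative sketch; the\n printed value assumes the default float32 dtype):\n\n >>> m = np.arange(6).reshape(2, 3)\n >>> np.einsum('ij->', m)\n array(15.)\n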
The call\n ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,\n and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.\n The difference is that `einsum` does not allow broadcasting by default.\n Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the\n order of the output subscript labels and therefore returns matrix\n multiplication, unlike the example above in implicit mode.\n\n To enable and control broadcasting, use an ellipsis. Default\n NumPy-style broadcasting is done by adding an ellipsis\n to the left of each term, like ``np.einsum('...ii->...i', a)``.\n To take the trace along the first and last axes,\n you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix\n product with the left-most indices instead of rightmost, one can do\n ``np.einsum('ij...,jk...->ik...', a, b)``.\n\n When there is only one operand, no axes are summed, and no output\n parameter is provided, a view into the operand is returned instead\n of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``\n produces a view.\n\n The ``optimize`` argument which will optimize the contraction order\n of an einsum expression. For a contraction with three or more operands this\n can greatly increase the computational efficiency at the cost of a larger\n memory footprint during computation.\n\n Typically a 'greedy' algorithm is applied which empirical tests have shown\n returns the optimal path in the majority of cases. 'optimal' is not supported\n for now.\n\n This function differs from the original `numpy.einsum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in\n the following way(s):\n\n - Does not support 'optimal' strategy\n - Does not support the alternative subscript like\n `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`\n - Does not produce view in any cases\n\n Examples\n --------\n >>> a = np.arange(25).reshape(5,5)\n >>> b = np.arange(5)\n >>> c = np.arange(6).reshape(2,3)\n\n Trace of a matrix:\n\n >>> np.einsum('ii', a)\n array(60.)\n\n Extract the diagonal (requires explicit form):\n\n >>> np.einsum('ii->i', a)\n array([ 0., 6., 12., 18., 24.])\n\n Sum over an axis (requires explicit form):\n\n >>> np.einsum('ij->i', a)\n array([ 10., 35., 60., 85., 110.])\n >>> np.sum(a, axis=1)\n array([ 10., 35., 60., 85., 110.])\n\n For higher dimensional arrays summing a single axis can be done with ellipsis:\n\n >>> np.einsum('...j->...', a)\n array([ 10., 35., 60., 85., 110.])\n\n Compute a matrix transpose, or reorder any number of axes:\n\n >>> np.einsum('ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.einsum('ij->ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.transpose(c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n\n Vector inner products:\n\n >>> np.einsum('i,i', b, b)\n array(30.)\n\n Matrix vector multiplication:\n\n >>> np.einsum('ij,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.dot(a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.einsum('...j,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n\n Broadcasting and scalar multiplication:\n\n >>> np.einsum('..., ...', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.einsum(',ij', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.multiply(3, c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n\n Vector outer product:\n\n >>> np.einsum('i,j', np.arange(2)+1, b)\n array([[0., 1., 2., 3., 4.],\n [0., 2., 4., 6., 8.]])\n\n Tensor contraction:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n 
>>> b = np.arange(24.).reshape(4,3,2)\n >>> np.einsum('ijk,jil->kl', a, b)\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n Example of ellipsis use:\n\n >>> a = np.arange(6).reshape((3,2))\n >>> b = np.arange(12).reshape((4,3))\n >>> np.einsum('ki,jk->ij', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('ki,...k->i...', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('k...,jk', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n\n Chained array operations. For more complicated contractions, speed ups\n might be achieved by repeatedly computing a 'greedy' path. Performance\n improvements can be particularly significant with larger arrays:\n\n >>> a = np.ones(64).reshape(2,4,8)\n # Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)\n # Greedy `einsum` (faster optimal path approximation): ~0.117ms\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)\n \"\"\"\n # Grab non-einsum kwargs; do not optimize by default.\n optimize_arg = kwargs.pop('optimize', False)\n out = kwargs.pop('out', None)\n\n subscripts = operands[0]\n operands = operands[1:]\n return _api_internal.einsum(*operands, subscripts, out, int(optimize_arg))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef nonzero(a):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of `a`,\n containing the indices of the non-zero elements in that\n dimension. The values in `a` are always returned in\n row-major, C-style order.\n\n To group the indices by element, rather than dimension, use `argwhere`,\n which returns a row for each non-zero element.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n ndarray.nonzero :\n Equivalent ndarray method.\n\n Notes\n -----\n While the nonzero values can be obtained with ``a[nonzero(a)]``, it is\n recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which\n will correctly handle 0-d arrays.\n\n Examples\n --------\n >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])\n >>> x\n array([[3, 0, 0],\n [0, 4, 0],\n [5, 6, 0]], dtype=int32)\n >>> np.nonzero(x)\n (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))\n\n >>> x[np.nonzero(x)]\n array([3, 4, 5, 6])\n >>> np.transpose(np.stack(np.nonzero(x)))\n array([[0, 0],\n [1, 1],\n [2, 0],\n [2, 1]], dtype=int64)\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. 
Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, np.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)\n >>> a > 3\n array([[False, False, False],\n [ True, True, True],\n [ True, True, True]])\n >>> np.nonzero(a > 3)\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n\n Using this result to index `a` is equivalent to using the mask directly:\n\n >>> a[np.nonzero(a > 3)]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n >>> a[a > 3]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n\n ``nonzero`` can also be called as a method of the array.\n\n >>> (a > 3).nonzero()\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n \"\"\"\n out = _api_internal.nonzero(a).transpose()\n return tuple([out[i] for i in range(len(out))])\n\n\n@set_module('mxnet.ndarray.numpy')\ndef percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th percentile of the data along the specified axis.\n Returns the q-th percentile(s) of the array elements.\n\n Parameters\n ----------\n a : ndarray\n Input array\n q : ndarray\n Percentile or sequence of percentiles to compute.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the percentiles are computed. The default is to\n compute the percentile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have the same\n shape and buffer length as the expected output, but the type (of the output)\n will be cast if necessary.\n overwrite_input : bool, optional (Not supported yet)\n If True, then allow the input array a to be modified by intermediate calculations,\n to save memory. In this case, the contents of the input a after this function\n completes is undefined.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use when the\n desired percentile lies between two data points i < j:\n 'linear': i + (j - i) * fraction, where fraction is the fractional part of the\n index surrounded by i and j.\n 'lower': i.\n 'higher': j.\n 'nearest': i or j, whichever is nearest.\n 'midpoint': (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as\n dimensions with size one. 
With this option, the result will broadcast\n correctly against the original array a.\n\n Returns\n -------\n percentile : scalar or ndarray\n Output array.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.percentile(a, np.array(50))\n array(3.5)\n >>> np.percentile(a, np.array(50), axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.percentile(a, np.array(50), axis=1)\n array([7., 2.])\n >>> np.percentile(a, np.array(50), axis=1, keepdims=True)\n array([[7.],\n [2.]])\n\n >>> m = np.percentile(a, np.array(50), axis=0)\n >>> out = np.zeros_like(m)\n >>> np.percentile(a, np.array(50), axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n \"\"\"\n if overwrite_input is not None:\n raise NotImplementedError('overwrite_input is not supported yet')\n return _api_internal.percentile(a, q, axis, interpolation, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef median(a, axis=None, out=None, overwrite_input=None, keepdims=False):\n r\"\"\"\n Compute the median along the specified axis.\n Returns the median of the array elements.\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : {int, sequence of int, None}, optional\n Axis or axes along which the medians are computed. The default\n is to compute the median along a flattened version of the array.\n A sequence of axes is supported since version 1.9.0.\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n Returns\n -------\n median : ndarray\n A new array holding the result. If the input contains integers\n or floats smaller than ``float32``, then the output data-type is\n ``np.float32``. Otherwise, the data-type of the output is the\n same as that of the input. 
If `out` is specified, that array is\n returned instead.\n See Also\n --------\n mean, percentile\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.median(a)\n 3.5\n >>> np.median(a, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.median(a, axis=1)\n array([7., 2.])\n \"\"\"\n return quantile(a=a, q=0.5, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation='midpoint', keepdims=keepdims)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th quantile of the data along the specified axis.\n New in version 1.15.0.\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n q : ndarray\n Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the quantiles are computed.\n The default is to compute the quantile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result.\n It must have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use\n when the desired quantile lies between two data points i < j:\n linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.\n lower: i.\n higher: j.\n nearest: i or j, whichever is nearest.\n midpoint: (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the original array a.\n Returns\n -------\n quantile : ndarray\n If q is a single quantile and axis=None, then the result is a scalar.\n If multiple quantiles are given, first axis of the result corresponds to the quantiles.\n The other axes are the axes that remain after the reduction of a.\n If out is specified, that array is returned instead.\n See also\n --------\n mean\n Notes\n -----\n Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum\n to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors\n as well as the interpolation parameter will determine the quantile if the normalized ranking\n does not match the location of q exactly. 
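For instance, with the default 'linear' interpolation the quantile sits at position\n q * (N - 1) in the sorted data (an illustrative sketch; the printed value assumes the\n default float32 dtype):\n >>> v = np.array([1., 2., 3., 4.])\n >>> np.quantile(v, np.array(0.4)) # position 0.4 * 3 = 1.2 -> 2 + 0.2 * (3 - 2)\n array(2.2)\n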
This function is the same as the median if q=0.5,\n the same as the minimum if q=0.0 and the same as the maximum if q=1.0.\n This function differs from the original `numpy.quantile\n <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in\n the following aspects:\n - q must be ndarray type even if it is a scalar\n - do not support overwrite_input\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10., 7., 4.],\n [3., 2., 1.]])\n >>> q = np.array(0.5)\n >>> q\n array(0.5)\n >>> np.quantile(a, q)\n array(3.5)\n >>> np.quantile(a, q, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.quantile(a, q, axis=1)\n array([7., 2.])\n >>> np.quantile(a, q, axis=1, keepdims=True)\n array([[7.],\n [2.]])\n >>> m = np.quantile(a, q, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.quantile(a, q, axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> out\n array([6.5, 4.5, 2.5])\n \"\"\"\n if overwrite_input is not None:\n raise NotImplementedError('overwrite_input is not supported yet')\n return _api_internal.percentile(a, q * 100, axis, interpolation, keepdims, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef shares_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n This function differs from the original `numpy.shares_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `may_share_memory` in MXNet np\n \"\"\"\n return _api_internal.share_memory(a, b).item()\n\n\n@set_module('mxnet.ndarray.numpy')\ndef may_share_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n This function differs from the original `numpy.may_share_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `shares_memory` in MXNet np\n \"\"\"\n return _api_internal.share_memory(a, b).item()\n\n\n@set_module('mxnet.ndarray.numpy')\ndef interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments\n \"\"\"\n One-dimensional linear interpolation.\n Returns the one-dimensional piecewise linear interpolant to a function\n with given values at discrete data-points.\n\n Parameters\n ----------\n x : ndarray\n The x-coordinates of the interpolated values.\n xp : 1-D array of floats\n The x-coordinates of the data points, must be increasing if argument\n `period` is not specified. 
Otherwise, `xp` is internally sorted after\n normalizing the periodic boundaries with ``xp = xp % period``.\n fp : 1-D array of floats\n The y-coordinates of the data points, same length as `xp`.\n left : optional float corresponding to fp\n Value to return for `x < xp[0]`, default is `fp[0]`.\n right : optional float corresponding to fp\n Value to return for `x > xp[-1]`, default is `fp[-1]`.\n period : None or float, optional\n A period for the x-coordinates. This parameter allows the proper\n interpolation of angular x-coordinates. Parameters `left` and `right`\n are ignored if `period` is specified.\n .. versionadded:: 1.10.0\n\n Returns\n -------\n y : float (corresponding to fp) or ndarray\n The interpolated values, same shape as `x`.\n Raises\n ------\n ValueError\n If `xp` and `fp` have different length\n If `xp` or `fp` are not 1-D sequences\n If `period == 0`\n\n Notes\n -----\n Does not check that the x-coordinate sequence `xp` is increasing.\n If `xp` is not increasing, the results are nonsense.\n A simple check for increasing is::\n np.all(np.diff(xp) > 0)\n\n Examples\n --------\n >>> xp = [1, 2, 3]\n >>> fp = [3, 2, 0]\n >>> np.interp(2.5, xp, fp)\n 1.0\n >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)\n array([ 3. , 3. , 2.5 , 0.56, 0. ])\n >>> UNDEF = -99.0\n >>> np.interp(3.14, xp, fp, right=UNDEF)\n -99.0\n Plot an interpolant to the sine function:\n >>> x = np.linspace(0, 2*np.pi, 10)\n >>> y = np.sin(x)\n >>> xvals = np.linspace(0, 2*np.pi, 50)\n >>> yinterp = np.interp(xvals, x, y)\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(xvals, yinterp, '-x')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.show()\n Interpolation with periodic x-coordinates:\n >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]\n >>> xp = [190, -190, 350, -350]\n >>> fp = [5, 10, 3, 4]\n >>> np.interp(x, xp, fp, period=360)\n array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])\n \"\"\"\n if not isinstance(x, numeric_types):\n x = x.astype(float)\n return _api_internal.interp(xp.astype(float), fp.astype(float), x, left,\n right, period)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n Calculate the n-th discrete difference along the given axis.\n\n Parameters\n ----------\n a : ndarray\n Input array\n n : int, optional\n The number of times values are differenced. 
If zero, the input is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the last axis.\n prepend, append : ndarray, optional\n Not supported yet\n\n Returns\n -------\n diff : ndarray\n The n-th differences.\n The shape of the output is the same as a except along axis where the dimension is smaller by n.\n The type of the output is the same as the type of the difference between any two elements of a.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n Notes\n -----\n Optional inputs `prepend` and `append` are not supported yet\n \"\"\"\n if prepend is not None or append is not None:\n raise NotImplementedError('prepend and append options are not supported yet')\n return _api_internal.diff(a, n, axis)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ediff1d(ary, to_end=None, to_begin=None):\n \"\"\"\n The differences between consecutive elements of an array.\n\n Parameters\n ----------\n ary : ndarray\n If necessary, will be flattened before the differences are taken.\n to_end : ndarray or scalar, optional\n Number(s) to append at the end of the returned differences.\n to_begin : ndarray or scalar, optional\n Number(s) to prepend at the beginning of the returned differences.\n\n Returns\n -------\n ediff1d : ndarray\n The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.ediff1d(x)\n array([ 1., 2., 3., -7.])\n\n >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))\n array([-99., 1., 2., 3., -7., 88., 99.])\n\n The returned array is always 1D.\n\n >>> y = np.array([[1, 2, 4], [1, 6, 24]])\n >>> np.ediff1d(y)\n array([ 1., 2., -3., 5., 18.])\n\n >>> np.ediff1d(x, to_begin=y)\n array([ 1., 2., 4., 1., 6., 24., 1., 2., 3., -7.])\n \"\"\"\n return _api_internal.ediff1d(ary, to_end, to_begin)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef resize(a, new_shape):\n \"\"\"\n Return a new array with the specified shape.\n If the new array is larger than the original array, then the new\n array is filled with repeated copies of `a`. Note that this behavior\n is different from a.resize(new_shape) which fills with zeros instead\n of repeated copies of `a`.\n\n Parameters\n ----------\n a : ndarray\n Array to be resized.\n new_shape : int or tuple of int\n Shape of resized array.\n\n Returns\n -------\n reshaped_array : ndarray\n The new array is formed from the data in the old array, repeated\n if necessary to fill out the required number of elements. The\n data are repeated in the order that they are stored in memory.\n\n See Also\n --------\n ndarray.resize : resize an array in-place.\n\n Notes\n -----\n Warning: This functionality does **not** consider axes separately,\n i.e. it does not apply interpolation/extrapolation.\n It fills the return array with the required number of elements, taken\n from `a` as they are laid out in memory, disregarding strides and axes.\n (This is in case the new shape is smaller.
For larger, see above.)\n This functionality is therefore not suitable to resize images,\n or data where each axis represents a separate and distinct entity.\n\n Examples\n --------\n >>> a = np.array([[0, 1], [2, 3]])\n >>> np.resize(a, (2, 3))\n array([[0., 1., 2.],\n [3., 0., 1.]])\n >>> np.resize(a, (1, 4))\n array([[0., 1., 2., 3.]])\n >>> np.resize(a,(2, 4))\n array([[0., 1., 2., 3.],\n [0., 1., 2., 3.]])\n \"\"\"\n return _npi.resize_fallback(a, new_shape=new_shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef fill_diagonal(a, val, wrap=False):\n \"\"\"\n Fill the main diagonal of the given array of any dimensionality.\n For an array `a` with ``a.ndim >= 2``, the diagonal is the list of\n locations with indices ``a[i, ..., i]`` all identical. This function\n modifies the input array in-place, it does not return a value.\n\n Parameters\n ----------\n a : array, at least 2-D.\n Array whose diagonal is to be filled, it gets modified in-place.\n val : scalar\n Value to be written on the diagonal, its type must be compatible with\n that of the array a.\n wrap : bool\n For tall matrices in NumPy version up to 1.6.2, the\n diagonal \"wrapped\" after N columns. You can have this behavior\n with this option. This affects only tall matrices.\n\n Examples\n --------\n >>> a = np.zeros((3, 3), int)\n >>> np.fill_diagonal(a, 5)\n >>> a\n array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5]])\n The same function can operate on a 4-D array:\n >>> a = np.zeros((3, 3, 3, 3), int)\n >>> np.fill_diagonal(a, 4)\n We only show a few blocks for clarity:\n >>> a[0, 0]\n array([[4, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n >>> a[1, 1]\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 0]])\n >>> a[2, 2]\n array([[0, 0, 0],\n [0, 0, 0],\n [0, 0, 4]])\n The wrap option affects only tall matrices:\n >>> # tall matrices no wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [0, 0, 0]])\n >>> # tall matrices wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [4, 0, 0]])\n >>> # wide matrices\n >>> a = np.zeros((3, 5), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0, 0, 0],\n [0, 4, 0, 0, 0],\n [0, 0, 4, 0, 0]])\n The anti-diagonal can be filled by reversing the order of elements\n using either `numpy.flipud` or `numpy.fliplr`.\n >>> a = np.zeros((3, 3), int);\n >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip\n >>> a\n array([[0, 0, 1],\n [0, 2, 0],\n [3, 0, 0]])\n >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip\n >>> a\n array([[0, 0, 3],\n [0, 2, 0],\n [1, 0, 0]])\n Note that the order in which the diagonal is filled varies depending\n on the flip function.\n \"\"\"\n if isinstance(val, list):\n val = [float(v) for v in val]\n else:\n val = [float(val)]\n _api_internal.fill_diagonal(a, val, wrap, a)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef squeeze(x, axis=None):\n \"\"\"\n Remove single-dimensional entries from the shape of an array.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : None or int or tuple of ints, optional\n .. versionadded:: 1.7.0\n Selects a subset of the single-dimensional entries in the\n shape. If an axis is selected with shape entry greater than\n one, an error is raised.\n\n Returns\n -------\n squeezed : ndarray\n The input array, but with all or a subset of the\n dimensions of length 1 removed. 
This is always `a` itself\n or a view into `a`.\n\n Raises\n ------\n ValueError\n If `axis` is not `None`, and an axis being squeezed is not of length 1\n\n See Also\n --------\n expand_dims : The inverse operation, adding singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n\n Examples\n --------\n >>> x = np.array([[[0], [1], [2]]])\n >>> x.shape\n (1, 3, 1)\n >>> np.squeeze(x).shape\n (3,)\n >>> np.squeeze(x, axis=0).shape\n (3, 1)\n >>> np.squeeze(x, axis=1).shape\n Traceback (most recent call last):\n ...\n ValueError: cannot select an axis to squeeze out which has size not equal to one\n >>> np.squeeze(x, axis=2).shape\n (1, 3)\n \"\"\"\n return _api_internal.squeeze(x, axis)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):\n \"\"\"\n Replace NaN with zero and infinity with large finite numbers (default\n behaviour) or with the numbers defined by the user using the `nan`,\n `posinf` and/or `neginf` keywords.\n\n If `x` is inexact, NaN is replaced by zero or by the user defined value in\n `nan` keyword, infinity is replaced by the largest finite floating point\n values representable by ``x.dtype`` or by the user defined value in\n `posinf` keyword and -infinity is replaced by the most negative finite\n floating point values representable by ``x.dtype`` or by the user defined\n value in `neginf` keyword.\n\n For complex dtypes, the above is applied to each of the real and\n imaginary components of `x` separately.\n\n If `x` is not inexact, then no replacements are made.\n\n Parameters\n ----------\n x : ndarray\n Input data.\n copy : bool, optional\n Whether to create a copy of `x` (True) or to replace values\n in-place (False). The in-place operation only occurs if\n casting to an array does not require a copy.\n Default is True.\n nan : int, float, optional\n Value to be used to fill NaN values. If no value is passed\n then NaN values will be replaced with 0.0.\n posinf : int, float, optional\n Value to be used to fill positive infinity values. If no value is\n passed then positive infinity values will be replaced with a very\n large number.\n neginf : int, float, optional\n Value to be used to fill negative infinity values. If no value is\n passed then negative infinity values will be replaced with a very\n small (or negative) number.\n\n .. versionadded:: 1.13\n\n Returns\n -------\n out : ndarray\n `x`, with the non-finite values replaced. If `copy` is False, this may\n be `x` itself.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). 
This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.nan_to_num(np.inf)\n 1.7976931348623157e+308\n >>> np.nan_to_num(-np.inf)\n -1.7976931348623157e+308\n >>> np.nan_to_num(np.nan)\n 0.0\n >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])\n >>> np.nan_to_num(x)\n array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,\n 1.2800000e+02])\n >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)\n array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,\n 1.2800000e+02])\n >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype=\"float64\")/0\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y)\n array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],\n [ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)\n >>> np.nan_to_num(y, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n \"\"\"\n if isinstance(x, numeric_types):\n return _np.nan_to_num(x, copy, nan, posinf, neginf)\n elif isinstance(x, NDArray):\n if x.dtype in ['int8', 'uint8', 'int32', 'int64']:\n return x\n if not copy:\n return _api_internal.nan_to_num(x, copy, nan, posinf, neginf, x)\n return _api_internal.nan_to_num(x, copy, nan, posinf, neginf, None)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isnan(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for NaN and return result as a boolean array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is NaN, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n\n This function differs from the original `numpy.isinf\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isnan(np.nan)\n True\n >>> np.isnan(np.inf)\n False\n >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))\n array([ True, False, False])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.isnan, _np.isnan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive or negative infinity.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive or negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n This function differs from the original `numpy.isnan\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isinf(np.inf)\n True\n >>> np.isinf(np.nan)\n False\n >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))\n array([ True, True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool_)\n >>> np.isinf(x, y)\n array([ True, False, True])\n >>> y\n array([ True, False, True])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.isinf, _np.isinf, out=out, **kwargs)\n\n\n@wrap_np_unary_func\ndef isposinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive infinity, return result as bool array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isposinf(np.inf)\n True\n >>> np.isposinf(-np.inf)\n False\n >>> np.isposinf(np.nan)\n False\n >>> np.isposinf(np.array([-np.inf, 0., np.inf]))\n array([False, False, True])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isposinf(x, y)\n array([False, False, True])\n >>> y\n array([False, False, True])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.isposinf, _np.isposinf, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isneginf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for negative infinity, return result as bool 
array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isneginf(-np.inf)\n True\n >>> np.isneginf(np.inf)\n False\n >>> np.isneginf(float('-inf'))\n True\n >>> np.isneginf(np.array([-np.inf, 0., np.inf]))\n array([ True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isneginf(x, y)\n array([ True, False, False])\n >>> y\n array([ True, False, False])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.isneginf, _np.isneginf, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isfinite(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for finiteness (not infinity or not Not a Number).\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is finite (neither NaN nor infinity), false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n Not a Number, positive infinity and negative infinity are considered to be non-finite.\n\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n Also that positive infinity is not equivalent to negative infinity.\n But infinity is equivalent to positive infinity. Errors result if the second argument\n is also supplied when x is a scalar input, or if first and second arguments have different shapes.\n\n Examples\n --------\n >>> np.isfinite(1)\n True\n >>> np.isfinite(0)\n True\n >>> np.isfinite(np.nan)\n False\n >>> np.isfinite(np.inf)\n False\n >>> np.isfinite(-np.inf)\n False\n >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))\n array([False, True, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isfinite(x, y)\n array([False, True, False])\n >>> y\n array([False, True, False])\n \"\"\"\n return _pure_unary_func_helper(x, _api_internal.isfinite, _np.isfinite, out=out, **kwargs)
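\n\n\n# A small usage sketch for the floating-point predicates above (illustrative only;\n# assumes ``from mxnet import np`` and the default float32 dtype; printed values are\n# indicative): non-finite entries can be screened out with ``isfinite`` before a reduction.\n#\n# >>> x = np.array([1., np.inf, np.nan, 4.])\n# >>> np.where(np.isfinite(x), x, np.zeros_like(x)).sum()\n# array(5.)\n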
\n\n@set_module('mxnet.ndarray.numpy')\ndef atleast_1d(*arys):\n \"\"\"\n Convert inputs to arrays with at least one dimension.\n\n Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved.\n\n Parameters\n ----------\n arys1, arys2, ... : ndarray\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary.\n\n See also\n --------\n atleast_2d, atleast_3d\n\n Examples\n --------\n >>> np.atleast_1d(1.0)\n array([1.])\n >>> x = np.arange(9.0).reshape(3,3)\n >>> np.atleast_1d(x)\n array([[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]])\n >>> np.atleast_1d(np.array(1), np.array([3, 4]))\n [array([1.]), array([3., 4.])]\n \"\"\"\n if len(arys) == 1:\n return _api_internal.atleast_1d(*arys)[0]\n return list(_api_internal.atleast_1d(*arys))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef atleast_2d(*arys):\n \"\"\"\n Convert inputs to arrays with at least two dimensions.\n\n Parameters\n ----------\n arys1, arys2, ... : ndarray\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary.\n\n See also\n --------\n atleast_1d, atleast_3d\n\n Examples\n --------\n >>> np.atleast_2d(3.0)\n array([[3.]])\n >>> x = np.arange(3.0)\n >>> np.atleast_2d(x)\n array([[0., 1., 2.]])\n >>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))\n [array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]\n \"\"\"\n if len(arys) == 1:\n return _api_internal.atleast_2d(*arys)[0]\n return list(_api_internal.atleast_2d(*arys))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef atleast_3d(*arys):\n \"\"\"\n Convert inputs to arrays with at least three dimensions.\n\n Parameters\n ----------\n arys1, arys2, ... : ndarray\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or list of arrays, each with a.ndim >= 3.\n For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1),\n and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).\n\n See also\n --------\n atleast_1d, atleast_2d\n\n Examples\n --------\n >>> np.atleast_3d(3.0)\n array([[[3.]]])\n >>> x = np.arange(3.0)\n >>> np.atleast_3d(x).shape\n (1, 3, 1)\n >>> x = np.arange(12.0).reshape(4,3)\n >>> np.atleast_3d(x).shape\n (4, 3, 1)\n >>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])):\n ... print(arr, arr.shape)\n ...\n [[[1.]\n [2.]]] (1, 2, 1)\n [[[1.]\n [2.]]] (1, 2, 1)\n [[[1. 2.]]] (1, 1, 2)\n \"\"\"\n if len(arys) == 1:\n return _api_internal.atleast_3d(*arys)[0]\n return list(_api_internal.atleast_3d(*arys))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef where(condition, x=None, y=None): # pylint: disable=too-many-return-statements\n \"\"\"where(condition, [x, y])\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. The rest of this documentation\n covers only the case where all three arguments are provided.\n\n Parameters\n ----------\n condition : ndarray\n Where True, yield `x`, otherwise yield `y`.\n x, y : ndarray\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape.
`x` and `y` must have the same dtype.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n This function differs from the original `numpy.where\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in\n the following way(s):\n\n - If `condition` is a scalar, this operator returns x or y directly without broadcasting.\n - If `condition` is ndarray, while both `x` and `y` are scalars,\n the output dtype will be `float32`.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.where(a < 5, a, 10*a)\n array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])\n\n This can be used on multidimensional arrays too:\n\n >>> cond = np.array([[True, False], [True, True]])\n >>> x = np.array([[1, 2], [3, 4]])\n >>> y = np.array([[9, 8], [7, 6]])\n >>> np.where(cond, x, y)\n array([[1., 8.],\n [3., 4.]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = onp.ogrid[:3, :4]\n >>> x = np.array(x)\n >>> y = np.array(y)\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]], dtype=int64)\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, -1) # -1 is broadcast\n array([[ 0., 1., 2.],\n [ 0., 2., -1.],\n [ 0., 3., -1.]])\n \"\"\"\n if x is None and y is None:\n return nonzero(condition)\n else:\n if isinstance(condition, numeric_types):\n if condition != 0:\n return x\n else:\n return y\n else:\n return _api_internal.where(condition, x, y)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef polyval(p, x):\n \"\"\"\n Evaluate a polynomial at specific values.\n If p is of length N, this function returns the value:\n p[0]*x**(N-1) + p[1]*x**(N-2) + ... 
+ p[N-2]*x + p[N-1]\n If x is a sequence, then p(x) is returned for each element of x.\n If x is another polynomial then the composite polynomial p(x(t)) is returned.\n\n Parameters\n ----------\n p : ndarray\n 1D array of polynomial coefficients (including coefficients equal to zero)\n from highest degree to the constant term.\n x : ndarray\n An array of numbers, at which to evaluate p.\n\n Returns\n -------\n values : ndarray\n Result array of polynomials\n\n Notes\n -----\n This function differs from the original `numpy.polyval\n <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in\n the following way(s):\n - Does not support poly1d.\n - X should be ndarray type even if it contains only one element.\n\n Examples\n --------\n >>> p = np.array([3, 0, 1])\n array([3., 0., 1.])\n >>> x = np.array([5])\n array([5.])\n >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1\n array([76.])\n >>> x = np.array([5, 4])\n array([5., 4.])\n >>> np.polyval(p, x)\n array([76., 49.])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(p, numeric_types) and isinstance(x, numeric_types):\n return _np.polyval(p, x)\n elif isinstance(p, ndarray) and isinstance(x, ndarray):\n return _api_internal.polyval(p, x)\n else:\n raise TypeError('type not supported')\n\n\n@set_module('mxnet.ndarray.numpy')\ndef bincount(x, weights=None, minlength=0):\n \"\"\"\n Count number of occurrences of each value in array of non-negative ints.\n\n Parameters\n ----------\n x : ndarray\n input array, 1 dimension, nonnegative ints.\n weights: ndarray\n input weigths same shape as x. (Optional)\n minlength: int\n A minimum number of bins for the output. (Optional)\n\n Returns\n --------\n out : ndarray\n the result of binning the input array. The length of out is equal to amax(x)+1.\n\n Raises\n --------\n Value Error\n If the input is not 1-dimensional, or contains elements with negative values,\n or if minlength is negative\n TypeError\n If the type of the input is float or complex.\n\n Examples\n --------\n >>> np.bincount(np.arange(5))\n array([1, 1, 1, 1, 1])\n >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))\n array([1, 3, 1, 1, 0, 0, 0, 1])\n\n >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])\n >>> np.bincount(x).size == np.amax(x)+1\n True\n\n >>> np.bincount(np.arange(5, dtype=float))\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n TypeError: array cannot be safely cast to required type\n\n >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights\n >>> x = np.array([0, 1, 1, 2, 2, 2])\n >>> np.bincount(x, weights=w)\n array([ 0.3, 0.7, 1.1])\n \"\"\"\n if minlength < 0:\n raise ValueError(\"Minlength value should greater than 0\")\n return _api_internal.bincount(x, weights, minlength)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments\n \"\"\"\n Pad an array.\n\n Parameters\n ----------\n array : array_like of rank N\n The array to pad.\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n ((before_1, after_1), ... 
(before_N, after_N)) unique pad widths\n for each axis.\n ((before, after),) yields same before and after pad for each axis.\n (pad,) or int is a shortcut for before = after = pad width for all\n axes.\n mode : str or function, optional\n One of the following string values or a user supplied function.\n 'constant' (default)\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n not supported yet\n 'maximum'\n Pads with the maximum value of all of the\n vector along each axis.\n 'mean'\n not supported yet\n 'median'\n not supported yet\n 'minimum'\n Pads with the minimum value of all of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n not supported yet.\n 'empty'\n not supported yet.\n <function>\n not supported yet.\n stat_length : not supported yet\n constant_values : scalar, optional\n Used in 'constant'. The values to set the padded values for each\n axis.\n Default is 0.\n\n end_values : not supported yet\n reflect_type : {'even', 'odd'}, optional\n only support even now\n\n Returns\n -------\n pad : ndarray\n Padded array of rank equal to `array` with shape increased\n according to `pad_width`.\n \"\"\"\n # pylint: disable = too-many-return-statements, inconsistent-return-statements\n if not _np.asarray(pad_width).dtype.kind == 'i':\n raise TypeError('`pad_width` must be of integral type.')\n if not isinstance(pad_width, tuple):\n raise TypeError(\"`pad_width` must be tuple.\")\n if mode == \"linear_ramp\":\n raise ValueError(\"mode {'linear_ramp'} is not supported.\")\n if mode == \"wrap\":\n raise ValueError(\"mode {'wrap'} is not supported.\")\n if mode == \"median\":\n raise ValueError(\"mode {'median'} is not supported.\")\n if mode == \"mean\":\n raise ValueError(\"mode {'mean'} is not supported.\")\n if mode == \"empty\":\n raise ValueError(\"mode {'empty'} is not supported.\")\n if callable(mode):\n raise ValueError(\"mode {'<function>'} is not supported.\")\n\n allowedkwargs = {\n 'constant': ['constant_values'],\n 'edge': [],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': ['reflect_type'],\n 'wrap': [],\n }\n\n if isinstance(mode, _np.compat.basestring):\n # Make sure have allowed kwargs appropriate for mode\n for key in kwargs:\n if key not in allowedkwargs[mode]:\n raise ValueError('%s keyword not in allowed keywords %s' %(key, allowedkwargs[mode]))\n\n unsupported_kwargs = set(kwargs) - set(allowedkwargs[mode])\n if unsupported_kwargs:\n raise ValueError(\"unsupported keyword arguments for mode '{}': {}\"\n .format(mode, unsupported_kwargs))\n if mode == \"constant\":\n values = kwargs.get(\"constant_values\", 0)\n if isinstance(values, tuple):\n raise TypeError(\"unsupported constant_values type: {'tuple'}.\")\n return _api_internal.pad(x, pad_width, 'constant', values, \"even\")\n elif mode == \"symmetric\":\n values = kwargs.get(\"reflect_type\", \"even\")\n if values != \"even\" and values is not None:\n raise ValueError(\"unsupported reflect_type '{}'\".format(values))\n return _api_internal.pad(x, pad_width, 'symmetric', 0, \"even\")\n elif mode == \"edge\":\n return _api_internal.pad(x, pad_width, 'edge', 0, \"even\")\n elif mode == \"reflect\":\n values = 
kwargs.get(\"reflect_type\", \"even\")\n if values != \"even\" and values is not None:\n raise ValueError(\"unsupported reflect_type '{}'\".format(values))\n return _api_internal.pad(x, pad_width, 'reflect', 0, \"even\")\n elif mode == \"maximum\":\n values = kwargs.get(\"stat_length\", None)\n if values is not None:\n raise ValueError(\"unsupported stat_length '{}'\".format(values))\n return _api_internal.pad(x, pad_width, 'maximum', 0, \"even\")\n elif mode == \"minimum\":\n values = kwargs.get(\"stat_length\", None)\n if values is not None:\n raise ValueError(\"unsupported stat_length '{}'\".format(values))\n return _api_internal.pad(x, pad_width, 'minimum', 0, \"even\")\n return _api_internal.pad(x, pad_width, 'constant', 0, \"even\")\n\n\n@set_module('mxnet.ndarray.numpy')\ndef prod(a, axis=None, dtype=None, out=None, keepdims=False, initial=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return the product of array elements over a given axis.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a product is performed. The default,\n axis=None, will calculate the product of all the elements in the\n input array. If axis is negative it counts from the last to the\n first axis.\n .. versionadded:: 1.7.0\n If axis is a tuple of ints, a product is performed on all of the\n axes specified in the tuple instead of a single axis or all the\n axes as before.\n dtype : dtype, optional\n The type of the returned array, as well as of the accumulator in\n which the elements are multiplied. The dtype of `a` is used by\n default unless `a` has an integer dtype of less precision than the\n default platform integer. In that case, if `a` is signed then the\n platform integer is used while if `a` is unsigned then an unsigned\n integer of the same precision as the platform integer is used.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output, but the type of the output\n values will be cast if necessary.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `prod` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n initial : scalar, optional\n The starting value for this product. 
See `~numpy.ufunc.reduce` for details.\n where : not supported\n\n Returns\n -------\n product_along_axis : ndarray, see `dtype` parameter above.\n An array shaped as `a` but with the specified axis removed.\n Returns a reference to `out` if specified.\n\n Examples\n --------\n By default, calculate the product of all elements:\n >>> np.prod([1.,2.])\n 2.0\n Even when the input array is two-dimensional:\n >>> np.prod([[1.,2.],[3.,4.]])\n 24.0\n But we can also specify the axis over which to multiply:\n >>> np.prod([[1.,2.],[3.,4.]], axis=1)\n array([ 2., 12.])\n Or select specific elements to include:\n >>> np.prod([1., np.nan, 3.], where=[True, False, True])\n 3.0\n If the type of `x` is unsigned, then the output type is\n the unsigned platform integer:\n >>> x = np.array([1, 2, 3], dtype=np.uint8)\n >>> np.prod(x).dtype == np.uint\n True\n If `x` is of a signed integer type, then the output type\n is the default platform integer:\n >>> x = np.array([1, 2, 3], dtype=np.int8)\n >>> np.prod(x).dtype == int\n True\n You can also start the product with a value other than one:\n >>> np.prod([1, 2], initial=5)\n 10\n \"\"\"\n return _api_internal.prod(a, axis, dtype, keepdims, initial, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef cumsum(a, axis=None, dtype=None, out=None):\n \"\"\"\n Return the cumulative sum of the elements along a given axis.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int, optional\n Axis along which the cumulative sum is computed. The default\n (None) is to compute the cumsum over the flattened array.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If `dtype` is not specified, it defaults\n to the dtype of `a`, unless `a` has an integer dtype with a\n precision less than that of the default platform integer. In\n that case, the default platform integer is used.\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output\n but the type will be cast if necessary. See `doc.ufuncs`\n (Section \"Output arguments\") for more details.\n\n Returns\n -------\n cumsum_along_axis : ndarray.\n A new array holding the result is returned unless `out` is\n specified, in which case a reference to `out` is returned. The\n result has the same size as `a`, and the same shape as `a` if\n `axis` is not None or `a` is a 1-d array.\n\n Examples\n --------\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> a\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> np.cumsum(a)\n array([ 1, 3, 6, 10, 15, 21])\n >>> np.cumsum(a, dtype=float) # specifies type of output value(s)\n array([ 1., 3., 6., 10., 15., 21.])\n >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns\n array([[1, 2, 3],\n [5, 7, 9]])\n >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows\n array([[ 1, 3, 6],\n [ 4, 9, 15]])\n \"\"\"\n return _api_internal.cumsum(a, axis, dtype, out)\n\n@set_module('mxnet.ndarray.numpy')\ndef reshape(a, newshape, order='C'):\n \"\"\"\n Gives a new shape to an array without changing its data.\n This function always returns a copy of the input array if\n ``out`` is not provided.\n\n Parameters\n ----------\n a : ndarray\n Array to be reshaped.\n\n newshape : int or tuple of ints\n The new shape should be compatible with the original shape. If\n an integer, then the result will be a 1-D array of that length.\n One shape dimension can be -1. 
In this case, the value is\n inferred from the length of the array and remaining dimensions.\n\n order : {'C'}, optional\n Read the elements of `a` using this index order, and place the\n elements into the reshaped array using this index order. 'C'\n means to read / write the elements using C-like index order,\n with the last axis index changing fastest, back to the first\n axis index changing slowest. Other order types such as 'F'/'A'\n may be added in the future.\n\n Returns\n -------\n reshaped_array : ndarray\n It will be always a copy of the original array. This behavior is different\n from the official NumPy ``reshape`` operator where views of the original array may be\n generated.\n\n See Also\n --------\n ndarray.reshape : Equivalent method.\n\n Examples\n --------\n >>> a = np.arange(6).reshape((3, 2))\n >>> a\n array([[0., 1.],\n [2., 3.],\n [4., 5.]])\n\n >>> np.reshape(a, (2, 3)) # C-like index ordering\n array([[0., 1., 2.],\n [3., 4., 5.]])\n\n >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape\n array([[0., 1., 2.],\n [3., 4., 5.]])\n\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> np.reshape(a, 6)\n array([1., 2., 3., 4., 5., 6.])\n\n >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n \"\"\"\n return _api_internal.reshape(a, newshape, False, order)\n\n@set_module('mxnet.ndarray.numpy')\ndef moveaxis(a, source, destination):\n \"\"\"Move axes of an array to new positions.\n Other axes remain in their original order.\n\n Parameters\n ----------\n a : ndarray\n The array whose axes should be reordered.\n source : int or sequence of int\n Original positions of the axes to move. These must be unique.\n destination : int or sequence of int\n Destination positions for each of the original axes. These must also be\n unique.\n\n Returns\n -------\n result : ndarray\n Array with moved axes. This array is a view of the input array.\n\n See Also\n --------\n transpose: Permute the dimensions of an array.\n swapaxes: Interchange two axes of an array.\n\n Examples\n --------\n >>> x = np.zeros((3, 4, 5))\n >>> np.moveaxis(x, 0, -1).shape\n (4, 5, 3)\n >>> np.moveaxis(x, -1, 0).shape\n (5, 3, 4)\n These all achieve the same result:\n >>> np.transpose(x).shape\n (5, 4, 3)\n >>> np.swapaxes(x, 0, -1).shape\n (5, 4, 3)\n >>> np.moveaxis(x, [0, 1], [-1, -2]).shape\n (5, 4, 3)\n >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape\n (5, 4, 3)\n \"\"\"\n return _api_internal.moveaxis(a, source, destination)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef copy(a):\n \"\"\"\n Return an array copy of the given object.\n\n Parameters\n ----------\n a :\n Input array.\n\n Returns\n -------\n arr : ndarray\n Array interpretation of a.\n\n -----\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> y = x\n >>> z = np.copy(x)\n >>> x[0] = 10\n >>> x[0] == y[0]\n True\n >>> x[0] == z[0]\n False\n \"\"\"\n return _api_internal.copy(a)\n\n@set_module('mxnet.ndarray.numpy')\ndef rollaxis(a, axis, start=0):\n \"\"\"\n Roll the specified axis backwards, until it lies in a given position.\n a\n Input array.\n axis : integer\n The axis to roll backwards. 
The positions of the other axes do not\n change relative to one another.\n start: int, optional\n The axis is rolled until it lies before this position.\n The default, 0, results in a “complete” roll.\n\n Returns\n -------\n res : ndarray\n A view after applying rollaxis to `a` is returned.\n\n -----\n Examples\n --------\n >>> a = np.ones((3,4,5,6))\n >>> np.rollaxis(a, 3, 1).shape\n (3, 6, 4, 5)\n >>> np.rollaxis(a, 2).shape\n (5, 3, 4, 6)\n >>> np.rollaxis(a, 1, 4).shape\n (3, 5, 6, 4)\n \"\"\"\n return _api_internal.rollaxis(a, axis, start)\n\n@set_module('mxnet.ndarray.numpy')\ndef diag(v, k=0):\n \"\"\"\n Extracts a diagonal or constructs a diagonal array.\n - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero.\n - 2-D arrays: extracts the k-th Diagonal\n\n Parameters\n ----------\n array : ndarray\n The array to apply diag method.\n k : offset\n extracts or constructs kth diagonal given input array\n\n Returns\n ----------\n out : ndarray\n The extracted diagonal or constructed diagonal array.\n\n Examples\n --------\n >>> x = np.arange(9).reshape((3,3))\n >>> x\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n >>> np.diag(x)\n array([0, 4, 8])\n >>> np.diag(x, k=1)\n array([1, 5])\n >>> np.diag(x, k=-1)\n array([3, 7])\n\n >>> np.diag(np.diag(x))\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 8]])\n \"\"\"\n return _api_internal.diag(v, k)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef diagflat(v, k=0):\n \"\"\"\n Create a two-dimensional array with the flattened input as a diagonal.\n\n Parameters\n ----------\n v : array_like\n Input data, which is flattened and set as the `k`-th\n diagonal of the output.\n k : int, optional\n Diagonal to set; 0, the default, corresponds to the \"main\" diagonal,\n a positive (negative) `k` giving the number of the diagonal above\n (below) the main.\n\n Returns\n -------\n out : ndarray\n The 2-D output array.\n\n See Also\n --------\n diag : MATLAB work-alike for 1-D and 2-D arrays.\n diagonal : Return specified diagonals.\n trace : Sum along diagonals.\n\n Examples\n --------\n >>> np.diagflat([[1,2], [3,4]])\n array([[1, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]])\n >>> np.diagflat([1,2], 1)\n array([[0, 1, 0],\n [0, 0, 2],\n [0, 0, 0]])\n \"\"\"\n return _api_internal.diagflat(v, k)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n \"\"\"\n If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of\n the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and\n axis2 are used to determine the 2-D sub-array whose diagonal is returned. 
The shape of the\n resulting array can be determined by removing axis1 and axis2 and appending an index to the\n right equal to the size of the resulting diagonals.\n\n Parameters\n ----------\n a : ndarray\n Input data from which diagonal are taken.\n offset: int, Optional\n Offset of the diagonal from the main diagonal\n axis1: int, Optional\n Axis to be used as the first axis of the 2-D sub-arrays\n axis2: int, Optional\n Axis to be used as the second axis of the 2-D sub-arrays\n\n Returns\n -------\n out : ndarray\n Output result\n\n Raises\n -------\n ValueError: If the dimension of a is less than 2.\n\n Examples\n --------\n >>> a = np.arange(4).reshape(2,2)\n >>> a\n array([[0, 1],\n [2, 3]])\n >>> np.diagonal(a)\n array([0, 3])\n >>> np.diagonal(a, 1)\n array([1])\n\n >>> a = np.arange(8).reshape(2,2,2)\n >>>a\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.diagonal(a, 0, 0, 1)\n array([[0, 6],\n [1, 7]])\n \"\"\"\n return _api_internal.diagonal(a, offset, axis1, axis2)\n\n\n# pylint:disable=redefined-outer-name, too-many-arguments\n@set_module('mxnet.ndarray.numpy')\ndef sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):\n r\"\"\"\n Sum of array elements over a given axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : None or int, optional\n Axis or axes along which a sum is performed. The default,\n axis=None, will sum all of the elements of the input array. If\n axis is negative it counts from the last to the first axis.\n dtype : dtype, optional\n The type of the returned array and of the accumulator in which the\n elements are summed. The default type is float32.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n\n If the default value is passed, then `keepdims` will not be\n passed through to the `sum` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-classes `sum` method does not implement `keepdims` any\n exceptions will be raised.\n initial: Currently only supports None as input, optional\n Starting value for the sum.\n Currently not implemented. Please use ``None`` as input or skip this argument.\n out : ndarray or None, optional\n Alternative output array in which to place the result. It must have\n the same shape and dtype as the expected output.\n\n Returns\n -------\n sum_along_axis : ndarray\n An ndarray with the same shape as `a`, with the specified\n axis removed. If an output array is specified, a reference to\n `out` is returned.\n\n Notes\n -----\n - Input type does not support Python native iterables.\n - \"out\" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output.\n - \"initial\" param is not supported yet. Please use None as input.\n - Arithmetic is modular when using integer types, and no error is raised on overflow.\n - The sum of an empty array is the neutral element 0:\n\n >>> a = np.empty(1)\n >>> np.sum(a)\n array(0.)\n\n This function differs from the original `numpy.sum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - \"out\" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output.\n - \"initial\" param is not supported yet. 
Please use ``None`` as input or skip it.\n - The default type is float32.\n\n Examples\n --------\n >>> a = np.array([0.5, 1.5])\n >>> np.sum(a)\n array(2.)\n >>> a = np.array([0.5, 0.7, 0.2, 1.5])\n >>> np.sum(a, dtype=np.int32)\n array(2, dtype=int32)\n >>> a = np.array([[0, 1], [0, 5]])\n >>> np.sum(a)\n array(6.)\n >>> np.sum(a, axis=0)\n array([0., 6.])\n >>> np.sum(a, axis=1)\n array([1., 5.])\n\n With output ndarray:\n\n >>> a = np.array([[0, 1], [0, 5]])\n >>> b = np.ones((2,), dtype=np.float32)\n >>> np.sum(a, axis=0, out=b)\n array([0., 6.])\n >>> b\n array([0., 6.])\n\n If the accumulator is too small, overflow occurs:\n\n >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)\n array(-128, dtype=int8)\n \"\"\"\n if where is not None and where is not True:\n raise ValueError(\"only where=None or where=True cases are supported for now\")\n return _api_internal.sum(a, axis, dtype, keepdims, initial, out)\n# pylint:enable=redefined-outer-name, too-many-arguments\n\n\n@set_module('mxnet.ndarray.numpy')\ndef bitwise_left_shift(x1, x2, out=None):\n r\"\"\"\n Shift the bits of and integer to the left. Bits are shifted to the left by\n appending x2 0s at the right of x1. Since the internal representation of numbers\n is in binary format, this operation is equivalent to ``x1 * 2**x2``\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Input values.\n x2 : ndarray or scalar\n Number of zeros to append to x1. Has to be non-negative. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.binary_repr(5)\n '101'\n >>> np.left_shift(5, 2)\n 20\n >>> np.binary_repr(20)\n '10100'\n >>> np.left_shift(5, np.array([1,2,3]))\n array([10, 20, 40])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.left_shift(x1, x2, out=out)\n return _api_internal.bitwise_left_shift(x1, x2, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef bitwise_right_shift(x1, x2, out=None):\n r\"\"\"\n Shift the bits of and integer to the right. Bits are shifted to the right by\n x2. Because the internal representation of numbers is in binary format,\n this operation is equivalent to ``x1 / 2**x2``\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Input values.\n x1 : ndarray or scalar\n Number of bits to remove at the right of x1. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.binary_repr(10)\n '1010'\n >>> np.right_shift(10, 1)\n 5\n >>> np.binary_repr(5)\n '101'\n >>> np.right_shift(10, np.array([1,2,3]))\n array([5, 2, 1])\n \"\"\"\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.right_shift(x1, x2, out=out)\n return _api_internal.bitwise_right_shift(x1, x2, out)\n"
] |
[
[
"numpy.true_divide",
"numpy.logical_xor",
"numpy.minimum",
"numpy.asarray",
"numpy.around",
"numpy.nan_to_num",
"numpy.dtype",
"numpy.bitwise_xor",
"numpy.arctan2",
"numpy.polyval",
"numpy.divide",
"numpy.hypot",
"numpy.right_shift",
"numpy.fmod",
"numpy.greater",
"numpy.reshape",
"numpy.less",
"numpy.subtract",
"numpy.greater_equal",
"numpy.left_shift",
"numpy.copysign",
"numpy.less_equal",
"numpy.unravel_index",
"numpy.bitwise_or",
"numpy.multiply",
"numpy.power",
"numpy.mod",
"numpy.floor_divide",
"numpy.logical_or",
"numpy.fmax",
"numpy.equal",
"numpy.not_equal",
"numpy.logical_and",
"numpy.flip",
"numpy.fmin",
"numpy.logaddexp",
"numpy.maximum",
"numpy.tile",
"numpy.bitwise_and",
"numpy.ldexp",
"numpy.gcd",
"numpy.isscalar",
"numpy.lcm",
"numpy.add"
]
] |
yl-1993/mmhuman3d
|
[
"61a7427b7882d5e5f5fe623272a5c455c3d3b009"
] |
[
"mmhuman3d/models/losses/prior_loss.py"
] |
[
"import itertools\nimport os\nimport pickle\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmhuman3d.core.conventions.joints_mapping.standard_joint_angles import (\n STANDARD_JOINT_ANGLE_LIMITS,\n TRANSFORMATION_AA_TO_SJA,\n TRANSFORMATION_SJA_TO_AA,\n)\nfrom mmhuman3d.utils.keypoint_utils import search_limbs\nfrom mmhuman3d.utils.transforms import aa_to_rot6d, aa_to_sja\n\n\nclass ShapePriorLoss(nn.Module):\n \"\"\"Prior loss for body shape parameters.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n \"\"\"\n\n def __init__(self, reduction='mean', loss_weight=1.0):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n betas,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of loss.\n\n Args:\n betas (torch.Tensor): The body shape parameters\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n shape_prior_loss = loss_weight * betas**2\n\n if reduction == 'mean':\n shape_prior_loss = shape_prior_loss.mean()\n elif reduction == 'sum':\n shape_prior_loss = shape_prior_loss.sum()\n\n return shape_prior_loss\n\n\nclass PoseRegLoss(nn.Module):\n \"\"\"Regulizer loss for body pose parameters.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n \"\"\"\n\n def __init__(self, reduction='mean', loss_weight=1.0):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n body_pose,\n weight=None,\n avg_factor=None,\n loss_weight_override=None,\n reduction_override=None):\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n pose_prior_loss = loss_weight * (body_pose**2)\n\n if reduction == 'mean':\n pose_prior_loss = pose_prior_loss.mean()\n elif reduction == 'sum':\n pose_prior_loss = pose_prior_loss.sum()\n\n return pose_prior_loss\n\n\nclass LimbLengthLoss(nn.Module):\n \"\"\"Limb length loss for body shape parameters. As betas are associated with\n the height of a person, fitting on limb length help determine body shape\n parameters. It penalizes the L2 distance between target limb length and\n pred limb length. Note that it should take keypoints3d as input, as limb\n length computed from keypoints2d varies with camera.\n\n Args:\n convention (str): Limb convention to search for keypoint connections.\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n eps (float, optional): epsilon for computing normalized limb vector.\n Defaults to 1e-4.\n \"\"\"\n\n def __init__(self,\n convention,\n reduction='mean',\n loss_weight=1.0,\n eps=1e-4):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.eps = eps\n limb_idxs, _ = search_limbs(data_source=convention)\n limb_idxs = sorted(limb_idxs['body'])\n self.limb_idxs = np.array(\n list(x for x, _ in itertools.groupby(limb_idxs)))\n\n def _compute_limb_length(self, keypoints3d):\n kp_src = keypoints3d[:, self.limb_idxs[:, 0], :3]\n kp_dst = keypoints3d[:, self.limb_idxs[:, 1], :3]\n limb_vec = kp_dst - kp_src\n limb_length = torch.norm(limb_vec, dim=2)\n return limb_length\n\n def _keypoint_conf_to_limb_conf(self, keypoint_conf):\n limb_conf = torch.min(keypoint_conf[:, self.limb_idxs[:, 1]],\n keypoint_conf[:, self.limb_idxs[:, 0]])\n return limb_conf\n\n def forward(self,\n pred,\n target,\n pred_conf=None,\n target_conf=None,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of LimbLengthLoss.\n\n Args:\n pred (torch.Tensor): The predicted smpl keypoints3d.\n Shape should be (N, K, 3).\n B: batch size. K: number of keypoints.\n target (torch.Tensor): The ground-truth keypoints3d.\n Shape should be (N, K, 3).\n pred_conf (torch.Tensor, optional): Confidence of\n predicted keypoints. Shape should be (N, K).\n target_conf (torch.Tensor, optional): Confidence of\n target keypoints. Shape should be (N, K).\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss. Defaults to None.\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert pred.dim() == 3 and pred.shape[-1] == 3\n assert pred.shape == target.shape\n if pred_conf is not None:\n assert pred_conf.dim() == 2\n assert pred_conf.shape == pred.shape[:2]\n if target_conf is not None:\n assert target_conf.dim() == 2\n assert target_conf.shape == target.shape[:2]\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n limb_len_target = self._compute_limb_length(target)\n limb_len_pred = self._compute_limb_length(pred)\n\n if target_conf is None:\n target_conf = torch.ones_like(target[..., 0])\n if pred_conf is None:\n pred_conf = torch.ones_like(pred[..., 0])\n limb_conf_target = self._keypoint_conf_to_limb_conf(target_conf)\n limb_conf_pred = self._keypoint_conf_to_limb_conf(pred_conf)\n limb_conf = limb_conf_target * limb_conf_pred\n\n diff_len = limb_len_target - limb_len_pred\n loss = diff_len**2 * limb_conf\n\n if reduction == 'mean':\n loss = loss.mean()\n elif reduction == 'sum':\n loss = loss.sum()\n\n loss *= loss_weight\n\n return loss\n\n\nclass JointPriorLoss(nn.Module):\n \"\"\"Prior loss for joint angles.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n use_full_body (bool, optional): Use full set of joint constraints\n (in standard joint angles).\n smooth_spine (bool, optional): Ensuring smooth spine rotations\n smooth_spine_loss_weight (float, optional): An additional weight\n factor multiplied on smooth spine loss\n \"\"\"\n\n def __init__(self,\n reduction='mean',\n loss_weight=1.0,\n use_full_body=False,\n smooth_spine=False,\n smooth_spine_loss_weight=1.0):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.use_full_body = use_full_body\n self.smooth_spine = smooth_spine\n self.smooth_spine_loss_weight = smooth_spine_loss_weight\n\n if self.use_full_body:\n self.register_buffer('R_t', TRANSFORMATION_AA_TO_SJA)\n self.register_buffer('R_t_inv', TRANSFORMATION_SJA_TO_AA)\n self.register_buffer('sja_limits', STANDARD_JOINT_ANGLE_LIMITS)\n\n def forward(self,\n body_pose,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of loss.\n\n Args:\n body_pose (torch.Tensor): The body pose parameters\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n if self.use_full_body:\n batch_size = body_pose.shape[0]\n body_pose_reshape = body_pose.reshape(batch_size, -1, 3)\n assert body_pose_reshape.shape[1] in (21, 23) # smpl-x, smpl\n body_pose_reshape = body_pose_reshape[:, :21, :]\n\n body_pose_sja = aa_to_sja(body_pose_reshape, self.R_t,\n self.R_t_inv)\n\n lower_limits = self.sja_limits[:, :, 0] # shape: (21, 3)\n upper_limits = self.sja_limits[:, :, 1] # shape: (21, 3)\n\n lower_loss = (torch.exp(F.relu(lower_limits - body_pose_sja)) -\n 1).pow(2)\n upper_loss = (torch.exp(F.relu(body_pose_sja - upper_limits)) -\n 1).pow(2)\n\n standard_joint_angle_prior_loss = (lower_loss + upper_loss).view(\n body_pose.shape[0], -1) # shape: (n, 3)\n\n joint_prior_loss = standard_joint_angle_prior_loss\n\n else:\n # default joint prior loss applied on elbows and knees\n joint_prior_loss = (torch.exp(\n body_pose[:, [55, 58, 12, 15]] *\n torch.tensor([1., -1., -1, -1.], device=body_pose.device)) -\n 1)**2\n\n if self.smooth_spine:\n spine1 = body_pose[:, [9, 10, 11]]\n spine2 = body_pose[:, [18, 19, 20]]\n spine3 = body_pose[:, [27, 28, 29]]\n smooth_spine_loss_12 = (torch.exp(F.relu(-spine1 * spine2)) -\n 1).pow(2) * self.smooth_spine_loss_weight\n smooth_spine_loss_23 = (torch.exp(F.relu(-spine2 * spine3)) -\n 1).pow(2) * self.smooth_spine_loss_weight\n\n joint_prior_loss = torch.cat(\n [joint_prior_loss, smooth_spine_loss_12, smooth_spine_loss_23],\n axis=1)\n\n joint_prior_loss = loss_weight * joint_prior_loss\n\n if reduction == 'mean':\n joint_prior_loss = joint_prior_loss.mean()\n elif reduction == 'sum':\n joint_prior_loss = joint_prior_loss.sum()\n\n return joint_prior_loss\n\n\nclass SmoothJointLoss(nn.Module):\n \"\"\"Smooth loss for joint angles.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. 
Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n degree (bool, optional): The flag which represents whether the input\n tensor is in degree or radian.\n \"\"\"\n\n def __init__(self,\n reduction='mean',\n loss_weight=1.0,\n degree=False,\n loss_func='L1'):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n assert loss_func in ('L1', 'L2')\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.degree = degree\n self.loss_func = loss_func\n\n def forward(self,\n body_pose,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of SmoothJointLoss.\n\n Args:\n body_pose (torch.Tensor): The body pose parameters\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n theta = body_pose.reshape(body_pose.shape[0], -1, 3)\n if self.degree:\n theta = torch.deg2rad(theta)\n rot_6d = aa_to_rot6d(theta)\n rot_6d_diff = rot_6d[1:] - rot_6d[:-1]\n\n if self.loss_func == 'L2':\n smooth_joint_loss = (rot_6d_diff**2).sum(dim=[1, 2])\n elif self.loss_func == 'L1':\n smooth_joint_loss = rot_6d_diff.abs().sum(dim=[1, 2])\n else:\n raise TypeError(f'{self.func} is not defined')\n\n # add zero padding to retain original batch_size\n smooth_joint_loss = torch.cat(\n [torch.zeros_like(smooth_joint_loss)[:1], smooth_joint_loss])\n\n if reduction == 'mean':\n smooth_joint_loss = smooth_joint_loss.mean()\n elif reduction == 'sum':\n smooth_joint_loss = smooth_joint_loss.sum()\n\n smooth_joint_loss *= loss_weight\n\n return smooth_joint_loss\n\n\nclass SmoothPelvisLoss(nn.Module):\n \"\"\"Smooth loss for pelvis angles.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n degree (bool, optional): The flag which represents whether the input\n tensor is in degree or radian.\n \"\"\"\n\n def __init__(self, reduction='mean', loss_weight=1.0, degree=False):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.degree = degree\n\n def forward(self,\n global_orient,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of SmoothPelvisLoss.\n\n Args:\n global_orient (torch.Tensor): The global orientation parameters\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n if self.degree:\n global_orient = torch.deg2rad(global_orient)\n\n pelvis = global_orient.unsqueeze(1)\n rot_6d = aa_to_rot6d(pelvis)\n\n rot_6d_diff = rot_6d[1:] - rot_6d[:-1]\n smooth_pelvis_loss = rot_6d_diff.abs().sum(dim=-1)\n\n # add zero padding to retain original batch_size\n smooth_pelvis_loss = torch.cat(\n [torch.zeros_like(smooth_pelvis_loss)[:1],\n smooth_pelvis_loss]).sum(dim=-1)\n\n smooth_pelvis_loss = loss_weight * smooth_pelvis_loss\n\n if reduction == 'mean':\n smooth_pelvis_loss = smooth_pelvis_loss.mean()\n elif reduction == 'sum':\n smooth_pelvis_loss = smooth_pelvis_loss.sum()\n\n return smooth_pelvis_loss\n\n\nclass SmoothTranslationLoss(nn.Module):\n \"\"\"Smooth loss for translations.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n \"\"\"\n\n def __init__(self, reduction='mean', loss_weight=1.0):\n super().__init__()\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n translation,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of loss.\n\n Args:\n translation (torch.Tensor): The body translation parameters\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n translation_diff = translation[1:] - translation[:-1]\n smooth_translation_loss = translation_diff.abs().sum(\n dim=-1, keepdim=True)\n\n # add zero padding to retain original batch_size\n smooth_translation_loss = torch.cat([\n torch.zeros_like(smooth_translation_loss)[:1],\n smooth_translation_loss\n ]).sum(dim=-1)\n\n smooth_translation_loss *= 1e3\n\n smooth_translation_loss = loss_weight * \\\n smooth_translation_loss\n\n if reduction == 'mean':\n smooth_translation_loss = smooth_translation_loss.mean()\n elif reduction == 'sum':\n smooth_translation_loss = smooth_translation_loss.sum()\n\n return smooth_translation_loss\n\n\nclass CameraPriorLoss(nn.Module):\n \"\"\"Prior loss for predicted camera.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are \"none\", \"mean\" and \"sum\".\n scale (float, optional): The scale coefficient for regularizing camera\n parameters. Defaults to 10\n loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n \"\"\"\n\n def __init__(self, scale=10, reduction='mean', loss_weight=1.0):\n super().__init__()\n self.scale = scale\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n cameras,\n loss_weight_override=None,\n reduction_override=None):\n \"\"\"Forward function of loss.\n\n Args:\n cameras (torch.Tensor): The predicted camera parameters\n loss_weight_override (float, optional): The weight of loss used to\n override the original weight of loss\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n camera_prior_loss = torch.exp(-cameras[:, 0] * self.scale)\n camera_prior_loss = torch.pow(camera_prior_loss, 2) * loss_weight\n\n if reduction == 'mean':\n camera_prior_loss = camera_prior_loss.mean()\n elif reduction == 'sum':\n camera_prior_loss = camera_prior_loss.sum()\n\n return camera_prior_loss\n\n\nclass MaxMixturePrior(nn.Module):\n \"\"\"Ref: SMPLify-X\n https://github.com/vchoutas/smplify-x/blob/master/smplifyx/prior.py\n \"\"\"\n\n def __init__(self,\n prior_folder='data',\n num_gaussians=8,\n dtype=torch.float32,\n epsilon=1e-16,\n use_merged=True,\n reduction=None,\n loss_weight=1.0):\n super(MaxMixturePrior, self).__init__()\n\n assert reduction in (None, 'none', 'mean', 'sum')\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n if dtype == torch.float32:\n np_dtype = np.float32\n elif dtype == torch.float64:\n np_dtype = np.float64\n else:\n print('Unknown float type {}, exiting!'.format(dtype))\n sys.exit(-1)\n\n self.num_gaussians = num_gaussians\n self.epsilon = epsilon\n self.use_merged = use_merged\n gmm_fn = 'gmm_{:02d}.pkl'.format(num_gaussians)\n\n full_gmm_fn = os.path.join(prior_folder, gmm_fn)\n if not os.path.exists(full_gmm_fn):\n print('The path to the mixture prior \"{}\"'.format(full_gmm_fn) +\n ' does not exist, exiting!')\n sys.exit(-1)\n\n with open(full_gmm_fn, 'rb') as f:\n gmm = pickle.load(f, encoding='latin1')\n\n if type(gmm) == dict:\n means = gmm['means'].astype(np_dtype)\n covs = gmm['covars'].astype(np_dtype)\n weights = gmm['weights'].astype(np_dtype)\n elif 'sklearn.mixture.gmm.GMM' in str(type(gmm)):\n means = gmm.means_.astype(np_dtype)\n covs = gmm.covars_.astype(np_dtype)\n weights = gmm.weights_.astype(np_dtype)\n else:\n print('Unknown type for the prior: {}, exiting!'.format(type(gmm)))\n sys.exit(-1)\n\n self.register_buffer('means', torch.tensor(means, dtype=dtype))\n\n self.register_buffer('covs', torch.tensor(covs, dtype=dtype))\n\n precisions = [np.linalg.inv(cov) for cov in covs]\n precisions = np.stack(precisions).astype(np_dtype)\n\n self.register_buffer('precisions',\n torch.tensor(precisions, dtype=dtype))\n\n # The constant term:\n sqrdets = np.array([(np.sqrt(np.linalg.det(c)))\n for c in gmm['covars']])\n const = (2 * np.pi)**(69 / 2.)\n\n nll_weights = np.asarray(gmm['weights'] / (const *\n (sqrdets / sqrdets.min())))\n nll_weights = torch.tensor(nll_weights, dtype=dtype).unsqueeze(dim=0)\n self.register_buffer('nll_weights', nll_weights)\n\n weights = torch.tensor(gmm['weights'], dtype=dtype).unsqueeze(dim=0)\n self.register_buffer('weights', weights)\n\n 
self.register_buffer('pi_term',\n torch.log(torch.tensor(2 * np.pi, dtype=dtype)))\n\n cov_dets = [\n np.log(np.linalg.det(cov.astype(np_dtype)) + epsilon)\n for cov in covs\n ]\n self.register_buffer('cov_dets', torch.tensor(cov_dets, dtype=dtype))\n\n # The dimensionality of the random variable\n self.random_var_dim = self.means.shape[1]\n\n def get_mean(self):\n \"\"\"Returns the mean of the mixture.\"\"\"\n mean_pose = torch.matmul(self.weights, self.means)\n return mean_pose\n\n def merged_log_likelihood(self, pose):\n diff_from_mean = pose.unsqueeze(dim=1) - self.means\n\n prec_diff_prod = torch.einsum('mij,bmj->bmi',\n [self.precisions, diff_from_mean])\n diff_prec_quadratic = (prec_diff_prod * diff_from_mean).sum(dim=-1)\n\n curr_loglikelihood = 0.5 * diff_prec_quadratic - \\\n torch.log(self.nll_weights)\n # curr_loglikelihood = 0.5 * (self.cov_dets.unsqueeze(dim=0) +\n # self.random_var_dim * self.pi_term +\n # diff_prec_quadratic\n # ) - torch.log(self.weights)\n\n min_likelihood, _ = torch.min(curr_loglikelihood, dim=1)\n return min_likelihood\n\n def log_likelihood(self, pose):\n \"\"\"Create graph operation for negative log-likelihood calculation.\"\"\"\n likelihoods = []\n\n for idx in range(self.num_gaussians):\n mean = self.means[idx]\n prec = self.precisions[idx]\n cov = self.covs[idx]\n diff_from_mean = pose - mean\n\n curr_loglikelihood = torch.einsum('bj,ji->bi',\n [diff_from_mean, prec])\n curr_loglikelihood = torch.einsum(\n 'bi,bi->b', [curr_loglikelihood, diff_from_mean])\n cov_term = torch.log(torch.det(cov) + self.epsilon)\n curr_loglikelihood += 0.5 * (\n cov_term + self.random_var_dim * self.pi_term)\n likelihoods.append(curr_loglikelihood)\n\n log_likelihoods = torch.stack(likelihoods, dim=1)\n min_idx = torch.argmin(log_likelihoods, dim=1)\n weight_component = self.nll_weights[:, min_idx]\n weight_component = -torch.log(weight_component)\n\n return weight_component + log_likelihoods[:, min_idx]\n\n def forward(self,\n body_pose,\n loss_weight_override=None,\n reduction_override=None):\n\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_weight = (\n loss_weight_override\n if loss_weight_override is not None else self.loss_weight)\n\n if self.use_merged:\n pose_prior_loss = self.merged_log_likelihood(body_pose)\n else:\n pose_prior_loss = self.log_likelihood(body_pose)\n\n pose_prior_loss = loss_weight * pose_prior_loss\n\n if reduction == 'mean':\n pose_prior_loss = pose_prior_loss.mean()\n elif reduction == 'sum':\n pose_prior_loss = pose_prior_loss.sum()\n\n return pose_prior_loss\n"
] |
[
[
"torch.cat",
"torch.pow",
"torch.norm",
"torch.einsum",
"torch.argmin",
"numpy.stack",
"torch.tensor",
"numpy.linalg.det",
"torch.nn.functional.relu",
"torch.ones_like",
"numpy.linalg.inv",
"torch.min",
"torch.deg2rad",
"torch.zeros_like",
"torch.exp",
"torch.log",
"torch.stack",
"torch.det",
"torch.matmul"
]
] |
jhyuuu/pytorch_image_classification
|
[
"20a5585d06a6e8aedffb3d8b86614c467ae4710b"
] |
[
"pytorch_image_classification/utils/env_info.py"
] |
[
"import torch\nimport yacs.config\n\nfrom pytorch_image_classification.config.config_node import ConfigNode\n\n\ndef get_env_info(config: yacs.config.CfgNode) -> yacs.config.CfgNode:\n info = {\n 'pytorch_version': str(torch.__version__),\n 'cuda_version': torch.version.cuda or '',\n 'cudnn_version': torch.backends.cudnn.version() or '',\n }\n if config.device != 'cpu':\n info['num_gpus'] = torch.cuda.device_count()\n info['gpu_name'] = torch.cuda.get_device_name(0)\n capability = torch.cuda.get_device_capability(0)\n info['gpu_capability'] = f'{capability[0]}.{capability[1]}'\n\n return ConfigNode({'env_info': info})\n"
] |
[
[
"torch.cuda.device_count",
"torch.cuda.get_device_capability",
"torch.backends.cudnn.version",
"torch.cuda.get_device_name"
]
] |
JXKun980/TransUNet
|
[
"78fca7d1a87a13bd4d7d95fa5e6598587b09b78a"
] |
[
"select_permutations.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 14 15:50:28 2017\n\n@author: bbrattol\n\"\"\"\nimport argparse\nfrom tqdm import trange\nimport numpy as np\nimport itertools\nfrom scipy.spatial.distance import cdist\n\n\nparser = argparse.ArgumentParser(description='Train network on Imagenet')\nparser.add_argument('--classes', default=1000, type=int, \n help='Number of permutations to select')\nparser.add_argument('--selection', default='max', type=str, \n help='Sample selected per iteration based on hamming distance: [max] highest; [mean] average')\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n outname = 'permutations_hamming_%s_%d'%(args.selection,args.classes)\n \n P_hat = np.array(list(itertools.permutations(list(range(9)), 9)))\n n = P_hat.shape[0]\n \n for i in trange(args.classes):\n if i==0:\n j = np.random.randint(n)\n P = np.array(P_hat[j]).reshape([1,-1])\n else:\n P = np.concatenate([P,P_hat[j].reshape([1,-1])],axis=0)\n \n P_hat = np.delete(P_hat,j,axis=0)\n D = cdist(P,P_hat, metric='hamming').mean(axis=0).flatten()\n \n if args.selection=='max':\n j = D.argmax()\n else:\n m = int(D.shape[0]/2)\n S = D.argsort()\n j = S[np.random.randint(m-10,m+10)]\n \n if i%100==0:\n np.save(outname,P)\n \n np.save(outname,P)\n print('file created --> '+outname)\n"
] |
[
[
"scipy.spatial.distance.cdist",
"numpy.save",
"numpy.delete",
"numpy.array",
"numpy.random.randint"
]
] |
mappercore/mapper-core
|
[
"39af3685beecce14cdb55b951f2d5556cdd28f76"
] |
[
"app/enhanced_mapper/mapper.py"
] |
[
"import numpy as np\nfrom typing import List, Dict, Tuple\nfrom itertools import combinations\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom .cover import Cover\nfrom .oracle import _check_clustering_object, cluster_points, map_overlap_cluster_to_interval\nfrom .graph import EnhancedGraph, Graph, AbstractGraph\nfrom .node import Sign, EnhancedNode, Node\n\n\ndef generate_mapper_graph(X: np.ndarray, lens: np.ndarray, cover: Cover, clusterer: object, enhanced: bool = False, cover_min: float = None, cover_max: float = None, refit_cover=True):\n # Quick checks to fail if input is malformed\n _check_clustering_object(clusterer)\n if len(lens.shape) == 2:\n assert lens.shape[1] == 1, 'Only 1D mapper is supported!'\n lens = lens.reshape(-1)\n \n if refit_cover:\n cover.force_refit()\n if cover_min is not None and cover_max is not None and refit_cover:\n cover.compute_intervals(cover_min, cover_max)\n \n interval_clusterings: List[List[int]] = []\n graph: AbstractGraph = EnhancedGraph() if enhanced else Graph()\n interval_clusterings = []\n\n for i, interval_members in enumerate(cover.fit_intervals(lens)):\n interval_clusterings.append([])\n if len(interval_members) == 0:\n continue\n assignments: np.ndarray = cluster_points(X[interval_members], clusterer)\n num_clusters: int = assignments.max() + 1 # if 3 is a cluster, then there are 4 clusters: see dbscan sklearn docs\n \n for cluster in range(num_clusters):\n cluster_members = interval_members[assignments == cluster]\n interval_clusterings[i].append(cluster_members)\n if len(cluster_members) == 0:\n continue\n if enhanced:\n graph: EnhancedGraph\n positive_node = EnhancedNode(interval_index=i, cluster_index=cluster, sign=Sign.PLUS, members=cluster_members)\n negative_node = EnhancedNode(interval_index=i, cluster_index=cluster, sign=Sign.MINUS, members=cluster_members)\n graph.add_node(positive_node)\n graph.add_node(negative_node)\n graph.add_edge(positive_node, negative_node)\n\n # assign positive node value\n if i+1 < cover.num_intervals:\n graph.set_function_val(positive_node, cover[i+1][0])\n else:\n graph.set_function_val(positive_node, cover[i][1])\n\n # assign negative node value\n if 0 <= i-1:\n graph.set_function_val(negative_node, cover[i-1][1])\n else:\n graph.set_function_val(negative_node, cover[i][0])\n else:\n graph: Graph\n node = Node(i, cluster, cluster_members)\n graph.add_node(node)\n if i > 0: # beyond first interval\n lower_interval_clusters = interval_clusterings[i-1]\n map_down = map_overlap_cluster_to_interval(cluster_members, lower_interval_clusters)\n for down in map_down:\n node2 = graph.get_node(i-1, down)\n graph.add_edge(node, node2)\n\n if enhanced:\n graph: EnhancedGraph\n for i, overlap_members in enumerate(cover.fit_overlaps(lens)):\n if len(overlap_members) == 0:\n continue\n assignments = cluster_points(X[overlap_members], clusterer)\n lower_interval_clusters = interval_clusterings[i]\n upper_interval_clusters = interval_clusterings[i+1]\n num_clusters = assignments.max() + 1\n for cluster in range(num_clusters):\n cluster_members = overlap_members[assignments == cluster]\n map_down = map_overlap_cluster_to_interval(cluster_members, lower_interval_clusters)\n map_up = map_overlap_cluster_to_interval(cluster_members, upper_interval_clusters)\n for down in map_down:\n down_node = graph.get_node(i, down, Sign.PLUS)\n for up in map_up:\n up_node = graph.get_node(i+1, up, Sign.MINUS)\n graph.add_edge(down_node, up_node)\n\n return graph\n\n# For mapper interactive\ndef generate_lens(X, proj, scale = 
\"MinMax\"):\n if scale == 'MinMax':\n scaler = MinMaxScaler((0, 1), copy=True)\n else:\n scaler = None\n\n if proj == \"sum\":\n ret = np.sum(X, axis=1)\n elif proj == \"mean\":\n ret = np.mean(X, axis=1)\n elif proj == \"median\":\n ret = np.median(X, axis=1)\n elif proj == \"max\":\n ret = np.max(X, axis=1)\n elif proj == \"min\":\n ret = np.min(X, axis=1)\n elif proj == \"std\":\n ret = np.std(X, axis=1)\n elif proj == \"l2norm\":\n ret = np.linalg.norm(X, axis=1)\n\n print(\"Scaler term\", scaler)\n\n return ret if scaler is None else scaler.fit_transform(np.reshape(ret, (-1, 1)))"
] |
[
[
"numpy.min",
"numpy.reshape",
"numpy.median",
"numpy.linalg.norm",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.sum",
"sklearn.preprocessing.MinMaxScaler"
]
] |
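The `generate_lens` helper in the row above reduces each sample of `X` to a single value (sum, mean, median, max, min, std, or L2 norm along axis 1) and, when `scale == 'MinMax'`, rescales the result to [0, 1]. Below is a minimal standalone sketch of that pattern, assuming nothing beyond the numpy and scikit-learn calls listed in the APIs column; the `make_lens` name and the toy data are illustrative and are not part of the dataset entry:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler


def make_lens(X: np.ndarray, proj: str = "l2norm", scale: bool = True) -> np.ndarray:
    """Reduce each row of X to one scalar, optionally rescaled to [0, 1]."""
    reducers = {
        "sum": np.sum, "mean": np.mean, "median": np.median,
        "max": np.max, "min": np.min, "std": np.std,
        "l2norm": np.linalg.norm,
    }
    ret = reducers[proj](X, axis=1)  # one value per sample
    if not scale:
        return ret
    # MinMaxScaler expects a 2-D array, hence the reshape to a column vector.
    return MinMaxScaler((0, 1)).fit_transform(np.reshape(ret, (-1, 1)))


X = np.random.default_rng(0).normal(size=(100, 3))  # toy data, not from the dataset
lens = make_lens(X, proj="mean")
print(lens.shape)  # (100, 1) after scaling
```

Note that `MinMaxScaler.fit_transform` returns a column vector of shape `(n, 1)`, which is why the original `generate_lens` reshapes `ret` before scaling.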
brightsparc/mlflow |
[
"31e4f969d3b65a2fd3e246e88a23433b72904d49",
"31e4f969d3b65a2fd3e246e88a23433b72904d49"
] |
[
"tests/pyfunc/test_model_export_with_loader_module_and_data_path.py",
"examples/xgboost/train.py"
] |
[
"import os\nimport pickle\nimport yaml\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport six\nimport sklearn.datasets\nimport sklearn.linear_model\nimport sklearn.neighbors\n\nimport mlflow\nimport mlflow.pyfunc\nfrom mlflow.pyfunc import PyFuncModel\nimport mlflow.pyfunc.model\nimport mlflow.sklearn\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model, infer_signature, ModelSignature\nfrom mlflow.models.utils import _read_example\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.types import Schema, ColSpec\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.file_utils import TempDir\nfrom mlflow.utils.model_utils import _get_flavor_configuration\n\n\ndef _load_pyfunc(path):\n with open(path, \"rb\") as f:\n if six.PY2:\n return pickle.load(f)\n else:\n return pickle.load(f, encoding=\"latin1\") # pylint: disable=unexpected-keyword-arg\n\n\n@pytest.fixture\ndef pyfunc_custom_env_file(tmpdir):\n conda_env = os.path.join(str(tmpdir), \"conda_env.yml\")\n _mlflow_conda_env(\n conda_env,\n additional_conda_deps=[\"scikit-learn\", \"pytest\", \"cloudpickle\"],\n additional_pip_deps=[\"-e \" + os.path.dirname(mlflow.__path__[0])],\n )\n return conda_env\n\n\n@pytest.fixture\ndef pyfunc_custom_env_dict():\n return _mlflow_conda_env(\n additional_conda_deps=[\"scikit-learn\", \"pytest\", \"cloudpickle\"],\n additional_pip_deps=[\"-e \" + os.path.dirname(mlflow.__path__[0])],\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef iris_data():\n iris = sklearn.datasets.load_iris()\n x = iris.data[:, :2]\n y = iris.target\n return x, y\n\n\n@pytest.fixture(scope=\"module\")\ndef sklearn_knn_model(iris_data):\n x, y = iris_data\n knn_model = sklearn.neighbors.KNeighborsClassifier()\n knn_model.fit(x, y)\n return knn_model\n\n\n@pytest.fixture\ndef model_path(tmpdir):\n return os.path.join(str(tmpdir), \"model\")\n\n\n@pytest.mark.large\ndef test_model_save_load(sklearn_knn_model, iris_data, tmpdir, model_path):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n model_config = Model(run_id=\"test\", artifact_path=\"testtest\")\n mlflow.pyfunc.save_model(\n path=model_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n mlflow_model=model_config,\n )\n\n reloaded_model_config = Model.load(os.path.join(model_path, \"MLmodel\"))\n assert model_config.__dict__ == reloaded_model_config.__dict__\n assert mlflow.pyfunc.FLAVOR_NAME in reloaded_model_config.flavors\n assert mlflow.pyfunc.PY_VERSION in reloaded_model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]\n reloaded_model = mlflow.pyfunc.load_pyfunc(model_path)\n np.testing.assert_array_equal(\n sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])\n )\n\n\n@pytest.mark.large\ndef test_signature_and_examples_are_saved_correctly(sklearn_knn_model, iris_data):\n data = iris_data\n signature_ = infer_signature(*data)\n example_ = data[0][\n :3,\n ]\n for signature in (None, signature_):\n for example in (None, example_):\n with TempDir() as tmp:\n with open(tmp.path(\"skmodel\"), \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n path = tmp.path(\"model\")\n mlflow.pyfunc.save_model(\n path=path,\n data_path=tmp.path(\"skmodel\"),\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n signature=signature,\n input_example=example,\n )\n mlflow_model = Model.load(path)\n assert signature 
== mlflow_model.signature\n if example is None:\n assert mlflow_model.saved_input_example_info is None\n else:\n assert all((_read_example(mlflow_model, path) == example).all())\n\n\ndef test_schema_enforcement():\n class TestModel(object):\n @staticmethod\n def predict(pdf):\n return pdf\n\n m = Model()\n input_schema = Schema(\n [\n ColSpec(\"integer\", \"a\"),\n ColSpec(\"long\", \"b\"),\n ColSpec(\"float\", \"c\"),\n ColSpec(\"double\", \"d\"),\n ColSpec(\"boolean\", \"e\"),\n ColSpec(\"string\", \"g\"),\n ColSpec(\"binary\", \"f\"),\n ]\n )\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n pdf = pd.DataFrame(\n data=[[1, 2, 3, 4, True, \"x\", bytes([1])]],\n columns=[\"b\", \"d\", \"a\", \"c\", \"e\", \"g\", \"f\"],\n dtype=np.object,\n )\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n pdf[\"d\"] = pdf[\"d\"].astype(np.float64)\n # test that missing column raises\n with pytest.raises(MlflowException) as ex:\n res = pyfunc_model.predict(pdf[[\"b\", \"d\", \"a\", \"e\", \"g\", \"f\"]])\n assert \"Model input is missing columns\" in str(ex)\n\n # test that extra column is ignored\n pdf[\"x\"] = 1\n\n # test that columns are reordered, extra column is ignored\n res = pyfunc_model.predict(pdf)\n assert all((res == pdf[input_schema.column_names()]).all())\n\n expected_types = dict(zip(input_schema.column_names(), input_schema.pandas_types()))\n actual_types = res.dtypes.to_dict()\n assert expected_types == actual_types\n\n # Test conversions\n # 1. long -> integer raises\n pdf[\"a\"] = pdf[\"a\"].astype(np.int64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n # 2. integer -> long works\n pdf[\"b\"] = pdf[\"b\"].astype(np.int32)\n res = pyfunc_model.predict(pdf)\n assert all((res == pdf[input_schema.column_names()]).all())\n assert res.dtypes.to_dict() == expected_types\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n\n # 3. double -> float raises\n pdf[\"c\"] = pdf[\"c\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n\n # 4. float -> double works\n pdf[\"d\"] = pdf[\"d\"].astype(np.float32)\n res = pyfunc_model.predict(pdf)\n assert res.dtypes.to_dict() == expected_types\n assert \"Incompatible input types\" in str(ex)\n pdf[\"d\"] = pdf[\"d\"].astype(np.int64)\n\n # 5. floats -> ints raises\n pdf[\"c\"] = pdf[\"c\"].astype(np.int32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"c\"] = pdf[\"c\"].astype(np.float32)\n\n pdf[\"d\"] = pdf[\"d\"].astype(np.int64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"d\"] = pdf[\"d\"].astype(np.float64)\n\n # 6. ints -> floats raises\n pdf[\"a\"] = pdf[\"a\"].astype(np.float32)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n assert \"Incompatible input types\" in str(ex)\n pdf[\"a\"] = pdf[\"a\"].astype(np.int32)\n\n pdf[\"b\"] = pdf[\"b\"].astype(np.float64)\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(pdf)\n pdf[\"b\"] = pdf[\"b\"].astype(np.int64)\n assert \"Incompatible input types\" in str(ex)\n\n # 7. 
objects work\n pdf[\"b\"] = pdf[\"b\"].astype(np.object)\n pdf[\"d\"] = pdf[\"d\"].astype(np.object)\n pdf[\"e\"] = pdf[\"e\"].astype(np.object)\n pdf[\"f\"] = pdf[\"f\"].astype(np.object)\n pdf[\"g\"] = pdf[\"g\"].astype(np.object)\n res = pyfunc_model.predict(pdf)\n assert res.dtypes.to_dict() == expected_types\n\n\ndef test_schema_enforcement_no_col_names():\n class TestModel(object):\n @staticmethod\n def predict(pdf):\n return pdf\n\n m = Model()\n input_schema = Schema([ColSpec(\"double\"), ColSpec(\"double\"), ColSpec(\"double\")])\n m.signature = ModelSignature(inputs=input_schema)\n pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())\n test_data = [[1.0, 2.0, 3.0]]\n\n # Can call with just a list\n assert pyfunc_model.predict(test_data).equals(pd.DataFrame(test_data))\n\n # Or can call with a DataFrame without column names\n assert pyfunc_model.predict(pd.DataFrame(test_data)).equals(pd.DataFrame(test_data))\n\n # Or with column names!\n pdf = pd.DataFrame(data=test_data, columns=[\"a\", \"b\", \"c\"])\n assert pyfunc_model.predict(pdf).equals(pdf)\n\n # Must provide the right number of arguments\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict([[1.0, 2.0]])\n assert \"the provided input only has 2 columns.\" in str(ex)\n\n # Must provide the right types\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict([[1, 2, 3]])\n assert \"Can not safely convert int64 to float64\" in str(ex)\n\n # Can only provide data frames or lists...\n with pytest.raises(MlflowException) as ex:\n pyfunc_model.predict(set([1, 2, 3]))\n assert \"Expected input to be DataFrame or list. Found: set\" in str(ex)\n\n\n@pytest.mark.large\ndef test_model_log_load(sklearn_knn_model, iris_data, tmpdir):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n )\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path\n )\n )\n\n model_config = Model.load(os.path.join(pyfunc_model_path, \"MLmodel\"))\n assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors\n assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]\n reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)\n assert model_config.to_yaml() == reloaded_model.metadata.to_yaml()\n np.testing.assert_array_equal(\n sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])\n )\n\n\n@pytest.mark.large\ndef test_model_log_load_no_active_run(sklearn_knn_model, iris_data, tmpdir):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n assert mlflow.active_run() is None\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n )\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path\n )\n )\n\n model_config = Model.load(os.path.join(pyfunc_model_path, \"MLmodel\"))\n assert 
mlflow.pyfunc.FLAVOR_NAME in model_config.flavors\n assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]\n reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path)\n np.testing.assert_array_equal(\n sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])\n )\n mlflow.end_run()\n\n\n@pytest.mark.large\ndef test_save_model_with_unsupported_argument_combinations_throws_exception(model_path):\n with pytest.raises(MlflowException) as exc_info:\n mlflow.pyfunc.save_model(path=model_path, data_path=\"/path/to/data\")\n assert \"Either `loader_module` or `python_model` must be specified\" in str(exc_info)\n\n\n@pytest.mark.large\ndef test_log_model_with_unsupported_argument_combinations_throws_exception():\n with mlflow.start_run(), pytest.raises(MlflowException) as exc_info:\n mlflow.pyfunc.log_model(artifact_path=\"pyfunc_model\", data_path=\"/path/to/data\")\n assert \"Either `loader_module` or `python_model` must be specified\" in str(exc_info)\n\n\n@pytest.mark.large\ndef test_log_model_persists_specified_conda_env_file_in_mlflow_model_directory(\n sklearn_knn_model, tmpdir, pyfunc_custom_env_file\n):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n conda_env=pyfunc_custom_env_file,\n )\n run_id = mlflow.active_run().info.run_id\n\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=pyfunc_artifact_path)\n )\n\n pyfunc_conf = _get_flavor_configuration(\n model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME\n )\n saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n assert saved_conda_env_path != pyfunc_custom_env_file\n\n with open(pyfunc_custom_env_file, \"r\") as f:\n pyfunc_custom_env_parsed = yaml.safe_load(f)\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == pyfunc_custom_env_parsed\n\n\n@pytest.mark.large\ndef test_log_model_persists_specified_conda_env_dict_in_mlflow_model_directory(\n sklearn_knn_model, tmpdir, pyfunc_custom_env_dict\n):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n conda_env=pyfunc_custom_env_dict,\n )\n run_id = mlflow.active_run().info.run_id\n\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=pyfunc_artifact_path)\n )\n\n pyfunc_conf = _get_flavor_configuration(\n model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME\n )\n saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == pyfunc_custom_env_dict\n\n\n@pytest.mark.large\ndef 
test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies(\n sklearn_knn_model, tmpdir\n):\n sk_model_path = os.path.join(str(tmpdir), \"knn.pkl\")\n with open(sk_model_path, \"wb\") as f:\n pickle.dump(sklearn_knn_model, f)\n\n pyfunc_artifact_path = \"pyfunc_model\"\n with mlflow.start_run():\n mlflow.pyfunc.log_model(\n artifact_path=pyfunc_artifact_path,\n data_path=sk_model_path,\n loader_module=os.path.basename(__file__)[:-3],\n code_path=[__file__],\n )\n run_id = mlflow.active_run().info.run_id\n\n pyfunc_model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=pyfunc_artifact_path)\n )\n\n pyfunc_conf = _get_flavor_configuration(\n model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME\n )\n conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV])\n with open(conda_env_path, \"r\") as f:\n conda_env = yaml.safe_load(f)\n\n assert conda_env == mlflow.pyfunc.model.get_default_conda_env()\n",
"import argparse\n\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, log_loss\nimport xgboost as xgb\nimport matplotlib as mpl\n\n\nimport mlflow\nimport mlflow.xgboost\n\nmpl.use(\"Agg\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"XGBoost example\")\n parser.add_argument(\n \"--learning-rate\",\n type=float,\n default=0.3,\n help=\"learning rate to update step size at each boosting step (default: 0.3)\",\n )\n parser.add_argument(\n \"--colsample-bytree\",\n type=float,\n default=1.0,\n help=\"subsample ratio of columns when constructing each tree (default: 1.0)\",\n )\n parser.add_argument(\n \"--subsample\",\n type=float,\n default=1.0,\n help=\"subsample ratio of the training instances (default: 1.0)\",\n )\n return parser.parse_args()\n\n\ndef main():\n # parse command-line arguments\n args = parse_args()\n\n # prepare train and test data\n iris = datasets.load_iris()\n X = iris.data\n y = iris.target\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n dtrain = xgb.DMatrix(X_train, label=y_train)\n dtest = xgb.DMatrix(X_test, label=y_test)\n\n # enable auto logging\n mlflow.xgboost.autolog()\n\n with mlflow.start_run():\n\n # train model\n params = {\n \"objective\": \"multi:softprob\",\n \"num_class\": 3,\n \"learning_rate\": args.learning_rate,\n \"eval_metric\": \"mlogloss\",\n \"colsample_bytree\": args.colsample_bytree,\n \"subsample\": args.subsample,\n \"seed\": 42,\n }\n model = xgb.train(params, dtrain, evals=[(dtrain, \"train\")])\n\n # evaluate model\n y_proba = model.predict(dtest)\n y_pred = y_proba.argmax(axis=1)\n loss = log_loss(y_test, y_proba)\n acc = accuracy_score(y_test, y_pred)\n\n # log metrics\n mlflow.log_metrics({\"log_loss\": loss, \"accuracy\": acc})\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.DataFrame"
],
[
"matplotlib.use",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.log_loss",
"sklearn.metrics.accuracy_score"
]
] |
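The second file in this row, `examples/xgboost/train.py`, follows a plain split/train/score loop: load iris, hold out 20% with `train_test_split`, train an XGBoost booster under `mlflow.xgboost.autolog()`, then log `log_loss` and `accuracy_score` on the held-out set. Here is a minimal sketch of the same evaluation pattern using only the scikit-learn calls from the APIs column; the `LogisticRegression` stand-in for the XGBoost booster (and the omission of MLflow tracking) is my simplification, not part of the example script:

```python
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss
from sklearn.model_selection import train_test_split

# Same data preparation as the example script: an 80/20 split of iris.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42
)

# Stand-in model; the original trains xgb.train(params, dtrain, ...) instead.
model = LogisticRegression(max_iter=1000).fit(X_train, y_train)

y_proba = model.predict_proba(X_test)  # class probabilities, like model.predict(dtest)
y_pred = y_proba.argmax(axis=1)        # hard predictions from the probabilities

print("log_loss:", log_loss(y_test, y_proba))
print("accuracy:", accuracy_score(y_test, y_pred))
```

Swapping the stand-in for `xgb.train(params, dtrain, evals=[(dtrain, "train")])` and wrapping the block in `mlflow.start_run()` recovers the structure of the original script.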
WhiteCrow/zipline |
[
"ae540c57bac7fa43118dcfde95f5af9ae7efaee4"
] |
[
"zipline/algorithm.py"
] |
[
"#\n# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import Iterable\nfrom copy import copy\nimport operator as op\nimport warnings\nfrom datetime import tzinfo, time\nimport logbook\nimport pytz\nimport pandas as pd\nfrom contextlib2 import ExitStack\nfrom pandas._libs.tslib import normalize_date\nimport numpy as np\n\nfrom itertools import chain, repeat\nfrom numbers import Integral\n\nfrom six import (\n exec_,\n iteritems,\n itervalues,\n string_types,\n viewkeys,\n)\n\nfrom zipline._protocol import handle_non_market_minutes\nfrom zipline.assets.synthetic import make_simple_equity_info\nfrom zipline.data.data_portal import DataPortal\nfrom zipline.data.resample import minute_panel_to_session_panel\nfrom zipline.data.us_equity_pricing import PanelBarReader\nfrom zipline.errors import (\n AttachPipelineAfterInitialize,\n CannotOrderDelistedAsset,\n DuplicatePipelineName,\n HistoryInInitialize,\n IncompatibleCommissionModel,\n IncompatibleSlippageModel,\n NoSuchPipeline,\n OrderDuringInitialize,\n OrderInBeforeTradingStart,\n PipelineOutputDuringInitialize,\n RegisterAccountControlPostInit,\n RegisterTradingControlPostInit,\n ScheduleFunctionInvalidCalendar,\n SetBenchmarkOutsideInitialize,\n SetCancelPolicyPostInit,\n SetCommissionPostInit,\n SetSlippagePostInit,\n UnsupportedCancelPolicy,\n UnsupportedDatetimeFormat,\n UnsupportedOrderParameters,\n)\nfrom zipline.finance.trading import TradingEnvironment\nfrom zipline.finance.blotter import Blotter\nfrom zipline.finance.controls import (\n LongOnly,\n MaxOrderCount,\n MaxOrderSize,\n MaxPositionSize,\n MaxLeverage,\n MinLeverage,\n RestrictedListOrder\n)\nfrom zipline.finance.execution import (\n LimitOrder,\n MarketOrder,\n StopLimitOrder,\n StopOrder,\n)\nfrom zipline.finance.asset_restrictions import Restrictions\nfrom zipline.finance.cancel_policy import NeverCancel, CancelPolicy\nfrom zipline.finance.asset_restrictions import (\n NoRestrictions,\n StaticRestrictions,\n SecurityListRestrictions,\n)\nfrom zipline.assets import Asset, Equity, Future\nfrom zipline.gens.tradesimulation import AlgorithmSimulator\nfrom zipline.finance.metrics import MetricsTracker, load as load_metrics_set\nfrom zipline.pipeline import Pipeline\nfrom zipline.pipeline.engine import (\n ExplodingPipelineEngine,\n SimplePipelineEngine,\n)\nfrom zipline.utils.api_support import (\n api_method,\n require_initialized,\n require_not_initialized,\n ZiplineAPI,\n disallowed_in_before_trading_start)\nfrom zipline.utils.input_validation import (\n coerce_string,\n ensure_upper_case,\n error_keywords,\n expect_dtypes,\n expect_types,\n optional,\n)\nfrom zipline.utils.numpy_utils import int64_dtype\nfrom zipline.utils.calendars.trading_calendar import days_at_time\nfrom zipline.utils.cache import ExpiringCache\nfrom zipline.utils.calendars import get_calendar\nfrom zipline.utils.pandas_utils import clear_dataframe_indexer_caches\n\nimport zipline.utils.events\nfrom zipline.utils.events import (\n EventManager,\n 
make_eventrule,\n date_rules,\n time_rules,\n calendars,\n AfterOpen,\n BeforeClose\n)\nfrom zipline.utils.factory import create_simulation_parameters\nfrom zipline.utils.math_utils import (\n tolerant_equals,\n round_if_near_integer,\n)\nfrom zipline.utils.preprocess import preprocess\nfrom zipline.utils.security_list import SecurityList\n\nimport zipline.protocol\nfrom zipline.sources.requests_csv import PandasRequestsCSV\n\nfrom zipline.gens.sim_engine import MinuteSimulationClock\nfrom zipline.sources.benchmark_source import BenchmarkSource\nfrom zipline.zipline_warnings import ZiplineDeprecationWarning\n\n\nlog = logbook.Logger(\"ZiplineLog\")\n\n\nclass TradingAlgorithm(object):\n \"\"\"A class that represents a trading strategy and parameters to execute\n the strategy.\n\n Parameters\n ----------\n *args, **kwargs\n Forwarded to ``initialize`` unless listed below.\n initialize : callable[context -> None], optional\n Function that is called at the start of the simulation to\n setup the initial context.\n handle_data : callable[(context, data) -> None], optional\n Function called on every bar. This is where most logic should be\n implemented.\n before_trading_start : callable[(context, data) -> None], optional\n Function that is called before any bars have been processed each\n day.\n analyze : callable[(context, DataFrame) -> None], optional\n Function that is called at the end of the backtest. This is passed\n the context and the performance results for the backtest.\n script : str, optional\n Algoscript that contains the definitions for the four algorithm\n lifecycle functions and any supporting code.\n namespace : dict, optional\n The namespace to execute the algoscript in. By default this is an\n empty namespace that will include only python built ins.\n algo_filename : str, optional\n The filename for the algoscript. This will be used in exception\n tracebacks. default: '<string>'.\n data_frequency : {'daily', 'minute'}, optional\n The duration of the bars.\n instant_fill : bool, optional\n Whether to fill orders immediately or on next bar. 
default: False\n equities_metadata : dict or DataFrame or file-like object, optional\n If dict is provided, it must have the following structure:\n * keys are the identifiers\n * values are dicts containing the metadata, with the metadata\n field name as the key\n If pandas.DataFrame is provided, it must have the\n following structure:\n * column names must be the metadata fields\n * index must be the different asset identifiers\n * array contents should be the metadata value\n If an object with a ``read`` method is provided, ``read`` must\n return rows containing at least one of 'sid' or 'symbol' along\n with the other metadata fields.\n futures_metadata : dict or DataFrame or file-like object, optional\n The same layout as ``equities_metadata`` except that it is used\n for futures information.\n identifiers : list, optional\n Any asset identifiers that are not provided in the\n equities_metadata, but will be traded by this TradingAlgorithm.\n get_pipeline_loader : callable[BoundColumn -> PipelineLoader], optional\n The function that maps pipeline columns to their loaders.\n create_event_context : callable[BarData -> context manager], optional\n A function used to create a context mananger that wraps the\n execution of all events that are scheduled for a bar.\n This function will be passed the data for the bar and should\n return the actual context manager that will be entered.\n history_container_class : type, optional\n The type of history container to use. default: HistoryContainer\n platform : str, optional\n The platform the simulation is running on. This can be queried for\n in the simulation with ``get_environment``. This allows algorithms\n to conditionally execute code based on platform it is running on.\n default: 'zipline'\n adjustment_reader : AdjustmentReader\n The interface to the adjustments.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize sids and other state variables.\n\n :Arguments:\n :Optional:\n initialize : function\n Function that is called with a single\n argument at the begninning of the simulation.\n handle_data : function\n Function that is called with 2 arguments\n (context and data) on every bar.\n script : str\n Algoscript that contains initialize and\n handle_data function definition.\n data_frequency : {'daily', 'minute'}\n The duration of the bars.\n capital_base : float <default: 1.0e5>\n How much capital to start with.\n asset_finder : An AssetFinder object\n A new AssetFinder object to be used in this TradingEnvironment\n equities_metadata : can be either:\n - dict\n - pandas.DataFrame\n - object with 'read' property\n If dict is provided, it must have the following structure:\n * keys are the identifiers\n * values are dicts containing the metadata, with the metadata\n field name as the key\n If pandas.DataFrame is provided, it must have the\n following structure:\n * column names must be the metadata fields\n * index must be the different asset identifiers\n * array contents should be the metadata value\n If an object with a 'read' property is provided, 'read' must\n return rows containing at least one of 'sid' or 'symbol' along\n with the other metadata fields.\n identifiers : List\n Any asset identifiers that are not provided in the\n equities_metadata, but will be traded by this TradingAlgorithm\n \"\"\"\n self.sources = []\n\n # List of trading controls to be used to validate orders.\n self.trading_controls = []\n\n # List of account controls to be checked on each bar.\n self.account_controls = []\n\n self._recorded_vars = 
{}\n self.namespace = kwargs.pop('namespace', {})\n\n self._platform = kwargs.pop('platform', 'zipline')\n\n self.logger = None\n\n self.data_portal = kwargs.pop('data_portal', None)\n\n # If an env has been provided, pop it\n self.trading_environment = kwargs.pop('env', None)\n\n if self.trading_environment is None:\n self.trading_environment = TradingEnvironment()\n\n # Update the TradingEnvironment with the provided asset metadata\n if 'equities_metadata' in kwargs or 'futures_metadata' in kwargs:\n warnings.warn(\n 'passing metadata to TradingAlgorithm is deprecated; please'\n ' write this data into the asset db before passing it to the'\n ' trading environment',\n DeprecationWarning,\n stacklevel=1,\n )\n self.trading_environment.write_data(\n equities=kwargs.pop('equities_metadata', None),\n futures=kwargs.pop('futures_metadata', None),\n )\n\n # If a schedule has been provided, pop it. Otherwise, use NYSE.\n self.trading_calendar = kwargs.pop(\n 'trading_calendar',\n get_calendar('NYSE')\n )\n\n self.sim_params = kwargs.pop('sim_params', None)\n if self.sim_params is None:\n self.sim_params = create_simulation_parameters(\n start=kwargs.pop('start', None),\n end=kwargs.pop('end', None),\n trading_calendar=self.trading_calendar,\n )\n\n self.metrics_tracker = None\n self._last_sync_time = pd.NaT\n self._metrics_set = kwargs.pop('metrics_set', None)\n if self._metrics_set is None:\n self._metrics_set = load_metrics_set('default')\n\n # Pull in the environment's new AssetFinder for quick reference\n self.asset_finder = self.trading_environment.asset_finder\n\n # Initialize Pipeline API data.\n self.init_engine(kwargs.pop('get_pipeline_loader', None))\n self._pipelines = {}\n\n # Create an already-expired cache so that we compute the first time\n # data is requested.\n self._pipeline_cache = ExpiringCache(\n cleanup=clear_dataframe_indexer_caches\n )\n\n self.blotter = kwargs.pop('blotter', None)\n self.cancel_policy = kwargs.pop('cancel_policy', NeverCancel())\n if not self.blotter:\n self.blotter = Blotter(\n data_frequency=self.data_frequency,\n # Default to NeverCancel in zipline\n cancel_policy=self.cancel_policy,\n )\n\n # The symbol lookup date specifies the date to use when resolving\n # symbols to sids, and can be set using set_symbol_lookup_date()\n self._symbol_lookup_date = None\n\n # If string is passed in, execute and get reference to\n # functions.\n self.algoscript = kwargs.pop('script', None)\n\n self._initialize = None\n self._before_trading_start = None\n self._analyze = None\n\n self._in_before_trading_start = False\n\n self.event_manager = EventManager(\n create_context=kwargs.pop('create_event_context', None),\n )\n\n self._handle_data = None\n\n def noop(*args, **kwargs):\n pass\n\n if self.algoscript is not None:\n api_methods = {\n 'initialize',\n 'handle_data',\n 'before_trading_start',\n 'analyze',\n }\n unexpected_api_methods = viewkeys(kwargs) & api_methods\n if unexpected_api_methods:\n raise ValueError(\n \"TradingAlgorithm received a script and the following API\"\n \" methods as functions:\\n{funcs}\".format(\n funcs=unexpected_api_methods,\n )\n )\n\n filename = kwargs.pop('algo_filename', None)\n if filename is None:\n filename = '<string>'\n code = compile(self.algoscript, filename, 'exec')\n exec_(code, self.namespace)\n\n self._initialize = self.namespace.get('initialize', noop)\n self._handle_data = self.namespace.get('handle_data', noop)\n self._before_trading_start = self.namespace.get(\n 'before_trading_start',\n )\n # Optional analyze function, 
gets called after run\n self._analyze = self.namespace.get('analyze')\n\n else:\n self._initialize = kwargs.pop('initialize', noop)\n self._handle_data = kwargs.pop('handle_data', noop)\n self._before_trading_start = kwargs.pop(\n 'before_trading_start',\n None,\n )\n self._analyze = kwargs.pop('analyze', None)\n\n self.event_manager.add_event(\n zipline.utils.events.Event(\n zipline.utils.events.Always(),\n # We pass handle_data.__func__ to get the unbound method.\n # We will explicitly pass the algorithm to bind it again.\n self.handle_data.__func__,\n ),\n prepend=True,\n )\n\n # Alternative way of setting data_frequency for backwards\n # compatibility.\n if 'data_frequency' in kwargs:\n self.data_frequency = kwargs.pop('data_frequency')\n\n # Prepare the algo for initialization\n self.initialized = False\n self.initialize_args = args\n self.initialize_kwargs = kwargs\n\n self.benchmark_sid = kwargs.pop('benchmark_sid', None)\n\n # A dictionary of capital changes, keyed by timestamp, indicating the\n # target/delta of the capital changes, along with values\n self.capital_changes = kwargs.pop('capital_changes', {})\n\n # A dictionary of the actual capital change deltas, keyed by timestamp\n self.capital_change_deltas = {}\n\n self.restrictions = NoRestrictions()\n\n def init_engine(self, get_loader):\n \"\"\"\n Construct and store a PipelineEngine from loader.\n\n If get_loader is None, constructs an ExplodingPipelineEngine\n \"\"\"\n if get_loader is not None:\n self.engine = SimplePipelineEngine(\n get_loader,\n self.trading_calendar.all_sessions,\n self.asset_finder,\n )\n else:\n self.engine = ExplodingPipelineEngine()\n\n def initialize(self, *args, **kwargs):\n \"\"\"\n Call self._initialize with `self` made available to Zipline API\n functions.\n \"\"\"\n with ZiplineAPI(self):\n self._initialize(self, *args, **kwargs)\n\n def before_trading_start(self, data):\n if self._before_trading_start is None:\n return\n\n self._in_before_trading_start = True\n\n with handle_non_market_minutes(data) if \\\n self.data_frequency == \"minute\" else ExitStack():\n self._before_trading_start(self, data)\n\n self._in_before_trading_start = False\n\n def handle_data(self, data):\n if self._handle_data:\n self._handle_data(self, data)\n\n def analyze(self, perf):\n if self._analyze is None:\n return\n\n with ZiplineAPI(self):\n self._analyze(self, perf)\n\n def __repr__(self):\n \"\"\"\n N.B. 
this does not yet represent a string that can be used\n to instantiate an exact copy of an algorithm.\n\n However, it is getting close, and provides some value as something\n that can be inspected interactively.\n \"\"\"\n return \"\"\"\n{class_name}(\n capital_base={capital_base}\n sim_params={sim_params},\n initialized={initialized},\n slippage_models={slippage_models},\n commission_models={commission_models},\n blotter={blotter},\n recorded_vars={recorded_vars})\n\"\"\".strip().format(class_name=self.__class__.__name__,\n capital_base=self.sim_params.capital_base,\n sim_params=repr(self.sim_params),\n initialized=self.initialized,\n slippage_models=repr(self.blotter.slippage_models),\n commission_models=repr(self.blotter.commission_models),\n blotter=repr(self.blotter),\n recorded_vars=repr(self.recorded_vars))\n\n def _create_clock(self):\n \"\"\"\n If the clock property is not set, then create one based on frequency.\n \"\"\"\n trading_o_and_c = self.trading_calendar.schedule.ix[\n self.sim_params.sessions]\n market_closes = trading_o_and_c['market_close']\n minutely_emission = False\n\n if self.sim_params.data_frequency == 'minute':\n market_opens = trading_o_and_c['market_open']\n\n minutely_emission = self.sim_params.emission_rate == \"minute\"\n else:\n # in daily mode, we want to have one bar per session, timestamped\n # as the last minute of the session.\n market_opens = market_closes\n\n # The calendar's execution times are the minutes over which we actually\n # want to run the clock. Typically the execution times simply adhere to\n # the market open and close times. In the case of the futures calendar,\n # for example, we only want to simulate over a subset of the full 24\n # hour calendar, so the execution times dictate a market open time of\n # 6:31am US/Eastern and a close of 5:00pm US/Eastern.\n execution_opens = \\\n self.trading_calendar.execution_time_from_open(market_opens)\n execution_closes = \\\n self.trading_calendar.execution_time_from_close(market_closes)\n\n # FIXME generalize these values\n before_trading_start_minutes = days_at_time(\n self.sim_params.sessions,\n time(8, 45),\n \"US/Eastern\"\n )\n\n return MinuteSimulationClock(\n self.sim_params.sessions,\n execution_opens,\n execution_closes,\n before_trading_start_minutes,\n minute_emission=minutely_emission,\n )\n\n def _create_benchmark_source(self):\n if self.benchmark_sid is not None:\n benchmark_asset = self.asset_finder.retrieve_asset(\n self.benchmark_sid)\n benchmark_returns = None\n else:\n benchmark_asset = None\n # get benchmark info from trading environment, which defaults to\n # downloading data from IEX Trading.\n benchmark_returns = self.trading_environment.benchmark_returns\n return BenchmarkSource(\n benchmark_asset=benchmark_asset,\n trading_calendar=self.trading_calendar,\n sessions=self.sim_params.sessions,\n data_portal=self.data_portal,\n emission_rate=self.sim_params.emission_rate,\n benchmark_returns=benchmark_returns,\n )\n\n def _create_metrics_tracker(self):\n return MetricsTracker(\n trading_calendar=self.trading_calendar,\n first_session=self.sim_params.start_session,\n last_session=self.sim_params.end_session,\n capital_base=self.sim_params.capital_base,\n emission_rate=self.sim_params.emission_rate,\n data_frequency=self.sim_params.data_frequency,\n asset_finder=self.asset_finder,\n metrics=self._metrics_set,\n )\n\n def _create_generator(self, sim_params):\n if sim_params is not None:\n self.sim_params = sim_params\n\n self.metrics_tracker = metrics_tracker = 
self._create_metrics_tracker()\n\n # Set the dt initially to the period start by forcing it to change.\n self.on_dt_changed(self.sim_params.start_session)\n\n if not self.initialized:\n self.initialize(*self.initialize_args, **self.initialize_kwargs)\n self.initialized = True\n\n benchmark_source = self._create_benchmark_source()\n\n self.trading_client = AlgorithmSimulator(\n self,\n sim_params,\n self.data_portal,\n self._create_clock(),\n benchmark_source,\n self.restrictions,\n universe_func=self._calculate_universe\n )\n\n metrics_tracker.handle_start_of_simulation(benchmark_source)\n return self.trading_client.transform()\n\n def _calculate_universe(self):\n # this exists to provide backwards compatibility for older,\n # deprecated APIs, particularly around the iterability of\n # BarData (ie, 'for sid in data`).\n\n # our universe is all the assets passed into `run`.\n return self._assets_from_source\n\n def get_generator(self):\n \"\"\"\n Override this method to add new logic to the construction\n of the generator. Overrides can use the _create_generator\n method to get a standard construction generator.\n \"\"\"\n return self._create_generator(self.sim_params)\n\n def run(self, data=None, overwrite_sim_params=True):\n \"\"\"Run the algorithm.\n\n :Arguments:\n source : DataPortal\n\n :Returns:\n daily_stats : pandas.DataFrame\n Daily performance metrics such as returns, alpha etc.\n\n \"\"\"\n self._assets_from_source = []\n\n if isinstance(data, DataPortal):\n self.data_portal = data\n\n # define the universe as all the assets in the assetfinder\n # This is not great, because multiple runs can accumulate assets\n # in the assetfinder, but it's better than spending time adding\n # functionality in the dataportal to report all the assets it\n # knows about.\n self._assets_from_source = \\\n self.trading_environment.asset_finder.retrieve_all(\n self.trading_environment.asset_finder.sids\n )\n\n else:\n if isinstance(data, pd.DataFrame):\n # If a DataFrame is passed. Promote it to a Panel.\n # The reader will fake volume values.\n data = pd.Panel({'close': data.copy()})\n data = data.swapaxes(0, 2)\n\n if isinstance(data, pd.Panel):\n # Guard against tz-naive index.\n if data.major_axis.tz is None:\n data.major_axis = data.major_axis.tz_localize('UTC')\n\n # For compatibility with existing examples allow start/end\n # to be inferred.\n if overwrite_sim_params:\n self.sim_params = self.sim_params.create_new(\n self.trading_calendar.minute_to_session_label(\n data.major_axis[0]\n ),\n self.trading_calendar.minute_to_session_label(\n data.major_axis[-1]\n ),\n )\n\n # Assume data is daily if timestamp times are\n # standardized, otherwise assume minute bars.\n times = data.major_axis.time\n if np.all(times == times[0]):\n self.sim_params.data_frequency = 'daily'\n else:\n self.sim_params.data_frequency = 'minute'\n\n copy_panel = data.rename(\n # These were the old names for the close/open columns. 
We\n # need to make a copy anyway, so swap these for backwards\n # compat while we're here.\n minor_axis={'close_price': 'close', 'open_price': 'open'},\n copy=True,\n )\n copy_panel.items = self._write_and_map_id_index_to_sids(\n copy_panel.items, copy_panel.major_axis[0],\n )\n self._assets_from_source = (\n self.asset_finder.retrieve_all(\n copy_panel.items\n )\n )\n\n equity_reader = PanelBarReader(\n self.trading_calendar,\n copy_panel,\n self.sim_params.data_frequency,\n )\n if self.sim_params.data_frequency == 'daily':\n equity_readers = {\n 'equity_daily_reader': equity_reader,\n }\n elif self.sim_params.data_frequency == 'minute':\n equity_readers = {\n 'equity_minute_reader': equity_reader,\n 'equity_daily_reader': PanelBarReader(\n self.trading_calendar,\n minute_panel_to_session_panel(\n copy_panel,\n self.trading_calendar,\n ),\n 'daily',\n ),\n }\n\n self.data_portal = DataPortal(\n self.asset_finder,\n self.trading_calendar,\n first_trading_day=equity_reader.first_trading_day,\n **equity_readers\n )\n\n # Force a reset of the metrics tracker, in case\n # this is a repeat run of the algorithm.\n self.metrics_tracker = None\n\n # Create zipline and loop through simulated_trading.\n # Each iteration returns a perf dictionary\n try:\n perfs = []\n for perf in self.get_generator():\n perfs.append(perf)\n\n # convert perf dict to pandas dataframe\n daily_stats = self._create_daily_stats(perfs)\n\n self.analyze(daily_stats)\n finally:\n self.data_portal = None\n self.metrics_tracker = None\n\n return daily_stats\n\n def _write_and_map_id_index_to_sids(self, identifiers, as_of_date):\n # Build new Assets for identifiers that can't be resolved as\n # sids/Assets\n def is_unknown(asset_or_sid):\n sid = op.index(asset_or_sid)\n return self.asset_finder.retrieve_asset(\n sid=sid,\n default_none=True\n ) is None\n\n new_assets = set()\n new_sids = set()\n new_symbols = set()\n for identifier in identifiers:\n if isinstance(identifier, Asset) and is_unknown(identifier):\n new_assets.add(identifier)\n elif isinstance(identifier, Integral) and is_unknown(identifier):\n new_sids.add(identifier)\n elif isinstance(identifier, (string_types)):\n new_symbols.add(identifier)\n else:\n try:\n new_sids.add(op.index(identifier))\n except TypeError:\n raise TypeError(\n \"Can't convert %s to an asset.\" % identifier\n )\n\n new_assets = tuple(new_assets)\n new_sids = tuple(new_sids)\n new_symbols = tuple(new_symbols)\n\n number_of_kinds_of_new_things = (\n sum((bool(new_assets), bool(new_sids), bool(new_symbols)))\n )\n\n # Nothing to insert, bail early.\n if not number_of_kinds_of_new_things:\n return self.asset_finder.map_identifier_index_to_sids(\n identifiers, as_of_date,\n )\n elif number_of_kinds_of_new_things == 1:\n warnings.warn(\n 'writing unknown identifiers into the assets db of the trading'\n ' environment is deprecated; please write this information'\n ' to the assets db before constructing the environment',\n DeprecationWarning,\n stacklevel=2,\n )\n else:\n raise ValueError(\n \"Mixed types in DataFrame or Panel index.\\n\"\n \"Asset Count: %d, Sid Count: %d, Symbol Count: %d.\\n\"\n \"Choose one type and stick with it.\" % (\n len(new_assets),\n len(new_sids),\n len(new_symbols),\n )\n )\n\n def map_getattr(iterable, attr):\n return [getattr(i, attr) for i in iterable]\n\n if new_assets:\n frame_to_write = pd.DataFrame(\n data=dict(\n symbol=map_getattr(new_assets, 'symbol'),\n start_date=map_getattr(new_assets, 'start_date'),\n end_date=map_getattr(new_assets, 'end_date'),\n 
exchange=map_getattr(new_assets, 'exchange'),\n ),\n index=map_getattr(new_assets, 'sid'),\n )\n elif new_sids:\n frame_to_write = make_simple_equity_info(\n new_sids,\n start_date=self.sim_params.start_session,\n end_date=self.sim_params.end_session,\n symbols=map(str, new_sids),\n )\n elif new_symbols:\n existing_sids = self.asset_finder.sids\n first_sid = max(existing_sids) + 1 if existing_sids else 0\n fake_sids = range(first_sid, first_sid + len(new_symbols))\n frame_to_write = make_simple_equity_info(\n sids=fake_sids,\n start_date=as_of_date,\n end_date=self.sim_params.end_session,\n symbols=new_symbols,\n )\n else:\n raise AssertionError(\"This should never happen.\")\n\n self.trading_environment.write_data(equities=frame_to_write)\n\n # We need to clear out any cache misses that were stored while trying\n # to do lookups. The real fix for this problem is to not construct an\n # AssetFinder until we `run()` when we actually have all the data we\n # need to so.\n self.asset_finder._reset_caches()\n\n return self.asset_finder.map_identifier_index_to_sids(\n identifiers, as_of_date,\n )\n\n def _create_daily_stats(self, perfs):\n # create daily and cumulative stats dataframe\n daily_perfs = []\n # TODO: the loop here could overwrite expected properties\n # of daily_perf. Could potentially raise or log a\n # warning.\n for perf in perfs:\n if 'daily_perf' in perf:\n\n perf['daily_perf'].update(\n perf['daily_perf'].pop('recorded_vars')\n )\n perf['daily_perf'].update(perf['cumulative_risk_metrics'])\n daily_perfs.append(perf['daily_perf'])\n else:\n self.risk_report = perf\n\n daily_dts = pd.DatetimeIndex(\n [p['period_close'] for p in daily_perfs], tz='UTC'\n )\n daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)\n return daily_stats\n\n def calculate_capital_changes(self, dt, emission_rate, is_interday,\n portfolio_value_adjustment=0.0):\n \"\"\"\n If there is a capital change for a given dt, this means the the change\n occurs before `handle_data` on the given dt. In the case of the\n change being a target value, the change will be computed on the\n portfolio value according to prices at the given dt\n\n `portfolio_value_adjustment`, if specified, will be removed from the\n portfolio_value of the cumulative performance when calculating deltas\n from target capital changes.\n \"\"\"\n try:\n capital_change = self.capital_changes[dt]\n except KeyError:\n return\n\n self._sync_last_sale_prices()\n if capital_change['type'] == 'target':\n target = capital_change['value']\n capital_change_amount = (\n target -\n (\n self.portfolio.portfolio_value -\n portfolio_value_adjustment\n )\n )\n\n log.info('Processing capital change to target %s at %s. 
Capital '\n 'change delta is %s' % (target, dt,\n capital_change_amount))\n elif capital_change['type'] == 'delta':\n target = None\n capital_change_amount = capital_change['value']\n log.info('Processing capital change of delta %s at %s'\n % (capital_change_amount, dt))\n else:\n log.error(\"Capital change %s does not indicate a valid type \"\n \"('target' or 'delta')\" % capital_change)\n return\n\n self.capital_change_deltas.update({dt: capital_change_amount})\n self.metrics_tracker.capital_change(capital_change_amount)\n\n yield {\n 'capital_change':\n {'date': dt,\n 'type': 'cash',\n 'target': target,\n 'delta': capital_change_amount}\n }\n\n @api_method\n def get_environment(self, field='platform'):\n \"\"\"Query the execution environment.\n\n Parameters\n ----------\n field : {'platform', 'arena', 'data_frequency',\n 'start', 'end', 'capital_base', 'platform', '*'}\n The field to query. The options have the following meanings:\n arena : str\n The arena from the simulation parameters. This will normally\n be ``'backtest'`` but some systems may use this distinguish\n live trading from backtesting.\n data_frequency : {'daily', 'minute'}\n data_frequency tells the algorithm if it is running with\n daily data or minute data.\n start : datetime\n The start date for the simulation.\n end : datetime\n The end date for the simulation.\n capital_base : float\n The starting capital for the simulation.\n platform : str\n The platform that the code is running on. By default this\n will be the string 'zipline'. This can allow algorithms to\n know if they are running on the Quantopian platform instead.\n * : dict[str -> any]\n Returns all of the fields in a dictionary.\n\n Returns\n -------\n val : any\n The value for the field queried. See above for more information.\n\n Raises\n ------\n ValueError\n Raised when ``field`` is not a valid option.\n \"\"\"\n env = {\n 'arena': self.sim_params.arena,\n 'data_frequency': self.sim_params.data_frequency,\n 'start': self.sim_params.first_open,\n 'end': self.sim_params.last_close,\n 'capital_base': self.sim_params.capital_base,\n 'platform': self._platform\n }\n if field == '*':\n return env\n else:\n try:\n return env[field]\n except KeyError:\n raise ValueError(\n '%r is not a valid field for get_environment' % field,\n )\n\n @api_method\n def fetch_csv(self,\n url,\n pre_func=None,\n post_func=None,\n date_column='date',\n date_format=None,\n timezone=pytz.utc.zone,\n symbol=None,\n mask=True,\n symbol_column=None,\n special_params_checker=None,\n **kwargs):\n \"\"\"Fetch a csv from a remote url and register the data so that it is\n queryable from the ``data`` object.\n\n Parameters\n ----------\n url : str\n The url of the csv file to load.\n pre_func : callable[pd.DataFrame -> pd.DataFrame], optional\n A callback to allow preprocessing the raw data returned from\n fetch_csv before dates are paresed or symbols are mapped.\n post_func : callable[pd.DataFrame -> pd.DataFrame], optional\n A callback to allow postprocessing of the data after dates and\n symbols have been mapped.\n date_column : str, optional\n The name of the column in the preprocessed dataframe containing\n datetime information to map the data.\n date_format : str, optional\n The format of the dates in the ``date_column``. If not provided\n ``fetch_csv`` will attempt to infer the format. 
For information\n about the format of this string, see :func:`pandas.read_csv`.\n timezone : tzinfo or str, optional\n The timezone for the datetime in the ``date_column``.\n symbol : str, optional\n If the data is about a new asset or index then this string will\n be the name used to identify the values in ``data``. For example,\n one may use ``fetch_csv`` to load data for VIX, then this field\n could be the string ``'VIX'``.\n mask : bool, optional\n Drop any rows which cannot be symbol mapped.\n symbol_column : str\n If the data is attaching some new attribute to each asset then this\n argument is the name of the column in the preprocessed dataframe\n containing the symbols. This will be used along with the date\n information to map the sids in the asset finder.\n **kwargs\n Forwarded to :func:`pandas.read_csv`.\n\n Returns\n -------\n csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV\n A requests source that will pull data from the url specified.\n \"\"\"\n\n # Show all the logs every time fetcher is used.\n csv_data_source = PandasRequestsCSV(\n url,\n pre_func,\n post_func,\n self.asset_finder,\n self.trading_calendar.day,\n self.sim_params.start_session,\n self.sim_params.end_session,\n date_column,\n date_format,\n timezone,\n symbol,\n mask,\n symbol_column,\n data_frequency=self.data_frequency,\n special_params_checker=special_params_checker,\n **kwargs\n )\n\n # ingest this into dataportal\n self.data_portal.handle_extra_source(csv_data_source.df,\n self.sim_params)\n\n return csv_data_source\n\n def add_event(self, rule, callback):\n \"\"\"Adds an event to the algorithm's EventManager.\n\n Parameters\n ----------\n rule : EventRule\n The rule for when the callback should be triggered.\n callback : callable[(context, data) -> None]\n The function to execute when the rule is triggered.\n \"\"\"\n self.event_manager.add_event(\n zipline.utils.events.Event(rule, callback),\n )\n\n @api_method\n def schedule_function(self,\n func,\n date_rule=None,\n time_rule=None,\n half_days=True,\n calendar=None):\n \"\"\"Schedules a function to be called according to some timed rules.\n\n Parameters\n ----------\n func : callable[(context, data) -> None]\n The function to execute when the rule is triggered.\n date_rule : EventRule, optional\n The rule for the dates to execute this function.\n time_rule : EventRule, optional\n The rule for the times to execute this function.\n half_days : bool, optional\n Should this rule fire on half days?\n calendar : Sentinel, optional\n Calendar used to reconcile date and time rules.\n\n See Also\n --------\n :class:`zipline.api.date_rules`\n :class:`zipline.api.time_rules`\n \"\"\"\n\n # When the user calls schedule_function(func, <time_rule>), assume that\n # the user meant to specify a time rule but no date rule, instead of\n # a date rule and no time rule as the signature suggests\n if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:\n warnings.warn('Got a time rule for the second positional argument '\n 'date_rule. 
You should use keyword argument '\n 'time_rule= when calling schedule_function without '\n 'specifying a date_rule', stacklevel=3)\n\n date_rule = date_rule or date_rules.every_day()\n time_rule = ((time_rule or time_rules.every_minute())\n if self.sim_params.data_frequency == 'minute' else\n # If we are in daily mode the time_rule is ignored.\n time_rules.every_minute())\n\n # Check the type of the algorithm's schedule before pulling calendar\n # Note that the ExchangeTradingSchedule is currently the only\n # TradingSchedule class, so this is unlikely to be hit\n if calendar is None:\n cal = self.trading_calendar\n elif calendar is calendars.US_EQUITIES:\n cal = get_calendar('NYSE')\n elif calendar is calendars.US_FUTURES:\n cal = get_calendar('us_futures')\n else:\n raise ScheduleFunctionInvalidCalendar(\n given_calendar=calendar,\n allowed_calendars=(\n '[calendars.US_EQUITIES, calendars.US_FUTURES]'\n ),\n )\n\n self.add_event(\n make_eventrule(date_rule, time_rule, cal, half_days),\n func,\n )\n\n @api_method\n def record(self, *args, **kwargs):\n \"\"\"Track and record values each day.\n\n Parameters\n ----------\n **kwargs\n The names and values to record.\n\n Notes\n -----\n These values will appear in the performance packets and the performance\n dataframe passed to ``analyze`` and returned from\n :func:`~zipline.run_algorithm`.\n \"\"\"\n # Make 2 objects both referencing the same iterator\n args = [iter(args)] * 2\n\n # Zip generates list entries by calling `next` on each iterator it\n # receives. In this case the two iterators are the same object, so the\n # call to next on args[0] will also advance args[1], resulting in zip\n # returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.\n positionals = zip(*args)\n for name, value in chain(positionals, iteritems(kwargs)):\n self._recorded_vars[name] = value\n\n @api_method\n def set_benchmark(self, benchmark):\n \"\"\"Set the benchmark asset.\n\n Parameters\n ----------\n benchmark : Asset\n The asset to set as the new benchmark.\n\n Notes\n -----\n Any dividends payed out for that new benchmark asset will be\n automatically reinvested.\n \"\"\"\n if self.initialized:\n raise SetBenchmarkOutsideInitialize()\n\n self.benchmark_sid = benchmark\n\n @api_method\n @preprocess(symbol_str=ensure_upper_case)\n def symbol(self, symbol_str):\n \"\"\"Lookup an Equity by its ticker symbol.\n\n Parameters\n ----------\n symbol_str : str\n The ticker symbol for the equity to lookup.\n\n Returns\n -------\n equity : Equity\n The equity that held the ticker symbol on the current\n symbol lookup date.\n\n Raises\n ------\n SymbolNotFound\n Raised when the symbols was not held on the current lookup date.\n\n See Also\n --------\n :func:`zipline.api.set_symbol_lookup_date`\n \"\"\"\n # If the user has not set the symbol lookup date,\n # use the end_session as the date for sybmol->sid resolution.\n _lookup_date = self._symbol_lookup_date \\\n if self._symbol_lookup_date is not None \\\n else self.sim_params.end_session\n\n return self.asset_finder.lookup_symbol(\n symbol_str,\n as_of_date=_lookup_date,\n )\n\n @api_method\n @preprocess(root_symbol_str=ensure_upper_case)\n def continuous_future(self,\n root_symbol_str,\n offset=0,\n roll='volume',\n adjustment='mul'):\n \"\"\"Create a specifier for a continuous contract.\n\n Parameters\n ----------\n root_symbol_str : str\n The root symbol for the future chain.\n\n offset : int, optional\n The distance from the primary contract. 
Default is 0.\n\n roll_style : str, optional\n How rolls are determined. Default is 'volume'.\n\n adjustment : str, optional\n Method for adjusting lookback prices between rolls. Options are\n 'mul', 'add', and None. Default is 'mul'.\n\n Returns\n -------\n continuous_future : ContinuousFuture\n The continuous future specifier.\n \"\"\"\n return self.asset_finder.create_continuous_future(\n root_symbol_str,\n offset,\n roll,\n adjustment,\n )\n\n @api_method\n def symbols(self, *args):\n \"\"\"Lookup multuple Equities as a list.\n\n Parameters\n ----------\n *args : iterable[str]\n The ticker symbols to lookup.\n\n Returns\n -------\n equities : list[Equity]\n The equities that held the given ticker symbols on the current\n symbol lookup date.\n\n Raises\n ------\n SymbolNotFound\n Raised when one of the symbols was not held on the current\n lookup date.\n\n See Also\n --------\n :func:`zipline.api.set_symbol_lookup_date`\n \"\"\"\n return [self.symbol(identifier) for identifier in args]\n\n @api_method\n def sid(self, sid):\n \"\"\"Lookup an Asset by its unique asset identifier.\n\n Parameters\n ----------\n sid : int\n The unique integer that identifies an asset.\n\n Returns\n -------\n asset : Asset\n The asset with the given ``sid``.\n\n Raises\n ------\n SidsNotFound\n When a requested ``sid`` does not map to any asset.\n \"\"\"\n return self.asset_finder.retrieve_asset(sid)\n\n @api_method\n @preprocess(symbol=ensure_upper_case)\n def future_symbol(self, symbol):\n \"\"\"Lookup a futures contract with a given symbol.\n\n Parameters\n ----------\n symbol : str\n The symbol of the desired contract.\n\n Returns\n -------\n future : Future\n The future that trades with the name ``symbol``.\n\n Raises\n ------\n SymbolNotFound\n Raised when no contract named 'symbol' is found.\n \"\"\"\n return self.asset_finder.lookup_future_symbol(symbol)\n\n def _calculate_order_value_amount(self, asset, value):\n \"\"\"\n Calculates how many shares/contracts to order based on the type of\n asset being ordered.\n \"\"\"\n # Make sure the asset exists, and that there is a last price for it.\n # FIXME: we should use BarData's can_trade logic here, but I haven't\n # yet found a good way to do that.\n normalized_date = normalize_date(self.datetime)\n\n if normalized_date < asset.start_date:\n raise CannotOrderDelistedAsset(\n msg=\"Cannot order {0}, as it started trading on\"\n \" {1}.\".format(asset.symbol, asset.start_date)\n )\n elif normalized_date > asset.end_date:\n raise CannotOrderDelistedAsset(\n msg=\"Cannot order {0}, as it stopped trading on\"\n \" {1}.\".format(asset.symbol, asset.end_date)\n )\n else:\n last_price = \\\n self.trading_client.current_data.current(asset, \"price\")\n\n if np.isnan(last_price):\n raise CannotOrderDelistedAsset(\n msg=\"Cannot order {0} on {1} as there is no last \"\n \"price for the security.\".format(asset.symbol,\n self.datetime)\n )\n\n if tolerant_equals(last_price, 0):\n zero_message = \"Price of 0 for {psid}; can't infer value\".format(\n psid=asset\n )\n if self.logger:\n self.logger.debug(zero_message)\n # Don't place any order\n return 0\n\n if isinstance(asset, Future):\n value_multiplier = asset.multiplier\n else:\n value_multiplier = 1\n\n return value / (last_price * value_multiplier)\n\n def _can_order_asset(self, asset):\n if not isinstance(asset, Asset):\n raise UnsupportedOrderParameters(\n msg=\"Passing non-Asset argument to 'order()' is not supported.\"\n \" Use 'sid()' or 'symbol()' methods to look up an Asset.\"\n )\n\n if 
asset.auto_close_date:\n day = normalize_date(self.get_datetime())\n\n if day > min(asset.end_date, asset.auto_close_date):\n # If we are after the asset's end date or auto close date, warn\n # the user that they can't place an order for this asset, and\n # return None.\n log.warn(\"Cannot place order for {0}, as it has de-listed. \"\n \"Any existing positions for this asset will be \"\n \"liquidated on \"\n \"{1}.\".format(asset.symbol, asset.auto_close_date))\n\n return False\n\n return True\n\n @api_method\n @disallowed_in_before_trading_start(OrderInBeforeTradingStart())\n def order(self,\n asset,\n amount,\n limit_price=None,\n stop_price=None,\n style=None):\n \"\"\"Place an order.\n\n Parameters\n ----------\n asset : Asset\n The asset that this order is for.\n amount : int\n The amount of shares to order. If ``amount`` is positive, this is\n the number of shares to buy or cover. If ``amount`` is negative,\n this is the number of shares to sell or short.\n limit_price : float, optional\n The limit price for the order.\n stop_price : float, optional\n The stop price for the order.\n style : ExecutionStyle, optional\n The execution style for the order.\n\n Returns\n -------\n order_id : str or None\n The unique identifier for this order, or None if no order was\n placed.\n\n Notes\n -----\n The ``limit_price`` and ``stop_price`` arguments provide shorthands for\n passing common execution styles. Passing ``limit_price=N`` is\n equivalent to ``style=LimitOrder(N)``. Similarly, passing\n ``stop_price=M`` is equivalent to ``style=StopOrder(M)``, and passing\n ``limit_price=N`` and ``stop_price=M`` is equivalent to\n ``style=StopLimitOrder(N, M)``. It is an error to pass both a ``style``\n and ``limit_price`` or ``stop_price``.\n\n See Also\n --------\n :class:`zipline.finance.execution.ExecutionStyle`\n :func:`zipline.api.order_value`\n :func:`zipline.api.order_percent`\n \"\"\"\n if not self._can_order_asset(asset):\n return None\n\n amount, style = self._calculate_order(asset, amount,\n limit_price, stop_price, style)\n return self.blotter.order(asset, amount, style)\n\n def _calculate_order(self, asset, amount,\n limit_price=None, stop_price=None, style=None):\n amount = self.round_order(amount)\n\n # Raises a ZiplineError if invalid parameters are detected.\n self.validate_order_params(asset,\n amount,\n limit_price,\n stop_price,\n style)\n\n # Convert deprecated limit_price and stop_price parameters to use\n # ExecutionStyle objects.\n style = self.__convert_order_params_for_blotter(limit_price,\n stop_price,\n style)\n return amount, style\n\n @staticmethod\n def round_order(amount):\n \"\"\"\n Convert number of shares to an integer.\n\n By default, truncates to the integer share count that's either within\n .0001 of amount or closer to zero.\n\n E.g. 
3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0\n \"\"\"\n return int(round_if_near_integer(amount))\n\n def validate_order_params(self,\n asset,\n amount,\n limit_price,\n stop_price,\n style):\n \"\"\"\n Helper method for validating parameters to the order API function.\n\n Raises an UnsupportedOrderParameters if invalid arguments are found.\n \"\"\"\n\n if not self.initialized:\n raise OrderDuringInitialize(\n msg=\"order() can only be called from within handle_data()\"\n )\n\n if style:\n if limit_price:\n raise UnsupportedOrderParameters(\n msg=\"Passing both limit_price and style is not supported.\"\n )\n\n if stop_price:\n raise UnsupportedOrderParameters(\n msg=\"Passing both stop_price and style is not supported.\"\n )\n\n for control in self.trading_controls:\n control.validate(asset,\n amount,\n self.portfolio,\n self.get_datetime(),\n self.trading_client.current_data)\n\n @staticmethod\n def __convert_order_params_for_blotter(limit_price, stop_price, style):\n \"\"\"\n Helper method for converting deprecated limit_price and stop_price\n arguments into ExecutionStyle instances.\n\n This function assumes that either style == None or (limit_price,\n stop_price) == (None, None).\n \"\"\"\n if style:\n assert (limit_price, stop_price) == (None, None)\n return style\n if limit_price and stop_price:\n return StopLimitOrder(limit_price, stop_price)\n if limit_price:\n return LimitOrder(limit_price)\n if stop_price:\n return StopOrder(stop_price)\n else:\n return MarketOrder()\n\n @api_method\n @disallowed_in_before_trading_start(OrderInBeforeTradingStart())\n def order_value(self,\n asset,\n value,\n limit_price=None,\n stop_price=None,\n style=None):\n \"\"\"Place an order by desired value rather than desired number of\n shares.\n\n Parameters\n ----------\n asset : Asset\n The asset that this order is for.\n value : float\n If the requested asset exists, the requested value is\n divided by its price to imply the number of shares to transact.\n If the Asset being ordered is a Future, the 'value' calculated\n is actually the exposure, as Futures have no 'value'.\n\n value > 0 :: Buy/Cover\n value < 0 :: Sell/Short\n limit_price : float, optional\n The limit price for the order.\n stop_price : float, optional\n The stop price for the order.\n style : ExecutionStyle\n The execution style for the order.\n\n Returns\n -------\n order_id : str\n The unique identifier for this order.\n\n Notes\n -----\n See :func:`zipline.api.order` for more information about\n ``limit_price``, ``stop_price``, and ``style``\n\n See Also\n --------\n :class:`zipline.finance.execution.ExecutionStyle`\n :func:`zipline.api.order`\n :func:`zipline.api.order_percent`\n \"\"\"\n if not self._can_order_asset(asset):\n return None\n\n amount = self._calculate_order_value_amount(asset, value)\n return self.order(asset, amount,\n limit_price=limit_price,\n stop_price=stop_price,\n style=style)\n\n @property\n def recorded_vars(self):\n return copy(self._recorded_vars)\n\n def _sync_last_sale_prices(self, dt=None):\n \"\"\"Sync the last sale prices on the metrics tracker to a given\n datetime.\n\n Parameters\n ----------\n dt : datetime\n The time to sync the prices to.\n\n Notes\n -----\n This call is cached by the datetime. 
Repeated calls in the same bar\n are cheap.\n \"\"\"\n if dt is None:\n dt = self.datetime\n\n if dt != self._last_sync_time:\n self.metrics_tracker.sync_last_sale_prices(\n dt,\n self.data_portal,\n )\n self._last_sync_time = dt\n\n @property\n def portfolio(self):\n self._sync_last_sale_prices()\n return self.metrics_tracker.portfolio\n\n @property\n def account(self):\n self._sync_last_sale_prices()\n return self.metrics_tracker.account\n\n def set_logger(self, logger):\n self.logger = logger\n\n def on_dt_changed(self, dt):\n \"\"\"\n Callback triggered by the simulation loop whenever the current dt\n changes.\n\n Any logic that should happen exactly once at the start of each datetime\n group should happen here.\n \"\"\"\n self.datetime = dt\n self.blotter.set_date(dt)\n\n @api_method\n @preprocess(tz=coerce_string(pytz.timezone))\n @expect_types(tz=optional(tzinfo))\n def get_datetime(self, tz=None):\n \"\"\"\n Returns the current simulation datetime.\n\n Parameters\n ----------\n tz : tzinfo or str, optional\n The timezone to return the datetime in. This defaults to utc.\n\n Returns\n -------\n dt : datetime\n The current simulation datetime converted to ``tz``.\n \"\"\"\n dt = self.datetime\n assert dt.tzinfo == pytz.utc, \"Algorithm should have a utc datetime\"\n if tz is not None:\n dt = dt.astimezone(tz)\n return dt\n\n @api_method\n def set_slippage(self, us_equities=None, us_futures=None):\n \"\"\"Set the slippage models for the simulation.\n\n Parameters\n ----------\n us_equities : EquitySlippageModel\n The slippage model to use for trading US equities.\n us_futures : FutureSlippageModel\n The slippage model to use for trading US futures.\n\n See Also\n --------\n :class:`zipline.finance.slippage.SlippageModel`\n \"\"\"\n if self.initialized:\n raise SetSlippagePostInit()\n\n if us_equities is not None:\n if Equity not in us_equities.allowed_asset_types:\n raise IncompatibleSlippageModel(\n asset_type='equities',\n given_model=us_equities,\n supported_asset_types=us_equities.allowed_asset_types,\n )\n self.blotter.slippage_models[Equity] = us_equities\n\n if us_futures is not None:\n if Future not in us_futures.allowed_asset_types:\n raise IncompatibleSlippageModel(\n asset_type='futures',\n given_model=us_futures,\n supported_asset_types=us_futures.allowed_asset_types,\n )\n self.blotter.slippage_models[Future] = us_futures\n\n @api_method\n def set_commission(self, us_equities=None, us_futures=None):\n \"\"\"Sets the commission models for the simulation.\n\n Parameters\n ----------\n us_equities : EquityCommissionModel\n The commission model to use for trading US equities.\n us_futures : FutureCommissionModel\n The commission model to use for trading US futures.\n\n See Also\n --------\n :class:`zipline.finance.commission.PerShare`\n :class:`zipline.finance.commission.PerTrade`\n :class:`zipline.finance.commission.PerDollar`\n \"\"\"\n if self.initialized:\n raise SetCommissionPostInit()\n\n if us_equities is not None:\n if Equity not in us_equities.allowed_asset_types:\n raise IncompatibleCommissionModel(\n asset_type='equities',\n given_model=us_equities,\n supported_asset_types=us_equities.allowed_asset_types,\n )\n self.blotter.commission_models[Equity] = us_equities\n\n if us_futures is not None:\n if Future not in us_futures.allowed_asset_types:\n raise IncompatibleCommissionModel(\n asset_type='futures',\n given_model=us_futures,\n supported_asset_types=us_futures.allowed_asset_types,\n )\n self.blotter.commission_models[Future] = us_futures\n\n @api_method\n def 
set_cancel_policy(self, cancel_policy):\n \"\"\"Sets the order cancellation policy for the simulation.\n\n Parameters\n ----------\n cancel_policy : CancelPolicy\n The cancellation policy to use.\n\n See Also\n --------\n :class:`zipline.api.EODCancel`\n :class:`zipline.api.NeverCancel`\n \"\"\"\n if not isinstance(cancel_policy, CancelPolicy):\n raise UnsupportedCancelPolicy()\n\n if self.initialized:\n raise SetCancelPolicyPostInit()\n\n self.blotter.cancel_policy = cancel_policy\n\n @api_method\n def set_symbol_lookup_date(self, dt):\n \"\"\"Set the date for which symbols will be resolved to their assets\n (symbols may map to different firms or underlying assets at\n different times)\n\n Parameters\n ----------\n dt : datetime\n The new symbol lookup date.\n \"\"\"\n try:\n self._symbol_lookup_date = pd.Timestamp(dt, tz='UTC')\n except ValueError:\n raise UnsupportedDatetimeFormat(input=dt,\n method='set_symbol_lookup_date')\n\n # Remain backwards compatibility\n @property\n def data_frequency(self):\n return self.sim_params.data_frequency\n\n @data_frequency.setter\n def data_frequency(self, value):\n assert value in ('daily', 'minute')\n self.sim_params.data_frequency = value\n\n @api_method\n @disallowed_in_before_trading_start(OrderInBeforeTradingStart())\n def order_percent(self,\n asset,\n percent,\n limit_price=None,\n stop_price=None,\n style=None):\n \"\"\"Place an order in the specified asset corresponding to the given\n percent of the current portfolio value.\n\n Parameters\n ----------\n asset : Asset\n The asset that this order is for.\n percent : float\n The percentage of the portfolio value to allocate to ``asset``.\n This is specified as a decimal, for example: 0.50 means 50%.\n limit_price : float, optional\n The limit price for the order.\n stop_price : float, optional\n The stop price for the order.\n style : ExecutionStyle\n The execution style for the order.\n\n Returns\n -------\n order_id : str\n The unique identifier for this order.\n\n Notes\n -----\n See :func:`zipline.api.order` for more information about\n ``limit_price``, ``stop_price``, and ``style``\n\n See Also\n --------\n :class:`zipline.finance.execution.ExecutionStyle`\n :func:`zipline.api.order`\n :func:`zipline.api.order_value`\n \"\"\"\n if not self._can_order_asset(asset):\n return None\n\n amount = self._calculate_order_percent_amount(asset, percent)\n return self.order(asset, amount,\n limit_price=limit_price,\n stop_price=stop_price,\n style=style)\n\n def _calculate_order_percent_amount(self, asset, percent):\n value = self.portfolio.portfolio_value * percent\n return self._calculate_order_value_amount(asset, value)\n\n @api_method\n @disallowed_in_before_trading_start(OrderInBeforeTradingStart())\n def order_target(self,\n asset,\n target,\n limit_price=None,\n stop_price=None,\n style=None):\n \"\"\"Place an order to adjust a position to a target number of shares. If\n the position doesn't already exist, this is equivalent to placing a new\n order. 
If the position does exist, this is equivalent to placing an\n order for the difference between the target number of shares and the\n current number of shares.\n\n Parameters\n ----------\n asset : Asset\n The asset that this order is for.\n target : int\n The desired number of shares of ``asset``.\n limit_price : float, optional\n The limit price for the order.\n stop_price : float, optional\n The stop price for the order.\n style : ExecutionStyle\n The execution style for the order.\n\n Returns\n -------\n order_id : str\n The unique identifier for this order.\n\n\n Notes\n -----\n ``order_target`` does not take into account any open orders. For\n example:\n\n .. code-block:: python\n\n order_target(sid(0), 10)\n order_target(sid(0), 10)\n\n This code will result in 20 shares of ``sid(0)`` because the first\n call to ``order_target`` will not have been filled when the second\n ``order_target`` call is made.\n\n See :func:`zipline.api.order` for more information about\n ``limit_price``, ``stop_price``, and ``style``\n\n See Also\n --------\n :class:`zipline.finance.execution.ExecutionStyle`\n :func:`zipline.api.order`\n :func:`zipline.api.order_target_percent`\n :func:`zipline.api.order_target_value`\n \"\"\"\n if not self._can_order_asset(asset):\n return None\n\n amount = self._calculate_order_target_amount(asset, target)\n return self.order(asset, amount,\n limit_price=limit_price,\n stop_price=stop_price,\n style=style)\n\n def _calculate_order_target_amount(self, asset, target):\n if asset in self.portfolio.positions:\n current_position = self.portfolio.positions[asset].amount\n target -= current_position\n\n return target\n\n @api_method\n @disallowed_in_before_trading_start(OrderInBeforeTradingStart())\n def order_target_value(self,\n asset,\n target,\n limit_price=None,\n stop_price=None,\n style=None):\n \"\"\"Place an order to adjust a position to a target value. If\n the position doesn't already exist, this is equivalent to placing a new\n order. If the position does exist, this is equivalent to placing an\n order for the difference between the target value and the\n current value.\n If the Asset being ordered is a Future, the 'target value' calculated\n is actually the target exposure, as Futures have no 'value'.\n\n Parameters\n ----------\n asset : Asset\n The asset that this order is for.\n target : float\n The desired total value of ``asset``.\n limit_price : float, optional\n The limit price for the order.\n stop_price : float, optional\n The stop price for the order.\n style : ExecutionStyle\n The execution style for the order.\n\n Returns\n -------\n order_id : str\n The unique identifier for this order.\n\n Notes\n -----\n ``order_target_value`` does not take into account any open orders. For\n example:\n\n .. 
code-block:: python\n\n order_target_value(sid(0), 10)\n order_target_value(sid(0), 10)\n\n This code will result in 20 dollars of ``sid(0)`` because the first\n call to ``order_target_value`` will not have been filled when the\n second ``order_target_value`` call is made.\n\n See :func:`zipline.api.order` for more information about\n ``limit_price``, ``stop_price``, and ``style``\n\n See Also\n --------\n :class:`zipline.finance.execution.ExecutionStyle`\n :func:`zipline.api.order`\n :func:`zipline.api.order_target`\n :func:`zipline.api.order_target_percent`\n \"\"\"\n if not self._can_order_asset(asset):\n return None\n\n target_amount = self._calculate_order_value_amount(asset, target)\n amount = self._calculate_order_target_amount(asset, target_amount)\n return self.order(asset, amount,\n limit_price=limit_price,\n stop_price=stop_price,\n style=style)\n\n @api_method\n @disallowed_in_before_trading_start(OrderInBeforeTradingStart())\n def order_target_percent(self, asset, target,\n limit_price=None, stop_price=None, style=None):\n \"\"\"Place an order to adjust a position to a target percent of the\n current portfolio value. If the position doesn't already exist, this is\n equivalent to placing a new order. If the position does exist, this is\n equivalent to placing an order for the difference between the target\n percent and the current percent.\n\n Parameters\n ----------\n asset : Asset\n The asset that this order is for.\n target : float\n The desired percentage of the portfolio value to allocate to\n ``asset``. This is specified as a decimal, for example:\n 0.50 means 50%.\n limit_price : float, optional\n The limit price for the order.\n stop_price : float, optional\n The stop price for the order.\n style : ExecutionStyle\n The execution style for the order.\n\n Returns\n -------\n order_id : str\n The unique identifier for this order.\n\n Notes\n -----\n ``order_target_value`` does not take into account any open orders. For\n example:\n\n .. 
code-block:: python\n\n order_target_percent(sid(0), 10)\n order_target_percent(sid(0), 10)\n\n This code will result in 20% of the portfolio being allocated to sid(0)\n because the first call to ``order_target_percent`` will not have been\n filled when the second ``order_target_percent`` call is made.\n\n See :func:`zipline.api.order` for more information about\n ``limit_price``, ``stop_price``, and ``style``\n\n See Also\n --------\n :class:`zipline.finance.execution.ExecutionStyle`\n :func:`zipline.api.order`\n :func:`zipline.api.order_target`\n :func:`zipline.api.order_target_value`\n \"\"\"\n if not self._can_order_asset(asset):\n return None\n\n amount = self._calculate_order_target_percent_amount(asset, target)\n return self.order(asset, amount,\n limit_price=limit_price,\n stop_price=stop_price,\n style=style)\n\n def _calculate_order_target_percent_amount(self, asset, target):\n target_amount = self._calculate_order_percent_amount(asset, target)\n return self._calculate_order_target_amount(asset, target_amount)\n\n @api_method\n @expect_types(share_counts=pd.Series)\n @expect_dtypes(share_counts=int64_dtype)\n def batch_market_order(self, share_counts):\n \"\"\"Place a batch market order for multiple assets.\n\n Parameters\n ----------\n share_counts : pd.Series[Asset -> int]\n Map from asset to number of shares to order for that asset.\n\n Returns\n -------\n order_ids : pd.Index[str]\n Index of ids for newly-created orders.\n \"\"\"\n style = MarketOrder()\n order_args = [\n (asset, amount, style)\n for (asset, amount) in iteritems(share_counts)\n if amount\n ]\n return self.blotter.batch_order(order_args)\n\n @error_keywords(sid='Keyword argument `sid` is no longer supported for '\n 'get_open_orders. Use `asset` instead.')\n @api_method\n def get_open_orders(self, asset=None):\n \"\"\"Retrieve all of the current open orders.\n\n Parameters\n ----------\n asset : Asset\n If passed and not None, return only the open orders for the given\n asset instead of all open orders.\n\n Returns\n -------\n open_orders : dict[list[Order]] or list[Order]\n If no asset is passed this will return a dict mapping Assets\n to a list containing all the open orders for the asset.\n If an asset is passed then this will return a list of the open\n orders for this asset.\n \"\"\"\n if asset is None:\n return {\n key: [order.to_api_obj() for order in orders]\n for key, orders in iteritems(self.blotter.open_orders)\n if orders\n }\n if asset in self.blotter.open_orders:\n orders = self.blotter.open_orders[asset]\n return [order.to_api_obj() for order in orders]\n return []\n\n @api_method\n def get_order(self, order_id):\n \"\"\"Lookup an order based on the order id returned from one of the\n order functions.\n\n Parameters\n ----------\n order_id : str\n The unique identifier for the order.\n\n Returns\n -------\n order : Order\n The order object.\n \"\"\"\n if order_id in self.blotter.orders:\n return self.blotter.orders[order_id].to_api_obj()\n\n @api_method\n def cancel_order(self, order_param):\n \"\"\"Cancel an open order.\n\n Parameters\n ----------\n order_param : str or Order\n The order_id or order object to cancel.\n \"\"\"\n order_id = order_param\n if isinstance(order_param, zipline.protocol.Order):\n order_id = order_param.id\n\n self.blotter.cancel(order_id)\n\n @api_method\n @require_initialized(HistoryInInitialize())\n def history(self, bar_count, frequency, field, ffill=True):\n \"\"\"DEPRECATED: use ``data.history`` instead.\n \"\"\"\n warnings.warn(\n \"The `history` method is 
deprecated. Use `data.history` instead.\",\n category=ZiplineDeprecationWarning,\n stacklevel=4\n )\n\n return self.get_history_window(\n bar_count,\n frequency,\n self._calculate_universe(),\n field,\n ffill\n )\n\n def get_history_window(self, bar_count, frequency, assets, field, ffill):\n if not self._in_before_trading_start:\n return self.data_portal.get_history_window(\n assets,\n self.datetime,\n bar_count,\n frequency,\n field,\n self.data_frequency,\n ffill,\n )\n else:\n # If we are in before_trading_start, we need to get the window\n # as of the previous market minute\n adjusted_dt = \\\n self.trading_calendar.previous_minute(\n self.datetime\n )\n\n window = self.data_portal.get_history_window(\n assets,\n adjusted_dt,\n bar_count,\n frequency,\n field,\n self.data_frequency,\n ffill,\n )\n\n # Get the adjustments between the last market minute and the\n # current before_trading_start dt and apply to the window\n adjs = self.data_portal.get_adjustments(\n assets,\n field,\n adjusted_dt,\n self.datetime\n )\n window = window * adjs\n\n return window\n\n ####################\n # Account Controls #\n ####################\n\n def register_account_control(self, control):\n \"\"\"\n Register a new AccountControl to be checked on each bar.\n \"\"\"\n if self.initialized:\n raise RegisterAccountControlPostInit()\n self.account_controls.append(control)\n\n def validate_account_controls(self):\n for control in self.account_controls:\n control.validate(self.portfolio,\n self.account,\n self.get_datetime(),\n self.trading_client.current_data)\n\n @api_method\n def set_max_leverage(self, max_leverage):\n \"\"\"Set a limit on the maximum leverage of the algorithm.\n\n Parameters\n ----------\n max_leverage : float\n The maximum leverage for the algorithm. If not provided there will\n be no maximum.\n \"\"\"\n control = MaxLeverage(max_leverage)\n self.register_account_control(control)\n\n @api_method\n def set_min_leverage(self, min_leverage, grace_period):\n \"\"\"Set a limit on the minimum leverage of the algorithm.\n\n Parameters\n ----------\n min_leverage : float\n The minimum leverage for the algorithm.\n grace_period : pd.Timedelta\n The offset from the start date used to enforce a minimum leverage.\n \"\"\"\n deadline = self.sim_params.start_session + grace_period\n control = MinLeverage(min_leverage, deadline)\n self.register_account_control(control)\n\n ####################\n # Trading Controls #\n ####################\n\n def register_trading_control(self, control):\n \"\"\"\n Register a new TradingControl to be checked prior to order calls.\n \"\"\"\n if self.initialized:\n raise RegisterTradingControlPostInit()\n self.trading_controls.append(control)\n\n @api_method\n def set_max_position_size(self,\n asset=None,\n max_shares=None,\n max_notional=None,\n on_error='fail'):\n \"\"\"Set a limit on the number of shares and/or dollar value held for the\n given sid. Limits are treated as absolute values and are enforced at\n the time that the algo attempts to place an order for sid. 
This means\n that it's possible to end up with more than the max number of shares\n due to splits/dividends, and more than the max notional due to price\n improvement.\n\n If an algorithm attempts to place an order that would result in\n increasing the absolute value of shares/dollar value exceeding one of\n these limits, raise a TradingControlException.\n\n Parameters\n ----------\n asset : Asset, optional\n If provided, this sets the guard only on positions in the given\n asset.\n max_shares : int, optional\n The maximum number of shares to hold for an asset.\n max_notional : float, optional\n The maximum value to hold for an asset.\n \"\"\"\n control = MaxPositionSize(asset=asset,\n max_shares=max_shares,\n max_notional=max_notional,\n on_error=on_error)\n self.register_trading_control(control)\n\n @api_method\n def set_max_order_size(self,\n asset=None,\n max_shares=None,\n max_notional=None,\n on_error='fail'):\n \"\"\"Set a limit on the number of shares and/or dollar value of any single\n order placed for sid. Limits are treated as absolute values and are\n enforced at the time that the algo attempts to place an order for sid.\n\n If an algorithm attempts to place an order that would result in\n exceeding one of these limits, raise a TradingControlException.\n\n Parameters\n ----------\n asset : Asset, optional\n If provided, this sets the guard only on positions in the given\n asset.\n max_shares : int, optional\n The maximum number of shares that can be ordered at one time.\n max_notional : float, optional\n The maximum value that can be ordered at one time.\n \"\"\"\n control = MaxOrderSize(asset=asset,\n max_shares=max_shares,\n max_notional=max_notional,\n on_error=on_error)\n self.register_trading_control(control)\n\n @api_method\n def set_max_order_count(self, max_count, on_error='fail'):\n \"\"\"Set a limit on the number of orders that can be placed in a single\n day.\n\n Parameters\n ----------\n max_count : int\n The maximum number of orders that can be placed on any single day.\n \"\"\"\n control = MaxOrderCount(on_error, max_count)\n self.register_trading_control(control)\n\n @api_method\n def set_do_not_order_list(self, restricted_list, on_error='fail'):\n \"\"\"Set a restriction on which assets can be ordered.\n\n Parameters\n ----------\n restricted_list : container[Asset], SecurityList\n The assets that cannot be ordered.\n \"\"\"\n if isinstance(restricted_list, SecurityList):\n warnings.warn(\n \"`set_do_not_order_list(security_lists.leveraged_etf_list)` \"\n \"is deprecated. Use `set_asset_restrictions(\"\n \"security_lists.restrict_leveraged_etfs)` instead.\",\n category=ZiplineDeprecationWarning,\n stacklevel=2\n )\n restrictions = SecurityListRestrictions(restricted_list)\n else:\n warnings.warn(\n \"`set_do_not_order_list(container_of_assets)` is deprecated. 
\"\n \"Create a zipline.finance.asset_restrictions.\"\n \"StaticRestrictions object with a container of assets and use \"\n \"`set_asset_restrictions(StaticRestrictions(\"\n \"container_of_assets))` instead.\",\n category=ZiplineDeprecationWarning,\n stacklevel=2\n )\n restrictions = StaticRestrictions(restricted_list)\n\n self.set_asset_restrictions(restrictions, on_error)\n\n @api_method\n @expect_types(\n restrictions=Restrictions,\n on_error=str,\n )\n def set_asset_restrictions(self, restrictions, on_error='fail'):\n \"\"\"Set a restriction on which assets can be ordered.\n\n Parameters\n ----------\n restricted_list : Restrictions\n An object providing information about restricted assets.\n\n See Also\n --------\n zipline.finance.asset_restrictions.Restrictions\n \"\"\"\n control = RestrictedListOrder(on_error, restrictions)\n self.register_trading_control(control)\n self.restrictions |= restrictions\n\n @api_method\n def set_long_only(self, on_error='fail'):\n \"\"\"Set a rule specifying that this algorithm cannot take short\n positions.\n \"\"\"\n self.register_trading_control(LongOnly(on_error))\n\n ##############\n # Pipeline API\n ##############\n @api_method\n @require_not_initialized(AttachPipelineAfterInitialize())\n @expect_types(\n pipeline=Pipeline,\n name=string_types,\n chunks=(int, Iterable, type(None)),\n )\n def attach_pipeline(self, pipeline, name, chunks=None):\n \"\"\"Register a pipeline to be computed at the start of each day.\n\n Parameters\n ----------\n pipeline : Pipeline\n The pipeline to have computed.\n name : str\n The name of the pipeline.\n chunks : int or iterator, optional\n The number of days to compute pipeline results for. Increasing\n this number will make it longer to get the first results but\n may improve the total runtime of the simulation. 
If an iterator\n is passed, we will run in chunks based on values of the itereator.\n\n Returns\n -------\n pipeline : Pipeline\n Returns the pipeline that was attached unchanged.\n\n See Also\n --------\n :func:`zipline.api.pipeline_output`\n \"\"\"\n if chunks is None:\n # Make the first chunk smaller to get more immediate results:\n # (one week, then every half year)\n chunks = chain([5], repeat(126))\n elif isinstance(chunks, int):\n chunks = repeat(chunks)\n\n if name in self._pipelines:\n raise DuplicatePipelineName(name=name)\n\n self._pipelines[name] = pipeline, iter(chunks)\n\n # Return the pipeline to allow expressions like\n # p = attach_pipeline(Pipeline(), 'name')\n return pipeline\n\n @api_method\n @require_initialized(PipelineOutputDuringInitialize())\n def pipeline_output(self, name):\n \"\"\"Get the results of the pipeline that was attached with the name:\n ``name``.\n\n Parameters\n ----------\n name : str\n Name of the pipeline for which results are requested.\n\n Returns\n -------\n results : pd.DataFrame\n DataFrame containing the results of the requested pipeline for\n the current simulation date.\n\n Raises\n ------\n NoSuchPipeline\n Raised when no pipeline with the name `name` has been registered.\n\n See Also\n --------\n :func:`zipline.api.attach_pipeline`\n :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`\n \"\"\"\n try:\n p, chunks = self._pipelines[name]\n except KeyError:\n raise NoSuchPipeline(\n name=name,\n valid=list(self._pipelines.keys()),\n )\n return self._pipeline_output(p, chunks, name)\n\n def _pipeline_output(self, pipeline, chunks, name):\n \"\"\"\n Internal implementation of `pipeline_output`.\n \"\"\"\n today = normalize_date(self.get_datetime())\n try:\n data = self._pipeline_cache.get(name, today)\n except KeyError:\n # Calculate the next block.\n data, valid_until = self._run_pipeline(\n pipeline, today, next(chunks),\n )\n self._pipeline_cache.set(name, data, valid_until)\n\n # Now that we have a cached result, try to return the data for today.\n try:\n return data.loc[today]\n except KeyError:\n # This happens if no assets passed the pipeline screen on a given\n # day.\n return pd.DataFrame(index=[], columns=data.columns)\n\n def _run_pipeline(self, pipeline, start_session, chunksize):\n \"\"\"\n Compute `pipeline`, providing values for at least `start_date`.\n\n Produces a DataFrame containing data for days between `start_date` and\n `end_date`, where `end_date` is defined by:\n\n `end_date = min(start_date + chunksize trading days,\n simulation_end)`\n\n Returns\n -------\n (data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)\n\n See Also\n --------\n PipelineEngine.run_pipeline\n \"\"\"\n sessions = self.trading_calendar.all_sessions\n\n # Load data starting from the previous trading day...\n start_date_loc = sessions.get_loc(start_session)\n\n # ...continuing until either the day before the simulation end, or\n # until chunksize days of data have been loaded.\n sim_end_session = self.sim_params.end_session\n\n end_loc = min(\n start_date_loc + chunksize,\n sessions.get_loc(sim_end_session)\n )\n\n end_session = sessions[end_loc]\n\n return \\\n self.engine.run_pipeline(pipeline, start_session, end_session), \\\n end_session\n\n ##################\n # End Pipeline API\n ##################\n\n @classmethod\n def all_api_methods(cls):\n \"\"\"\n Return a list of all the TradingAlgorithm API methods.\n \"\"\"\n return [\n fn for fn in itervalues(vars(cls))\n if getattr(fn, 'is_api_method', False)\n ]\n"
] |
[
[
"numpy.isnan",
"pandas.DatetimeIndex",
"pandas.DataFrame",
"numpy.all",
"pandas._libs.tslib.normalize_date",
"pandas.Timestamp"
]
] |
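
A minimal sketch of two call patterns catalogued in the apis cell above for zipline/algorithm.py (pandas.Timestamp and numpy.isnan), mirroring how they appear in the source; the helper names coerce_lookup_date and shares_for_value are hypothetical stand-ins, not part of the dataset row:

import numpy as np
import pandas as pd

def coerce_lookup_date(dt):
    # Mirrors set_symbol_lookup_date above: coerce the input to a UTC
    # Timestamp, surfacing a clear error for unparseable values.
    try:
        return pd.Timestamp(dt, tz='UTC')
    except ValueError:
        raise ValueError('unsupported datetime format: {!r}'.format(dt))

def shares_for_value(value, last_price, multiplier=1):
    # Mirrors _calculate_order_value_amount above: an order cannot be sized
    # without a last price, and a zero price yields an order of 0 shares.
    if np.isnan(last_price):
        raise ValueError('no last price for the asset')
    if last_price == 0:
        return 0
    return value / (last_price * multiplier)

print(coerce_lookup_date('2016-01-04'))  # 2016-01-04 00:00:00+00:00
print(shares_for_value(10000.0, 25.0))   # 400.0
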
ver0z/detect-waste
|
[
"7dbe029022d71e3f3643fc76d302fef684390d37"
] |
[
"efficientdet/train.py"
] |
[
"#!/usr/bin/env python\n\"\"\" EfficientDet Training Script\n\nThis script was started from an early version of the PyTorch ImageNet example\n(https://github.com/pytorch/examples/tree/master/imagenet)\n\nNVIDIA CUDA specific speedups adopted from NVIDIA Apex examples\n(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)\n\nHacked together by Ross Wightman (https://github.com/rwightman)\n\"\"\"\nimport os\nimport argparse\nimport time\nimport yaml\nimport logging\nfrom collections import OrderedDict\nfrom contextlib import suppress\nfrom datetime import datetime\n\nimport torch\nimport torchvision.utils\nfrom torch.nn.parallel import DistributedDataParallel as NativeDDP\ntry:\n from apex import amp\n from apex.parallel import DistributedDataParallel as ApexDDP\n from apex.parallel import convert_syncbn_model\n has_apex = True\nexcept ImportError:\n has_apex = False\n\nhas_native_amp = False\ntry:\n if getattr(torch.cuda.amp, 'autocast') is not None:\n has_native_amp = True\nexcept AttributeError:\n pass\n\nfrom effdet import create_model, unwrap_bench, create_loader, create_dataset, create_evaluator\nfrom effdet.data import resolve_input_config, SkipSubset\nfrom effdet.anchors import Anchors, AnchorLabeler\nfrom timm.models import resume_checkpoint, load_checkpoint\nfrom timm.utils import *\nfrom timm.optim import create_optimizer\nfrom timm.scheduler import create_scheduler\n\ntorch.backends.cudnn.benchmark = True\n\n\n# The first arg parser parses out only the --config argument, this argument is used to\n# load a yaml file containing key-values that override the defaults for the main parser below\nconfig_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)\nparser.add_argument('-c', '--config', default='', type=str, metavar='FILE',\n help='YAML config file specifying default arguments')\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\n# Dataset / Model parameters\nparser.add_argument('root', metavar='DIR',\n default='/dih4/dih4_2/wimlds/data/all_detect_images',\n help='path to dataset')\nparser.add_argument('--ann_name', type=str,\n default='../annotations/binary_mixed_',\n help='path to annotation file (without train or test subset)')\nparser.add_argument('--dataset', default='coco', type=str, metavar='DATASET',\n help='Name of model to train (default: \"coco\"')\nparser.add_argument('--model', default='tf_efficientdet_d2', type=str, metavar='MODEL',\n help='Name of model to train (default: \"tf_efficientdet_d2\"')\nadd_bool_arg(parser, 'redundant-bias', default=None, help='override model config for redundant bias')\nparser.set_defaults(redundant_bias=None)\nparser.add_argument('--val-skip', type=int, default=0, metavar='N',\n help='Skip every N validation samples.')\nparser.add_argument('--num-classes', type=int, default=None, metavar='N',\n help='Override num_classes in model config if set. 
For fine-tuning from pretrained.')\nparser.add_argument('--pretrained', action='store_true', default=False,\n help='Start with pretrained version of specified network (if avail)')\nparser.add_argument('--no-pretrained-backbone', action='store_true', default=False,\n help='Do not start with pretrained backbone weights, fully random.')\nparser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',\n help='Initialize model from this checkpoint (default: none)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='Resume full model and optimizer state from checkpoint (default: none)')\nparser.add_argument('--no-resume-opt', action='store_true', default=False,\n help='prevent resume of optimizer state when resuming model')\nparser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override mean pixel value of dataset')\nparser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',\n help='Override std deviation of of dataset')\nparser.add_argument('--interpolation', default='', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\nparser.add_argument('--fill-color', default=None, type=str, metavar='NAME',\n help='Image augmentation fill (background) color (\"mean\" or int)')\nparser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\nparser.add_argument('--clip-grad', type=float, default=10.0, metavar='NORM',\n help='Clip gradient norm (default: 10.0)')\n\n# Optimizer parameters\nparser.add_argument('--opt', default='momentum', type=str, metavar='OPTIMIZER',\n help='Optimizer (default: \"momentum\"')\nparser.add_argument('--opt-eps', default=1e-3, type=float, metavar='EPSILON',\n help='Optimizer Epsilon (default: 1e-3)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\nparser.add_argument('--weight-decay', type=float, default=4e-5,\n help='weight decay (default: 0.00004)')\n\n# Learning rate schedule parameters\nparser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',\n help='LR scheduler (default: \"step\"')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',\n help='learning rate noise on/off epoch percentages')\nparser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',\n help='learning rate noise limit percent (default: 0.67)')\nparser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',\n help='learning rate noise std-dev (default: 1.0)')\nparser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',\n help='learning rate cycle len multiplier (default: 1.0)')\nparser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',\n help='learning rate cycle limit')\nparser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',\n help='warmup learning rate (default: 0.0001)')\nparser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',\n help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')\nparser.add_argument('--epochs', type=int, default=300, metavar='N',\n help='number of epochs to train (default: 2)')\nparser.add_argument('--start-epoch', default=None, type=int, metavar='N',\n help='manual epoch number (useful on 
restarts)')\nparser.add_argument('--decay-epochs', type=float, default=30, metavar='N',\n help='epoch interval to decay LR')\nparser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',\n help='epochs to warmup LR, if scheduler supports')\nparser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',\n help='epochs to cooldown LR at min_lr, after cyclic schedule ends')\nparser.add_argument('--patience-epochs', type=int, default=10, metavar='N',\n help='patience epochs for Plateau LR scheduler (default: 10')\nparser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',\n help='LR decay rate (default: 0.1)')\n\n# Augmentation parameters\nparser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',\n help='Color jitter factor (default: 0.4)')\nparser.add_argument('--aa', type=str, default=None, metavar='NAME',\n help='Use AutoAugment policy. \"v0\" or \"original\". (default: None)'),\nparser.add_argument('--reprob', type=float, default=0., metavar='PCT',\n help='Random erase prob (default: 0.)')\nparser.add_argument('--remode', type=str, default='pixel',\n help='Random erase mode (default: \"pixel\")')\nparser.add_argument('--recount', type=int, default=1,\n help='Random erase count (default: 1)')\nparser.add_argument('--train-interpolation', type=str, default='random',\n help='Training interpolation (random, bilinear, bicubic default: \"random\")')\n\n# loss\nparser.add_argument('--smoothing', type=float, default=None, help='override model config label smoothing')\nadd_bool_arg(parser, 'jit-loss', default=None, help='override model config for torchscript jit loss fn')\nadd_bool_arg(parser, 'new-focal', default=None, help='override model config to use legacy focal loss')\n\n# Model Exponential Moving Average\nparser.add_argument('--model-ema', action='store_true', default=False,\n help='Enable tracking moving average of model weights')\nparser.add_argument('--model-ema-decay', type=float, default=0.9998,\n help='decay factor for model weights moving average (default: 0.9998)')\n\n# Misc\nparser.add_argument('--sync-bn', action='store_true',\n help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')\nparser.add_argument('--dist-bn', type=str, default='',\n help='Distribute BatchNorm stats between nodes after each epoch (\"broadcast\", \"reduce\", or \"\")')\nparser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\nparser.add_argument('--log-interval', type=int, default=50, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--recovery-interval', type=int, default=0, metavar='N',\n help='how many batches to wait before writing recovery checkpoint')\nparser.add_argument('-j', '--workers', type=int, default=4, metavar='N',\n help='how many training processes to use (default: 1)')\nparser.add_argument('--save-images', action='store_true', default=False,\n help='save images of input bathes every log interval for debugging')\nparser.add_argument('--amp', action='store_true', default=False,\n help='use NVIDIA Apex AMP or Native AMP for mixed precision training')\nparser.add_argument('--apex-amp', action='store_true', default=False,\n help='Use NVIDIA Apex AMP mixed precision')\nparser.add_argument('--native-amp', action='store_true', default=False,\n help='Use Native Torch AMP mixed precision')\nparser.add_argument('--channels-last', action='store_true', default=False,\n help='Use channels_last memory 
layout')\nparser.add_argument('--pin-mem', action='store_true', default=False,\n help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\nparser.add_argument('--no-prefetcher', action='store_true', default=False,\n help='disable fast prefetcher')\nadd_bool_arg(parser, 'bench-labeler', default=False,\n help='label targets in model bench, increases GPU load at expense of loader processes')\nparser.add_argument('--output', default='', type=str, metavar='PATH',\n help='path to output folder (default: none, current dir)')\nparser.add_argument('--device', default='cuda:7', type=str,\n help='device to train (default: cuda:7)')\nparser.add_argument('--eval-metric', default='map', type=str, metavar='EVAL_METRIC',\n help='Best metric (default: \"map\"')\nparser.add_argument('--tta', type=int, default=0, metavar='N',\n help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')\nparser.add_argument(\"--local_rank\", default=0, type=int)\n# Neptune settings\nparser.add_argument('--neptune', action='store_true', default=False,\n help='Launch experiment on neptune (if avail)')\n\n\ndef _parse_args():\n # Do we have a config file to parse?\n args_config, remaining = config_parser.parse_known_args()\n if args_config.config:\n with open(args_config.config, 'r') as f:\n cfg = yaml.safe_load(f)\n parser.set_defaults(**cfg)\n\n # The main arg parser parses the rest of the args, the usual\n # defaults will have been overridden if config file specified.\n args = parser.parse_args(remaining)\n\n # Cache the args as a text string to save them in the output dir later\n args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)\n return args, args_text\n\n\ndef main():\n setup_default_logging()\n\n args, args_text = _parse_args()\n\n if args.neptune:\n import neptune\n # your NEPTUNE_API_TOKEN should be add to ~./bashrc to run this file\n neptune.init(project_qualified_name='detectwaste/efficientdet')\n neptune.create_experiment(name=args.model)\n else:\n neptune = None\n\n args.pretrained_backbone = not args.no_pretrained_backbone\n args.prefetcher = not args.no_prefetcher\n args.distributed = False\n if 'WORLD_SIZE' in os.environ:\n args.distributed = int(os.environ['WORLD_SIZE']) > 1\n args.world_size = 4\n args.rank = 4 # global rank\n args.GPUs = [4, 5, 6, 7]\n torch.cuda.empty_cache()\n torch.cuda.set_device(args.device)\n if args.distributed:\n args.device = 'cuda:%d' % args.GPUs[args.local_rank] \n torch.cuda.set_device(args.device)\n print('Using CUDA:', args.device )\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n #args.world_size = torch.distributed.get_world_size()\n #args.rank = torch.distributed.get_rank()\n assert args.rank >= 0\n\n if args.distributed:\n logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'\n % (args.rank, args.world_size))\n else:\n logging.info('Training with a single process on 1 GPU.')\n\n use_amp = None\n if args.amp:\n # for backwards compat, `--amp` arg tries apex before native amp\n if has_apex:\n args.apex_amp = True\n elif has_native_amp:\n args.native_amp = True\n else:\n logging.warning(\"Neither APEX or native Torch AMP is available, using float32. \"\n \"Install NVIDA apex or upgrade to PyTorch 1.6.\")\n\n if args.apex_amp:\n if has_apex:\n use_amp = 'apex'\n else:\n logging.warning(\"APEX AMP not available, using float32. 
Install NVIDA apex\")\n elif args.native_amp:\n if has_native_amp:\n use_amp = 'native'\n else:\n logging.warning(\"Native AMP not available, using float32. Upgrade to PyTorch 1.6.\")\n\n torch.manual_seed(args.seed + args.rank)\n\n model = create_model(\n args.model,\n bench_task='train',\n num_classes=args.num_classes,\n pretrained=args.pretrained,\n pretrained_backbone=args.pretrained_backbone,\n redundant_bias=args.redundant_bias,\n label_smoothing=args.smoothing,\n new_focal=args.new_focal,\n jit_loss=args.jit_loss,\n bench_labeler=args.bench_labeler,\n checkpoint_path=args.initial_checkpoint,\n )\n model_config = model.config # grab before we obscure with DP/DDP wrappers\n\n if args.local_rank == 0:\n logging.info('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()])))\n\n model.to(args.device)\n if args.channels_last:\n model = model.to(memory_format=torch.channels_last)\n\n optimizer = create_optimizer(args, model)\n\n amp_autocast = suppress # do nothing\n loss_scaler = None\n if use_amp == 'apex':\n model, optimizer = amp.initialize(model, optimizer, opt_level='O1')\n loss_scaler = ApexScaler()\n if args.local_rank == 0:\n logging.info('Using NVIDIA APEX AMP. Training in mixed precision.')\n elif use_amp == 'native':\n amp_autocast = torch.cuda.amp.autocast.to(args.device)\n loss_scaler = NativeScaler()\n if args.local_rank == 0:\n logging.info('Using native Torch AMP. Training in mixed precision.')\n else:\n if args.local_rank == 0:\n logging.info('AMP not enabled. Training in float32.')\n\n # optionally resume from a checkpoint\n resume_epoch = None\n if args.resume:\n resume_epoch = resume_checkpoint(\n unwrap_bench(model), args.resume,\n optimizer=None if args.no_resume_opt else optimizer,\n loss_scaler=None if args.no_resume_opt else loss_scaler,\n log_info=args.local_rank == 0)\n\n model_ema = None\n if args.model_ema:\n # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper\n model_ema = ModelEma(model, decay=args.model_ema_decay)\n if args.resume:\n # FIXME bit of a mess with bench, cannot use the load in ModelEma\n load_checkpoint(unwrap_bench(model_ema), args.resume, use_ema=True)\n\n if args.distributed:\n if args.sync_bn:\n try:\n if has_apex and use_amp != 'native':\n model = convert_syncbn_model(model)\n else:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n if args.local_rank == 0:\n logging.info(\n 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '\n 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')\n except Exception as e:\n logging.error('Failed to enable Synchronized BatchNorm. 
Install Apex or Torch >= 1.1')\n if has_apex and use_amp != 'native':\n if args.local_rank == 0:\n logging.info(\"Using apex DistributedDataParallel.\")\n model = ApexDDP(model, delay_allreduce=True)\n else:\n if args.local_rank == 0:\n logging.info(\"Using torch DistributedDataParallel.\")\n model = NativeDDP(model, device_ids=[args.device])\n # NOTE: EMA model does not need to be wrapped by DDP\n\n lr_scheduler, num_epochs = create_scheduler(args, optimizer)\n start_epoch = 0\n if args.start_epoch is not None:\n # a specified start_epoch will always override the resume epoch\n start_epoch = args.start_epoch\n elif resume_epoch is not None:\n start_epoch = resume_epoch\n if lr_scheduler is not None and start_epoch > 0:\n lr_scheduler.step(start_epoch)\n\n if args.local_rank == 0:\n logging.info('Scheduled epochs: {}'.format(num_epochs))\n\n loader_train, loader_eval, evaluator = create_datasets_and_loaders(\n args,\n model_config,\n neptune)\n\n if model_config.num_classes < loader_train.dataset.parser.max_label:\n logging.error(\n f'Model {model_config.num_classes} has fewer classes than dataset {loader_train.dataset.parser.max_label}.')\n exit(1)\n if model_config.num_classes > loader_train.dataset.parser.max_label:\n logging.warning(\n f'Model {model_config.num_classes} has more classes than dataset {loader_train.dataset.parser.max_label}.')\n\n eval_metric = args.eval_metric\n best_metric = None\n best_epoch = None\n saver = None\n output_dir = ''\n if args.local_rank == 0:\n output_base = args.output if args.output else './output'\n exp_name = '-'.join([\n datetime.now().strftime(\"%Y%m%d-%H%M%S\"),\n args.model\n ])\n output_dir = get_outdir(output_base, 'train', exp_name)\n decreasing = True if eval_metric == 'loss' else False\n saver = CheckpointSaver(\n model, optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,\n checkpoint_dir=output_dir, decreasing=decreasing, unwrap_fn=unwrap_bench)\n with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:\n f.write(args_text)\n\n # training loop\n try:\n for epoch in range(start_epoch, num_epochs):\n if args.distributed:\n loader_train.sampler.set_epoch(epoch)\n\n train_metrics = train_epoch(\n epoch, model, loader_train, optimizer, args,\n lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,\n amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema,\n neptune=neptune)\n\n if args.distributed and args.dist_bn in ('broadcast', 'reduce'):\n if args.local_rank == 0:\n logging.info(\"Distributing BatchNorm running means and vars\")\n distribute_bn(model, args.world_size, args.dist_bn == 'reduce')\n\n # the overhead of evaluating with coco style datasets is fairly high, so just ema or non, not both\n if model_ema is not None:\n if args.distributed and args.dist_bn in ('broadcast', 'reduce'):\n distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')\n\n eval_metrics = validate(model_ema.ema, loader_eval, args,\n evaluator, log_suffix=' (EMA)',\n neptune=neptune)\n else:\n eval_metrics = validate(model, loader_eval, args, evaluator,\n neptune=neptune)\n\n if args.neptune:\n neptune.log_metric('valid/mAP',eval_metrics[eval_metric])\n\n\n if lr_scheduler is not None:\n # step LR for next epoch\n lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])\n\n if saver is not None:\n update_summary(\n epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),\n write_header=best_metric is None)\n\n # save proper checkpoint with eval metric\n best_metric, best_epoch = 
saver.save_checkpoint(epoch=epoch, metric=eval_metrics[eval_metric])\n\n except KeyboardInterrupt:\n pass\n if best_metric is not None:\n logging.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))\n\n\ndef create_datasets_and_loaders(args, model_config, neptune=None):\n input_config = resolve_input_config(args, model_config=model_config)\n\n dataset_train, dataset_eval = create_dataset(args.dataset, args.root, args.ann_name)\n\n # setup labeler in loader/collate_fn if not enabled in the model bench\n labeler = None\n if not args.bench_labeler:\n labeler = AnchorLabeler(\n Anchors.from_config(model_config), model_config.num_classes, match_threshold=0.5)\n\n loader_train = create_loader(\n dataset_train,\n input_size=input_config['input_size'],\n batch_size=args.batch_size,\n is_training=True,\n use_prefetcher=args.prefetcher,\n re_prob=args.reprob,\n re_mode=args.remode,\n re_count=args.recount,\n # color_jitter=args.color_jitter,\n # auto_augment=args.aa,\n interpolation=args.train_interpolation or input_config['interpolation'],\n fill_color=input_config['fill_color'],\n mean=input_config['mean'],\n std=input_config['std'],\n num_workers=args.workers,\n distributed=args.distributed,\n pin_mem=args.pin_mem,\n anchor_labeler=labeler,\n )\n\n if args.val_skip > 1:\n dataset_eval = SkipSubset(dataset_eval, args.val_skip)\n loader_eval = create_loader(\n dataset_eval,\n input_size=input_config['input_size'],\n batch_size=args.batch_size,\n is_training=False,\n use_prefetcher=args.prefetcher,\n interpolation=input_config['interpolation'],\n fill_color=input_config['fill_color'],\n mean=input_config['mean'],\n std=input_config['std'],\n num_workers=args.workers,\n distributed=args.distributed,\n pin_mem=args.pin_mem,\n anchor_labeler=labeler,\n )\n\n evaluator = create_evaluator(args.dataset, loader_eval.dataset, neptune,\n distributed=args.distributed, pred_yxyx=False)\n\n return loader_train, loader_eval, evaluator\n\n\ndef train_epoch(\n epoch, model, loader, optimizer, args,\n lr_scheduler=None, saver=None, output_dir='', \n amp_autocast=suppress, loss_scaler=None, model_ema=None,\n neptune=None):\n\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n losses_m = AverageMeter()\n\n model.train()\n\n end = time.time()\n last_idx = len(loader) - 1\n num_updates = epoch * len(loader)\n for batch_idx, (input, target) in enumerate(loader):\n last_batch = batch_idx == last_idx\n data_time_m.update(time.time() - end)\n\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n\n with amp_autocast():\n output = model(input, target)\n loss = output['loss']\n if args.neptune:\n neptune.log_metric('train/loss', loss.item())\n \n if not args.distributed:\n losses_m.update(loss.item(), input.size(0))\n\n optimizer.zero_grad()\n if loss_scaler is not None:\n loss_scaler(loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters())\n else:\n loss.backward()\n if args.clip_grad:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)\n optimizer.step()\n\n torch.cuda.synchronize()\n if model_ema is not None:\n model_ema.update(model)\n num_updates += 1\n\n batch_time_m.update(time.time() - end)\n if last_batch or batch_idx % args.log_interval == 0:\n lrl = [param_group['lr'] for param_group in optimizer.param_groups]\n lr = sum(lrl) / len(lrl)\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n losses_m.update(reduced_loss.item(), input.size(0))\n\n if args.local_rank == 0:\n 
logging.info(\n 'Train: {} [{:>4d}/{} ({:>3.0f}%)] '\n 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '\n 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '\n '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '\n 'LR: {lr:.3e} '\n 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(\n epoch,\n batch_idx, len(loader),\n 100. * batch_idx / last_idx,\n loss=losses_m,\n batch_time=batch_time_m,\n rate=input.size(0) * args.world_size / batch_time_m.val,\n rate_avg=input.size(0) * args.world_size / batch_time_m.avg,\n lr=lr,\n data_time=data_time_m))\n \n if args.save_images and output_dir:\n torchvision.utils.save_image(\n input,\n os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),\n padding=0,\n normalize=True)\n\n if saver is not None and args.recovery_interval and (\n last_batch or (batch_idx + 1) % args.recovery_interval == 0):\n saver.save_recovery(epoch, batch_idx=batch_idx)\n\n if lr_scheduler is not None:\n lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)\n\n end = time.time()\n # end for\n\n if hasattr(optimizer, 'sync_lookahead'):\n optimizer.sync_lookahead()\n\n return OrderedDict([('loss', losses_m.avg)])\n\n\ndef validate(model, loader, args, evaluator=None, log_suffix='',\n neptune=None):\n batch_time_m = AverageMeter()\n losses_m = AverageMeter()\n\n model.eval()\n\n end = time.time()\n last_idx = len(loader) - 1\n with torch.no_grad():\n for batch_idx, (input, target) in enumerate(loader):\n last_batch = batch_idx == last_idx\n\n output = model(input, target)\n loss = output['loss']\n if args.neptune:\n neptune.log_metric('valid/loss', loss.item())\n \n if evaluator is not None:\n evaluator.add_predictions(output['detections'], target)\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n else:\n reduced_loss = loss.data\n\n torch.cuda.synchronize()\n\n losses_m.update(reduced_loss.item(), input.size(0))\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):\n log_name = 'Test' + log_suffix\n logging.info(\n '{0}: [{1:>4d}/{2}] '\n 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '.format(\n log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m))\n\n metrics = OrderedDict([('loss', losses_m.avg)])\n if evaluator is not None:\n metrics['map'] = evaluator.evaluate()\n \n\n return metrics\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.cuda.amp.autocast.to",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel"
]
] |
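The training loop in the record above calls a reduce_tensor helper that is not included in the snippet. A minimal sketch of what such a helper typically does (an assumption for illustration, not necessarily this repository's implementation; it presumes torch.distributed has already been initialized):

import torch
import torch.distributed as dist

def reduce_tensor(tensor: torch.Tensor, world_size: int) -> torch.Tensor:
    # Sum the tensor across all ranks, then average by world size.
    # Hypothetical helper shown only to clarify the call sites above.
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= world_size
    return rt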
zisikons/deep-rl
|
[
"3c39a194d048618a2a3962cdf5f4b1825e789a22"
] |
[
"core/Noise.py"
] |
[
"import numpy as np\nclass OUNoise(object):\n def __init__(self, act_dim, num_agents, act_low, act_high, mu=0.0,\n theta=0.15, max_sigma=0.7, min_sigma=0.05, decay_period=2500):\n # Parameters\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.num_agents = num_agents\n self.action_dim = act_dim\n self.low = act_low\n self.high = act_high\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.num_agents*self.action_dim) * self.mu\n\n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + \\\n self.sigma * np.random.randn(self.num_agents*self.action_dim)\n self.state = x + dx\n return self.state\n\n def get_action(self, action, t, episode):\n ou_state = self.evolve_state()\n self.sigma = self.max_sigma - \\\n (self.max_sigma - self.min_sigma) * (episode / self.decay_period)\n return np.clip(action + ou_state, self.low, self.high)\n"
] |
[
[
"numpy.ones",
"numpy.random.randn",
"numpy.clip"
]
] |
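As a usage illustration for the OUNoise class in this record, a short hedged sketch (the import path follows the file_path listed above; the dimensions and bounds are invented):

import numpy as np
from core.Noise import OUNoise  # path as listed in this record

noise = OUNoise(act_dim=2, num_agents=3, act_low=-1.0, act_high=1.0)
action = np.zeros(3 * 2)  # flat (num_agents * act_dim) action vector
noisy = noise.get_action(action, t=0, episode=10)
print(noisy.shape)  # (6,), values clipped to [act_low, act_high]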
Luodian/MADAN
|
[
"7a2918da44f5203b72652bc4cba0e70057482114"
] |
[
"cyclegan/options/base_options.py"
] |
[
"import argparse\nimport os\n\nimport torch\nfrom util import util\n\n\nclass BaseOptions():\n\tdef __init__(self):\n\t\tself.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\t\tself.initialized = False\n\t\n\tdef initialize(self):\n\t\tself.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n\t\tself.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')\n\t\tself.parser.add_argument('--loadSize', type=int, default=600, help='scale images to this size')\n\t\tself.parser.add_argument('--fineSize', type=int, default=600, help='then crop to this size')\n\t\tself.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')\n\t\tself.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')\n\t\tself.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')\n\t\tself.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')\n\t\tself.parser.add_argument('--which_model_netD', type=str, default='n_layers', help='selects model to use for netD')\n\t\tself.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')\n\t\tself.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')\n\t\tself.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n\t\tself.parser.add_argument('--name', type=str, default='experiment_name',\n\t\t help='name of the experiment. It decides where to store samples and models')\n\t\tself.parser.add_argument('--dataset_mode', type=str, default='unaligned',\n\t\t help='chooses how datasets are loaded. [unaligned | aligned | single]')\n\t\tself.parser.add_argument('--model', type=str, default='cycle_gan',\n\t\t help='chooses which model to use. cycle_gan, pix2pix, test')\n\t\tself.parser.add_argument('--weights_model_type', type=str, default='drn26',\n\t\t help='chooses which model to use. 
drn26, fcn8s')\n\t\tself.parser.add_argument('--num_cls', default=19, type=int)\n\t\tself.parser.add_argument('--max_epoch', default=20, type=int)\n\t\tself.parser.add_argument('--current_epoch', default=0, type=int)\n\t\tself.parser.add_argument('--weights_init', type=str)\n\t\tself.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')\n\t\tself.parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')\n\t\tself.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n\t\tself.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')\n\t\tself.parser.add_argument('--serial_batches', action='store_true',\n\t\t help='if true, takes images in order to make batches, otherwise takes them randomly')\n\t\tself.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')\n\t\tself.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')\n\t\tself.parser.add_argument('--display_server', type=str, default=\"http://localhost\", help='visdom server of the web display')\n\t\tself.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')\n\t\tself.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n\t\tself.parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"),\n\t\t help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, '\n\t\t 'only a subset is loaded.')\n\t\tself.parser.add_argument('--resize_or_crop', type=str, default='scale_width_and_crop',\n\t\t help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')\n\t\tself.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n\t\tself.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')\n\t\tself.parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n\t\tself.parser.add_argument('--suffix', default='', type=str,\n\t\t help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')\n\t\tself.parser.add_argument('--out_all', action='store_true', help='output all stylized images(fake_B_{})')\n\t\tself.parser.add_argument('--SAD', action='store_true', help='Sub-domain Aggregation Discriminator module')\n\t\tself.parser.add_argument('--CCD', action='store_true', help='Cross-domain Cycle Discriminator module')\n\t\tself.parser.add_argument('--CCD_weight', type=float, default=1, help='weight for cross domain cycle discriminator loss')\n\t\tself.parser.add_argument('--HF_CCD', action='store_true', help='Half Freeze Cross-domain Cycle Discriminator module')\n\t\tself.parser.add_argument('--CCD_frozen_epoch', type=int, default=-1)\n\t\tself.parser.add_argument('--SAD_frozen_epoch', type=int, default=-1)\n\t\tself.parser.add_argument('--Shared_DT', type=bool, default=True, help=\"Through \")\n\t\tself.parser.add_argument('--model_type', type=str, default='fcn8s', help=\"choose to load which type of model (fcn8s, drn26, deeplabv2)\")\n\t\tself.parser.add_argument('--semantic_loss', action='store_true', help='use semantic 
loss')\n\t\tself.parser.add_argument('--general_semantic_weight', type=float, default=0.2, help='weight for semantic loss')\n\t\tself.parser.add_argument('--weights_syn', type=str, default='', help='init weights for synthia')\n\t\tself.parser.add_argument('--weights_gta', type=str, default='', help='init weights for gta')\n\t\t\n\t\tself.parser.add_argument('--inference_script', type=str, default='', help='inference script')\n\t\tself.parser.add_argument('--dynamic_weight', type=float, default=10, help='Weight for Dynamic Semantic Loss(KL div) loss')\n\t\tself.initialized = True\n\t\n\tdef parse(self):\n\t\tif not self.initialized:\n\t\t\tself.initialize()\n\t\topt = self.parser.parse_args()\n\t\topt.isTrain = self.isTrain # train or test\n\t\t\n\t\tstr_ids = opt.gpu_ids.split(',')\n\t\topt.gpu_ids = []\n\t\tfor str_id in str_ids:\n\t\t\tid = int(str_id)\n\t\t\tif id >= 0:\n\t\t\t\topt.gpu_ids.append(id)\n\t\t\n\t\t# set gpu ids\n\t\tif len(opt.gpu_ids) > 0:\n\t\t\ttorch.cuda.set_device(opt.gpu_ids[0])\n\t\t\n\t\targs = vars(opt)\n\t\t\n\t\tprint('------------ Options -------------')\n\t\tfor k, v in sorted(args.items()):\n\t\t\tprint('%s: %s' % (str(k), str(v)))\n\t\tprint('-------------- End ----------------')\n\t\t\n\t\tif opt.suffix:\n\t\t\tsuffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n\t\t\topt.name = opt.name + suffix\n\t\t# save to the disk\n\t\texpr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n\t\tutil.mkdirs(expr_dir)\n\t\tfile_name = os.path.join(expr_dir, 'opt.txt')\n\t\twith open(file_name, 'wt') as opt_file:\n\t\t\topt_file.write('------------ Options -------------\\n')\n\t\t\tfor k, v in sorted(args.items()):\n\t\t\t\topt_file.write('%s: %s\\n' % (str(k), str(v)))\n\t\t\topt_file.write('-------------- End ----------------\\n')\n\t\tself.opt = opt\n\t\treturn self.opt\n"
] |
[
[
"torch.cuda.set_device"
]
] |
drMJ/roman
|
[
"9650e73ec6fbb2d8044aa1bbf89fd671843ea54e",
"9650e73ec6fbb2d8044aa1bbf89fd671843ea54e"
] |
[
"roman/ur/realtime/urlib.py",
"test/arm_sim_test.py"
] |
[
"################################################################################################################################\n## Redirects the UR functions needed by the control script to the simulator.\n################################################################################################################################\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\nimport math\nfrom .constants import *\n\n# this abstracts the simulator\nsim = None\n\n#*****************************************************************************\n# URScript-like functions needed by our script layer.\n#*****************************************************************************\ndef ur_get_time():\n return sim.time()\n\ndef get_inverse_kin(pose):\n return sim.arm.get_inverse_kin(pose)\n\ndef get_actual_tcp_pose():\n return sim.arm.get_actual_tcp_pose()\n\ndef get_actual_tcp_speed():\n return sim.arm.get_actual_tcp_speed()\n\ndef get_actual_joint_positions():\n return sim.arm.get_actual_joint_positions()\n\ndef get_actual_joint_speeds():\n return sim.arm.get_actual_joint_speeds()\n\ndef get_target_tcp_pose():\n return sim.arm.get_target_tcp_pose()\n\ndef get_target_tcp_speed():\n return sim.arm.get_target_tcp_speed()\n\ndef get_target_joint_positions():\n return sim.arm.get_target_joint_positions()\n\ndef get_target_joint_speeds():\n return sim.arm.get_target_joint_speeds()\n\ndef get_tcp_force():\n return sim.arm.get_tcp_force()\n\ndef get_joint_torques():\n return sim.arm.get_joint_torques()\n\ndef ur_get_tcp_sensor_force(_=0):\n return sim.arm.ur_get_tcp_sensor_force()\n\ndef ur_get_tcp_acceleration():\n return sim.arm.ur_get_tcp_acceleration()\n\ndef speedj(speed, max_acc):\n sim.arm.speedj(speed, max_acc)\n\ndef set_payload(m, cog):\n sim.arm.set_payload(m, cog)\n\ndef set_tcp(pose):\n sim.arm.set_tcp(pose)\n\n#******************************************************************************\n# various other UR script and custom functions that are not simulation-specific\n#******************************************************************************\n\ndef textmsg(s1, s2=\"\"):\n print(str(s1) + str(s2))\n\ndef norm(v):\n ''' Norm function, as defined by urscript'''\n return np.linalg.norm(v)\n\ndef point_dist(p_from, p_to):\n '''\n Point distance, as defined by urscript.\n Returns the distance between the two tool positions (without considering rotations)\n '''\n return math.dist(p_from[:3], p_to[:3])\n\ndef pose_sub(p_to, p_from):\n ''' Pose subtraction, as defined by urscript.'''\n t = np.subtract(p_to[:3], p_from[:3])\n r_to = Rotation.from_rotvec(p_to[3:])\n r_from = Rotation.from_rotvec(p_from[3:])\n r = (r_from.inv() * r_to).as_rotvec()\n return np.concatenate((t, r))\n\ndef pose_add(p1, p2):\n ''' Pose addition, as defined by urscript.'''\n t = np.add(p1[:3], p2[:3])\n r1 = Rotation.from_rotvec(p1[3:])\n r2 = Rotation.from_rotvec(p2[3:])\n r = (r1 * r2).as_rotvec()\n return np.concatenate((t, r))\n\ndef interpolate_pose(p_from, p_to, alpha):\n ''' Linear interpolation of tool position and orientation, as defined by urscript.'''\n delta = pose_sub(p_to, p_from)\n return pose_add(p_from, delta * alpha)\n\ndef sqrt(a):\n return math.sqrt(a)\n\ndef ur_pose(v):\n return v\n\ndef ur_check_loop_delay(last_loop_time):\n return ur_get_time()\n\ndef ur_force_limit_exceeded(low_bound, high_bound):\n ft = ur_get_tcp_sensor_force()\n return np.any(np.greater(low_bound, ft)) or np.any(np.greater(ft, high_bound))\n\n",
"import numpy as np\nimport math\nimport time\n\nimport pybullet as pb\n\nfrom roman.ur import *\nfrom roman.sim.simenv import SimEnv\nfrom roman.ur.realtime import urlib\nfrom roman.ur.realtime.interface import *\n\n#############################################################\n# Low-level unit tests using the simulated arm.\n#############################################################\n\ndef pose_op_test():\n print(f\"Running {__file__}::{pose_op_test.__name__}()\")\n\n x = pose_sub([1,1,1,1,1,1], [2,2,2,2,2,2])\n assert np.allclose(x, [-1,-1, -1, -1, -1, -1])\n x = interpolate_pose([1,1,1,1,1,1], [2,2,2,2,2,2], 0.2)\n assert np.allclose(x, [1.2, 1.2, 1.2, 1.2, 1.2, 1.2])\n x = interpolate_pose([1,1,1,1,1,1], [0,0,0,0,0,0], 0.1)\n assert np.allclose(x, [0.9, 0.9, 0.9, 0.9, 0.9, 0.9])\n x = interpolate_pose([1,2,3,4,5,6], [0,0,0,0,0,0], 0.1)\n assert np.allclose(x, [0.9,1.8,2.7, 1.02227, 1.27784, 1.53341]) # copied from UR script results\n\n x = interpolate_pose([-4.86900002e-01,-1.09150000e-01, 1.71858996e-01, -2.22144147e+00, -2.22144147e+00, 5.98088608e-09], [-0.4869000017642975, -0.1091499999165535, 0.2718589961528778, -2.221441466386838, -2.221441466386838, 1.5707963327757826], 0.95)\n assert np.allclose(x, [-0.4869 , -0.10915 , 0.266859 , 1.78370568, 1.78370568, -1.18747401])\n\n print(\"Passed.\")\n\ndef get_arm_state_test(env):\n print(f\"Running {__file__}::{get_arm_state_test.__name__}()\")\n urlib.sim = env\n state = State.fromarray(get_arm_state(UR_ZERO, UR_ZERO))\n #print(state)\n print(\"Passed.\")\n\ndef execute_arm_command_test(env):\n print(f\"Running {__file__}::{execute_arm_command_test.__name__}()\")\n urlib.sim = env\n cmd = Command()\n state = State.fromarray(execute_arm_command(cmd, 0))\n cmd.make(kind = UR_CMD_KIND_MOVE_JOINT_POSITIONS, target=Joints(1, 1, 1, 1, 1, 1))\n state = State.fromarray(execute_arm_command(cmd, 0))\n #print(state)\n print(\"Passed.\")\n\ndef move_arm_test(env):\n print(f\"Running {__file__}::{move_arm_test.__name__}()\")\n\n con = SimConnection(env)\n arm_ctrl = BasicController(con)\n cmd = Command()\n state = State()\n arm_ctrl.execute(cmd, state)\n\n print(state.tool_pose())\n marker_visual_id = pb.createVisualShape(pb.GEOM_BOX, halfExtents=[0.005, 0.005, 0.005], rgbaColor=[1,0,0,1])\n #pb.createMultiBody(baseVisualShapeIndex=marker_visual_id, basePosition=state.tool_pose()[:3])\n cmd.make(kind = UR_CMD_KIND_MOVE_TOOL_POSE, target=Tool(-0.4, -0.4, 0.3,0, math.pi, 0))\n pb.createMultiBody(baseVisualShapeIndex=marker_visual_id, basePosition=cmd.target()[:3])\n arm_ctrl.execute(cmd, state)\n while not state.is_goal_reached():\n # st = time.time()\n arm_ctrl.execute(cmd, state)\n env.update()\n # latency = time.time()-st\n # leftover = 1/240. - latency\n # if leftover > 0:\n # time.sleep(leftover)\n\n print(\"Passed.\")\n\n\n#############################################################\n# Runner\n#############################################################\ndef run():\n pose_op_test()\n\n env = SimEnv()\n env.connect()\n get_arm_state_test(env)\n execute_arm_command_test(env)\n move_arm_test(env)\n env.disconnect()\n\nif __name__ == '__main__':\n run()"
] |
[
[
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.greater",
"numpy.subtract",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.add"
],
[
"numpy.allclose"
]
] |
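The urlib.py helpers above compose tool poses with scipy rotations. A self-contained sketch (restated here so it runs on its own, independent of the repository's imports) checks that pose_add undoes pose_sub:

import numpy as np
from scipy.spatial.transform import Rotation

def pose_sub(p_to, p_from):
    # Translation difference plus relative rotation, as in the record above.
    t = np.subtract(p_to[:3], p_from[:3])
    r = (Rotation.from_rotvec(p_from[3:]).inv() * Rotation.from_rotvec(p_to[3:])).as_rotvec()
    return np.concatenate((t, r))

def pose_add(p1, p2):
    t = np.add(p1[:3], p2[:3])
    r = (Rotation.from_rotvec(p1[3:]) * Rotation.from_rotvec(p2[3:])).as_rotvec()
    return np.concatenate((t, r))

p_from = np.array([0.1, 0.2, 0.3, 0.0, 0.0, 0.5])
p_to = np.array([0.4, 0.1, 0.0, 0.0, 1.0, 0.0])
# Adding the difference back onto p_from should recover p_to.
print(np.allclose(pose_add(p_from, pose_sub(p_to, p_from)), p_to))  # True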
modscripps/mixsea
|
[
"b9962e1fd86da509d6649d1e766d8daeb440656f"
] |
[
"mixsea/overturn.py"
] |
[
"import gsw\nimport numpy as np\n\n\ndef nan_eps_overturn(\n depth,\n t,\n SP,\n lon,\n lat,\n **kwargs,\n):\n \"\"\"\n Calculate turbulent dissipation based on the Thorpe scale method attempting to deal NaN values in the input data.\n It does this by removing all NaN values in the input profiles, then computes thorpe scales, then re-inserts NaNs\n at the end.\n\n See `eps_overturn` for more options.\n \"\"\"\n depth = np.asarray(depth)\n t = np.asarray(t)\n SP = np.asarray(SP)\n\n # Find non-NaNs\n if SP.size == 1:\n SP = np.full_like(depth, SP)\n\n notnan = np.isfinite(depth) & np.isfinite(t) & np.isfinite(SP)\n\n isnan = ~notnan\n if isnan.sum() == 0: # If there are no NaNs then return.\n return eps_overturn(depth, t, SP, lon, lat, **kwargs)\n\n eps = np.full_like(depth, np.nan)\n N2 = np.full_like(depth, np.nan)\n\n # Don't want to pass return_diagnostics twice.\n if \"return_diagnostics\" in kwargs:\n return_diagnostics = kwargs.pop(\"return_diagnostics\")\n else:\n return_diagnostics = False\n\n eps[notnan], N2[notnan], diag = eps_overturn(\n depth[notnan],\n t[notnan],\n SP[notnan],\n lon,\n lat,\n return_diagnostics=True,\n **kwargs,\n )\n\n if return_diagnostics:\n # Replace nans in diagnostics if the size and shape seems right:\n Nnotnans = notnan.sum()\n for key in diag:\n if (np.size(diag[key]) == Nnotnans) & (np.ndim(diag[key]) == 1):\n ar = np.full_like(depth, np.nan)\n ar[notnan] = diag[key]\n diag[key] = ar # This will wipe out the old item.\n\n return eps, N2, diag\n\n else:\n return eps, N2\n\n\ndef eps_overturn(\n depth,\n t,\n SP,\n lon,\n lat,\n dnoise=5e-4,\n alpha=0.95,\n Roc=0.2,\n background_eps=np.nan,\n use_ip=False,\n N2_method=\"teos\",\n overturns_from_CT=False,\n return_diagnostics=False,\n):\n \"\"\"\n Calculate turbulent dissipation based on the Thorpe scale method. This function cannot handle\n NaNs in the input data, but there is another called `nan_eps_overturn' that attempts to.\n\n Parameters\n ----------\n depth : array-like\n Depth [m]\n t : array-like\n In-situ temperature [ITS90, °C]\n SP : float or array-like\n Salinity [PSU]. Can be a single constant value. This may be convenient if only temperature data\n are available.\n lon : float\n Longitude of observation\n lat : float\n Latitude of observation\n dnoise : float, optional\n Noise level of density [kg/m^3] or conservative temperature [°C], depending on overturns_from_CT. Default is 5e-4.\n alpha : float, optional\n Ratio of Ozmidov scale to Thorpe scale, alpha = Lo/Lt. Default is 0.95. Care must be taken to choose\n a value appropriate for the setting, e.g. Dillon 1982 [1]_, Ferron et al. 1998 [2]_.\n Convert to Thorpe 1977 [3]_ conventions with C0 = alpha**2.\n Not to be confused with alpha in Equation 4 from Thorpe 1977, which is the inverse of our alpha.\n Roc : float, optional\n Critical value of the overturn ratio Ro. An overturn will be considered\n noise if Ro < Roc.\n background_eps : float, optional\n Background epsilon where no overturn detected. Defaults to numpy.nan.\n use_ip : bool, optional\n Sets whether to use the intermediate profile method. Default is False. If True,\n the dnoise parameter is passed as the `accuracy' argument of the intermediate\n profile method.\n N2_method : string, optional\n Method for calculation of buoyancy frequency. Default is 'teosp1'. Options are 'bulk',\n 'endpt', 'teos' and 'teosp1'.\n overturns_from_CT : bool, optional\n If true, overturning patches will be diagnosed from the conservative temperature CT,\n instead of potential density. 
Default is False.\n return_diagnostics : bool, optional\n Default is False. If True, this function will return a dictionary containing\n variables such as the Thorpe scale Lt, etc.\n\n Returns\n -------\n eps : ndarray\n Turbulent dissipation [W/kg]\n N2 : ndarray\n Background stratification of each overturn detected [s^-2]\n diag : dict, optional\n Dictionary of diagnositc variables, set return with the `return_diagnostics' argument.\n\n References\n ----------\n .. [1] Dillon, T. M. (1982). Vertical overturns: A comparison of Thorpe and Ozmidov length scales. Journal of Geophysical Research, 87(C12), 9601.\n .. [2] Ferron, B., Mercier, H., Speer, K., Gargett, A., & Polzin, K. (1998). Mixing in the Romanche Fracture Zone. Journal of Physical Oceanography, 28(10), 1929–1945.\n .. [3] Thorpe, S. A. (1977). Turbulence and Mixing in a Scottish Loch. Philosophical Transactions of the Royal Society of London. Series A, Mathematical and Physical Sciences, 286(1334), 125–181.\n\n \"\"\"\n depth = np.asarray(depth)\n t = np.asarray(t)\n SP = np.asarray(SP)\n\n if not np.all(np.isclose(np.maximum.accumulate(depth), depth)):\n raise ValueError(\n \"It appears that depth is not monotonically increasing, please fix.\"\n )\n\n if SP.size == 1:\n SP = np.full_like(depth, SP)\n\n if not (depth.size == t.size == SP.size):\n raise ValueError(\n \"Input array sizes do not match. depth.size = {}, t.size = {}, SP.size = {}\".format(\n depth.size, t.size, SP.size\n )\n )\n\n if not any(s == N2_method for s in [\"teosp1\", \"teos\", \"endpt\", \"bulk\"]):\n raise ValueError(\n \"The 'N2_method' argument must be 'teosp1', 'teos', 'endpt' or 'bulk'.\"\n )\n\n ndata = depth.size\n\n # Estimate pressure from depth.\n p = gsw.p_from_z(-depth, lat)\n dz = 0.5 * (depth[2:] - depth[:-2]) # 'width' of each data point\n dz = np.hstack((dz[0], dz, dz[-1])) # assume width of first and last data point\n\n SA = gsw.SA_from_SP(SP, t, lon, lat)\n CT = gsw.CT_from_t(SA, t, p)\n\n # Initialise arrays for diagnostic variables and flags.\n diag = {}\n diagvar = [\n \"eps\",\n \"N2\",\n \"Lt\",\n \"thorpe_disp\",\n \"sidx\",\n \"dens\",\n \"dens_sorted\",\n \"Ro\",\n \"SA_sorted\",\n \"CT_sorted\",\n ]\n for var in diagvar:\n diag[var] = np.full_like(depth, np.nan)\n\n if use_ip and not overturns_from_CT:\n diag[\"dens_ip\"] = np.full_like(depth, np.nan)\n\n if use_ip and overturns_from_CT:\n diag[\"CT_ip\"] = np.full_like(depth, np.nan)\n\n flagvar = [\"noise_flag\", \"N2_flag\", \"ends_flag\", \"Ro_flag\"]\n for var in flagvar:\n diag[var] = np.full_like(depth, False, dtype=bool)\n\n # Potential density is only meaningful near the reference pressure. For a deep profile\n # we may need to select several reference pressures. To do so, we find the pressure\n # bins that best contain the data\n pbinwidth = 1000.0 # In future we could have this as an argument.\n pbinmin = np.floor(p.min() / pbinwidth) * pbinwidth\n pbinmax = np.ceil(p.max() / pbinwidth) * pbinwidth\n pbins = np.arange(pbinmin, pbinmax + pbinwidth, pbinwidth)\n p_refs = 0.5 * (\n pbins[1:] + pbins[:-1]\n ) # Use mid point pressure as reference pressure.\n nbins = p_refs.size\n\n # Loop over pressure bins.\n for idx_bin in range(nbins):\n\n dens = gsw.pot_rho_t_exact(SA, t, p, p_ref=p_refs[idx_bin])\n\n if overturns_from_CT:\n # Temperature normally decreases towards the bottom which would mean the\n # find_overturns algorithm thinks the whole water column is unstable! 
Minus fixes that.\n q = -CT\n else:\n q = dens\n\n if use_ip: # Create intermediate density profile\n q = intermediate_profile(\n q, acc=dnoise, hinge=1000, kind=\"down\"\n ) # TODO: make hinge optional\n\n # --->> THORPE SCALES <<---\n (\n Lt,\n thorpe_disp,\n q_sorted,\n noise_flag,\n ends_flag,\n Ro,\n idx_patches,\n sidx,\n ) = thorpe_scale(depth, q, dnoise)\n\n # If there are no overturns, move on to the next pressure bin.\n if not np.any(idx_patches):\n continue\n\n # Thorpe displacements (by definition relative to initial locations, so unsorted)\n unsidx = np.argsort(sidx)\n thorpe_disp = (depth[sidx] - depth)[unsidx]\n\n # Sort other quantities based on the sorting indices.\n dens_sorted = dens[sidx]\n SA_sorted = SA[sidx]\n CT_sorted = CT[sidx]\n\n # Temporary arrays.\n N2 = np.full_like(depth, np.nan)\n N2_flag = np.full_like(depth, False, dtype=bool)\n Ro_flag = np.full_like(depth, False, dtype=bool)\n\n for patch in idx_patches:\n # Get patch indices.\n i0 = patch[0]\n i1 = patch[1]\n pidx = np.arange(i0, i1 + 1, 1) # Need +1 for Python indexing\n\n Lto = np.unique(Lt[pidx])\n\n # Estimate the buoyancy frequency.\n if N2_method == \"teos\":\n N2o, _ = gsw.Nsquared(\n SA_sorted[[i0, i1]],\n CT_sorted[[i0, i1]],\n p[[i0, i1]],\n lat,\n )\n elif N2_method == \"teosp1\":\n # Go beyond overturn. Need to add 1 for this, unless end or beginning.\n addi = 0 if i1 == ndata - 1 else 1\n subi = 0 if i0 == 0 else 1\n\n N2o, _ = gsw.Nsquared(\n SA_sorted[[i0 - subi, i1 + addi]],\n CT_sorted[[i0 - subi, i1 + addi]],\n p[[i0 - subi, i1 + addi]],\n lat,\n )\n elif N2_method == \"bulk\":\n g = gsw.grav(lat, p[pidx].mean())\n densanom = dens[pidx] - dens_sorted[pidx]\n densrms = np.sqrt(np.mean(densanom ** 2))\n N2o = g * densrms / (Lto * np.mean(dens[pidx]))\n elif N2_method == \"endpt\":\n g = gsw.grav(lat, p[pidx].mean())\n ddens = dens_sorted[i1] - dens_sorted[i0]\n ddepth = depth[i1] - depth[i0]\n N2o = g * ddens / (ddepth * np.mean(dens[pidx]))\n else: # May be redundent because of check at beginning of function.\n raise ValueError(\"N2_method '{}' is not available.\".format(N2_method))\n\n N2[pidx] = N2o\n\n # Flag negative N squared.\n if N2o < 0:\n N2_flag[pidx] = True\n\n Roo = np.unique(Ro[pidx])\n\n if Roo < Roc:\n Ro_flag[pidx] = True\n\n # Find and select data for this reference pressure range only.\n inbin = (p > pbins[idx_bin]) & (p <= pbins[idx_bin + 1])\n\n # Fill flags.\n diag[\"noise_flag\"][inbin] = noise_flag[inbin]\n diag[\"N2_flag\"][inbin] = N2_flag[inbin]\n diag[\"ends_flag\"][inbin] = ends_flag[inbin]\n diag[\"Ro_flag\"][inbin] = Ro_flag[inbin]\n\n # Fill other diagnostics.\n diag[\"N2\"][inbin] = N2[inbin]\n diag[\"Lt\"][inbin] = Lt[inbin]\n diag[\"Ro\"][inbin] = Ro[inbin]\n diag[\"thorpe_disp\"][inbin] = thorpe_disp[inbin]\n diag[\"sidx\"][inbin] = sidx[inbin]\n diag[\"dens\"][inbin] = dens[inbin]\n diag[\"dens_sorted\"][inbin] = dens_sorted[inbin]\n diag[\"CT_sorted\"][inbin] = CT_sorted[inbin]\n diag[\"SA_sorted\"][inbin] = SA_sorted[inbin]\n\n if use_ip and not overturns_from_CT:\n diag[\"dens_ip\"][inbin] = q[inbin]\n\n if use_ip and overturns_from_CT:\n diag[\"CT_ip\"][inbin] = q[inbin]\n\n # Finally calculate epsilon for diagnostics, avoid nans, inf and negative N2.\n isgood = np.isfinite(diag[\"N2\"]) & np.isfinite(diag[\"Lt\"]) & ~diag[\"N2_flag\"]\n diag[\"eps\"][isgood] = (\n alpha ** 2 * diag[\"Lt\"][isgood] ** 2 * diag[\"N2\"][isgood] ** 1.5\n )\n\n # Use flags to get rid of bad overturns in basic output\n isbad = diag[\"noise_flag\"] | diag[\"N2_flag\"] 
| diag[\"Ro_flag\"]\n eps = diag[\"eps\"].copy()\n eps[isbad] = np.nan\n N2 = diag[\"N2\"].copy()\n N2[isbad] = np.nan\n\n # Fill with background epsilon\n eps[np.isnan(eps)] = background_eps\n\n if return_diagnostics:\n return eps, N2, diag\n else:\n return eps, N2\n\n\ndef thorpe_scale(depth, q, dnoise):\n \"\"\"\n Estimate the Thorpe scale from unstable patches in a profile.\n\n Parameters\n ----------\n depth : array-like\n Depth [m] (negative if below sea surface)\n q : array-like\n Quantity from which Thorpe scales will be computed, e.g. density or temperature. If using\n temperature, consider multiplying by -1 to get around the fact that temperature generally\n decreases with depth.\n dnoise : float, optional\n Uncertainty or noise in q.\n\n Returns\n -------\n Lt : ndarray\n Thorpe scale [m]\n thorpe_disp : ndarray\n Thorpe displacement [m]\n q_sorted : ndarray\n q sorted to be monotonically increasing\n noise_flag : ndarray\n True if difference in q from top to bottom patch is less than dnoise\n ends_flag : ndarray\n True if a patch includes and end point\n Ro : ndarray\n Overturn ratio of Gargett & Garner.\n idx_patches : ndarray\n Indices of overturning patches, e.g. idx_patches[:, 0] are start indices and idx_patches[:, 1] are end indices.\n idx_sorted : ndarray\n Indices required to sort q so as to generate q_sorted.\n \"\"\"\n\n depth = np.asarray(depth)\n q = np.asarray(q)\n\n if q[0] > q[-1]:\n raise ValueError(\"The entire profile is unstable, q[0] > q[-1].\")\n\n if not np.all(np.isclose(np.maximum.accumulate(depth), depth)):\n raise ValueError(\n \"It appears that depth is not monotonically increasing, please fix.\"\n )\n\n idx_sorted, idx_patches = find_overturns(q)\n\n ndata = depth.size\n\n # Thorpe displacements\n thorpe_disp = depth[idx_sorted] - depth\n\n q_sorted = q[idx_sorted]\n\n # Initialise arrays.\n Lt = np.full_like(depth, np.nan)\n Ro = np.full_like(depth, np.nan)\n noise_flag = np.full_like(depth, False, dtype=bool)\n ends_flag = np.full_like(depth, False, dtype=bool)\n\n dz = 0.5 * (depth[2:] - depth[:-2]) # 'width' of each data point\n dz = np.hstack((dz[0], dz, dz[-1])) # assume width of first and last data point\n\n for patch in idx_patches:\n # Get patch indices.\n i0 = patch[0]\n i1 = patch[1]\n pidx = np.arange(i0, i1 + 1, 1) # Need +1 for Python indexing\n\n # Thorpe scale is the root mean square thorpe displacement.\n Lto = np.sqrt(np.mean(np.square(thorpe_disp[pidx])))\n Lt[pidx] = Lto\n\n # Flag beginning or end.\n if i0 == 0:\n ends_flag[pidx] = True\n if i1 == ndata - 1:\n ends_flag[pidx] = True\n\n # Flag small difference.\n dq = q_sorted[i1] - q_sorted[i0]\n if dq < dnoise:\n noise_flag[pidx] = True\n\n # Overturn ratio of Gargett & Garner\n Tdo = thorpe_disp[pidx]\n dzo = dz[pidx]\n L_tot = np.sum(dzo)\n L_neg = np.sum(dzo[Tdo < 0])\n L_pos = np.sum(dzo[Tdo > 0])\n Roo = np.minimum(L_neg / L_tot, L_pos / L_tot)\n Ro[pidx] = Roo\n\n return Lt, thorpe_disp, q_sorted, noise_flag, ends_flag, Ro, idx_patches, idx_sorted\n\n\ndef find_overturns(q):\n \"\"\"Find the indices of unstable patches by cumulatively summing the difference between\n sorted and unsorted indices of q.\n\n Parameters\n ----------\n q : array_like 1D\n Profile of some quantity from which overturns can be detected\n e.g. 
temperature or density.\n\n Returns\n -------\n idx_sorted : 1D ndarray\n Indices that sort the data q.\n idx_patches : (N, 2) ndarray\n Start and end indices of the overturns.\n\n \"\"\"\n idx = np.arange(q.size, dtype=int)\n idx_sorted = np.argsort(q, kind=\"mergesort\")\n idx_cumulative = np.cumsum(idx_sorted - idx)\n idx_patches = contiguous_regions(idx_cumulative > 0)\n return idx_sorted, idx_patches\n\n\ndef intermediate_profile_topdown(q, acc, hinge):\n \"\"\"Generate an intermediate profile starting at q[0] moving along the array.\n\n See Ferron et. al. 1998 and Gargett and Garner 2008.\n\n Parameters\n ----------\n q : array_like 1D\n Profile of some quantity that the intermediate profile method can be\n applied to e.g. temperature or density.\n acc : float, optional\n Accuracy parameter. The intermediate profile change in steps of acc.\n hinge : float, optional\n Intermediate profile values are equal to the hinge plus an integer\n multiple of acc. It should be kept constant across profiles.\n\n Returns\n -------\n qi : 1D ndarray\n Intermediate profile.\n\n \"\"\"\n\n # Initialise.\n qi = np.zeros_like(q)\n n = np.fix((q[0] - hinge) / acc)\n qi[0] = hinge + n * acc\n\n # Step through profile.\n for i in range(len(q) - 1):\n n = np.fix((q[i + 1] - qi[i]) / acc)\n qi[i + 1] = qi[i] + n * acc\n\n return qi\n\n\ndef intermediate_profile(q, acc=5e-4, hinge=1000, kind=\"down\"):\n \"\"\"Generate an intermediate profile of some quantity.\n\n See Ferron et. al. 1998 and Gargett and Garner 2008.\n\n Parameters\n ----------\n q : array_like 1D\n Profile of some quantity that the intermediate profile method can be\n applied to e.g. temperature or density.\n acc : float, optional\n Accuracy parameter. The intermediate profile change in steps of acc.\n hinge : float, optional\n Intermediate profile values are equal to the hinge plus an integer multiple\n of acc. It should be kept constant across profiles.\n kind : string, optional\n Either 'up', 'down' or 'ave'. Default is ave. This argument determines\n whether the method is applied top down (q[0] to q[end]), bottom up\n (q[end] to [q[0]]) or the average of up and down.\n\n Returns\n -------\n qi : 1D ndarray\n Intermediate profile.\n\n \"\"\"\n\n if not any(s in kind for s in [\"up\", \"do\", \"av\"]):\n raise ValueError(\"The 'kind' argument must be 'up', 'down' or 'ave'.\")\n\n q = np.asarray(q)\n\n if \"up\" in kind:\n qf = np.flipud(q)\n qi = np.flipud(intermediate_profile_topdown(qf, acc, hinge))\n elif \"do\" in kind:\n qi = intermediate_profile_topdown(q, acc, hinge)\n elif \"av\" in kind:\n qf = np.flipud(q)\n qtd = intermediate_profile_topdown(q, acc, hinge)\n qbu = np.flipud(intermediate_profile_topdown(qf, acc, hinge))\n qi = (qtd + qbu) / 2.0\n\n return qi\n\n\ndef contiguous_regions(condition):\n \"\"\"Finds the indices of contiguous True regions in a boolean array.\n\n Parameters\n ----------\n condition : array_like\n Array of boolean values.\n\n Returns\n -------\n idx : ndarray\n Array of indices demarking the start and end of contiguous True regions in condition.\n Shape is (N, 2) where N is the number of regions.\n\n Notes\n -----\n Modified from stack overflow: https://stackoverflow.com/a/4495197\n\n \"\"\"\n\n d = np.diff(condition)\n (idx,) = d.nonzero()\n\n # We need to start things after the change in \"condition\". 
Therefore,\n # we'll shift the index by 1 to the right.\n idx += 1\n\n if condition[0]:\n # If the start of condition is True prepend a 0\n idx = np.r_[0, idx]\n\n if condition[-1]:\n # If the end of condition is True, append the length of the array\n idx = np.r_[idx, condition.size] # Edit\n\n # Reshape the result into two columns\n idx.shape = (-1, 2)\n return idx\n"
] |
[
[
"numpy.minimum",
"numpy.asarray",
"numpy.flipud",
"numpy.cumsum",
"numpy.zeros_like",
"numpy.any",
"numpy.mean",
"numpy.fix",
"numpy.square",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.size",
"numpy.diff",
"numpy.isnan",
"numpy.full_like",
"numpy.ndim",
"numpy.argsort",
"numpy.sum",
"numpy.isfinite",
"numpy.maximum.accumulate"
]
] |
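To make the overturn-detection step in overturn.py concrete: find_overturns sorts the profile and cumulatively sums the displacement between sorted and original indices, and regions where that sum stays positive are unstable patches. A small self-contained sketch of the same idea, with an invented toy profile:

import numpy as np

def find_overturns_sketch(q):
    # Indices that sort q, then the cumulative displacement from the original
    # order; positive cumulative displacement marks an overturning patch.
    idx = np.arange(q.size, dtype=int)
    idx_sorted = np.argsort(q, kind="mergesort")
    cum = np.cumsum(idx_sorted - idx)
    return idx_sorted, cum > 0

q = np.array([1.0, 1.1, 1.4, 1.2, 1.3, 1.5])  # density-like profile with one inversion
idx_sorted, in_patch = find_overturns_sketch(q)
print(in_patch)  # True over the displaced samples, False in the stable parts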
joshwalawender/PypeIt
|
[
"f952cbb2aaee640b5c585be823884a237b441e8e"
] |
[
"pypeit/scripts/ql_mos.py"
] |
[
"#!/usr/bin/env python\n#\n# See top-level LICENSE file for Copyright information\n#\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script runs PypeIt on a set of MultiSlit images\n\"\"\"\nimport argparse\n\nfrom pypeit import msgs\n\nimport warnings\n\ndef parser(options=None):\n\n parser = argparse.ArgumentParser(description='Script to run PypeIt in QuickLook on a set of MOS files')\n parser.add_argument('spectrograph', type=str, help='Name of spectograph, e.g. shane_kast_blue')\n parser.add_argument('full_rawpath', type=str, help='Full path to the raw files')\n parser.add_argument('arc', type=str, help='Arc frame filename')\n parser.add_argument('flat', type=str, help='Flat frame filename')\n parser.add_argument('science', type=str, help='Science frame filename')\n parser.add_argument('-b', '--box_radius', type=float, help='Set the radius for the boxcar extraction (arcsec)')\n parser.add_argument('-d', '--det', type=int, default=1, help='Detector number')\n parser.add_argument(\"--ignore_headers\", default=False, action=\"store_true\",\n help=\"Ignore bad headers?\")\n parser.add_argument(\"--user_pixflat\", type=str, help=\"Use a user-supplied pixel flat (e.g. keck_lris_blue)\")\n\n if options is None:\n pargs = parser.parse_args()\n else:\n pargs = parser.parse_args(options)\n return pargs\n\n\ndef main(pargs):\n\n import os\n import numpy as np\n\n from IPython import embed\n\n from pypeit import pypeit\n from pypeit import pypeitsetup\n from pypeit.core import framematch\n\n spec = pargs.spectrograph\n\n # Config the run\n cfg_lines = ['[rdx]']\n cfg_lines += [' spectrograph = {0}'.format(spec)]\n cfg_lines += [' redux_path = {0}_A'.format(os.path.join(os.getcwd(),spec))]\n cfg_lines += [' detnum = {0}'.format(pargs.det)]\n if pargs.ignore_headers:\n cfg_lines += [' ignore_bad_headers = True']\n cfg_lines += ['[calibrations]']\n cfg_lines += [' [[scienceframe]]']\n cfg_lines += [' [[process]]']\n cfg_lines += [' cr_reject = False']\n if pargs.user_pixflat is not None:\n cfg_lines += [' [[flatfield]]']\n cfg_lines += [' frame = {0}'.format(pargs.user_pixflat)]\n cfg_lines += ['[scienceimage]']\n cfg_lines += [' [[extraction]]']\n cfg_lines += [' skip_optimal = True']\n if pargs.box_radius is not None: # Boxcar radius\n cfg_lines += [' boxcar_radius = {0}'.format(pargs.box_radius)]\n cfg_lines += [' [[findobj]]']\n cfg_lines += [' skip_second_find = True']\n\n # Data files\n data_files = [os.path.join(pargs.full_rawpath, pargs.arc),\n os.path.join(pargs.full_rawpath, pargs.flat),\n os.path.join(pargs.full_rawpath,pargs.science)]\n\n # Setup\n ps = pypeitsetup.PypeItSetup(data_files, path='./', spectrograph_name=spec,\n cfg_lines=cfg_lines)\n ps.build_fitstbl()\n # TODO -- Get the type_bits from 'science'\n bm = framematch.FrameTypeBitMask()\n file_bits = np.zeros(3, dtype=bm.minimum_dtype())\n file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])\n file_bits[1] = bm.turn_on(file_bits[1],\n ['pixelflat', 'trace'] if pargs.user_pixflat is None else 'trace')\n file_bits[2] = bm.turn_on(file_bits[2], 'science')\n\n # PypeItSetup sorts according to MJD\n # Deal with this\n asrt = []\n for ifile in data_files:\n bfile = os.path.basename(ifile)\n idx = ps.fitstbl['filename'].data.tolist().index(bfile)\n asrt.append(idx)\n asrt = np.array(asrt)\n\n # Set bits\n ps.fitstbl.set_frame_types(file_bits[asrt])\n ps.fitstbl.set_combination_groups()\n # Extras\n ps.fitstbl['setup'] = 'A'\n\n # Write\n ofiles = ps.fitstbl.write_pypeit('', configs=['A'], write_bkg_pairs=True, cfg_lines=cfg_lines)\n if 
len(ofiles) > 1:\n msgs.error(\"Bad things happened..\")\n\n # Instantiate the main pipeline reduction object\n pypeIt = pypeit.PypeIt(ofiles[0], verbosity=2,\n reuse_masters=True, overwrite=True,\n logname='mos.log', show=False)\n # Run\n pypeIt.reduce_all()\n msgs.info('Data reduction complete')\n # QA HTML\n msgs.info('Generating QA HTML')\n pypeIt.build_qa()\n\n return 0\n\n"
] |
[
[
"numpy.array"
]
] |
henry-eigen/weightnorm
|
[
"017d6288262d5a4e2ffcfd909709bbf8059698f3"
] |
[
"keras_2/weightnorm.py"
] |
[
"from keras import backend as K\nfrom keras.optimizers import SGD,Adam\nimport tensorflow as tf\n\n# adapted from keras.optimizers.SGD\nclass SGDWithWeightnorm(SGD):\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n lr = self.lr\n if self.initial_decay > 0:\n lr.assign(lr * (1. / (1. + self.decay * K.cast(self.iterations, K.floatx()))))\n self.updates .append(K.update_add(self.iterations, 1))\n\n # momentum\n shapes = [K.int_shape(p) for p in params]\n moments = [K.zeros(shape) for shape in shapes]\n self.weights = [self.iterations] + moments\n for p, g, m in zip(params, grads, moments):\n\n # if a weight tensor (len > 1) use weight normalized parameterization\n ps = K.int_shape(p)\n if len(ps) > 1:\n\n # get weight normalization parameters\n V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)\n\n # momentum container for the 'g' parameter\n V_scaler_shape = K.int_shape(V_scaler)\n m_g = K.zeros(V_scaler_shape)\n\n # update g parameters\n v_g = self.momentum * m_g - lr * grad_g # velocity\n self.updates.append(K.update(m_g, v_g))\n if self.nesterov:\n new_g_param = g_param + self.momentum * v_g - lr * grad_g\n else:\n new_g_param = g_param + v_g\n\n # update V parameters\n v_v = self.momentum * m - lr * grad_V # velocity\n self.updates.append(K.update(m, v_v))\n if self.nesterov:\n new_V_param = V + self.momentum * v_v - lr * grad_V\n else:\n new_V_param = V + v_v\n\n # if there are constraints we apply them to V, not W\n if getattr(p, 'constraint', None) is not None:\n new_V_param = p.constraint(new_V_param)\n\n # wn param updates --> W updates\n add_weightnorm_param_updates(self.updates, new_V_param, new_g_param, p, V_scaler)\n\n else: # normal SGD with momentum\n v = self.momentum * m - lr * g # velocity\n self.updates.append(K.update(m, v))\n\n if self.nesterov:\n new_p = p + self.momentum * v - lr * g\n else:\n new_p = p + v\n\n # apply constraints\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(K.update(p, new_p))\n return self.updates\n\n# adapted from keras.optimizers.Adam\nclass AdamWithWeightnorm(Adam):\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = [K.update_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr.assign(lr * (1. / (1. + self.decay * K.cast(self.iterations, K.floatx()))))\n\n t = K.cast(self.iterations + 1, K.floatx())\n lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))\n\n shapes = [K.int_shape(p) for p in params]\n ms = [K.zeros(shape) for shape in shapes]\n vs = [K.zeros(shape) for shape in shapes]\n self.weights = [self.iterations] + ms + vs\n\n for p, g, m, v in zip(params, grads, ms, vs):\n\n # if a weight tensor (len > 1) use weight normalized parameterization\n # this is the only part changed w.r.t. keras.optimizers.Adam\n ps = K.int_shape(p)\n if len(ps)>1:\n\n # get weight normalization parameters\n V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)\n\n # Adam containers for the 'g' parameter\n V_scaler_shape = K.int_shape(V_scaler)\n m_g = K.zeros(V_scaler_shape)\n v_g = K.zeros(V_scaler_shape)\n\n # update g parameters\n m_g_t = (self.beta_1 * m_g) + (1. - self.beta_1) * grad_g\n v_g_t = (self.beta_2 * v_g) + (1. 
- self.beta_2) * K.square(grad_g)\n new_g_param = g_param - lr_t * m_g_t / (K.sqrt(v_g_t) + self.epsilon)\n self.updates.append(K.update(m_g, m_g_t))\n self.updates.append(K.update(v_g, v_g_t))\n\n # update V parameters\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * grad_V\n v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(grad_V)\n new_V_param = V - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)\n self.updates.append(K.update(m, m_t))\n self.updates.append(K.update(v, v_t))\n\n # if there are constraints we apply them to V, not W\n if getattr(p, 'constraint', None) is not None:\n new_V_param = p.constraint(new_V_param)\n\n # wn param updates --> W updates\n add_weightnorm_param_updates(self.updates, new_V_param, new_g_param, p, V_scaler)\n\n else: # do optimization normally\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)\n p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)\n\n self.updates.append(K.update(m, m_t))\n self.updates.append(K.update(v, v_t))\n\n new_p = p_t\n # apply constraints\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n self.updates.append(K.update(p, new_p))\n return self.updates\n\n\ndef get_weightnorm_params_and_grads(p, g):\n ps = K.int_shape(p)\n\n # construct weight scaler: V_scaler = g/||V||\n V_scaler_shape = (ps[-1],) # assumes we're using tensorflow!\n V_scaler = K.ones(V_scaler_shape) # init to ones, so effective parameters don't change\n\n # get V parameters = ||V||/g * W\n norm_axes = [i for i in range(len(ps) - 1)]\n V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])\n\n # split V_scaler into ||V|| and g parameters\n V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))\n g_param = V_scaler * V_norm\n\n # get grad in V,g parameters\n grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm\n grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \\\n (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)\n\n return V, V_norm, V_scaler, g_param, grad_g, grad_V\n\n\ndef add_weightnorm_param_updates(updates, new_V_param, new_g_param, W, V_scaler):\n ps = K.int_shape(new_V_param)\n norm_axes = [i for i in range(len(ps) - 1)]\n\n # update W and V_scaler\n new_V_norm = tf.sqrt(tf.reduce_sum(tf.square(new_V_param), norm_axes))\n new_V_scaler = new_g_param / new_V_norm\n new_W = tf.reshape(new_V_scaler, [1] * len(norm_axes) + [-1]) * new_V_param\n updates.append(K.update(W, new_W))\n updates.append(K.update(V_scaler, new_V_scaler))\n\n\n# data based initialization for a given Keras model\ndef data_based_init(model, input):\n\n # input can be dict, numpy array, or list of numpy arrays\n if type(input) is dict:\n feed_dict = input\n elif type(input) is list:\n feed_dict = {tf_inp: np_inp for tf_inp,np_inp in zip(model.inputs,input)}\n else:\n feed_dict = {model.inputs[0]: input}\n\n # add learning phase if required\n if model.uses_learning_phase and K.learning_phase() not in feed_dict:\n feed_dict.update({K.learning_phase(): 1})\n\n # get all layer name, output, weight, bias tuples\n layer_output_weight_bias = []\n for l in model.layers:\n trainable_weights = l.trainable_weights\n if len(trainable_weights) == 2:\n W,b = trainable_weights\n assert(l.built)\n layer_output_weight_bias.append((l.name,l.get_output_at(0),W,b)) # if more than one node, only use the first\n\n # iterate over our list and do data dependent init\n sess = K.get_session()\n for l,o,W,b in layer_output_weight_bias:\n print('Performing data dependent initialization for layer ' 
+ l)\n m,v = tf.nn.moments(o, [i for i in range(len(o.get_shape())-1)])\n s = tf.sqrt(v + 1e-10)\n updates = tf.group(W.assign(W/tf.reshape(s,[1]*(len(W.get_shape())-1)+[-1])), b.assign((b-m)/s))\n sess.run(updates, feed_dict)\n"
] |
[
[
"tensorflow.reduce_sum",
"tensorflow.sqrt",
"tensorflow.square"
]
] |
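The optimizers in this record rely on the weight-normalization identity W = g * V / ||V||, with the norm taken over all but the last axis. A plain-numpy sketch of that identity (illustration only, independent of the Keras classes above):

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(5, 3))  # e.g. a dense layer's kernel

V = W.copy()                                # direction part
V_norm = np.sqrt(np.square(V).sum(axis=0))  # ||V|| over all but the last axis
g = V_norm.copy()                           # scale chosen so effective weights are unchanged

W_reconstructed = g * V / V_norm
print(np.allclose(W, W_reconstructed))  # True: same effective parameters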
khalidsaifullaah/tutorials
|
[
"cf10bed4dd5dd6b069f8f102e2a532a4d03fcf43"
] |
[
"beginner_source/blitz/neural_networks_tutorial.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nNeural Networks\n===============\n\nNeural networks can be constructed using the ``torch.nn`` package.\n\nNow that you had a glimpse of ``autograd``, ``nn`` depends on\n``autograd`` to define models and differentiate them.\nAn ``nn.Module`` contains layers, and a method ``forward(input)`` that\nreturns the ``output``.\n\nFor example, look at this network that classifies digit images:\n\n.. figure:: /_static/img/mnist.png\n :alt: convnet\n\n convnet\n\nIt is a simple feed-forward network. It takes the input, feeds it\nthrough several layers one after the other, and then finally gives the\noutput.\n\nA typical training procedure for a neural network is as follows:\n\n- Define the neural network that has some learnable parameters (or\n weights)\n- Iterate over a dataset of inputs\n- Process input through the network\n- Compute the loss (how far is the output from being correct)\n- Propagate gradients back into the network’s parameters\n- Update the weights of the network, typically using a simple update rule:\n ``weight = weight - learning_rate * gradient``\n\nDefine the network\n------------------\n\nLet’s define this network:\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 5x5 square convolution\n # kernel\n self.conv1 = nn.Conv2d(1, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(16 * 5 * 5, 120) # 5*5 from image dimension \n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square, you can specify with a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\nnet = Net()\nprint(net)\n\n########################################################################\n# You just have to define the ``forward`` function, and the ``backward``\n# function (where gradients are computed) is automatically defined for you\n# using ``autograd``.\n# You can use any of the Tensor operations in the ``forward`` function.\n#\n# The learnable parameters of a model are returned by ``net.parameters()``\n\nparams = list(net.parameters())\nprint(len(params))\nprint(params[0].size()) # conv1's .weight\n\n########################################################################\n# Let's try a random 32x32 input.\n# Note: expected input size of this net (LeNet) is 32x32. To use this net on\n# the MNIST dataset, please resize the images from the dataset to 32x32.\n\ninput = torch.randn(1, 1, 32, 32)\nout = net(input)\nprint(out)\n\n########################################################################\n# Zero the gradient buffers of all parameters and backprops with random\n# gradients:\nnet.zero_grad()\nout.backward(torch.randn(1, 10))\n\n########################################################################\n# .. note::\n#\n# ``torch.nn`` only supports mini-batches. 
The entire ``torch.nn``\n# package only supports inputs that are a mini-batch of samples, and not\n# a single sample.\n#\n# For example, ``nn.Conv2d`` will take in a 4D Tensor of\n# ``nSamples x nChannels x Height x Width``.\n#\n# If you have a single sample, just use ``input.unsqueeze(0)`` to add\n# a fake batch dimension.\n#\n# Before proceeding further, let's recap all the classes you’ve seen so far.\n#\n# **Recap:**\n# - ``torch.Tensor`` - A *multi-dimensional array* with support for autograd\n# operations like ``backward()``. Also *holds the gradient* w.r.t. the\n# tensor.\n# - ``nn.Module`` - Neural network module. *Convenient way of\n# encapsulating parameters*, with helpers for moving them to GPU,\n# exporting, loading, etc.\n# - ``nn.Parameter`` - A kind of Tensor, that is *automatically\n# registered as a parameter when assigned as an attribute to a*\n# ``Module``.\n# - ``autograd.Function`` - Implements *forward and backward definitions\n# of an autograd operation*. Every ``Tensor`` operation creates at\n# least a single ``Function`` node that connects to functions that\n# created a ``Tensor`` and *encodes its history*.\n#\n# **At this point, we covered:**\n# - Defining a neural network\n# - Processing inputs and calling backward\n#\n# **Still Left:**\n# - Computing the loss\n# - Updating the weights of the network\n#\n# Loss Function\n# -------------\n# A loss function takes the (output, target) pair of inputs, and computes a\n# value that estimates how far away the output is from the target.\n#\n# There are several different\n# `loss functions <https://pytorch.org/docs/nn.html#loss-functions>`_ under the\n# nn package .\n# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error\n# between the input and the target.\n#\n# For example:\n\noutput = net(input)\ntarget = torch.randn(10) # a dummy target, for example\ntarget = target.view(1, -1) # make it the same shape as output\ncriterion = nn.MSELoss()\n\nloss = criterion(output, target)\nprint(loss)\n\n########################################################################\n# Now, if you follow ``loss`` in the backward direction, using its\n# ``.grad_fn`` attribute, you will see a graph of computations that looks\n# like this:\n#\n# ::\n#\n# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d\n# -> view -> linear -> relu -> linear -> relu -> linear\n# -> MSELoss\n# -> loss\n#\n# So, when we call ``loss.backward()``, the whole graph is differentiated\n# w.r.t. 
the neural net parameters, and all Tensors in the graph that have\n# ``requires_grad=True`` will have their ``.grad`` Tensor accumulated with the\n# gradient.\n#\n# For illustration, let us follow a few steps backward:\n\nprint(loss.grad_fn) # MSELoss\nprint(loss.grad_fn.next_functions[0][0]) # Linear\nprint(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU\n\n########################################################################\n# Backprop\n# --------\n# To backpropagate the error all we have to do is to ``loss.backward()``.\n# You need to clear the existing gradients though, else gradients will be\n# accumulated to existing gradients.\n#\n#\n# Now we shall call ``loss.backward()``, and have a look at conv1's bias\n# gradients before and after the backward.\n\n\nnet.zero_grad() # zeroes the gradient buffers of all parameters\n\nprint('conv1.bias.grad before backward')\nprint(net.conv1.bias.grad)\n\nloss.backward()\n\nprint('conv1.bias.grad after backward')\nprint(net.conv1.bias.grad)\n\n########################################################################\n# Now, we have seen how to use loss functions.\n#\n# **Read Later:**\n#\n# The neural network package contains various modules and loss functions\n# that form the building blocks of deep neural networks. A full list with\n# documentation is `here <https://pytorch.org/docs/nn>`_.\n#\n# **The only thing left to learn is:**\n#\n# - Updating the weights of the network\n#\n# Update the weights\n# ------------------\n# The simplest update rule used in practice is the Stochastic Gradient\n# Descent (SGD):\n#\n# ``weight = weight - learning_rate * gradient``\n#\n# We can implement this using simple Python code:\n#\n# .. code:: python\n#\n# learning_rate = 0.01\n# for f in net.parameters():\n# f.data.sub_(f.grad.data * learning_rate)\n#\n# However, as you use neural networks, you want to use various different\n# update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.\n# To enable this, we built a small package: ``torch.optim`` that\n# implements all these methods. Using it is very simple:\n\nimport torch.optim as optim\n\n# create your optimizer\noptimizer = optim.SGD(net.parameters(), lr=0.01)\n\n# in your training loop:\noptimizer.zero_grad() # zero the gradient buffers\noutput = net(input)\nloss = criterion(output, target)\nloss.backward()\noptimizer.step() # Does the update\n\n\n###############################################################\n# .. Note::\n#\n# Observe how gradient buffers had to be manually set to zero using\n# ``optimizer.zero_grad()``. This is because gradients are accumulated\n# as explained in the `Backprop`_ section.\n"
] |
[
[
"torch.nn.Linear",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.MSELoss"
]
] |
goldpink/NLP_multiclass_classification
|
[
"ecdd75a12487233e1439e9c8cb1a7b488fcdc852"
] |
[
"data_loader.py"
] |
[
"import os\nimport copy\nimport json\nimport logging\n\nimport torch\nfrom torch.utils.data import TensorDataset\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"\n A single training/test example for simple sequence classification.\n \"\"\"\n\n def __init__(self, guid, text_a, text_b, label):\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, attention_mask, token_type_ids, label):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\ndef convert_examples_to_features(\n args,\n examples,\n tokenizer,\n max_length,\n):\n processor = EthicsProcessor(args)\n label_list_len = len(processor.get_labels())\n\n def convert_to_one_hot_label(label):\n one_hot_label = [0] * label_list_len\n for l in label:\n one_hot_label[l] = 1\n return one_hot_label\n\n labels = [convert_to_one_hot_label(example.label) for example in examples]\n\n batch_encoding = tokenizer.batch_encode_plus(\n [(example.text_a, example.text_b) for example in examples], max_length=max_length, pad_to_max_length=True\n )\n\n features = []\n for i in range(len(examples)):\n inputs = {k: batch_encoding[k][i] for k in batch_encoding}\n\n feature = InputFeatures(**inputs, label=labels[i])\n features.append(feature)\n\n for i, example in enumerate(examples[:10]):\n logger.info(\"*** Example ***\")\n logger.info(\"guid: {}\".format(example.guid))\n logger.info(\"sentence: {}\".format(example.text_a))\n logger.info(\"tokens: {}\".format(\" \".join([str(x) for x in tokenizer.tokenize(example.text_a)])))\n logger.info(\"input_ids: {}\".format(\" \".join([str(x) for x in features[i].input_ids])))\n logger.info(\"attention_mask: {}\".format(\" \".join([str(x) for x in features[i].attention_mask])))\n logger.info(\"token_type_ids: {}\".format(\" \".join([str(x) for x in features[i].token_type_ids])))\n logger.info(\"label: {}\".format(\" \".join([str(x) for x in features[i].label])))\n\n return features\n\n\nclass EthicsProcessor(object):\n \"\"\"Processor for the Ethics data set \"\"\"\n\n def __init__(self, args):\n self.args = args\n\n def get_labels(self):\n labels = []\n with open(os.path.join(self.args.data_dir, self.args.label_file), \"r\", encoding=\"utf-8\") as f:\n \n for line in f:\n labels.append(line.rstrip())\n print(\"label 생김새 : \",labels)\n return labels\n\n @classmethod\n def _read_file(cls, input_file):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n return f.readlines()\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in 
enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n line = line.strip()\n items = line.split(\"\\t\")\n text_a = items[0]\n # print('items값:',items[1])\n # print('index: ', i)\n label = list(map(int, items[1].split(\",\")))\n if i % 5000 == 0:\n logger.info(line)\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n def get_examples(self, mode):\n \"\"\"\n Args:\n mode: train, dev, test\n \"\"\"\n file_to_read = None\n if mode == 'train':\n file_to_read = self.args.train_file\n elif mode == 'dev':\n file_to_read = self.args.dev_file\n elif mode == 'test':\n file_to_read = self.args.test_file\n\n logger.info(\"LOOKING AT {}\".format(os.path.join(self.args.data_dir, file_to_read)))\n return self._create_examples(self._read_file(os.path.join(self.args.data_dir,\n file_to_read)), mode)\n\n\ndef load_and_cache_examples(args, tokenizer, mode):\n processor = EthicsProcessor(args)\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(\n args.data_dir,\n \"cached_{}_{}_{}_{}\".format(\n str(args.task),\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_len),\n mode\n )\n )\n if os.path.exists(cached_features_file):\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n if mode == \"train\":\n examples = processor.get_examples(\"train\")\n elif mode == \"dev\":\n examples = processor.get_examples(\"dev\")\n elif mode == \"test\":\n examples = processor.get_examples(\"test\")\n else:\n raise ValueError(\"For mode, only train, dev, test is available\")\n features = convert_examples_to_features(\n args, examples, tokenizer, max_length=args.max_seq_len\n )\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n"
] |
[
[
"torch.utils.data.TensorDataset",
"torch.load",
"torch.save",
"torch.tensor"
]
] |
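A minimal sketch of how the four torch calls listed above for data_loader.py fit together: torch.save/torch.load cache the extracted features, torch.tensor converts them, and TensorDataset wraps the tensors. This is an illustrative example, not part of the dataset row; build_dataset, cache_path, and the toy feature dicts are hypothetical.

import os
import torch
from torch.utils.data import TensorDataset

def build_dataset(features, cache_path="cached_features.pt"):
    # Reuse cached features when present, otherwise cache them for the next run.
    if os.path.exists(cache_path):
        features = torch.load(cache_path)
    else:
        torch.save(features, cache_path)
    # Convert the per-example lists into tensors and wrap them in a TensorDataset.
    input_ids = torch.tensor([f["input_ids"] for f in features], dtype=torch.long)
    attention_mask = torch.tensor([f["attention_mask"] for f in features], dtype=torch.long)
    labels = torch.tensor([f["label"] for f in features], dtype=torch.float)
    return TensorDataset(input_ids, attention_mask, labels)

# Usage with two toy feature dicts standing in for InputFeatures objects:
dataset = build_dataset([
    {"input_ids": [101, 102, 0], "attention_mask": [1, 1, 0], "label": [1.0, 0.0]},
    {"input_ids": [101, 103, 0], "attention_mask": [1, 1, 0], "label": [0.0, 1.0]},
])
print(len(dataset))  # 2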
DanPorter/Dans_Diffaction
|
[
"74aea3d2b54d841271f22841f405a9a7c6fa1c81"
] |
[
"Dans_Diffraction/classes_orbitals.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n Orbitals class \"classes_orbitals.py\"\n Build an ionic compound or molecule from ions with defined atomic orbitals\n\n orbital = Orbital('4d4')\n atom = Atom('Co3+')\n compound = CompoundString('Ca2RuO4')\n\n Orbital:\n individual atomic orbital with properties, n,l and fill\n functions:\n add_electron\n remove_electron\n next_orbital\n last_orbital\n\n Atom:\n Single element composed of a list of orbitals (self.orbitals)\n functions:\n valence_orbitals\n assign_charge\n unoccupied2charge\n add_electron\n remove_electron\n transfer_electron\n\n Compound:\n Collection of atoms, with charge automatically balanced by assigning standard charges to common valences\n Atoms are collected in the list self.atom_list\n\nBy Dan Porter, PhD\nDiamond\n2020\n\nVersion 1.0.0\nLast updated: 12/07/20\n\nVersion History:\n09/05/20 0.1.0 Version History started.\n12/07/20 1.0.0 Program functional, performed some testing\n\n@author: DGPorter\n\"\"\"\n\nimport sys, os, re\nimport numpy as np\nfrom warnings import warn\n\nfrom . import functions_general as fg\nfrom . import functions_crystallography as fc\n\n__version__ = '1.0.0'\n\nOXIDATION_FILE = os.path.join(fc.datadir, 'Element_OxidationStates.txt')\n\n\ndef oxidation_states(element=None, element_z=None):\n \"\"\"\n Return available oxidation states for element\n :param element: str or None for all elements\n :param element_z: int, alternative to element symbol\n :return: list of float\n \"\"\"\n available_states = {}\n with open(OXIDATION_FILE) as f:\n for n in range(118):\n line = f.readline().split()\n z = int(line[0])\n symbol = line[2]\n if len(line) > 3:\n available_states[symbol] = [int(val) for val in line[3:]]\n else:\n available_states[symbol] = []\n if symbol == element:\n return available_states[symbol]\n elif z == element_z:\n return available_states[symbol]\n return available_states\n\n\ndef atomstring2list(compound_string):\n return [Atom(at) for at in fc.split_compound(compound_string)]\n\n\ndef orbital_list(n_electrons, first_orbital=None, first_n=1, first_l=0):\n \"\"\"\n Create a list of orbitals filling n electrons\n :param n_electrons:\n :param first_orbital: None or str:\n :param first_n:\n :param first_l:\n :return: [Orbital]\n \"\"\"\n if first_orbital is None:\n first_orbital = Orbital(n=first_n, l=first_l)\n else:\n first_orbital = Orbital(first_orbital)\n\n n_electrons = first_orbital.add_electron(n_electrons)\n orbitals = [first_orbital]\n while n_electrons > 0:\n orbitals += [orbitals[-1].next_orbital()]\n n_electrons = orbitals[-1].add_electron(n_electrons)\n return orbitals\n\n\nclass Orbital:\n \"\"\"\n Individual orbital\n orb_3d7 = Orbital('3d7')\n or\n orb_3d7 = Orbital(n=3,l=2,fill=7)\n\n print(orb_3d7)\n >> 3d7\n \"\"\"\n _level_max = [0, 1, 2, 3, 3, 2, 1]\n _state = ['s', 'p', 'd', 'f']\n _state_max = [2, 6, 10, 14]\n\n def __init__(self, standard=None, n=1, l=0, fill=0):\n if standard is not None:\n self.standard = standard\n self.n = int(standard[0])\n self.l_name = standard[1].lower()\n self.l = self._state.index(standard[1].lower())\n if len(standard) == 2:\n self.fill = 0.\n else:\n self.fill = float(standard[2:])\n self.max_fill = self._state_max[self.l]\n self.max_l = self._level_max[self.n - 1]\n else:\n self.n = int(n)\n self.l = int(l)\n self.fill = float(fill)\n self.l_name = self._state[l]\n self.max_fill = self._state_max[self.l]\n self.max_l = self._level_max[self.n - 1]\n self.standard = self.generate_string_standard()\n\n def __call__(self):\n return 
self.generate_string_standard()\n\n def __repr__(self):\n return 'Orbital: %s' % self.generate_string_standard()\n\n def __eq__(self, other):\n if isinstance(other, Orbital):\n if self.n == other.n and self.l == other.l:\n return True\n return False\n\n def generate_string_standard(self):\n return '%d%s%1.3g' % (self.n, self.l_name, self.fill)\n\n def generate_string_fdmnes(self):\n return '%d %d %4.2f' % (self.n, self.l, self.fill)\n\n def generate_string_latex(self):\n return '%d%s$^{%1.3g}$' % (self.n, self.l_name, self.fill)\n\n def add_electron(self, n=1.):\n new_fill = self.fill + n\n if new_fill > self.max_fill:\n new_fill = 1.0 * self.max_fill\n elif new_fill < 0:\n new_fill = 0.0\n unused = (self.fill + n) - new_fill\n self.fill = new_fill\n return unused\n\n def remove_electron(self, n=1.):\n new_fill = self.fill - n\n if new_fill > self.max_fill:\n new_fill = 1.0 * self.max_fill\n elif new_fill < 0:\n new_fill = 0.0\n unused = (self.fill - n) - new_fill\n self.fill = new_fill\n return unused\n\n def next_orbital(self, fill=0):\n if self.l + 1 > self.max_l:\n next_l = 0\n next_n = self.n + 1\n else:\n next_l = self.l + 1\n next_n = self.n + 0\n return Orbital(n=next_n, l=next_l, fill=fill)\n\n def last_orbital(self, fill=0):\n if self.l == 0:\n next_n = self.n - 1\n if next_n < 0: next_n = 0\n next_l = self._level_max[next_n]\n else:\n next_n = self.n + 0\n next_l = self.l - 1\n return Orbital(n=next_n, l=next_l, fill=fill)\n\n\nclass Atom:\n \"\"\"\n Atom - collection of orbitals\n Co = Atom('Co')\n print(Co)\n >> 27 Co 1s2 2s2 2p6 3s2 3p6 3d7 4s2\n Co = Atom('Co3+')\n print(Co)\n >> 27 Co3+ 1s2 2s2 2p6 3s2 3p6 3d5 4s1\n \"\"\"\n\n def __init__(self, element, charge=None, occupancy=None):\n element, str_occupancy, str_charge = fc.split_element_symbol(element)\n if charge is None:\n charge = str_charge\n if occupancy is None:\n occupancy = str_occupancy\n self.element_str = element\n self.element_symbol = element\n self.occupancy = occupancy\n self.z, self.name, config = fc.atom_properties(element, ['Z', 'Name', 'Config'])[0]\n self.orbitals = [Orbital(s) for s in config.split('.')]\n #self.oxidation_states = oxidation_states(element)\n self.charge = charge\n self.assign_charge(charge)\n\n def __repr__(self):\n return 'Atom: %s\\n' % self.generate_string_standard()\n\n def valence_orbitals(self):\n valence = []\n for orbital in self.orbitals[:-1]:\n if orbital.max_fill - orbital.fill > 0.01:\n valence += [orbital]\n valence += [self.orbitals[-1]]\n return valence\n\n def find_orbital(self, orbital):\n \"\"\"\n Returns requested orbital (useful for findind Orbital.last_orbital\n :param orbital: Orbital\n :return: Orbital\n \"\"\"\n idx = self.orbitals.index(orbital)\n return self.orbitals[idx]\n\n def find_orbital_str(self, orbital_str):\n \"\"\"\n Returns requested orbital\n :param orbital_str: str, e.g. 
'4d'\n :return: Orbital\n \"\"\"\n neworb = Orbital(orbital_str)\n return self.find_orbital(neworb)\n\n def assign_standard_charge(self):\n \"\"\"\n Add standard charge based on element group\n Group 1 metals always +1\n Group 2 metals always +2\n Oxygen usually -2 except in peroxides and F2O (see below)\n Hydrogen usually +1 except in metal hydrides where it is -1 (see below)\n Fluorine always -1\n Chlorine usually -1 except in compounds with O or F (see below)\n :return:\n \"\"\"\n if self.z in [3, 11, 19, 37, 55, 87]: # group 1\n self.assign_charge(1)\n elif self.z in [4, 12, 20, 38, 56, 88]: # group 2\n self.assign_charge(2)\n elif self.z in [8]: # O\n self.assign_charge(-2)\n elif self.z in [1]: # H\n self.assign_charge(1)\n elif self.z in [9]: # F\n self.assign_charge(-1)\n elif self.z in [17]: # Cl\n self.assign_charge(-1)\n\n def assign_charge(self, charge):\n \"\"\"\n charge = number of missing electrons per atom\n Add charge to atom, select new orbital configuration based on surrounding element't config.\n For non-integer charge, remaining component will be added or subtracted from final orbital\n Charge is averaged over the full occupancy of the sites\n :param charge: flaot\n :return:\n \"\"\"\n\n self.charge = charge\n intcharge = np.floor(charge)\n deccharge = charge % 1\n self.orbitals = [Orbital(s) for s in fc.orbital_configuration(self.element_symbol, intcharge)]\n if deccharge > 0:\n self.orbitals[-1].remove_electron(deccharge)\n\n def assign_occupancy(self, occupancy):\n self.occupancy = occupancy\n self.assign_charge(self.charge)\n\n def unoccupied2charge(self):\n \"\"\"\n Convert non-integer occupancy to charge\n :return:\n \"\"\"\n full_occ = np.ceil(self.occupancy)\n newcharge = self.charge * self.occupancy / full_occ # average the charge among multiple ions\n if abs(newcharge) > 0.01:\n self.occupancy = full_occ\n self.assign_charge(newcharge)\n\n def check_charge(self):\n return self.occupancy*(self.z - sum([orb.fill for orb in self.orbitals]))\n\n def add_electron(self, n_electron, add_to_state=None):\n \"\"\"\n Adds electrons to lowest unfilled or selectred orbital, adding additinal orbitals if full\n :param n_electron: float number of electrons to add\n :param add_to_state: None (lowest unfilled orbital) or str e.g. '4d'\n :return:\n \"\"\"\n if add_to_state is None:\n state = self.valence_orbitals()[0]\n else:\n state = self.find_orbital_str(add_to_state)\n\n unused = state.add_electron(n_electron)\n while unused > 0:\n next_orbital = state.next_orbital()\n try:\n state = self.find_orbital(next_orbital)\n except ValueError: # orbital not in list\n self.orbitals += [next_orbital]\n state = self.orbitals[-1]\n unused = state.remove_electron(unused)\n self.charge -= n_electron\n\n def remove_electron(self, n_electron, add_to_state=None):\n \"\"\"\n Removes electrons from highest or selectred orbital, removing orbitals if empty\n :param n_electron: float number of electrons to add\n :param add_to_state: None (lowest unfilled orbital) or str e.g. 
'4d'\n :return:\n \"\"\"\n if add_to_state is None:\n state = self.orbitals[-1]\n else:\n state = self.find_orbital_str(add_to_state)\n\n unused = state.remove_electron(n_electron)\n while unused > 0:\n prev_orbital = state.last_orbital()\n state = self.find_orbital(prev_orbital)\n unused = state.remove_electron(unused)\n self.charge += n_electron\n self.clean_orbitals()\n\n def transfer_electron(self, state_from, state_to, n_electron=1.):\n \"\"\"\n Transfer an electron from one state to another\n :param state_from: str e.g. '5s' transfer electrons from this orbital\n :param state_to: str e.g. '4d' transfer electrons to this orbital (or next available)\n :param n_electron: float number of electrons\n :return: None\n \"\"\"\n self.remove_electron(n_electron, state_from)\n self.add_electron(n_electron, state_to)\n\n def clean_orbitals(self):\n \"\"\"\n Removes highest orbitals with no electrons\n \"\"\"\n while abs(self.orbitals[-1].fill) < 0.01:\n self.orbitals = self.orbitals[:-1]\n\n def generate_string_standard(self):\n elestr = fc.element_charge_string(self.element_symbol, occupancy=self.occupancy, charge=self.charge)\n orbstr = ' '.join([orb.generate_string_standard() for orb in self.orbitals])\n return '%3d %10s %s' % (self.z, elestr, orbstr)\n\n def generate_string_fdmnes(self):\n elestr = fc.element_charge_string(self.element_symbol, occupancy=self.occupancy, charge=self.charge)\n empty_orbital = self.orbitals[-1].next_orbital()\n valence = self.valence_orbitals()\n orbitals = valence + [empty_orbital]\n fdm = ' '.join([orb.generate_string_fdmnes() for orb in orbitals])\n std = ' '.join([orb.generate_string_standard() for orb in orbitals])\n return '%d %s ! %s %s' % (len(orbitals), fdm, elestr, std)\n\n def generate_string_latex(self):\n orbstr = ' '.join([orb.generate_string_latex() for orb in self.orbitals])\n elestr = fc.element_charge_string(self.element_symbol, self.occupancy, self.charge, latex=True)\n return '%4s: %s' % (elestr, orbstr)\n\n\nclass Compound:\n \"\"\"\n Compound - collection of atoms\n LiCoO2 = Compound('Li0.7CoO2')\n print(LiCoO2)\n >> Li0.7CoO2:\n >> 3 Li0.7+ 1s2 2s0.3\n >> 27 Co3.3+ 1s2 2s2 2p6 3s2 3p6 3d5 4s0.7\n >> 8 O2- 1s2 2s2 2p6\n >> 8 O2- 1s2 2s2 2p6\n \"\"\"\n\n def __init__(self, atom_list):\n self.atom_list = atom_list\n self.balance_charge()\n self.compound_string = self.generate_charge_name()\n\n def __repr__(self):\n return 'Compound: %s:\\n%s' % (self.compound_string, self.generate_string_standard())\n\n def charge_list(self):\n return [at.occupancy*at.check_charge() for at in self.atom_list]\n\n def balance_charge(self):\n \"\"\"\n Add together known charges of elements (e.g. 
O, Na, Ca), use to estimate the charges of remaining ions\n :return:\n \"\"\"\n for at in self.atom_list:\n if abs(at.charge) < 0.1:\n at.assign_standard_charge()\n at.unoccupied2charge()\n\n charge_list = [at.check_charge() for at in self.atom_list]\n charge_sum = sum(charge_list)\n\n # Assign remaining charge\n tot_uncharged = np.sum([at.occupancy for at in self.atom_list if abs(at.charge) < 0.1])\n if tot_uncharged < 0.1: return\n for at in self.atom_list:\n if abs(at.charge) < 0.1:\n at.assign_charge(-charge_sum/tot_uncharged)\n at.unoccupied2charge()\n\n def check_charge(self):\n return sum([at.check_charge() for at in self.atom_list])\n\n def generate_charge_name(self):\n names = [fc.element_charge_string(at.element_symbol, at.occupancy, at.charge) for at in self.atom_list]\n cnames = []\n for name in names:\n if names.count(name) > 1:\n newname = '%1.3g(%s)' % (names.count(name), name)\n else:\n newname = name\n if newname not in cnames:\n cnames += [newname]\n return ' '.join(cnames)\n\n def generate_string_standard(self):\n return '\\n'.join([at.generate_string_standard() for at in self.atom_list])\n\n def generate_string_fdmnes(self):\n return '\\n'.join([at.generate_string_fdmnes() for at in self.atom_list])\n\n def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])\n\n\nclass CompoundString(Compound):\n \"\"\"\n Compound - collection of atoms\n LiCoO2 = Compound('Li0.7CoO2')\n print(LiCoO2)\n >> Li0.7CoO2:\n >> 3 Li0.7+ 1s2 2s0.3\n >> 27 Co3.3+ 1s2 2s2 2p6 3s2 3p6 3d5 4s0.7\n >> 8 O2- 1s2 2s2 2p6\n >> 8 O2- 1s2 2s2 2p6\n \"\"\"\n def __init__(self, compound_string):\n self.compound_string = compound_string\n atom_list = atomstring2list(compound_string)\n super().__init__(atom_list)\n\n\nclass CrystalOrbitals(Compound):\n \"\"\"\n Crystal Orbitals\n The compound is created as a class structure with each atom having a set of orbital electronic states\n \"\"\"\n def __init__(self, xtl):\n self.xtl = xtl\n\n atom_type = np.asarray(self.xtl.Structure.type)\n atom_occ = np.asarray(self.xtl.Structure.occupancy)\n\n # Count elements\n _, atom_count = np.unique(atom_type, return_counts=True)\n atlist = fc.count_atoms(atom_type, occupancy=atom_occ, divideby=np.min(atom_count))\n atomlist = [Atom(a) for a in atlist]\n super().__init__(atomlist)\n"
] |
[
[
"numpy.min",
"numpy.asarray",
"numpy.unique",
"numpy.ceil",
"numpy.floor"
]
] |
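An illustrative sketch (not part of the dataset row) of the numpy calls listed for classes_orbitals.py: np.asarray/np.unique/np.min count atom types per formula unit the way CrystalOrbitals does, and np.floor/np.ceil split fractional charge and occupancy as assign_charge and unoccupied2charge do. The site list and charge values are hypothetical.

import numpy as np

# Count element occurrences and normalise by the smallest count (one formula unit).
atom_type = np.asarray(['Ca', 'Ca', 'Ru', 'O', 'O', 'O', 'O'])
elements, counts = np.unique(atom_type, return_counts=True)
per_formula = counts / np.min(counts)
print(dict(zip(elements, per_formula)))   # {'Ca': 2.0, 'O': 4.0, 'Ru': 1.0}

# Split a non-integer charge and round an occupancy up to a full site.
charge = 3.3
int_charge, dec_charge = np.floor(charge), charge % 1
full_occupancy = np.ceil(0.7)
print(int_charge, round(dec_charge, 1), full_occupancy)  # 3.0 0.3 1.0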
RadwaSK/deep-text-recognition-benchmark
|
[
"cea2e093c4b4e3f0144f1eefa7cd899419375d92"
] |
[
"dataset.py"
] |
[
"import os\nimport sys\nimport re\nimport six\nimport math\nimport lmdb\nimport torch\n\nfrom natsort import natsorted\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data import Dataset, ConcatDataset, Subset\nfrom torch._utils import _accumulate\nimport torchvision.transforms as transforms\n\n\nclass Batch_Balanced_Dataset(object):\n\n def __init__(self, opt):\n \"\"\"\n Modulate the data ratio in the batch.\n For example, when select_data is \"MJ-ST\" and batch_ratio is \"0.5-0.5\",\n the 50% of the batch is filled with MJ and the other 50% of the batch is filled with ST.\n \"\"\"\n log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')\n dashed_line = '-' * 80\n print(dashed_line)\n log.write(dashed_line + '\\n')\n print(f'dataset_root: {opt.train_data}\\nopt.select_data: {opt.select_data}\\nopt.batch_ratio: {opt.batch_ratio}')\n log.write(f'dataset_root: {opt.train_data}\\nopt.select_data: {opt.select_data}\\nopt.batch_ratio: {opt.batch_ratio}\\n')\n assert len(opt.select_data) == len(opt.batch_ratio)\n\n _AlignCollate = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)\n self.data_loader_list = []\n self.dataloader_iter_list = []\n batch_size_list = []\n Total_batch_size = 0\n for selected_d, batch_ratio_d in zip(opt.select_data, opt.batch_ratio):\n _batch_size = max(round(opt.batch_size * float(batch_ratio_d)), 1)\n print(dashed_line)\n log.write(dashed_line + '\\n')\n _dataset, _dataset_log = hierarchical_dataset(root=opt.train_data, opt=opt, select_data=[selected_d])\n total_number_dataset = len(_dataset)\n log.write(_dataset_log)\n\n \"\"\"\n The total number of data can be modified with opt.total_data_usage_ratio.\n ex) opt.total_data_usage_ratio = 1 indicates 100% usage, and 0.2 indicates 20% usage.\n See 4.2 section in our paper.\n \"\"\"\n number_dataset = int(total_number_dataset * float(opt.total_data_usage_ratio))\n dataset_split = [number_dataset, total_number_dataset - number_dataset]\n indices = range(total_number_dataset)\n _dataset, _ = [Subset(_dataset, indices[offset - length:offset])\n for offset, length in zip(_accumulate(dataset_split), dataset_split)]\n selected_d_log = f'num total samples of {selected_d}: {total_number_dataset} x {opt.total_data_usage_ratio} (total_data_usage_ratio) = {len(_dataset)}\\n'\n selected_d_log += f'num samples of {selected_d} per batch: {opt.batch_size} x {float(batch_ratio_d)} (batch_ratio) = {_batch_size}'\n print(selected_d_log)\n log.write(selected_d_log + '\\n')\n batch_size_list.append(str(_batch_size))\n Total_batch_size += _batch_size\n\n _data_loader = torch.utils.data.DataLoader(\n _dataset, batch_size=_batch_size,\n shuffle=True,\n num_workers=int(opt.workers),\n collate_fn=_AlignCollate, pin_memory=True)\n self.data_loader_list.append(_data_loader)\n self.dataloader_iter_list.append(iter(_data_loader))\n\n Total_batch_size_log = f'{dashed_line}\\n'\n batch_size_sum = '+'.join(batch_size_list)\n Total_batch_size_log += f'Total_batch_size: {batch_size_sum} = {Total_batch_size}\\n'\n Total_batch_size_log += f'{dashed_line}'\n opt.batch_size = Total_batch_size\n\n print(Total_batch_size_log)\n log.write(Total_batch_size_log + '\\n')\n log.close()\n\n def get_batch(self):\n balanced_batch_images = []\n balanced_batch_texts = []\n\n for i, data_loader_iter in enumerate(self.dataloader_iter_list):\n try:\n image, text = data_loader_iter.next()\n balanced_batch_images.append(image)\n balanced_batch_texts += text\n except StopIteration:\n self.dataloader_iter_list[i] = 
iter(self.data_loader_list[i])\n image, text = self.dataloader_iter_list[i].next()\n balanced_batch_images.append(image)\n balanced_batch_texts += text\n except ValueError:\n pass\n\n balanced_batch_images = torch.cat(balanced_batch_images, 0)\n\n return balanced_batch_images, balanced_batch_texts\n\n\ndef hierarchical_dataset(root, opt, select_data='/'):\n \"\"\" select_data='/' contains all sub-directory of root directory \"\"\"\n dataset_list = []\n dataset_log = f'dataset_root: {root}\\t dataset: {select_data[0]}'\n print(dataset_log)\n dataset_log += '\\n'\n for dirpath, dirnames, filenames in os.walk(root):\n if not dirnames:\n # select_flag = False\n # for selected_d in select_data:\n # if selected_d in dirpath:\n # select_flag = True\n # break\n\n if True:\n dataset = LmdbDataset(dirpath, opt)\n sub_dataset_log = f'sub-directory:\\t/{os.path.relpath(dirpath, root)}\\t num samples: {len(dataset)}'\n print(sub_dataset_log)\n dataset_log += f'{sub_dataset_log}\\n'\n dataset_list.append(dataset)\n concatenated_dataset = ConcatDataset(dataset_list)\n\n return concatenated_dataset, dataset_log\n\n\nclass LmdbDataset(Dataset):\n\n def __init__(self, root, opt):\n\n self.root = root\n self.opt = opt\n self.env = lmdb.open(root, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)\n if not self.env:\n print('cannot create lmdb from %s' % (root))\n sys.exit(0)\n\n with self.env.begin(write=False) as txn:\n nSamples = int(txn.get('num-samples'.encode()))\n self.nSamples = nSamples\n\n if self.opt.data_filtering_off:\n # for fast check or benchmark evaluation with no filtering\n self.filtered_index_list = [index + 1 for index in range(self.nSamples)]\n else:\n \"\"\" Filtering part\n If you want to evaluate IC15-2077 & CUTE datasets which have special character labels,\n use --data_filtering_off and only evaluate on alphabets and digits.\n see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L190-L192\n\n And if you want to evaluate them with the model trained with --sensitive option,\n use --sensitive and --data_filtering_off,\n see https://github.com/clovaai/deep-text-recognition-benchmark/blob/dff844874dbe9e0ec8c5a52a7bd08c7f20afe704/test.py#L137-L144\n \"\"\"\n self.filtered_index_list = []\n for index in range(self.nSamples):\n index += 1 # lmdb starts with 1\n label_key = 'label-%09d'.encode() % index\n label = txn.get(label_key).decode('utf-8')\n\n if len(label) > self.opt.batch_max_length:\n # print(f'The length of the label is longer than max_length: length\n # {len(label)}, {label} in dataset {self.root}')\n continue\n\n # By default, images containing characters which are not in opt.character are filtered.\n # You can add [UNK] token to `opt.character` in utils.py instead of this filtering.\n out_of_char = f'[^{self.opt.character}]'\n if re.search(out_of_char, label.lower()):\n continue\n\n self.filtered_index_list.append(index)\n\n self.nSamples = len(self.filtered_index_list)\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n index = self.filtered_index_list[index]\n\n with self.env.begin(write=False) as txn:\n label_key = 'label-%09d'.encode() % index\n label = txn.get(label_key).decode('utf-8')\n img_key = 'image-%09d'.encode() % index\n imgbuf = txn.get(img_key)\n\n buf = six.BytesIO()\n buf.write(imgbuf)\n buf.seek(0)\n try:\n if self.opt.rgb:\n img = Image.open(buf).convert('RGB') # for color image\n else:\n 
img = Image.open(buf).convert('L')\n\n except IOError:\n print(f'Corrupted image for {index}')\n # make dummy image and dummy label for corrupted image.\n if self.opt.rgb:\n img = Image.new('RGB', (self.opt.imgW, self.opt.imgH))\n else:\n img = Image.new('L', (self.opt.imgW, self.opt.imgH))\n label = '[dummy_label]'\n\n if not self.opt.sensitive:\n label = label.lower()\n\n # We only train and evaluate on alphanumerics (or pre-defined character set in train.py)\n out_of_char = f'[^{self.opt.character}]'\n label = re.sub(out_of_char, '', label)\n\n return (img, label)\n\n\nclass RawDataset(Dataset):\n\n def __init__(self, root, opt):\n self.opt = opt\n self.image_path_list = []\n for dirpath, dirnames, filenames in os.walk(root):\n for name in filenames:\n _, ext = os.path.splitext(name)\n ext = ext.lower()\n if ext == '.jpg' or ext == '.jpeg' or ext == '.png':\n self.image_path_list.append(os.path.join(dirpath, name))\n\n self.image_path_list = natsorted(self.image_path_list)\n self.nSamples = len(self.image_path_list)\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n\n try:\n if self.opt.rgb:\n img = Image.open(self.image_path_list[index]).convert('RGB') # for color image\n else:\n img = Image.open(self.image_path_list[index]).convert('L')\n\n except IOError:\n print(f'Corrupted image for {index}')\n # make dummy image and dummy label for corrupted image.\n if self.opt.rgb:\n img = Image.new('RGB', (self.opt.imgW, self.opt.imgH))\n else:\n img = Image.new('L', (self.opt.imgW, self.opt.imgH))\n\n return (img, self.image_path_list[index])\n\n\nclass ResizeNormalize(object):\n\n def __init__(self, size, interpolation=Image.BICUBIC):\n self.size = size\n self.interpolation = interpolation\n self.toTensor = transforms.ToTensor()\n\n def __call__(self, img):\n img = img.resize(self.size, self.interpolation)\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n return img\n\n\nclass NormalizePAD(object):\n\n def __init__(self, max_size, PAD_type='right'):\n self.toTensor = transforms.ToTensor()\n self.max_size = max_size\n self.max_width_half = math.floor(max_size[2] / 2)\n self.PAD_type = PAD_type\n\n def __call__(self, img):\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n c, h, w = img.size()\n Pad_img = torch.FloatTensor(*self.max_size).fill_(0)\n Pad_img[:, :, :w] = img # right pad\n if self.max_size[2] != w: # add border Pad\n Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)\n\n return Pad_img\n\n\nclass AlignCollate(object):\n\n def __init__(self, imgH=32, imgW=100, keep_ratio_with_pad=False):\n self.imgH = imgH\n self.imgW = imgW\n self.keep_ratio_with_pad = keep_ratio_with_pad\n\n def __call__(self, batch):\n batch = filter(lambda x: x is not None, batch)\n images, labels = zip(*batch)\n\n if self.keep_ratio_with_pad: # same concept with 'Rosetta' paper\n resized_max_w = self.imgW\n input_channel = 3 if images[0].mode == 'RGB' else 1\n transform = NormalizePAD((input_channel, self.imgH, resized_max_w))\n\n resized_images = []\n for image in images:\n w, h = image.size\n ratio = w / float(h)\n if math.ceil(self.imgH * ratio) > self.imgW:\n resized_w = self.imgW\n else:\n resized_w = math.ceil(self.imgH * ratio)\n\n resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)\n resized_images.append(transform(resized_image))\n # resized_image.save('./image_test/%d_test.jpg' % w)\n\n image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)\n\n else:\n transform = ResizeNormalize((self.imgW, 
self.imgH))\n image_tensors = [transform(image) for image in images]\n image_tensors = torch.cat([t.unsqueeze(0) for t in image_tensors], 0)\n\n return image_tensors, labels\n\n\ndef tensor2im(image_tensor, imtype=np.uint8):\n image_numpy = image_tensor.cpu().float().numpy()\n if image_numpy.shape[0] == 1:\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n return image_numpy.astype(imtype)\n\n\ndef save_image(image_numpy, image_path):\n image_pil = Image.fromarray(image_numpy)\n image_pil.save(image_path)\n"
] |
[
[
"torch.cat",
"numpy.tile",
"torch.utils.data.ConcatDataset",
"torch.FloatTensor",
"torch._utils._accumulate",
"numpy.transpose",
"torch.utils.data.Subset"
]
] |
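An illustrative sketch (not part of the dataset row) of the dataset-splitting and batching pattern in dataset.py: a running-sum split into Subset objects (the file uses the private torch._utils._accumulate; itertools.accumulate is the standard-library equivalent used here), ConcatDataset to merge sub-directories, and torch.cat to stitch batches along dim 0. The toy dataset and usage ratio are hypothetical.

from itertools import accumulate  # same role as torch._utils._accumulate in the file above

import torch
from torch.utils.data import TensorDataset, Subset, ConcatDataset

full = TensorDataset(torch.arange(100).float().unsqueeze(1))
usage_ratio = 0.2                                   # stands in for opt.total_data_usage_ratio
n_used = int(len(full) * usage_ratio)
lengths = [n_used, len(full) - n_used]
indices = range(len(full))
used, unused = [Subset(full, indices[offset - length:offset])
                for offset, length in zip(accumulate(lengths), lengths)]
print(len(used), len(unused))                       # 20 80

# Sub-directory datasets are merged with ConcatDataset; batches from several loaders
# are concatenated along dim 0 with torch.cat.
merged = ConcatDataset([used, unused])
batch = torch.cat([full[i][0].unsqueeze(0) for i in range(4)], 0)
print(len(merged), tuple(batch.shape))              # 100 (4, 1)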
Synerise/recsys-challenge-2021
|
[
"f8e8005a1553c14bae16951d787d6864094f7a3b"
] |
[
"src/bert_finetuning.py"
] |
[
"import os\nimport argparse\nimport logging\nimport yaml\nfrom transformers import DistilBertForMaskedLM, DistilBertTokenizer, DataCollatorForLanguageModeling\nimport numpy as np\nfrom transformers import Trainer\nfrom transformers import TrainingArguments\nfrom read_dataset_utils import all_features_to_idx\nfrom datasets import Dataset\nimport random\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config-file\", type=str, default='config.yaml', help='Configuration file.')\n parser.add_argument(\"--max-N-parts\", type=int, default=1,\n help='Maximum number of training parts that are taken to bert finetuning.')\n parser.add_argument(\"--num-epochs\", type=int, default=1, help='Number of epochs.')\n parser.add_argument(\"--max_num_tokens\", type=int, default=100, help='Maximum number of tokens from tweet.')\n parser.add_argument(\"--batch-size\", type=int, default=16, help='Batch size.')\n parser.add_argument(\"--save-steps\", type=int, default=100_000, help=' Every N Steps model checkpoint is saved')\n return parser\n\n\ndef update_data(data, tokens, max_num_tokens):\n data['text'].append('')\n data['input_ids'].append(tokens)\n data['attention_mask'].append(np.array(np.array(tokens) != np.zeros(max_num_tokens), dtype='int'))\n\n\ndef finetune_bert(params):\n with open(params.config_file) as f:\n config = yaml.load(f)\n\n VALIDATION_PERCENTAGE = 0.02\n BERT_CONFIG_NAME = 'distilbert-base-multilingual-cased'\n tokenizer = DistilBertTokenizer.from_pretrained(BERT_CONFIG_NAME)\n model = DistilBertForMaskedLM.from_pretrained(BERT_CONFIG_NAME).cuda()\n\n\n filenames = [os.path.join(config['recsys_data'], f) for f in os.listdir(config['recsys_data']) if 'part' in f][:params.max_N_parts]\n log.info(f\"Used input filenames: {filenames}\")\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer,\n mlm_probability=0.15,\n pad_to_multiple_of=8\n )\n\n\n training_args = TrainingArguments(output_dir = \"./distilbert_checkpoints\",\n logging_dir='./logs',\n logging_steps=0,\n warmup_steps=1000,\n weight_decay=0.01,\n save_strategy = \"steps\",\n evaluation_strategy=\"steps\",\n eval_steps = params.save_steps,\n save_steps = params.save_steps,\n per_device_train_batch_size = params.batch_size,\n num_train_epochs = params.num_epochs,\n do_predict= True,\n dataloader_num_workers=8,\n )\n\n data_train = {\"text\": [], \"input_ids\": [], \"attention_mask\": []}\n data_valid = {\"text\": [], \"input_ids\": [], \"attention_mask\": []}\n\n for filename in filenames:\n log.info(f'Processing {filename}')\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n line = line.strip()\n features = line.split(\"\\x01\")\n tokens = features[all_features_to_idx['text_ tokens']].split('\\t')\n tokens = list(map(int, tokens))\n tokens = tokens[:params.max_num_tokens] + [0]*(params.max_num_tokens-len(tokens))\n\n if random.uniform(0, 1) > VALIDATION_PERCENTAGE:\n update_data(data_train, tokens, params.max_num_tokens)\n else:\n update_data(data_valid, tokens, params.max_num_tokens)\n\n dataset_train = Dataset.from_dict(data_train)\n dataset_valid = Dataset.from_dict(data_valid)\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=dataset_train,\n eval_dataset=dataset_valid,\n data_collator=data_collator\n )\n trainer.train()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n parser = get_parser()\n params = parser.parse_args()\n log.info(\"Finetuning bert model on 
tweets\")\n finetune_bert(params)\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
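An illustrative sketch (not part of the dataset row) of the two numpy calls listed for bert_finetuning.py: update_data() pads the token ids and derives the attention mask by comparing against np.zeros. The token ids and max_num_tokens value here are hypothetical.

import numpy as np

max_num_tokens = 8                      # stands in for --max_num_tokens
tokens = [101, 2023, 2003, 102]         # token ids for one tweet
tokens = tokens[:max_num_tokens] + [0] * (max_num_tokens - len(tokens))
# Mask is 1 wherever the padded id differs from zero, exactly as update_data() builds it.
attention_mask = np.array(np.array(tokens) != np.zeros(max_num_tokens), dtype='int')
print(attention_mask.tolist())          # [1, 1, 1, 1, 0, 0, 0, 0]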
vbob/CarSpeedDetection
|
[
"a8862317ce3868ca35e747c5450383c3fbea9bc3"
] |
[
"trackCarNeuralNework.py"
] |
[
"import cv2\nimport sys\nimport time\nimport tensorflow as tf\nimport numpy as np\n\nvideo = cv2.VideoCapture(\"v7.mp4\")\n\nif not video.isOpened():\n print(\"Could not open video\")\n sys.exit()\n\ntotalFPS = 0\ntotalFrames = 0\ntimer = cv2.getTickCount()\n\nbtn_down = False\n\ndef get_points(im):\n # Set up data to send to mouse handler\n data = {}\n data['im'] = im.copy()\n data['lines'] = []\n\n # Set the callback function for any mouse event\n cv2.imshow(\"Image\", im)\n cv2.setMouseCallback(\"Image\", mouse_handler, data)\n cv2.waitKey(0)\n\n # Convert array to np.array in shape n,2,2\n points = np.uint16(data['lines'])\n print(points)\n\n return points, data['im']\n\ndef mouse_handler(event, x, y, flags, data):\n global btn_down\n\n if event == cv2.EVENT_LBUTTONUP and btn_down:\n #if you release the button, finish the line\n btn_down = False\n data['lines'][0].append((x, y)) #append the seconf point\n cv2.circle(data['im'], (x, y), 3, (0, 0, 255),5)\n cv2.line(data['im'], data['lines'][0][0], data['lines'][0][1], (0,0,255), 2)\n cv2.imshow(\"Image\", data['im'])\n\n elif event == cv2.EVENT_MOUSEMOVE and btn_down:\n #thi is just for a ine visualization\n image = data['im'].copy()\n cv2.line(image, data['lines'][0][0], (x, y), (0,0,0), 1)\n cv2.imshow(\"Image\", image)\n\n elif event == cv2.EVENT_LBUTTONDOWN and len(data['lines']) < 2:\n btn_down = True\n data['lines'].insert(0,[(x, y)]) #prepend the point\n cv2.circle(data['im'], (x, y), 3, (0, 0, 255), 5, 16)\n cv2.imshow(\"Image\", data['im'])\n\n\n# Read the graph.\nwith tf.gfile.FastGFile('./resnet_v2_283776.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\nwith tf.Session() as sess:\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n start_time = time.time()\n while (video.isOpened()):\n k, img = video.read()\n\n if not k:\n break\n img = cv2.resize(img, (1280, 720))\n totalFPS += cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n totalFrames += 1\n timer = cv2.getTickCount()\n\n rows = img.shape[0]\n cols = img.shape[1]\n inp = cv2.resize(img, (300, 300))\n inp = inp[:, :, [2, 1, 0]] # BGR2RGB\n\n # Run the model\n with tf.device('/gpu:0'):\n out = sess.run([sess.graph.get_tensor_by_name('num_detections:0'),\n sess.graph.get_tensor_by_name('detection_scores:0'),\n sess.graph.get_tensor_by_name('detection_boxes:0'),\n sess.graph.get_tensor_by_name('detection_classes:0')],\n feed_dict={'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)})\n\n num_detections = int(out[0][0])\n\n for i in range(num_detections):\n classId = int(out[3][0][i])\n score = float(out[1][0][i])\n bbox = [float(v) for v in out[2][0][i]]\n if score > 0.9:\n x = bbox[1] * cols\n y = bbox[0] * rows\n right = bbox[3] * cols\n bottom = bbox[2] * rows\n cv2.rectangle(img, (int(x), int(y)), (int(right),\n int(bottom)), (125, 255, 51), thickness=2)\n pts, final_image = get_points(img)\n cv2.imshow('TensorFlow MobileNet-SSD', img)\n k = cv2.waitKey() & 0xff\n if k == 27: \n break\n\nprint(\"Average FPS: \" + str(int(totalFPS/totalFrames)) + \" FPS\")\nprint(\"Total Frames: \" + str(totalFrames))\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nprint(\"--- %.3f frametime ---\" % ((time.time() - start_time)/totalFrames))"
] |
[
[
"tensorflow.device",
"tensorflow.import_graph_def",
"numpy.uint16",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.gfile.FastGFile"
]
] |
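An illustrative sketch (not part of the dataset row) of the TensorFlow 1.x graph-loading calls listed for trackCarNeuralNework.py. It assumes a TensorFlow 1.x install and a real frozen .pb file; the file name below is a placeholder, while the tensor names match those used in the script.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x APIs (tf.Session, tf.GraphDef, tf.gfile)

# Load a frozen detection graph from disk (placeholder path).
with tf.gfile.FastGFile('frozen_detector.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Session() as sess:
    tf.import_graph_def(graph_def, name='')
    frame = np.zeros((1, 300, 300, 3), dtype=np.uint8)   # stand-in for a resized video frame
    with tf.device('/gpu:0'):
        num_detections = sess.run('num_detections:0',
                                  feed_dict={'image_tensor:0': frame})

# np.uint16 appears in the script only to cast clicked line endpoints to pixel coordinates:
points = np.uint16([[(10, 20), (200, 220)]])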
Investimentos-do-Vitor/Beta-ibov-calculator
|
[
"ab122310b7915d09f338bf1f689d6c05e3351759"
] |
[
"Analise.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 1 12:43:07 2020\nfor Python 3.7\n\n@author: vitor\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n#import pandas_datareader as wb\nimport matplotlib.pyplot as plt\nimport math\n\n\nticker = 'TRPL4 Historical Data.csv'\ntickername = 'TRPL4'\n\n#Lê os dados do CSV e armazena na variável\nibov = pd.read_csv('Bovespa Historical Data.csv')\n\n#Transforma a coluna Price em Números (em vez de Strings)\ndef corrigir_virgulas(price):\n price = price.replace(',', '')\n #Remove as virgulas\n return price\n\nibov['Price'] = ibov['Price'].apply(corrigir_virgulas)\nibov['Price'] = ibov['Price'].astype(float)\n\n\"\"\"\nibov = wb.get_data_yahoo('^BVSP', \n start = '2015-11-01', \n end = '2020-10-29', \n interval = 'mo')\n\"\"\"\n\nativo = pd.read_csv(ticker)\n#ativo = wb.get_data_yahoo('TRPL4.SA', interval = 'mo')\n\n\n#Elimina possíveis valores em branco\nibov = ibov.dropna()\nativo = ativo.dropna()\n\n#Cria uma coluna com o retorno em relação ao mês anterior\nibov['Retorno']=((ibov['Price']/ibov['Price'].shift(1))-1)\nativo['Retorno']=((ativo['Price']/ativo['Price'].shift(1))-1)\n\n#Calcula a média dos retornos\nmean_ibov = np.mean(ibov['Retorno'])\nmean_ativo = np.mean(ativo['Retorno'])\n\nprint(f'A média do IBOV é de {round(mean_ibov*100,2)}% ao mês')\nprint(f'A média do Ativo {tickername} é de {round(mean_ativo*100,2)}% ao mês')\n\n#CONTA QUANTOS MESES FORAM RECEBIDOS NA VARIAVEL IBOV\nmeses = len(ibov['Price'])\n\n\n#Calcula a Variancia, isto é, os desvios em relação à média\nibov['var_1'] = ((ibov['Retorno']-mean_ibov) ** 2)\nsum_var1 = np.sum(ibov.var_1)\nvariancia_ibov = sum_var1/(meses-1)\n#variancia_ibov = round(variancia_ibov, 2)\n\nativo['var_1'] = ((ativo['Retorno']-mean_ativo) ** 2)\nsum_var2 = np.sum(ativo.var_1)\nvariancia_ativo = sum_var2/(meses-1)\n#variancia_ativo = (variancia_ativo)\n\nprint(f'\\n'\n f'A variância do IBOV para o período é de {round(variancia_ibov*100,2)}% \\n'\n f'A variância do ativo {tickername} para o período é de {round(variancia_ativo*100,2)}%')\n\n#Calcula o Desvio-Padrão\ndesvio_ibov = round((math.sqrt(variancia_ibov)*100),2)\ndesvio_ativo = round((math.sqrt(variancia_ativo)*100),2)\n\n#calcula as probabilidades\n\ndef printdesvio(desvio_ibov, desvio_ativo):\n print(f'\\n'\n f'O desvio-padrão do IBOV é de {round(desvio_ibov, 2)}% \\n'\n f'isso significa que: \\n'\n f'Os retornos do IBOV vão estar entre:\\n'\n f'{round((mean_ibov*100)-desvio_ibov, 2)}% e {round((mean_ibov*100)+desvio_ibov,2)}% com 68% de confiança \\n'\n f'{round((mean_ibov*100)-(desvio_ibov*1.96), 2)}% e {round((mean_ibov*100)+(desvio_ibov*1.96),2)}% com 95% de confiança \\n'\n f'{round((mean_ibov*100)-(desvio_ibov*3), 2)}% e {round((mean_ibov*100)+(desvio_ibov*3),2)}% com 99% de confiança')\n print(f'\\n'\n f'O desvio-padrão do ativo {tickername} é de {round(desvio_ativo,2)}% \\n'\n f'isso significa que: \\n'\n f'Os retornos do {tickername} vão estar entre:\\n'\n f'{round((mean_ativo*100)-desvio_ativo, 2)}% e {round((mean_ativo*100)+desvio_ativo,2)}% com 68% de confiança \\n'\n f'{round((mean_ativo*100)-(desvio_ativo*1.96), 2)}% e {round((mean_ativo*100)+(desvio_ativo*1.96),2)}% com 95% de confiança \\n'\n f'{round((mean_ativo*100)-(desvio_ativo*3), 2)}% e {round((mean_ativo*100)+(desvio_ativo*3),2)}% com 99% de confiança')\n\nprintdesvio(desvio_ibov, desvio_ativo)\n\n\n#Calcula a covariância entre o ativo e o IBOV\nco_var = pd.DataFrame()\nco_var['Ativo'] = (ativo['Retorno']-mean_ativo).values\nco_var['Ibov'] = 
(ibov['Retorno']-mean_ibov).values\n#Multiplica ativo por ibov\nco_var['multi'] = co_var['Ativo'] * co_var['Ibov']\n#Soma todas as multiplicações e divide por N-1\ncovariancia = (np.sum(co_var.multi))/(meses-1)\n\n#CALCULA O BETA DO ATIVO\nbeta = covariancia / variancia_ibov\nprint(f'\\n'\n f'O BETA do ativo é de {round(beta,2)}')\n\n\n#Função de gerar histograma\ndef histograma(dados):\n title = 'Retornos da ação ' + tickername\n plt.title(title)\n plt.ylabel('Quantidade de vezes')\n plt.xlabel('Retorno')\n plt.hist(dados)\n\nhistograma(ativo['Retorno']*100)\n\n\n \n\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] |
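An illustrative sketch (not part of the dataset row) of the beta calculation in Analise.py: variance of the index returns, covariance between asset and index, and their ratio, plus the same histogram calls. Synthetic returns replace the CSV inputs, so the numbers are hypothetical.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
ibov_ret = pd.Series(rng.normal(0.01, 0.05, 60))           # synthetic monthly index returns
asset_ret = 1.2 * ibov_ret + rng.normal(0.0, 0.02, 60)     # asset built with beta ~ 1.2

n = len(ibov_ret)
var_ibov = np.sum((ibov_ret - np.mean(ibov_ret)) ** 2) / (n - 1)
cov = np.sum((asset_ret - np.mean(asset_ret)) * (ibov_ret - np.mean(ibov_ret))) / (n - 1)
beta = cov / var_ibov
print(round(beta, 2))                                      # close to 1.2

plt.hist(asset_ret * 100)
plt.title('Asset returns')
plt.xlabel('Return (%)')
plt.ylabel('Count')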
rosario-riccio/MLWeatherLabeling
|
[
"8dd0b43ac877647d70d6c96c79c9a7f660e88982"
] |
[
"mlMain1.py"
] |
[
"#mlMain1.py\nimport numpy as np\nimport csv\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout\nfrom keras.optimizers import SGD\nfrom keras.utils import to_categorical\nfrom keras.models import load_model\nimport pandas as pd\nimport tensorflow as tf\nimport glob\nimport os\nimport sys\nfrom pathlib import Path\nfrom sklearn.model_selection import train_test_split\nfrom dbMongo import *\nimport matplotlib.pyplot as plt\nfrom keras.optimizers import Adam\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.over_sampling import RandomOverSampler\nfrom urllib.parse import urlparse\n\n#path where there are csv files for training/test\nsrc = \"/home/rosario/Scrivania/MLcsvTraining\"\n#path where there are csv files for evaluate/prediction\nsrc1 = \"/home/rosario/Scrivania/MLcsvEvaluation\"\n#path where there are csv files for prediction\nsrc2 = \"/home/rosario/Scrivania/MLcsvPrediction\"\n#path where there will be csv files of prediction\nsrc3 = \"/home/rosario/Scrivania/out\"\n#if flag is true the h5 file exists, otherwise it'll be created\nflag1 = True\n#if flag is true,the evaluation, otherwise prediction\nflag2 = False\n\ndef main():\n global flag1\n global flag2\n class_weight = {}\n if not flag1:\n print(\"h5 file doesn't exist\")\n try:\n countLabel = managedb.countLabelDB()\n except Exception as e:\n print(\"Error DB\",str(e))\n sys.exit(1)\n class_weight.update({0:1.})\n for i in range(1,countLabel):\n class_weight.update({i:80.})\n print(class_weight)\n\n model = Sequential()\n\n model.add(Dense(100, input_dim=10, activation=\"relu\",kernel_initializer = 'uniform'))\n model.add(Dropout(rate = 0.1))\n\n # Adding the second hidden layer\n model.add(Dense(activation='relu',units=100,kernel_initializer='uniform'))\n # Adding the output layer\n model.add(Dropout(rate = 0.1))\n # # Adding the third hidden layer\n model.add(Dense(activation='relu', units=100, kernel_initializer='uniform'))\n # # Adding the output layer\n model.add(Dropout(rate=0.1))\n\n model.add(Dense(countLabel, activation=\"softmax\"))\n\n model.compile(optimizer=Adam(lr=0.001),loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\n model.summary()\n try:\n all_files = glob.glob(src+\"/*.csv\")\n print(all_files)\n df_from_each_file = (pd.read_csv(f,usecols=[\"T2C\",\"SLP\", \"WSPD10\",\"WDIR10\",\"RH2\",\"UH\",\"TC500\",\"GPH500\",\"CLDFRA_TOTAL\",\"DELTA_RAIN\",\"type\"]) for f in all_files)\n concatenated_df = pd.concat(df_from_each_file, ignore_index=True)\n concatenated_df = concatenated_df.sample(frac=0.5)\n dataset = concatenated_df.values\n except Exception as e:\n print(\"There are no csv file\",str(e))\n sys.exit(1)\n\n y = dataset[:, 10]\n X = dataset[:, 0:10]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n y_train_cat = to_categorical(y_train,num_classes=countLabel)\n y_test_cat = to_categorical(y_test,num_classes=countLabel)\n print(\"-------------------------------------------------------------\\n\")\n #fit\n history = model.fit(X_train,y_train_cat,validation_data=(X_test,y_test_cat),epochs=30,batch_size=256,shuffle=True,verbose=1,class_weight=class_weight)\n print(\"-------------------------------------------------------------\\n\")\n # Plot training & validation accuracy values\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper 
left')\n plt.show()\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n model.save('weatherML1.h5')\n else:\n print(\"h5 file exists\")\n try:\n countLabel = managedb.countLabelDB()\n except Exception as e:\n print(\"Error DB\", str(e))\n sys.exit(1)\n try:\n model = load_model('weatherML1.h5')\n except Exception as e:\n print(\"There are no h5 file\", str(e))\n sys.exit(1)\n if flag2 == True:\n # evaluation\n try:\n all_files = glob.glob(src1 + \"/*.csv\")\n print(all_files)\n df_from_each_file = (pd.read_csv(f,usecols=[\"T2C\",\"SLP\", \"WSPD10\",\"WDIR10\",\"RH2\",\"UH\",\"TC500\",\"GPH500\",\"CLDFRA_TOTAL\",\"DELTA_RAIN\",\"type\"]) for f in all_files)\n concatenated_df = pd.concat(df_from_each_file, ignore_index=True)\n dataset = concatenated_df.values\n except Exception as e:\n print(\"There are no csv file\")\n sys.exit(1)\n y1 = dataset[:, 10]\n X1 = dataset[:, 0:10]\n y_cat1 = to_categorical(y1, num_classes=countLabel)\n results = model.evaluate(X1, y_cat1, verbose=1, batch_size=128)\n print(\" \")\n print('test loss, test acc:', results)\n else:\n # prediction\n try:\n all_files = glob.glob(src2 + \"/*.csv\")\n print(all_files)\n for file in all_files:\n print(file)\n pdfiles = pd.read_csv(file,usecols=[\"LONGITUDE\", \"LATITUDE\", \"T2C\", \"SLP\", \"WSPD10\", \"WDIR10\", \"RH2\",\"UH\", \"TC500\", \"GPH500\", \"CLDFRA_TOTAL\", \"DELTA_RAIN\", \"type\"])\n dataset = pdfiles.values\n lng = dataset[:, 0]\n lat = dataset[:, 1]\n y1 = dataset[:, 12]\n X1 = dataset[:, 2:12]\n y_cat1 = to_categorical(y1, num_classes=countLabel)\n pre, ext = os.path.splitext(os.path.basename(urlparse(file).path))\n csvfile = \"out1_\" + pre + \".csv\"\n print(csvfile)\n prediction = model.predict_classes(X1, verbose=1)\n with open(src3 + \"/\" + csvfile, \"w\") as f:\n fieldnames = [\"lon\", \"lat\", \"class_id\"]\n writer1 = csv.DictWriter(f, extrasaction='ignore', fieldnames=fieldnames)\n writer1.writeheader()\n for i in range(len(prediction)):\n print(prediction[i], y_cat1[i])\n if prediction[i] != 0:\n writer1.writerow({\"lon\": lng[i], \"lat\": lat[i], \"class_id\": prediction[i]})\n except Exception as e:\n print(str(e))\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.concat",
"pandas.read_csv",
"matplotlib.pyplot.title",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
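An illustrative sketch (not part of the dataset row) of the pandas/sklearn/matplotlib calls listed for mlMain1.py: several CSV parts are read with read_csv, concatenated, split with train_test_split, and accuracy curves are plotted the way the script does after model.fit. The tiny CSVs, column names, and history values below are stand-ins.

import glob
import os
import tempfile

import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# Write two tiny CSV parts so the read/concat/split pipeline actually runs.
tmp_dir = tempfile.mkdtemp()
for i in range(2):
    pd.DataFrame({'T2C': [10.0, 12.0, 9.5],
                  'DELTA_RAIN': [0.0, 1.2, 0.3],
                  'type': [0, 1, 0]}).to_csv(os.path.join(tmp_dir, f'part_{i}.csv'), index=False)

frames = (pd.read_csv(f) for f in glob.glob(os.path.join(tmp_dir, '*.csv')))
dataset = pd.concat(frames, ignore_index=True).values
X, y = dataset[:, :-1], dataset[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Accuracy curves, mirroring the plotting block after model.fit (values are stand-ins).
history = {'acc': [0.60, 0.72, 0.81], 'val_acc': [0.58, 0.66, 0.70]}
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()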
olivier2311/Quantropy
|
[
"5d678a802adb4720c17e6ae4c313b1e37db8f313"
] |
[
"quantitative_analysis/time_series_analysis/time_series_behaviors.py"
] |
[
"import numpy as np\nfrom numpy.random import randn\nimport pandas as pd\nimport statsmodels.tsa.stattools as ts\n\n\nclass TimeSeriesBehavior:\n \"\"\"\n This class provides functions to determine the behavior of a time series,\n specifically whether it is\n - A random walk. It has no memory. Examples: Brownian motion\n - A mean reverting series. Examples: Ornstein-Uhlenbeck process\n - A trending series\n \"\"\"\n\n def __init__(self, time_series: pd.Series):\n self.time_series = time_series\n\n def augmented_dickey_fuller_test(self, max_lag: int = 1, pvalue_thresh: float = 0.05):\n \"\"\"\n\n Null Hypothesis: The time series is not mean reverting\n\n :param max_lag:\n :param p_value_thresh:\n :return: True if time series is mean reverting, False if not\n \"\"\"\n adfstat, pvalue, usedlag, nobs, critvalues = ts.adfuller(self.time_series, maxlag=max_lag)\n return pvalue < pvalue_thresh \\\n and (adfstat < critvalues[0] and adfstat < critvalues[1] and adfstat < critvalues[2])\n\n def johansen_test(self, p_value_thresh: float = 0.95):\n pass\n\n def hurst_exponent(self, lag_range: tuple = (2, 100)):\n \"\"\"\n Calculates the stationarity of the series. A time series (or stochastic process) is defined to be strongly\n stationary if its joint probability distribution is invariant under translations in time or space.\n In particular, and of key importance for traders, the mean and variance of the process do not change over time\n or space and they each do not follow a trend.\n\n A critical feature of stationary price series is that the prices within the series diffuse from their initial\n value at a rate slower than that of a Geometric Brownian Motion. By measuring the rate of this diffusive\n behaviour we can identify the nature of the time series.\n\n The Hurst Exponent tells you whether a series is\n * Geometric random walk (H=0.5)\n * Mean-reverting series (H<0.5)\n * Trending Series (H>0.5)\n\n Category: Measure of Autocorrelation\n\n :return:\n \"\"\"\n\n lags = range(lag_range[0], lag_range[1]) # Create the range of lag values\n # Calculate the array of the variances of the lagged differences\n tau = [np.sqrt(np.std(np.subtract(self.time_series[lag:], self.time_series[:-lag]))) for lag in lags]\n poly = np.polyfit(np.log(lags), np.log(tau), 1) # Use a linear fit to estimate the Hurst Exponent\n return poly[0] * 2.0 # Return the Hurst exponent from the polyfit output\n\n\nif __name__ == '__main__':\n geometric_brownian_motion = np.log(np.cumsum(randn(100000)) + 1000)\n mean_reverting_series = np.log(randn(100000) + 1000)\n trending_series = np.log(np.cumsum(randn(100000) + 1) + 1000)\n"
] |
[
[
"numpy.subtract",
"numpy.log",
"numpy.random.randn"
]
] |
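An illustrative sketch (not part of the dataset row) of the Hurst-exponent estimate in time_series_behaviors.py, applied to a synthetic geometric random walk built with numpy.random.randn; the expected exponent is close to 0.5. The series length and lag range are hypothetical.

import numpy as np
from numpy.random import randn

series = np.log(np.cumsum(randn(10000)) + 1000)    # synthetic geometric random walk

lags = range(2, 100)
# Standard deviation of lagged differences grows ~ lag**H; fit the log-log slope.
tau = [np.sqrt(np.std(np.subtract(series[lag:], series[:-lag]))) for lag in lags]
hurst = 2.0 * np.polyfit(np.log(list(lags)), np.log(tau), 1)[0]
print(round(hurst, 2))                             # around 0.5 for a random walk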
haoyuying/PaddleSeg
|
[
"6607d88df39500330a7b6ed160b4626d9f38df66"
] |
[
"contrib/EISeg/eiseg/scripts/annotations_conversion/coco_lvis.py"
] |
[
"import cv2\nimport pickle\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nfrom data.datasets import LvisDataset, CocoDataset\nfrom util.misc import get_bbox_from_mask, get_bbox_iou\nfrom scripts.annotations_conversion.common import get_masks_hierarchy, get_iou, encode_masks\n\n\ndef create_annotations(lvis_path: Path, coco_path: Path, dataset_split='train', min_object_area=80):\n lvis_dataset = LvisDataset(lvis_path, split=dataset_split)\n lvis_samples = lvis_dataset.dataset_samples\n lvis_annotations = lvis_dataset.annotations\n\n coco_dataset = CocoDataset(coco_path, split=dataset_split + '2017')\n\n coco_lvis_mapping = []\n lvis_images = {x['coco_url'].split('/')[-1].split('.')[0]: lvis_indx\n for lvis_indx, x in enumerate(lvis_samples)}\n for indx, coco_sample in enumerate(coco_dataset.dataset_samples):\n lvis_indx = lvis_images.get(coco_sample['file_name'].split('.')[0], None)\n if lvis_indx is not None:\n coco_lvis_mapping.append((indx, lvis_indx))\n\n output_masks_path = lvis_path / dataset_split / 'masks'\n output_masks_path.mkdir(parents=True, exist_ok=True)\n\n hlvis_annotation = dict()\n for coco_indx, lvis_indx in tqdm(coco_lvis_mapping):\n coco_sample = get_coco_sample(coco_dataset, coco_indx)\n\n lvis_info = lvis_samples[lvis_indx]\n lvis_annotation = lvis_annotations[lvis_info['id']]\n empty_mask = np.zeros((lvis_info['height'], lvis_info['width']))\n image_name = lvis_info['coco_url'].split('/')[-1].split('.')[0]\n\n lvis_masks = []\n lvis_bboxes = []\n for obj_annotation in lvis_annotation:\n obj_mask = lvis_dataset.get_mask_from_polygon(obj_annotation, empty_mask)\n obj_mask = obj_mask == 1\n if obj_mask.sum() >= min_object_area:\n lvis_masks.append(obj_mask)\n lvis_bboxes.append(get_bbox_from_mask(obj_mask))\n\n coco_bboxes = []\n coco_masks = []\n for inst_id in coco_sample['instances_info'].keys():\n obj_mask = coco_sample['instances_mask'] == inst_id\n if obj_mask.sum() >= min_object_area:\n coco_masks.append(obj_mask)\n coco_bboxes.append(get_bbox_from_mask(obj_mask))\n\n masks = []\n for coco_j, coco_bbox in enumerate(coco_bboxes):\n for lvis_i, lvis_bbox in enumerate(lvis_bboxes):\n if get_bbox_iou(lvis_bbox, coco_bbox) > 0.70 and \\\n get_iou(lvis_masks[lvis_i], coco_masks[coco_j]) > 0.70:\n break\n else:\n masks.append(coco_masks[coco_j])\n\n for ti, (lvis_mask, lvis_bbox) in enumerate(zip(lvis_masks, lvis_bboxes)):\n for tj_mask, tj_bbox in zip(lvis_masks[ti + 1:], lvis_bboxes[ti + 1:]):\n bbox_iou = get_bbox_iou(lvis_bbox, tj_bbox)\n if bbox_iou > 0.7 and get_iou(lvis_mask, tj_mask) > 0.85:\n break\n else:\n masks.append(lvis_mask)\n\n masks_meta = [(get_bbox_from_mask(x), x.sum()) for x in masks]\n if not masks:\n continue\n\n hierarchy = get_masks_hierarchy(masks, masks_meta)\n\n for obj_id, obj_info in list(hierarchy.items()):\n if obj_info['parent'] is None and len(obj_info['children']) == 0:\n hierarchy[obj_id] = None\n\n merged_mask = np.max(masks, axis=0)\n num_instance_masks = len(masks)\n for obj_id in coco_sample['semantic_info'].keys():\n obj_mask = coco_sample['semantic_map'] == obj_id\n obj_mask = np.logical_and(obj_mask, np.logical_not(merged_mask))\n if obj_mask.sum() > 500:\n masks.append(obj_mask)\n\n hlvis_annotation[image_name] = {\n 'num_instance_masks': num_instance_masks,\n 'hierarchy': hierarchy\n }\n\n with open(output_masks_path / f'{image_name}.pickle', 'wb') as f:\n pickle.dump(encode_masks(masks), f)\n\n with open(lvis_path / dataset_split / 'hannotation.pickle', 'wb') as f:\n pickle.dump(hlvis_annotation, f, 
protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef get_coco_sample(dataset, index):\n dataset_sample = dataset.dataset_samples[index]\n\n image_path = dataset.images_path / dataset.get_image_name(dataset_sample['file_name'])\n label_path = dataset.labels_path / dataset_sample['file_name']\n\n image = cv2.imread(str(image_path))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n label = cv2.imread(str(label_path), cv2.IMREAD_UNCHANGED).astype(np.int32)\n label = 256 * 256 * label[:, :, 0] + 256 * label[:, :, 1] + label[:, :, 2]\n\n instance_map = np.full_like(label, 0)\n semantic_map = np.full_like(label, 0)\n semantic_info = dict()\n instances_info = dict()\n for segment in dataset_sample['segments_info']:\n class_id = segment['category_id']\n obj_id = segment['id']\n if class_id not in dataset._things_labels_set:\n semantic_map[label == obj_id] = obj_id\n semantic_info[obj_id] = {'ignore': False}\n continue\n\n instance_map[label == obj_id] = obj_id\n ignore = segment['iscrowd'] == 1\n instances_info[obj_id] = {\n 'ignore': ignore\n }\n\n sample = {\n 'image': image,\n 'instances_mask': instance_map,\n 'instances_info': instances_info,\n 'semantic_map': semantic_map,\n 'semantic_info': semantic_info\n }\n\n return sample"
] |
[
[
"numpy.full_like",
"numpy.max",
"numpy.logical_not",
"numpy.zeros"
]
] |
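An illustrative sketch (not part of the dataset row) of the numpy mask operations listed for coco_lvis.py: instance masks are unioned with np.max, a semantic map is built with np.full_like, and np.logical_not keeps only pixels not already covered by an instance. The tiny masks and label id are hypothetical.

import numpy as np

h, w = 4, 4
mask_a = np.zeros((h, w), dtype=bool)
mask_b = np.zeros((h, w), dtype=bool)
mask_a[:2, :2] = True
mask_b[2:, 2:] = True

merged_mask = np.max([mask_a, mask_b], axis=0)          # union of the instance masks

label = np.zeros((h, w), dtype=np.int32)
semantic_map = np.full_like(label, 7)                   # one "stuff" label everywhere
uncovered = np.logical_and(semantic_map == 7, np.logical_not(merged_mask))
print(int(uncovered.sum()))                             # 8 pixels left for the stuff mask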
DmitriyValetov/ai-testing-platform
|
[
"6ddb453db247b571082202d247672c674e20b13d"
] |
[
"tests/test_main.py"
] |
[
"import os\nimport io\nimport time\nimport json\nimport socket\nimport base64\nimport psutil\nimport shutil\nimport sqlite3\nimport tempfile\nimport requests\nimport subprocess\nimport numpy as np\nimport pandas as pd\n\n\nfrom calc_metrics import write_img_return_base64\nfrom db import empty_db, execute_query, execute_read_query, create_connection\n\n\ndef launch_server():\n \"\"\"\n Launches app.py and returns psutil process object\n \"\"\"\n process = subprocess.Popen(\n ['python3', 'app.py'], \n stdout=subprocess.PIPE, \n stderr=subprocess.STDOUT,\n )\n process.daemon = True\n return psutil.Process(process.pid)\n\ndef shutdown_server(psutil_process):\n \"\"\"\n Shutodowns the server\n \"\"\"\n try:\n children = psutil_process.children(recursive=True)\n for child_proc in children:\n try:\n child_proc.kill()\n except:\n pass\n\n try:\n psutil_process.kill()\n except:\n pass\n\n except psutil.NoSuchProcess:\n pass\n\ndef wait_until_server_responds(psutil_process=None, host='127.0.0.1', port=5000):\n \"\"\"\n Waits until the server responds.\n If the psutil_process is passed, it is checked by is_running\n \"\"\"\n while socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex((host, port)) != 0:\n time.sleep(0.1)\n if psutil_process:\n assert psutil_process.is_running()\n print('Server responded')\n\ndef add_test_servce_into_db(id, name, token):\n with create_connection() as conn:\n execute_query(conn, f'INSERT INTO services (id, name, token) VALUES (\"{id}\", \"{name}\", \"{token}\");')\n\ndef create_test_files_pack(dataset, n):\n tmp_dir = os.path.join('datasets', dataset)\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n os.makedirs(tmp_dir)\n\n tmp_files = []\n for i in range(n):\n f_name = f'file_{i}.zip'\n tmp_files.append(f_name)\n f_path = os.path.join(tmp_dir, f_name)\n with open(file=f_path, mode='w') as f:\n f.write(f'file content {i}')\n \n return tmp_dir, tmp_files\n\ndef add_test_dataset_into_db(title='test_dataset'):\n tmp_dir, tmp_files = create_test_files_pack(title, 10)\n with create_connection() as conn:\n for i, f in enumerate(tmp_files):\n execute_query(conn, f'INSERT INTO datasets (id, title, filename) VALUES (\"{i}\", \"{title}\", \"{f}\");')\n return tmp_dir, tmp_files\n\n\ndef auth(base_url, name, token, dataset):\n \"\"\"\n returns dict with keys:\n session_token,\n expire_in,\n number_of_items\n \"\"\"\n data = {\n \"name\": name,\n \"token\": token,\n \"dataset\": dataset,\n }\n return requests.post(base_url + '/auth', json=data).json()\n\ndef pull(base_url, name, session_token):\n data = {\n \"name\": name,\n \"session_token\": session_token,\n }\n return requests.post(base_url + '/pull', json=data)\n\ndef push(base_url, name, session_token, testing_item_id):\n data = {\n 'name': name,\n 'session_token': session_token,\n 'testing_item_id': testing_item_id,\n 'ct': 0.0,\n 'left': {\n 'affected_part': 0.0,\n 'total_volume': 0.0,\n 'affected_volume': 0.0,\n },\n 'right': {\n 'affected_part': 0.0,\n 'total_volume': 0.0,\n 'affected_volume': 0.0,\n },\n 'viewer_url': 'localhost',\n 'description': 'testing',\n }\n return requests.post(base_url + '/push', json=data).json()\n \ndef push_mask(base_url, return_type='json', input_type='list'):\n mask_1 = np.zeros((100, 100)).astype(int)\n mask_2 = mask_1\n data = {\n 'return_type': return_type,\n 'input_type': input_type,\n }\n if input_type == 'list':\n data['mask_1'] = mask_1.tolist()\n data['mask_2'] = mask_2.tolist()\n\n elif input_type == 'base64':\n data['mask_1'] = write_img_return_base64(mask_1)\n 
data['mask_2'] = write_img_return_base64(mask_2)\n\n return requests.post(base_url + '/push_mask', json=data)\n\n\nclass Test_main:\n \"\"\"\n Class to test launchability of the application\n \"\"\"\n def setup_class(self):\n # This method is triggered before all\n self.host = '127.0.0.1'\n self.port = 5000\n self.base_url = f'http://{self.host}:{self.port}'\n self.server_process = launch_server()\n wait_until_server_responds(self.server_process)\n empty_db()\n self.name, self.token, self.dataset = 'test_service', '123qwerty654', 'test_dataset'\n self.tmp_dir, self.tmp_files = add_test_dataset_into_db(self.dataset)\n\n def teardown_class(self):\n # This method is triggered after all is done\n shutdown_server(self.server_process)\n empty_db()\n shutil.rmtree(self.tmp_dir)\n\n def test_root_route(self):\n # testing /\n responce = requests.get(self.base_url)\n assert responce.status_code == 200\n assert isinstance(responce.json()['message'], str)\n\n\n def test_auth_true(self):\n # testing /auth with correct data\n add_test_servce_into_db(0, self.name, self.token)\n responce = auth(self.base_url, self.name, self.token, self.dataset)\n assert isinstance(responce.get('session_token'), str)\n assert 'expire_in' in responce\n assert isinstance(responce.get('number_of_items'), int)\n assert responce['number_of_items'] == len(self.tmp_files)\n\n def test_auth_false_1(self):\n # testing /auth with invalid name\n responce = auth(self.base_url, self.name+'123123', self.token, self.dataset)\n assert isinstance(responce.get('error_message'), str)\n assert '401' in responce['error_message']\n\n def test_auth_false_2(self):\n # testing /auth with invalid token\n responce = auth(self.base_url, self.name, self.token+'123123', self.dataset)\n assert isinstance(responce.get('error_message'), str)\n assert '401' in responce['error_message']\n\n\n def test_pull_true_1(self):\n # testing single request /pull\n auth_responce = auth(self.base_url, self.name, self.token, self.dataset)\n assert 'session_token' in auth_responce\n pull_responce = pull(self.base_url, self.name, auth_responce['session_token'])\n assert 'file content' in pull_responce.content.decode('utf-8')\n\n def test_pull_false_1(self):\n # testing /pull with invalid name\n auth_responce = auth(self.base_url, self.name, self.token, self.dataset)\n assert 'session_token' in auth_responce\n pull_responce = pull(self.base_url, self.name+'123', auth_responce['session_token'])\n assert '401' in pull_responce.json()['error_message']\n\n def test_pull_false_2(self):\n # testing /pull with session_token\n auth_responce = auth(self.base_url, self.name, self.token, self.dataset)\n assert 'session_token' in auth_responce\n pull_responce = pull(self.base_url, self.name, auth_responce['session_token']+'123')\n assert '401' in pull_responce.json()['error_message']\n\n def test_pull_true_2(self):\n # testing /pull & /push\n auth_responce = auth(self.base_url, self.name, self.token, self.dataset)\n assert 'session_token' in auth_responce\n\n for _ in range(len(self.tmp_files)):\n pull_responce = pull(self.base_url, self.name, auth_responce['session_token'])\n assert 'file content' in pull_responce.content.decode('utf-8')\n push_responce = push(self.base_url, self.name, auth_responce['session_token'], pull_responce.headers['testing_item_id'])\n assert 'message' in push_responce and 'time_to_response' in push_responce\n\n pull_responce = pull(self.base_url, self.name, auth_responce['session_token'])\n assert 'No available items for this session_token' in 
pull_responce.json()['error_message']\n\n def test_push_mask_json_send_list(self):\n responce = push_mask(self.base_url, return_type='json', input_type='list').json()\n assert 'metrics' in responce\n assert isinstance(responce['metrics'], dict)\n\n def test_push_mask_csv_send_list(self):\n responce = push_mask(self.base_url, return_type='csv', input_type='list')\n assert 'filename' in responce.headers\n assert 'metrics.csv' == responce.headers['filename']\n assert len(responce.content) > 0\n df = pd.read_csv(io.BytesIO(responce.content), encoding='utf-8')\n assert df.shape[1] > 1\n\n def test_push_mask_json_send_base64(self):\n responce = push_mask(self.base_url, return_type='json', input_type='base64').json()\n assert 'metrics' in responce\n assert isinstance(responce['metrics'], dict)\n\n def test_push_mask_csv_send_base64(self):\n responce = push_mask(self.base_url, return_type='csv', input_type='base64')\n assert 'filename' in responce.headers\n assert 'metrics.csv' == responce.headers['filename']\n assert len(responce.content) > 0\n df = pd.read_csv(io.BytesIO(responce.content), encoding='utf-8')\n assert df.shape[1] > 1\n"
] |
[
[
"numpy.zeros"
]
] |
josephhic/AutoDot
|
[
"9acd0ddab9191b8a90afc6f1f6373cf711b40b89"
] |
[
"Investigation/condition_functions.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 15 07:11:05 2019\n\n@author: thele\n\"\"\"\n\nimport scipy.signal as signal\nimport pickle\nimport numpy as np\nfrom .scoring.Last_score import final_score_cls\nfrom skimage.feature import blob_log\nimport time\n\ndef mock_peak_check(anchor,minc,maxc,configs,**kwags):\n a = configs.get('a',None)\n b = configs.get('b',None)\n verb = configs.get('verbose',False)\n \n if a is None and b is None:\n prob = configs.get('prob',0.5)\n \n c_peak = np.random.uniform(0,1)<prob\n if verb: print(c_peak)\n return c_peak, c_peak, None\n \n lb, ub = np.minimum(a,b), np.maximum(a,b)\n \n c_peak = np.all(anchor<ub) and np.all(anchor>lb)\n if verb: print(c_peak)\n return c_peak, c_peak, None\n\n\ndef mock_score_func(anchor,minc,maxc,configs,**kwags):\n a = np.array(configs.get('target',[-500,-500]))\n \n score = 100/ np.linalg.norm(a-anchor)\n print(score)\n return score, False, None\n \n \n\ndef check_nothing(trace,minc,maxc,configs,**kwags):\n output = configs.get('output',False)\n return output, output, None\n\ndef peak_check(trace,minc,maxc,configs,**kwags):\n prominence = configs['prominance']\n \n #norm settings\n offset = minc\n maxval = maxc\n \n #peak detector settings\n height = configs.get('height',0.0178)\n \n trace_norm=trace.copy()-offset\n trace_norm[trace_norm<0]=0\n trace_norm = (trace_norm)/((maxval-offset)) #normalize the current amplitude\n peaks, data = signal.find_peaks(trace_norm,prominence=prominence,height=height)\n return len(peaks)>=configs['minimum'], len(peaks)>=configs['minimum'], peaks\n\n\ndef reduce_then_clf_2dmap(data,minc,maxc,configs,**kwags):\n \n dim_reduction_fname = configs['dim_reduction']\n clf_fname = configs['clf']\n \n with open(dim_reduction_fname,'rb') as drf:\n dim_red = pickle.load(drf)\n \n with open(clf_fname,'rb') as cf:\n clf = pickle.load(cf)\n \n X = normilise(data,configs['norm'],minc,maxc)\n \n X_red = dim_red.transform(np.expand_dims(X,axis=0))\n \n Y = np.squeeze(clf.predict(X_red))\n return Y, Y, None\n\n\ndef last_score(data,minc,maxc,configs,**kwags):\n fsc = final_score_cls(minc,maxc,configs['noise'],configs['segmentation_thresh'])\n \n score = getattr(fsc,configs.get('mode','score'))(data,diff=configs.get('diff',1))\n \n\n \n s_cond = False\n \n \n \n print(\"Score: %f\"%score)\n \n return score, s_cond, None\n\n\n\ndef last_score_then_blob(data,minc,maxc,configs,**kwags):\n fsc = final_score_cls(minc,maxc,configs['noise'],configs['segmentation_thresh'])\n \n score = getattr(fsc,configs.get('mode','score'))(data,diff=configs.get('diff',1))\n \n score_thresh = configs.get('score_thresh',None)\n if score_thresh is None:\n score_thresh = kwags.get('score_thresh')\n \n \n print(\"Score: %f\"%score)\n \n blobs = blob_detect_rough(data,minc,maxc)\n \n return score, score>score_thresh, {\"kwags\":{\"blobs\":blobs,\"size_last\":configs['size'],\"res_last\":configs['res']}}\n\n\n\n\ndef clf_then_blob(data,minc,maxc,configs,**kwags):\n \n data = normilise(data,configs['norm'],minc,maxc)\n clf_fname = configs['clf']\n\n with open(clf_fname,'rb') as cf:\n clf = pickle.load(cf)\n \n \n Y = np.squeeze(clf.predict(np.expand_dims(data,axis=0)))\n \n if Y:\n pass\n return\n\n\n\ndef count_above_thresh(data,minc,maxc,configs,**kwags):\n split_thresh = configs.get('split_thresh',0.0001)\n \n count_required = configs.get('count_required',0.0001)\n \n data_above = data[data>split_thresh]\n \n count_ratio = data_above.size/data.size\n \n blobs = blob_detect_rough(data,minc,maxc)\n \n return 
count_ratio<count_required,count_ratio<count_required,{\"kwags\":{\"cr\":count_ratio,\"blobs\":blobs,\"size_last\":configs['size'],\"res_last\":configs['res']}}\n\n\n\n\n\ndef blob_detect_rough(data,minc,maxc):\n blobs = blob_log(normilise(data,'device_domain',minc,maxc),min_sigma=2,threshold=0.0001)[:,:2]\n return np.array([blobs[:,1],blobs[:,0]])\n \n \ndef normilise(data,norm_type,minc,maxc):\n \n if norm_type is None:\n return data\n \n if isinstance(norm_type,list):\n min_val = norm_type[0]\n max_val = norm_type[1]\n elif norm_type == 'device_domain':\n min_val = minc\n max_val = maxc\n else:\n min_val = data.min()\n max_val = data.max()\n \n data_norm = np.copy(data)\n data_norm[data_norm>max_val] = max_val\n data_norm[data_norm<min_val] = min_val\n \n data_norm = (data_norm - min_val)/(max_val-min_val)\n return data_norm\n \n\ndef plot_image_and_blobs(data,blobs):\n blob = blobs[:,:2]\n blob = np.array([blob[:,1],blob[:,0]])\n \n plt.imshow(data)\n for i in range(blob.shape[-1]):\n plt.scatter(*blob[:,i])\n plt.show()\n \n "
] |
[
[
"numpy.expand_dims",
"numpy.maximum",
"scipy.signal.find_peaks",
"numpy.minimum",
"numpy.linalg.norm",
"numpy.all",
"numpy.copy",
"numpy.random.uniform",
"numpy.array"
]
] |
biomass-dev/biomass
|
[
"789a747bb293a52eaf65ce2c441d063d7a6d0671"
] |
[
"biomass/dynamics/temporal_dynamics.py"
] |
[
"import os\nfrom dataclasses import dataclass\nfrom math import isnan\nfrom typing import List, Optional\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.axes._axes import _log as matplotlib_axes_logger\n\nfrom ..exec_model import ExecModel, ModelObject\nfrom ..plotting import MultipleObservables, SingleObservable\n\nmatplotlib_axes_logger.setLevel(\"ERROR\")\n\n\n@dataclass\nclass TemporalDynamics(ExecModel):\n model: ModelObject\n\n def plot_timecourse(\n self,\n n_file: List[int],\n viz_type: str,\n show_all: bool,\n stdev: bool,\n simulations_all: np.ndarray,\n ) -> None:\n \"\"\"\n Plot time course of each observable.\n\n Parameters\n ----------\n n_file : list of integers\n Optimized parameter sets in out/.\n\n viz_type : str\n One of ['average', 'best', 'original', 'n(=1,2,...)', 'experiment'].\n\n show_all : bool\n Whether to show all simulation results.\n\n stdev : bool\n If True, the standard deviation of simulated values will be shown\n (only available for 'average' visualization type).\n\n simulations_all : numpy array\n Array containing all simulated values.\n\n \"\"\"\n os.makedirs(\n os.path.join(self.model.path, \"figure\", \"simulation\", f\"{viz_type}\"),\n exist_ok=True,\n )\n self.model.problem.set_data()\n self.model.viz.set_timecourse_rcParams()\n singleplotting = self.model.viz.get_single_observable_options()\n multiplotting = self.model.viz.get_multiple_observables_options()\n for mode in range(2):\n # mode 0 : timecourse_for_each_observable\n # mode 1 : multiple_observables\n set_fig = False\n for i, obs_name in enumerate(self.model.observables):\n if mode == 1 and obs_name not in multiplotting.observables:\n continue\n if mode == 0:\n plt.figure(figsize=singleplotting[i].figsize)\n elif mode == 1 and not set_fig:\n plt.figure(figsize=multiplotting.figsize)\n set_fig = True\n plt.gca().spines[\"right\"].set_visible(False)\n plt.gca().spines[\"top\"].set_visible(False)\n if viz_type != \"experiment\":\n if show_all:\n self._plot_show_all(\n n_file, simulations_all, obs_name, mode, singleplotting, multiplotting\n )\n if viz_type == \"average\":\n normalized = self._normalize_array(\n n_file, simulations_all, obs_name, mode, singleplotting, multiplotting\n )\n if (\n self.model.problem.normalization\n and self.model.problem.normalization[obs_name][\"timepoint\"] is None\n ):\n normalized = self._divide_by_maximum(normalized, obs_name)\n self._plot_average(\n normalized, obs_name, mode, singleplotting, multiplotting\n )\n if stdev:\n self._show_sd(\n normalized, obs_name, mode, singleplotting, multiplotting\n )\n else:\n self._plot_simulations(obs_name, mode, singleplotting, multiplotting)\n if (\n viz_type == \"experiment\" or singleplotting[i].exp_data\n ) and self.model.problem.experiments[i] is not None:\n exp_t = self.model.problem.get_timepoint(obs_name)\n if self.model.problem.error_bars[i] is not None:\n self._plot_experimental_data_with_error_bars(\n viz_type, exp_t, obs_name, mode, singleplotting, multiplotting\n )\n else:\n self._plot_experimental_data_without_error_bars(\n viz_type, exp_t, obs_name, mode, singleplotting, multiplotting\n )\n if mode == 0:\n self._save_mode_0(obs_name, singleplotting, viz_type)\n if mode == 1 and multiplotting.observables:\n self._save_mode_1(multiplotting, viz_type)\n\n def _plot_show_all(\n self,\n n_file: List[int],\n simulations_all: np.ndarray,\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> None:\n \"\"\"\n Plot time course 
simulated values (show_all == True).\n \"\"\"\n i = self.model.observables.index(obs_name)\n for j, _ in enumerate(n_file):\n for l, condition in enumerate(self.model.problem.conditions):\n if (mode == 0 and condition not in singleplotting[i].dont_show) or (\n mode == 1 and condition == multiplotting.condition\n ):\n plt.plot(\n np.array(self.model.problem.t) / singleplotting[i].divided_by,\n simulations_all[i, j, :, l]\n / (\n 1\n if not self.model.problem.normalization\n or np.max(simulations_all[i, j, :, l]) == 0.0\n else np.max(\n simulations_all[\n i,\n j,\n self.model.problem.normalization[obs_name][\"timepoint\"],\n [\n self.model.problem.conditions.index(c)\n for c in self.model.problem.normalization[obs_name][\n \"condition\"\n ]\n ],\n ]\n )\n if self.model.problem.normalization[obs_name][\"timepoint\"] is not None\n else np.max(\n simulations_all[\n i,\n j,\n :,\n [\n self.model.problem.conditions.index(c)\n for c in self.model.problem.normalization[obs_name][\n \"condition\"\n ]\n ],\n ]\n )\n ),\n color=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n alpha=0.05,\n )\n\n def _normalize_array(\n self,\n n_file: List[int],\n simulations_all: np.ndarray,\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> np.ndarray:\n \"\"\"\n Normalize the array simulations_all using problem.normalization set in observable.py.\n \"\"\"\n normalized: np.ndarray = np.empty_like(simulations_all)\n i = self.model.observables.index(obs_name)\n for j, _ in enumerate(n_file):\n for l, condition in enumerate(self.model.problem.conditions):\n if (mode == 0 and condition not in singleplotting[i].dont_show) or (\n mode == 1 and condition == multiplotting.condition\n ):\n normalized[i, j, :, l] = simulations_all[i, j, :, l] / (\n 1\n if not self.model.problem.normalization\n or np.max(simulations_all[i, j, :, l]) == 0.0\n else np.max(\n simulations_all[\n i,\n j,\n self.model.problem.normalization[obs_name][\"timepoint\"],\n [\n self.model.problem.conditions.index(c)\n for c in self.model.problem.normalization[obs_name][\n \"condition\"\n ]\n ],\n ]\n )\n if self.model.problem.normalization[obs_name][\"timepoint\"] is not None\n else np.max(\n simulations_all[\n i,\n j,\n :,\n [\n self.model.problem.conditions.index(c)\n for c in self.model.problem.normalization[obs_name][\n \"condition\"\n ]\n ],\n ]\n )\n )\n return normalized\n\n def _divide_by_maximum(self, normalized: np.ndarray, obs_name: str) -> np.ndarray:\n \"\"\"\n Divide the array by its maximum.\n \"\"\"\n mean_vec = []\n for c in self.model.problem.normalization[obs_name][\"condition\"]:\n mean_vec.append(\n np.nanmean(\n normalized[\n self.model.observables.index(obs_name),\n :,\n :,\n self.model.problem.conditions.index(c),\n ],\n axis=0,\n )\n )\n norm_max = np.max(mean_vec)\n if not isnan(norm_max) and norm_max != 0.0:\n normalized[self.model.observables.index(obs_name), :, :, :] /= norm_max\n\n return normalized\n\n def _plot_average(\n self,\n normalized: np.ndarray,\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> None:\n \"\"\"\n Plot time course simulated values (viz_type == 'average').\n \"\"\"\n i = self.model.observables.index(obs_name)\n for l, condition in enumerate(self.model.problem.conditions):\n if (mode == 0 and condition not in singleplotting[i].dont_show) or (\n mode == 1 and condition == multiplotting.condition\n ):\n 
plt.plot(\n np.array(self.model.problem.t) / singleplotting[i].divided_by,\n np.nanmean(normalized[i, :, :, l], axis=0),\n color=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n label=condition if mode == 0 else singleplotting[i].ylabel,\n )\n\n def _show_sd(\n self,\n normalized: np.ndarray,\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> None:\n \"\"\"\n Plot standard deviation (SD) as shaded area when stdev == True.\n \"\"\"\n i = self.model.observables.index(obs_name)\n for l, condition in enumerate(self.model.problem.conditions):\n if (mode == 0 and condition not in singleplotting[i].dont_show) or (\n mode == 1 and condition == multiplotting.condition\n ):\n y_mean = np.nanmean(normalized[i, :, :, l], axis=0)\n y_std = [\n np.nanstd(normalized[i, :, k, l], ddof=1)\n for k, _ in enumerate(self.model.problem.t)\n ]\n plt.fill_between(\n np.array(self.model.problem.t) / singleplotting[i].divided_by,\n y_mean - y_std,\n y_mean + y_std,\n lw=0,\n color=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n alpha=0.1,\n )\n\n def _plot_simulations(\n self,\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> None:\n \"\"\"\n Plot time course simulated values (viz_type not in ['average', 'experiment']).\n \"\"\"\n i = self.model.observables.index(obs_name)\n for l, condition in enumerate(self.model.problem.conditions):\n if (mode == 0 and condition not in singleplotting[i].dont_show) or (\n mode == 1 and condition == multiplotting.condition\n ):\n plt.plot(\n np.array(self.model.problem.t) / singleplotting[i].divided_by,\n self.model.problem.simulations[i, :, l]\n / (\n 1\n if not self.model.problem.normalization\n or np.max(self.model.problem.simulations[i, :, l]) == 0.0\n else np.max(\n self.model.problem.simulations[\n i,\n self.model.problem.normalization[obs_name][\"timepoint\"],\n [\n self.model.problem.conditions.index(c)\n for c in self.model.problem.normalization[obs_name][\n \"condition\"\n ]\n ],\n ]\n )\n if self.model.problem.normalization[obs_name][\"timepoint\"] is not None\n else np.max(\n self.model.problem.simulations[\n i,\n :,\n [\n self.model.problem.conditions.index(c)\n for c in self.model.problem.normalization[obs_name][\n \"condition\"\n ]\n ],\n ]\n )\n ),\n color=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n label=condition if mode == 0 else singleplotting[i].ylabel,\n )\n\n def _plot_experimental_data_with_error_bars(\n self,\n viz_type: str,\n exp_t: Optional[List[int]],\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> None:\n \"\"\"\n Plot experimental measurements with error bars.\n \"\"\"\n i = self.model.observables.index(obs_name)\n for l, condition in enumerate(self.model.problem.conditions):\n if (\n condition in self.model.problem.experiments[i]\n and (mode == 0 and condition not in singleplotting[i].dont_show)\n or (mode == 1 and condition == multiplotting.condition)\n ):\n exp_data = plt.errorbar(\n np.array(exp_t) / singleplotting[i].divided_by,\n self.model.problem.experiments[i][condition],\n yerr=self.model.problem.error_bars[i][condition],\n color=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n 
ecolor=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n elinewidth=1,\n capsize=8,\n markerfacecolor=\"None\",\n markeredgecolor=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n fmt=singleplotting[i].shape[l]\n if mode == 0\n else multiplotting.shape[multiplotting.observables.index(obs_name)],\n clip_on=False,\n label=singleplotting[i].ylabel\n if mode == 1 and viz_type == \"experiment\"\n else None,\n )\n for capline in exp_data[1]:\n capline.set_clip_on(False)\n for barlinecol in exp_data[2]:\n barlinecol.set_clip_on(False)\n\n def _plot_experimental_data_without_error_bars(\n self,\n viz_type: str,\n exp_t: Optional[List[int]],\n obs_name: str,\n mode: int,\n singleplotting: List[SingleObservable],\n multiplotting: MultipleObservables,\n ) -> None:\n \"\"\"\n Plot experimental measurements when model.problem.error_bars[i] is None.\n \"\"\"\n i = self.model.observables.index(obs_name)\n for l, condition in enumerate(self.model.problem.conditions):\n if (\n condition in self.model.problem.experiments[i]\n and (mode == 0 and condition not in singleplotting[i].dont_show)\n or (mode == 1 and condition == multiplotting.condition)\n ):\n plt.plot(\n np.array(exp_t) / singleplotting[i].divided_by,\n self.model.problem.experiments[i][condition],\n singleplotting[i].shape[l]\n if mode == 0\n else multiplotting.shape[multiplotting.observables.index(obs_name)],\n markerfacecolor=\"None\",\n markeredgecolor=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n color=singleplotting[i].cmap[l]\n if mode == 0\n else multiplotting.cmap[multiplotting.observables.index(obs_name)],\n clip_on=False,\n label=singleplotting[i].ylabel\n if mode == 1 and viz_type == \"experiment\"\n else None,\n )\n\n def _save_mode_0(\n self,\n obs_name: str,\n singleplotting: List[SingleObservable],\n viz_type: str,\n ) -> None:\n \"\"\"\n Plot time course of each observable.\n \"\"\"\n i = self.model.observables.index(obs_name)\n if singleplotting[i].xlim:\n plt.xlim(singleplotting[i].xlim)\n if singleplotting[i].xticks is not None:\n plt.xticks(singleplotting[i].xticks)\n plt.xlabel(singleplotting[i].xlabel)\n if singleplotting[i].ylim:\n plt.ylim(singleplotting[i].ylim)\n if singleplotting[i].yticks is not None:\n plt.yticks(singleplotting[i].yticks)\n plt.ylabel(singleplotting[i].ylabel)\n if singleplotting[i].legend_kws is not None:\n plt.legend(**singleplotting[i].legend_kws)\n plt.savefig(\n os.path.join(\n self.model.path,\n \"figure\",\n \"simulation\",\n f\"{viz_type}\",\n f\"{obs_name}\",\n ),\n )\n plt.close()\n\n def _save_mode_1(\n self,\n multiplotting: MultipleObservables,\n viz_type: str,\n ) -> None:\n \"\"\"\n Plot time course of multiple observables in one figure.\n \"\"\"\n if multiplotting.xlim:\n plt.xlim(multiplotting.xlim)\n if multiplotting.xticks is not None:\n plt.xticks(multiplotting.xticks)\n plt.xlabel(multiplotting.xlabel)\n if multiplotting.ylim:\n plt.ylim(multiplotting.ylim)\n if multiplotting.yticks is not None:\n plt.yticks(multiplotting.yticks)\n plt.ylabel(multiplotting.ylabel)\n plt.legend(**multiplotting.legend_kws)\n plt.savefig(\n os.path.join(\n self.model.path,\n \"figure\",\n \"simulation\",\n f\"{viz_type}\",\n f\"{multiplotting.fname}\",\n ),\n )\n plt.close()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.gca",
"numpy.empty_like",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"matplotlib.axes._axes._log.setLevel",
"numpy.max",
"matplotlib.pyplot.xlim",
"numpy.nanmean",
"matplotlib.pyplot.close",
"numpy.nanstd",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
bmistry4/nalm-benchmark
|
[
"273c95cc75241f56e48bcd0b18b043969ef82004"
] |
[
"stable_nalu/functional/golden_ratio_base.py"
] |
[
"import math\nimport torch\n\ngolden_ratio = (1 + math.sqrt(5)) / 2.\ntanh = lambda x: (torch.pow(golden_ratio, 2 * x) - 1) / (torch.pow(golden_ratio, 2 * x) + 1)\nsigmoid = lambda x: 1 / (1 + torch.pow(golden_ratio, -x))\n"
] |
[
[
"torch.pow"
]
] |
sjtuytc/AAAI21-RoutineAugmentedPolicyLearning
|
[
"7192f0bf26378d8aacb21c0220cc705cb577c6dc",
"7192f0bf26378d8aacb21c0220cc705cb577c6dc"
] |
[
"make_demo_discover_rt/baseline_a2c.py",
"make_demo_discover_rt/sq_rt_proposal.py"
] |
[
"import time\nimport functools\nimport tensorflow as tf\n\nfrom baselines import logger\n\nfrom baselines.common import set_global_seeds, explained_variance\nfrom baselines.common import tf_util\nfrom baselines.common.policies import build_policy\n\nfrom baselines.a2c.utils import Scheduler, find_trainable_variables\nfrom baselines.a2c.runner import Runner\nfrom baselines.ppo2.ppo2 import safemean\nfrom collections import deque\n\nfrom tensorflow import losses\n\n\nclass Model(object):\n\n \"\"\"\n We use this class to :\n __init__:\n - Creates the step_model\n - Creates the train_model\n\n train():\n - Make the training part (feedforward and retropropagation of gradients)\n\n save/load():\n - Save load the model\n \"\"\"\n def __init__(self, policy, env, nsteps,\n ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,\n alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', variable_scope=\"a2c_model\"):\n config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n sess = tf_util.get_session(config=config)\n nenvs = env.num_envs\n nbatch = nenvs*nsteps\n\n with tf.variable_scope(variable_scope, reuse=tf.AUTO_REUSE):\n # step_model is used for sampling\n step_model = policy(nenvs, 1, sess)\n\n # train_model is used to train our network\n train_model = policy(nbatch, nsteps, sess)\n\n A = tf.placeholder(train_model.action.dtype, train_model.action.shape)\n ADV = tf.placeholder(tf.float32, [nbatch])\n R = tf.placeholder(tf.float32, [nbatch])\n LR = tf.placeholder(tf.float32, [])\n\n # Calculate the loss\n # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss\n\n # Policy loss\n neglogpac = train_model.pd.neglogp(A)\n # L = A(s,a) * -logpi(a|s)\n pg_loss = tf.reduce_mean(ADV * neglogpac)\n\n # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.\n entropy = tf.reduce_mean(train_model.pd.entropy())\n\n # Value loss\n vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)\n\n loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef\n\n # Update parameters using loss\n # 1. Get the model parameters\n params = find_trainable_variables(variable_scope)\n\n # 2. Calculate the gradients\n grads = tf.gradients(loss, params)\n if max_grad_norm is not None:\n # Clip the gradients (normalize)\n grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)\n grads = list(zip(grads, params))\n # zip aggregate each gradient with parameters associated\n # For instance zip(ABCD, xyza) => Ax, By, Cz, Da\n\n # 3. 
Make op for one policy and value update step of A2C\n trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)\n\n _train = trainer.apply_gradients(grads)\n\n lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)\n\n def train(obs, states, rewards, masks, actions, values):\n # Here we calculate advantage A(s,a) = R + yV(s') - V(s)\n # rewards = R + yV(s')\n advs = rewards - values\n for step in range(len(obs)):\n cur_lr = lr.value()\n\n td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}\n if states is not None:\n td_map[train_model.S] = states\n td_map[train_model.M] = masks\n policy_loss, value_loss, policy_entropy, _ = sess.run(\n [pg_loss, vf_loss, entropy, _train],\n td_map\n )\n return policy_loss, value_loss, policy_entropy\n\n\n self.train = train\n self.train_model = train_model\n self.step_model = step_model\n self.step = step_model.step\n self.value = step_model.value\n self.initial_state = step_model.initial_state\n self.save = functools.partial(tf_util.save_variables, sess=sess)\n self.load = functools.partial(tf_util.load_variables, sess=sess)\n tf.global_variables_initializer().run(session=sess)\n\n\ndef learn(network, env,\n seed=None,\n nsteps=5,\n total_timesteps=int(80e6),\n vf_coef=0.5,\n ent_coef=0.01,\n max_grad_norm=0.5,\n lr=7e-4,\n lrschedule='constant',\n epsilon=1e-5,\n alpha=0.99,\n gamma=0.99,\n log_interval=100,\n load_path=None,\n variable_scope='a2c_model',\n **network_kwargs):\n\n set_global_seeds(seed)\n\n # Get the nb of env\n nenvs = env.num_envs\n policy = build_policy(env, network, **network_kwargs)\n\n # Instantiate the model object (that creates step_model and train_model)\n model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,\n max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps,\n lrschedule=lrschedule, variable_scope=variable_scope)\n if load_path is not None:\n model.load(load_path)\n\n # Instantiate the runner object\n runner = Runner(env, model, nsteps=nsteps, gamma=gamma)\n epinfobuf = deque(maxlen=100)\n\n # Calculate the batch_size\n nbatch = nenvs*nsteps\n\n # Start total timer\n tstart = time.time()\n\n for update in range(1, total_timesteps//nbatch+1):\n # Get mini batch of experiences\n obs, states, rewards, masks, actions, values, epinfos = runner.run()\n epinfobuf.extend(epinfos)\n\n policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)\n nseconds = time.time() - tstart\n\n # Calculate the fps (frame per second)\n fps = int((update*nbatch)/nseconds)\n if update % log_interval == 0 or update == 1:\n # Calculates if value function is a good predicator of the returns (ev > 1)\n # or if it's just worse than predicting nothing (ev =< 0)\n ev = explained_variance(values, rewards)\n logger.record_tabular(\"nupdates\", update)\n logger.record_tabular(\"total_timesteps\", update*nbatch)\n logger.record_tabular(\"fps\", fps)\n logger.record_tabular(\"policy_entropy\", float(policy_entropy))\n logger.record_tabular(\"value_loss\", float(value_loss))\n logger.record_tabular(\"explained_variance\", float(ev))\n logger.record_tabular(\"eprewmean\", safemean([epinfo['r'] for epinfo in epinfobuf]))\n logger.record_tabular(\"eplenmean\", safemean([epinfo['l'] for epinfo in epinfobuf]))\n logger.dump_tabular()\n return model\n",
"\"\"\"\nauthor: Anonymous Author\ncontact: Anonymous@anonymous\nfilename: cm_mini_sq\ndescription: minimum routine learning function.\n\"\"\"\nimport sys\nimport numpy as np\nfrom Levenshtein import distance\nsys.path.insert(0, \"../\")\nfrom utils.tensor_list import calculate_rank, whether_a_contain_b, rank_one_by_another, combine_list_to_str, combine_lists\nfrom make_demo_discover_rt.pysequitur.main import Sequencer3, print_grammar, AlphabetsTransformer\n\n\nclass CmSqRtProposal:\n \"\"\"\n Compress an action seq into routines.\n \"\"\"\n def __init__(self, all_action_seq, prim_num):\n self.prim_n = prim_num\n self.prim_ids = [[i] for i in range(self.prim_n)]\n self.id2rts = [[i] for i in range(self.prim_n)]\n self.all_action_seq = all_action_seq\n # transformer used in Sequitur algorithm.\n self.a_trans = AlphabetsTransformer()\n self.parsed_result, self.rt_actions_idxs, self.avg_freq, self.rt_frequencies, self.rt_scores = \\\n None, None, None, None, None\n\n def cal_dis(self, action_a, action_b):\n encoded_a = combine_list_to_str(self.a_trans.list_ids2alphabets(action_a))\n encoded_b = combine_list_to_str(self.a_trans.list_ids2alphabets(action_b))\n cur_dis = distance(encoded_a, encoded_b)\n return cur_dis\n\n def run(self, similar_thre=3, select_num=3, size_weight=0.1):\n \"\"\"\n Main entrance to the sq routine learning algorithm.\n \"\"\"\n # run sequitur algorithm and get parsed action sequence.\n self.run_sequitur()\n # calculate frequency of the abstracted routines.\n self.evaluate_routines(similar_thre=similar_thre, select_num=select_num)\n return self.rt_actions_idxs\n\n def run_sequitur(self):\n print(\"Begin run sequitur for size:\", len(self.all_action_seq))\n # we first encode all action seq to alphabets in order to run sequitur algorithm.\n encoded_action_seq = self.a_trans.list_ids2alphabets(self.all_action_seq)\n structure = Sequencer3(encoded_action_seq)\n self.parsed_result = structure.get()\n\n # collect results to form routines\n rt_actions = []\n rt_considered_nonterminal = []\n for idx, cur_gram in enumerate(self.parsed_result):\n if cur_gram is None:\n continue\n for jdx, cur_ele in enumerate(cur_gram):\n if type(cur_ele) != str and cur_ele not in rt_considered_nonterminal:\n rt_considered_nonterminal.append(cur_ele)\n cur_idx = cur_ele.real\n cur_raw_actions = self.get_actions_for_routine(cur_idx)\n cur_actions = self.a_trans.list_alphabets2ids(cur_raw_actions)\n cur_actions = combine_lists([self.id2rts[routine_id] for routine_id in cur_actions])\n if cur_actions in rt_actions:\n continue\n rt_actions.append(cur_actions)\n self.rt_actions_idxs = rt_actions\n\n def get_actions_for_routine(self, cur_idx):\n # find until no non-terminal variables are killed.\n cur_ori_repre = self.parsed_result[cur_idx]\n return_actions = []\n for idx, ele in enumerate(cur_ori_repre):\n if type(ele) == str:\n return_actions.append(ele)\n else:\n cur_idx = ele.real\n cur_actions = self.get_actions_for_routine(cur_idx)\n return_actions += cur_actions\n return return_actions\n\n def evaluate_routines(self, similar_thre, select_num, size_weight=0):\n # calculate frequencies and sizes for all routines.\n rt_freqs = []\n rt_sizes = []\n for idx, cur_rt_action in enumerate(self.rt_actions_idxs):\n cur_rt_freq = 0\n for begin_pos, cur_ele in enumerate(self.all_action_seq):\n end_pos = int(begin_pos + len(cur_rt_action))\n if end_pos >= len(self.all_action_seq):\n break\n if self.all_action_seq[begin_pos:end_pos] == cur_rt_action:\n # One routine detected.\n cur_rt_freq += 1\n 
rt_freqs.append(cur_rt_freq)\n rt_sizes.append(len(cur_rt_action))\n avg_freq, avg_size = np.mean(rt_freqs), np.mean(rt_sizes)\n\n # collect rt infos\n rt_and_infos = []\n for idx, cur_rt_action in enumerate(self.rt_actions_idxs):\n cur_len = len(cur_rt_action)\n cur_score = rt_freqs[idx] / avg_freq + size_weight * cur_len / avg_size\n cur_rt_and_info = {'rt': cur_rt_action, 'freq': rt_freqs[idx], 'size': cur_len, 'score': cur_score}\n cur_rt_is_worse = False\n # check whether this routine is too similar in comparison to primitive actions\n for jdx, prim_rt in enumerate(self.prim_ids):\n if self.cal_dis(cur_rt_action, prim_rt) < similar_thre:\n cur_rt_is_worse = True\n break\n for jdx, ana_info in enumerate(rt_and_infos):\n if cur_rt_is_worse:\n break\n ana_rt_action = ana_info['rt']\n ana_score = ana_info['score']\n # too similar routines are detected\n if self.cal_dis(cur_rt_action, ana_rt_action) < similar_thre or whether_a_contain_b(cur_rt_action, ana_rt_action) \\\n or whether_a_contain_b(ana_rt_action, cur_rt_action):\n # if cur score is larger than one existed routine, replace that routine by cur routine\n if cur_score > ana_score:\n rt_and_infos.remove(ana_info)\n cur_rt_is_worse = False\n # else ignore current routine\n else:\n cur_rt_is_worse = True\n break\n # if cur routine is not similar and worse, append it to routine library\n if not cur_rt_is_worse:\n rt_and_infos.append(cur_rt_and_info)\n\n # self.rt_frequencies = [np.round(cur_freq / abs(self.avg_freq), 3) for cur_freq in return_freq]\n # self.rt_frequencies = all_freqs\n\n # parse results\n rt_action_idxs = [cur_info['rt'] for cur_info in rt_and_infos]\n rt_scores = [cur_info['score'] for cur_info in rt_and_infos]\n rt_freqs = [cur_info['freq'] for cur_info in rt_and_infos]\n rt_action_idxs, re_arranged_score = rank_one_by_another(rt_action_idxs, rt_scores)\n rt_freqs, re_arranged_score = rank_one_by_another(rt_freqs, rt_scores)\n rt_scores = re_arranged_score\n self.rt_actions_idxs = rt_action_idxs[:select_num]\n self.rt_frequencies = rt_freqs[:select_num]\n self.rt_scores = rt_scores[:select_num]\n\n print(\"Finish abstracting routine.\")\n print(\"Result routines:\", self.rt_actions_idxs)\n print(\"Result routine frequencies:\", self.rt_frequencies)\n print(\"Result routine scores:\", self.rt_scores)\n\n return self.rt_actions_idxs\n\n def select_routines(self, size_weight, select_num):\n final_rt_actions, final_rt_freqs, final_rt_scores = [], [], []\n for idx, cur_f in enumerate(self.rt_frequencies):\n actions = self.rt_actions_idxs[idx]\n cur_score = cur_f + size_weight * len(actions)\n final_rt_scores.append(cur_score)\n self.rt_actions_idxs, re_arranged_score = rank_one_by_another(self.rt_actions_idxs, final_rt_scores)\n self.rt_frequencies, re_arranged_score = rank_one_by_another(self.rt_frequencies, final_rt_scores)\n final_rt_scores = re_arranged_score\n self.rt_actions_idxs = self.rt_actions_idxs[:select_num]\n self.rt_frequencies = self.rt_frequencies[:select_num]\n return final_rt_scores[:select_num]\n\n def transfer_routines(self):\n final_rt_actions = []\n for idx, cur_act in enumerate(self.rt_actions_idxs):\n cur_rt_action = []\n for jdx, cur_idx in enumerate(cur_act):\n sub_routine_names = list(self.asm.id2name(cur_idx))\n cur_rt_action += sub_routine_names\n final_rt_actions.append(cur_rt_action)\n return final_rt_actions\n\n \"\"\"\n Display and save routines.\n \"\"\"\n\n def d(self):\n for idx, cur_act in enumerate(self.rt_actions_idxs):\n print(idx, \"th rt; acts:\", cur_act, \"; freq:\", 
self.rt_frequencies[idx], \"; len:\", len(cur_act))\n\n\nif __name__ == '__main__':\n data_num = 800\n"
] |
[
[
"tensorflow.train.RMSPropOptimizer",
"tensorflow.reduce_mean",
"tensorflow.gradients",
"tensorflow.placeholder",
"tensorflow.squeeze",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope"
],
[
"numpy.mean"
]
] |
Razoff/prepa_cahier
|
[
"86bd4671a50ad06ee247089ae01d50c1a2479415"
] |
[
"game.py"
] |
[
"import headers\nimport pgn_move_processing\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.drawing.nx_pydot import graphviz_layout\n\n\"\"\"\nGame class. A game is a header object and and list of half_move objects\n\"\"\"\nclass Game:\n def __init__(self, header, moves):\n self.header = header\n self.moves = moves\n self.pgn_graph = nx.Graph()\n self.label_dict = {}\n self.color_dict = {}\n\n \"\"\"\n Print all object : headers and then moves\n \"\"\"\n def print_all(self):\n self.header.print_headers()\n print(\"\\n\")\n\n for move in self.moves:\n move.print_full_object()\n print(\"\\n\")\n\n \"\"\"\n Create graph of game by recursively process move and adding them to the graph\n \"\"\"\n def make_graph(self):\n self.add_move(self.find_first_move())\n\n \"\"\"\n Get pgn_graph element setup colors and label and show it\n \"\"\"\n def display_graph(self):\n print(nx.get_node_attributes(self.pgn_graph, self.find_first_move()))\n values = [self.color_dict.get(node, 0.25) for node in self.pgn_graph.nodes()]\n print(values)\n pos = graphviz_layout(self.pgn_graph, prog=\"dot\")\n nx.draw(self.pgn_graph, pos, labels=self.label_dict, with_labels=True, node_color=values, cmap=plt.cm.Greys)\n plt.show()\n\n \"\"\"\n Find first move of the game in the list of moves by looking which move has no parent\n \"\"\"\n def find_first_move(self):\n for move in self.moves:\n if move.parent is None:\n move.print_full_object()\n return move\n\n \"\"\"\n Add move and recursively add its children to the graph with correct edges\n \"\"\"\n def add_move(self, move):\n self.pgn_graph.add_node(move)\n self.label_dict[move] = move.move_name\n if move.white_move:\n self.color_dict[move] = 0.0\n else:\n self.color_dict[move] = 2.0\n\n if move.parent is not None:\n self.pgn_graph.add_edge(move.parent, move)\n\n for child in move.children:\n self.add_move(child)\n\n \"\"\"\n Return a Game object from a path to a pgn file\n \"\"\"\n @staticmethod\n def game_from_pgn(path):\n head = headers.Headers.get_headers_from_pgn(path)\n moves = pgn_move_processing.PGNManipulator(path)\n moves.process_file()\n\n return Game(head, moves.move_list)\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
DRL-CASIA/Perception
|
[
"a0e7d3957267ce92a82b03ab3eca96916d22c4f2"
] |
[
"demo/main.py"
] |
[
"##v4版本可以识别多车并对应,并行运算\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 20 12:53:40 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport sys\r\nsys.path.append(\"./angle_classify\")\r\nsys.path.append(\"./armor_classify\")\r\nsys.path.append(\"./car_classify\")\r\nimport numpy as np\r\nimport cv2\r\nfrom armor_detect_withlightbox import read_morphology_withlightbox,find_contours_withlightbox\r\nfrom armor_detect import read_morphology_temp,find_contours\r\nfrom yolo_detect_v2 import output\r\nfrom position_predict import *\r\nfrom utils.utils_mulanchor import *\r\nimport torch\r\nfrom models_nolambda_focallossw import *\r\nimport time\r\nfrom classification import *\r\nfrom classification_car import *\r\nfrom classification_angle_camera import *\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\n\r\n\r\ncamera = 'left'\r\n\r\ndef camera_calibration(img,camera='left'):\r\n # # TODO 获取相机内参,获取二维码四点世界坐标\r\n np.set_printoptions(suppress=True)\r\n object_3d_points = np.array(([-75, -75, 0],\r\n [75, -75, 0],\r\n [75, 75, 0],\r\n [-75, 75, 0]), dtype=np.double)\r\n # TODO 将 object_2d_point 的值设为 detect得到的二维码四点坐标\r\n object_2d_point = np.array(([954., 534.],\r\n [1004., 536.],\r\n [1006., 579.],\r\n [956., 577.]), dtype=np.double)\r\n if camera == 'left':\r\n camera_matrix = np.array([[6.570931846420799e+02,0,3.196939147616254e+02],\r\n [0,6.190714811365291e+02,2.520205008433231e+02],\r\n [0,0,1]], dtype=\"double\")\r\n dist_coeffs = np.transpose([-0.216248222896496, 0.226313370014235, -0.001139415943532, \r\n -0.004624035593808, -0.059067986510048])\r\n \r\n if camera == 'right':\r\n camera_matrix = np.array([[653.528968471312,0,316.090142900466],\r\n [0,616.850241871879,242.354349211058],\r\n [0,0,1]], dtype=\"double\")\r\n dist_coeffs = np.transpose([-0.203713353732576, 0.178375149377498, -0.000880727909602325, \r\n -0.00023370151705564, -0.0916209128198407])\r\n found, rvec, tvec = cv2.solvePnP(object_3d_points, object_2d_point, camera_matrix, dist_coeffs)\r\n rotM = cv2.Rodrigues(rvec)[0]\r\n return np.array(rotM).T, np.array(tvec)\r\n \r\n\r\ndef point_sort(box):\r\n x = [box[0][0],box[1][0],box[2][0],box[3][0]]\r\n index = np.argsort(x)\r\n left = [box[index[0]],box[index[1]]]\r\n right = [box[index[2]],box[index[3]]]\r\n if left[0][1]< left[1][1]:\r\n left_up = left[0]\r\n left_down = left[1]\r\n else:\r\n left_up = left[1]\r\n left_down = left[0]\r\n if right[0][1]< right[1][1]:\r\n right_up = right[0]\r\n right_down = right[1]\r\n else:\r\n right_up = right[1]\r\n right_down = right[0]\r\n return left_up,left_down,right_up,right_down\r\n\r\ndef get_test_input(input_dim, CUDA):\r\n img = cv2.imread(\"dog-cycle-car.png\")\r\n img = cv2.resize(img, (input_dim[1], input_dim[0])) # resize: w h\r\n img_ = img[:,:,::-1].transpose((2,0,1))\r\n img_ = img_[np.newaxis,:,:,:]/255.0\r\n img_ = torch.from_numpy(img_).float()\r\n img_ = Variable(img_)\r\n \r\n if CUDA:\r\n img_ = img_.cuda()\r\n \r\n return img_\r\n\r\ndef draw_position_rect(im, left_up,left_down,right_up,right_down):\r\n # 原理是:::PNP算法\r\n # 找到四个对应点,根据摄像头参数求解实际世界坐标\r\n # 找外接矩形的四个图像点\r\n # 分别设置为(0,0,0),(0,车体长度,0),(0,车体长度,车体高度),(0,0,车体高度)///\r\n # 但是这样做不对,因为车体在旋转过程中无法在图像上找到精确的位置,无法计算。\r\n # 应该以检测装甲板的位置作为四个对应点,这样他的大小是死的,是可以计算的。“\r\n image_points = np.array([\r\n (left_up[0], left_up[1]),\r\n (right_up[0], right_up[1]),\r\n (right_down[0], right_down[1]),\r\n (left_down[0], left_down[1]),\r\n ], dtype=\"double\")\r\n high = 60 #mm\r\n width = 137 #mm\r\n model_points = np.array([\r\n (-width/2, -high/2, 0),\r\n (width/2, 
-high/2, 0),\r\n (width/2, high/2, 0),\r\n (-width/2, high/2, 0),\r\n ])\r\n\r\n camera_matrix = np.array([[6.570931846420799e+02,0,3.196939147616254e+02],\r\n [0,6.190714811365291e+02,2.520205008433231e+02],\r\n [0,0,1]], dtype=\"double\")\r\n dist_coeffs = np.transpose([-0.216248222896496, 0.226313370014235, -0.001139415943532, \r\n -0.004624035593808, -0.059067986510048])\r\n (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points,\r\n image_points, camera_matrix, dist_coeffs,\r\n flags=cv2.SOLVEPNP_ITERATIVE)\r\n rotationtion_vector = cv2.Rodrigues(rotation_vector)[0]\r\n distance = np.sqrt(translation_vector[0]**2+translation_vector[1]**2+translation_vector[2]**2)\r\n \r\n return rotationtion_vector, translation_vector,distance/1000\r\n\r\ndef armor_6(fig): \r\n\r\n array = fig\r\n fig = cv2.resize(array,(48, 48))\r\n fig = torch.Tensor(fig)\r\n fig = fig.permute((2,0,1))\r\n img = torch.unsqueeze(fig, 0)\r\n outputs = net_model(img.cuda())\r\n _, predicted = torch.max(outputs.data, 1)\r\n\r\n return int(predicted)\r\n\r\ndef car_6(fig): \r\n array = fig\r\n fig = cv2.resize(array,(56,56))\r\n fig = torch.Tensor(fig)\r\n fig = fig.permute((2,0,1))\r\n img = torch.unsqueeze(fig, 0)\r\n outputs = net_model_car(img)\r\n _, predicted = torch.max(outputs.data, 1)\r\n\r\n return int(predicted)\r\n\r\ndef world_angle_6(fig, pose,camera = 'left'):\r\n\r\n pose_array = pose\r\n\r\n pose_x = pose_array[0]\r\n pose_y = pose_array[1]\r\n pose_x = float(pose_x)\r\n pose_y = float(pose_y)\r\n pose_array = (pose_x, pose_y)\r\n pose_array = np.array(pose_array, dtype='float').reshape(1,2)\r\n pose_array = torch.tensor(pose_array)\r\n\r\n array = fig\r\n fig = cv2.resize(array, (56, 56))\r\n fig = torch.Tensor(fig)\r\n fig = fig.permute(2, 0, 1)\r\n img = torch.unsqueeze(fig, 0)\r\n outputs = net_model_angle(img.cuda(), pose_array.cuda())\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n predicted = int(predicted)\r\n # 坐标转换\r\n pi = math.pi\r\n alpha = 0\r\n di = pi / 8\r\n theta = di * (2 * predicted + 1)\r\n try:\r\n if (theta >= pi / 2 + math.atan(pose_x / pose_y) and theta < pi):\r\n alpha = theta - pi / 2 - math.atan(pose_x / pose_y)\r\n elif(theta >= pi * 2 - math.atan(pose_y / pose_x) and theta < pi * 2):\r\n alpha = theta - pi * 3 + math.atan(pose_y / pose_x)\r\n else:\r\n alpha = theta - pi + math.atan(pose_y / pose_x)\r\n except:\r\n pass\r\n return alpha, predicted\r\n\r\ncap = cv2.VideoCapture(\"video_footage/1cars.avi\")\r\n\r\nif (cap.isOpened() == False):\r\n print(\"Error opening video stream or file\")\r\nposition_data = []\r\nn =0\r\nframe_id = 0\r\n\r\n\r\n#-----------yolo model------------------#\r\ncfgfile = \"cfg/yolov3_camera_raw_3_pre_resprune_sp0.001_p0.01_sp0.001_p0.01.cfg\"\r\nweightsfile = \"cfg/yolov3_camera_raw_3_pre_resprune_sp0.001_p0.01_sp0.001_p0.01.weights\"\r\nnames = \"cfg/camera_raw_0817_3.names\"\r\nclasses = load_classes(names)\r\nnum_classes = 2\r\n\r\nstart = 0\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nCUDA = torch.cuda.is_available()\r\ninp_dim = [416,416]\r\nbbox_attrs = 5 + num_classes\r\n\r\nprint(\"Loading network.....\")\r\nmodel = Darknet(cfgfile, inp_dim).to(device)\r\nmodel.load_darknet_weights(weightsfile)\r\n\r\n\r\n\r\n#--------------------------distance nihe------------------------#\r\nmlp_model = load_mlp_model(camera)\r\nmlp_model.eval()\r\nprint(\"Network successfully loaded\")\r\n\r\n\r\n#-----------------------class model---------------------------#\r\nnet_model = 
classification_modelload()\r\nnet_model_car = car_classification_modelload()\r\n#-----------------------anger model---------------------------#\r\nnet_model_angle = classification_angle_camer_modelload(camera)\r\n\r\n\r\nif CUDA:\r\n model.cuda()\r\n mlp_model.cuda()\r\n \r\nmodel(get_test_input(inp_dim, CUDA)).to(device)\r\nmodel.eval().to(device)\r\n\r\nlog_path = './video_footage/20200824/log.log' #读取车位姿信息\r\n\r\nf=open(log_path,\"r\")\r\nlines = f.readlines()\r\nf.close()\r\n\r\nret, frame = cap.read()\r\nrotationtion_vector_cam,translation_vector_cam = camera_calibration(frame,'left')\r\n\r\ntime_start = time.time() \r\nwhile (cap.isOpened()):\r\n try:\r\n ret, frame = cap.read()\r\n size_img = frame.shape[:2]\r\n frame_show = frame.copy()\r\n x_r,y_r = float(lines[(frame_id+1)*2].split(' ')[1]),float(lines[(frame_id+1)*2].split(' ')[2]) \r\n d_r = np.sqrt(x_r**2+y_r**2+2.07**2) #每张图对应车位置\r\n frame_id += 1\r\n except:\r\n print('time cost:', time_stop-time_start)\r\n break\r\n if ret == True:\r\n t_start = time.time()\r\n output_dict = output(frame, CUDA, model,device,num_classes)\r\n #t_yolo = time.time()\r\n for i in range(len(output_dict)):\r\n #global n,frame_show\r\n light = 0\r\n output_dict[i]['img_id'] = []\r\n output_dict[i]['car_class'] = []\r\n output_dict[i]['car_angle'] = []\r\n output_dict[i]['light_box'] = np.zeros((len(output_dict[i]['armor_box'])+1,4,2))\r\n output_dict[i]['position'] = np.zeros((len(output_dict[i]['armor_box'])+1,2))\r\n if len(output_dict[i]['armor_box']) != 0:\r\n y0,h = int(round(output_dict[i]['armor_box'][0][1]))-5,int(round(output_dict[i]['armor_box'][0][3])) - int(round(output_dict[i]['armor_box'][0][1]))+10\r\n x0,w = int(round(output_dict[i]['armor_box'][0][0]))-5,int(round(output_dict[i]['armor_box'][0][2])) - int(round(output_dict[i]['armor_box'][0][0]))+10\r\n robot = frame[y0:y0+h,x0:x0+w]\r\n if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:\r\n car_class = armor_6(robot)\r\n output_dict[i]['car_class'] = car_class\r\n for j in range(len(output_dict[i]['armor_box'])):\r\n\r\n index = j\r\n y0,h = int(round(output_dict[i]['armor_box'][j][1]))-5,int(round(output_dict[i]['armor_box'][j][3])) - int(round(output_dict[i]['armor_box'][j][1]))+10\r\n x0,w = int(round(output_dict[i]['armor_box'][j][0]))-5,int(round(output_dict[i]['armor_box'][j][2])) - int(round(output_dict[i]['armor_box'][j][0]))+10\r\n robot = frame[y0:y0+h,x0:x0+w]\r\n n +=1\r\n if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:\r\n dst_dilate,robot_resize, factor = read_morphology_withlightbox(robot)\r\n #cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (255, 0, 0), 1)\r\n _, box = find_contours_withlightbox(dst_dilate,robot_resize,index)\r\n if len(box) != 1:\r\n time_calculate1 = time.time()\r\n light += 1\r\n for l in range(len(box)):\r\n box[l][0] = box[l][0]/factor + x0\r\n box[l][1] = box[l][1]/factor + y0\r\n box = np.int0(box)\r\n frame_show = cv2.drawContours(frame_show,[box],0,(0,0,255),2)\r\n left_up,left_down,right_up,right_down = point_sort(box)\r\n print('%d.jpg'%(frame_id))\r\n if frame_id == 258:\r\n break\r\n rotationtion_vector, translation_vector,distance = draw_position_rect(frame_show, left_up,left_down,right_up,right_down )\r\n #-------from Camera coordinate system to world coordinate system-----#\r\n\r\n position_world = np.dot(np.linalg.inv(rotationtion_vector_cam),(translation_vector-translation_vector_cam))\r\n #print(position_world)\r\n x = (position_world[2] + 3260)/1000\r\n y = (-position_world[0] + 440)/1000+0.3\r\n 
output_dict[i]['light_box'][j] = box\r\n output_dict[i]['position'][j] = (x,y)\r\n\r\n if np.sqrt(((x0+w/2)-257)**2+((y0+h/2)-220)**2) > 50:\r\n cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (255, 0, 0), 1)\r\n elif len(output_dict[i]['armor_box']) == 0 or light == 0:\r\n y0,h = int(round(output_dict[i]['car_box'][1]))-5,int(round(output_dict[i]['car_box'][3])) - int(round(output_dict[i]['car_box'][1]))+10\r\n x0,w = int(round(output_dict[i]['car_box'][0]))-5,int(round(output_dict[i]['car_box'][2])) - int(round(output_dict[i]['car_box'][0]))+10\r\n robot = frame[y0:y0+h,x0:x0+w]\r\n if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:\r\n car_class = car_6(robot)\r\n\r\n n +=1\r\n if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:\r\n dst_dilate, robot_resize, factor = read_morphology_temp(robot)\r\n #cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (0, 0, 255), 1)\r\n \r\n _, box = find_contours(dst_dilate,robot_resize,0)\r\n if len(box) != 1:\r\n for l in range(len(box)):\r\n box[l][0] = box[l][0]/factor + x0\r\n box[l][1] = box[l][1]/factor + y0\r\n box = np.int0(box)\r\n #frame_show = cv2.drawContours(frame_show,[box],0,(0,0,255),2)\r\n left_up,left_down,right_up,right_down = point_sort(box)\r\n print('%d.jpg'%(frame_id))\r\n rotationtion_vector, translation_vector,distance = draw_position_rect(frame_show, left_up,left_down,right_up,right_down )\r\n #-------from Camera coordinate system to world coordinate system-----#\r\n\r\n position_world = np.dot(np.linalg.inv(rotationtion_vector_cam),(translation_vector-translation_vector_cam))\r\n\r\n x = (position_world[2] + 3260)/1000\r\n y = (-position_world[0] + 440)/1000+0.3\r\n output_dict[i]['position'][-1] = (x,y)\r\n\r\n \r\n # -------------MLP 位置预测 --------------------------------#\r\n if 'car_box' in output_dict[i]:\r\n time_positionpre1=time.time()\r\n mlp_x,mlp_y=position_prediction(mlp_model,output_dict[i]['car_box'])\r\n output_dict[i]['position_mlp'] = [mlp_x, mlp_y]\r\n time_positionpre2=time.time()\r\n\r\n # fusion\r\n position_f = position_fusion(output_dict[i])\r\n output_dict[i]['position_fusion'] = position_f\r\n # ------------angle predicted------------------------------#\r\n if len(output_dict[i]['car_box']) != 0 :\r\n y0,h = int(round(output_dict[i]['car_box'][1])),int(round(output_dict[i]['car_box'][3])) - int(round(output_dict[i]['car_box'][1]))\r\n x0,w = int(round(output_dict[i]['car_box'][0])),int(round(output_dict[i]['car_box'][2])) - int(round(output_dict[i]['car_box'][0]))\r\n robot = frame[y0:y0+h,x0:x0+w]\r\n if np.shape(robot)[0] !=0 and np.shape(robot)[1] !=0:\r\n\r\n pose = output_dict[i]['position_mlp']\r\n angle, predicted = world_angle_6(robot, pose)\r\n output_dict[i]['car_angle'] = angle\r\n time_anglepre2=time.time()\r\n car_class_dict = ['blue-1','blue-2','red-1','red-2','grey-1','grey-1']\r\n try:\r\n if len(output_dict[i]['armor_box']) != 0:\r\n cv2.rectangle(frame_show, (x0, y0), (x0 + w, y0 + h), (0, 0, 255), 1)\r\n text = 'ID: ' + car_class_dict[output_dict[i]['car_class']] + ' Pose: ' + str(round(output_dict[i]['car_angle'], 3))\r\n cv2.putText(frame_show, text, (x0,y0-20), cv2.FONT_HERSHEY_PLAIN, 1, [0,255,0], 1)\r\n\r\n\r\n final_out = output_dict[i]['position_mlp']\r\n frame_show = cv2.putText(frame_show, 'x=%.2f,y=%.2f' % (final_out[0],final_out[1]), (x0,y0),cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\r\n except:\r\n pass\r\n\r\n t_stop = time.time()\r\n time_stop = time.time()\r\n print('t cost:', t_stop - t_start)\r\n #-----------test log------------------------------#\r\n 
'''text = 'car_1 x: ' + str(x_r) + ' car_1 y: ' + str(y_r)\r\n cv2.putText(frame_show, text, (50,50), cv2.FONT_HERSHEY_PLAIN, 1, [0,255,0], 1)\r\n text = 'car_2 x: ' + str(x_r1) + ' car_2 y: ' + str(y_r1)\r\n cv2.putText(frame_show, text, (50,70), cv2.FONT_HERSHEY_PLAIN, 1, [0,0,255], 1)'''\r\n img_name = str(frame_id) + '.jpg'\r\n img_p = './fig4/' + img_name\r\n\r\n cv2.imshow('img', frame_show)\r\n cv2.imwrite(img_p,frame_show)\r\n cv2.moveWindow('img', 0, 0) \r\n cv2.waitKey(5)\r\n\r\n else:\r\n print('time cost:', time_stop-time_start)\r\n break\r\n"
] |
[
[
"numpy.int0",
"numpy.sqrt",
"torch.Tensor",
"torch.max",
"numpy.linalg.inv",
"numpy.set_printoptions",
"torch.from_numpy",
"torch.unsqueeze",
"torch.tensor",
"numpy.shape",
"torch.cuda.is_available",
"numpy.transpose",
"numpy.argsort",
"numpy.array"
]
] |
ptklx/segmentation_models.pytorch
|
[
"16c68a7e6bff9644b97f340d67912c4785219818"
] |
[
"my_timm/models/dla.py"
] |
[
"\"\"\" Deep Layer Aggregation and DLA w/ Res2Net\nDLA original adapted from Official Pytorch impl at:\nDLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484\n\nRes2Net additions from: https://github.com/gasvn/Res2Net/\nRes2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169\n\"\"\"\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .registry import register_model\nfrom .helpers import load_pretrained\nfrom .layers import SelectAdaptivePool2d\nfrom my_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\n\n\n__all__ = ['DLA']\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bilinear',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'base_layer.0', 'classifier': 'fc',\n **kwargs\n }\n\n\ndefault_cfgs = {\n 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'),\n 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'),\n 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'),\n 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'),\n 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'),\n 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'),\n 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'),\n 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'),\n 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'),\n 'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'),\n 'dla60_res2net': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'),\n 'dla60_res2next': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'),\n}\n\n\nclass DlaBasic(nn.Module):\n \"\"\"DLA Basic\"\"\"\n def __init__(self, inplanes, planes, stride=1, dilation=1, **_):\n super(DlaBasic, self).__init__()\n self.conv1 = nn.Conv2d(\n inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(planes)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DlaBottleneck(nn.Module):\n \"\"\"DLA/DLA-X Bottleneck\"\"\"\n expansion = 2\n\n def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64):\n super(DlaBottleneck, self).__init__()\n self.stride = stride\n mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)\n mid_planes = mid_planes // self.expansion\n\n self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(mid_planes)\n self.conv2 = nn.Conv2d(\n mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation,\n bias=False, dilation=dilation, groups=cardinality)\n self.bn2 = 
nn.BatchNorm2d(mid_planes)\n self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(outplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DlaBottle2neck(nn.Module):\n \"\"\" Res2Net/Res2NeXT DLA Bottleneck\n Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py\n \"\"\"\n expansion = 2\n\n def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4):\n super(DlaBottle2neck, self).__init__()\n self.is_first = stride > 1\n self.scale = scale\n mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)\n mid_planes = mid_planes // self.expansion\n self.width = mid_planes\n\n self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(mid_planes * scale)\n\n num_scale_convs = max(1, scale - 1)\n convs = []\n bns = []\n for _ in range(num_scale_convs):\n convs.append(nn.Conv2d(\n mid_planes, mid_planes, kernel_size=3, stride=stride,\n padding=dilation, dilation=dilation, groups=cardinality, bias=False))\n bns.append(nn.BatchNorm2d(mid_planes))\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(bns)\n if self.is_first:\n self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)\n\n self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(outplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n spx = torch.split(out, self.width, 1)\n spo = []\n for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):\n sp = spx[i] if i == 0 or self.is_first else sp + spx[i]\n sp = conv(sp)\n sp = bn(sp)\n sp = self.relu(sp)\n spo.append(sp)\n if self.scale > 1 :\n spo.append(self.pool(spx[-1]) if self.is_first else spx[-1])\n out = torch.cat(spo, 1)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DlaRoot(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, residual):\n super(DlaRoot, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n self.residual = residual\n\n def forward(self, *x):\n children = x\n x = self.conv(torch.cat(x, 1))\n x = self.bn(x)\n if self.residual:\n x += children[0]\n x = self.relu(x)\n\n return x\n\n\nclass DlaTree(nn.Module):\n def __init__(self, levels, block, in_channels, out_channels, stride=1,\n dilation=1, cardinality=1, base_width=64,\n level_root=False, root_dim=0, root_kernel_size=1, root_residual=False):\n super(DlaTree, self).__init__()\n if root_dim == 0:\n root_dim = 2 * out_channels\n if level_root:\n root_dim += in_channels\n cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width)\n if levels == 1:\n self.tree1 = block(in_channels, out_channels, stride, **cargs)\n self.tree2 = block(out_channels, out_channels, 1, **cargs)\n else:\n cargs.update(dict(root_kernel_size=root_kernel_size, root_residual=root_residual))\n 
self.tree1 = DlaTree(\n levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs)\n self.tree2 = DlaTree(\n levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs)\n if levels == 1:\n self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_residual)\n self.level_root = level_root\n self.root_dim = root_dim\n self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else None\n self.project = None\n if in_channels != out_channels:\n self.project = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n self.levels = levels\n\n def forward(self, x, residual=None, children=None):\n children = [] if children is None else children\n bottom = self.downsample(x) if self.downsample else x\n residual = self.project(bottom) if self.project else bottom\n if self.level_root:\n children.append(bottom)\n x1 = self.tree1(x, residual)\n if self.levels == 1:\n x2 = self.tree2(x1)\n x = self.root(x2, x1, *children)\n else:\n children.append(x1)\n x = self.tree2(x1, children=children)\n return x\n\n\nclass DLA(nn.Module):\n def __init__(self, levels, channels, num_classes=1000, in_chans=3, cardinality=1, base_width=64,\n block=DlaBottle2neck, residual_root=False, linear_root=False,\n drop_rate=0.0, global_pool='avg'):\n super(DLA, self).__init__()\n self.channels = channels\n self.num_classes = num_classes\n self.cardinality = cardinality\n self.base_width = base_width\n self.drop_rate = drop_rate\n\n self.base_layer = nn.Sequential(\n nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),\n nn.BatchNorm2d(channels[0]),\n nn.ReLU(inplace=True))\n self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])\n self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)\n cargs = dict(cardinality=cardinality, base_width=base_width, root_residual=residual_root)\n self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs)\n self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs)\n self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs)\n self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs)\n\n self.num_features = channels[-1]\n self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)\n self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):\n modules = []\n for i in range(convs):\n modules.extend([\n nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1,\n padding=dilation, bias=False, dilation=dilation),\n nn.BatchNorm2d(planes),\n nn.ReLU(inplace=True)])\n inplanes = planes\n return nn.Sequential(*modules)\n\n def get_classifier(self):\n return self.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.num_classes = num_classes\n self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)\n if num_classes:\n self.fc = nn.Conv2d(self.num_features * self.global_pool.feat_mult(), num_classes, 1, bias=True)\n else:\n self.fc = None\n\n def forward_features(self, x):\n x = self.base_layer(x)\n x = self.level0(x)\n x = self.level1(x)\n x = self.level2(x)\n x = self.level3(x)\n x = self.level4(x)\n x = self.level5(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.global_pool(x)\n if self.drop_rate > 0.:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = self.fc(x)\n return x.flatten(1)\n\n\n@register_model\ndef dla60_res2net(pretrained=None, num_classes=1000, in_chans=3, **kwargs):\n default_cfg = default_cfgs['dla60_res2net']\n model = DLA(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),\n block=DlaBottle2neck, cardinality=1, base_width=28,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60_res2next(pretrained=None, num_classes=1000, in_chans=3, **kwargs):\n default_cfg = default_cfgs['dla60_res2next']\n model = DLA(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),\n block=DlaBottle2neck, cardinality=8, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla34(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-34\n default_cfg = default_cfgs['dla34']\n model = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], block=DlaBasic, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla46_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-46-C\n default_cfg = default_cfgs['dla46_c']\n model = DLA(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],\n block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla46x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-46-C\n default_cfg = default_cfgs['dla46x_c']\n model = DLA(levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],\n block=DlaBottleneck, cardinality=32, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60x_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-60-C\n default_cfg = default_cfgs['dla60x_c']\n model = DLA([1, 1, 1, 
2, 3, 1], [16, 32, 64, 64, 128, 256],\n block=DlaBottleneck, cardinality=32, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-60\n default_cfg = default_cfgs['dla60']\n model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla60x(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-60\n default_cfg = default_cfgs['dla60x']\n model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, cardinality=32, base_width=4,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla102(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-102\n default_cfg = default_cfgs['dla102']\n model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla102x(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-102\n default_cfg = default_cfgs['dla102x']\n model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, cardinality=32, base_width=4, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla102x2(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-X-102 64\n default_cfg = default_cfgs['dla102x2']\n model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, cardinality=64, base_width=4, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n\n\n@register_model\ndef dla169(pretrained=None, num_classes=1000, in_chans=3, **kwargs): # DLA-169\n default_cfg = default_cfgs['dla169']\n model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],\n block=DlaBottleneck, residual_root=True,\n num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model\n"
] |
[
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.functional.dropout",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.split",
"torch.nn.ReLU"
]
] |
mlberkeley/multiarchy
|
[
"18ceae308efe67ad3e575a3e76a784af036b25a6",
"18ceae308efe67ad3e575a3e76a784af036b25a6"
] |
[
"multiarchy/algorithms/ddpg.py",
"multiarchy/distributions/gaussian.py"
] |
[
"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom multiarchy.algorithms.algorithm import Algorithm\nimport tensorflow as tf\n\n\nclass DDPG(Algorithm):\n\n def __init__(\n self,\n policy,\n target_policy,\n qf,\n target_qf,\n replay_buffer,\n reward_scale=1.0,\n discount=0.99,\n observation_key=\"observation\",\n batch_size=32,\n update_every=1,\n update_after=0,\n logger=None,\n logging_prefix=\"ddpg/\"\n ):\n # train a policy using the deep deterministic policy gradient\n Algorithm.__init__(\n self,\n replay_buffer,\n batch_size=batch_size,\n update_every=update_every,\n update_after=update_after,\n logger=logger,\n logging_prefix=logging_prefix)\n\n # each neural network is probabilistic\n self.policy = policy\n self.target_policy = target_policy\n self.qf = qf\n self.target_qf = target_qf\n\n # select into the observation dictionary\n self.observation_key = observation_key\n\n # control some parameters that are important for ddpg\n self.reward_scale = reward_scale\n self.discount = discount\n\n def update_algorithm(\n self,\n observations,\n actions,\n rewards,\n next_observations,\n terminals\n ):\n # select from the observation dictionary\n observations = observations[self.observation_key]\n next_observations = next_observations[self.observation_key]\n\n # build a tape to collect gradients from the policy and critics\n with tf.GradientTape(persistent=True) as tape:\n mean_actions, log_pi = self.policy.expected_value(observations)\n next_mean_actions, next_log_pi = self.target_policy.expected_value(\n next_observations)\n\n # build the q function target value\n inputs = tf.concat([next_observations, next_mean_actions], -1)\n target_qf_value = self.target_qf(inputs)[..., 0]\n self.record(\"target_qf_value\", tf.reduce_mean(target_qf_value).numpy())\n qf_targets = tf.stop_gradient(\n self.reward_scale * rewards + terminals * self.discount * (\n target_qf_value))\n self.record(\"qf_targets\", tf.reduce_mean(qf_targets).numpy())\n\n # build the q function loss\n inputs = tf.concat([observations, actions], -1)\n qf_value = self.qf(inputs)[..., 0]\n self.record(\"qf_value\", tf.reduce_mean(qf_value).numpy())\n qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))\n self.record(\"qf_loss\", qf_loss.numpy())\n\n # build the policy loss\n inputs = tf.concat([observations, mean_actions], -1)\n policy_qf_value = self.qf(inputs)[..., 0]\n self.record(\"policy_qf_value\", tf.reduce_mean(policy_qf_value).numpy())\n policy_loss = -tf.reduce_mean(policy_qf_value)\n self.record(\"policy_loss\", policy_loss.numpy())\n\n # back prop gradients\n self.policy.apply_gradients(\n self.policy.compute_gradients(policy_loss, tape))\n self.qf.apply_gradients(\n self.qf.compute_gradients(qf_loss, tape))\n\n # soft update target parameters\n self.target_policy.soft_update(self.policy.get_weights())\n self.target_qf.soft_update(self.qf.get_weights())\n",
"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom multiarchy.distributions.distribution import Distribution\nimport tensorflow as tf\nimport math\n\n\nclass Gaussian(Distribution):\n\n def __init__(\n self,\n model,\n std=1.0,\n tau=0.01,\n optimizer_class=tf.keras.optimizers.Adam,\n optimizer_kwargs=None,\n ):\n # create a gaussian distribution with fixed or learned standard deviation\n Distribution.__init__(\n self,\n model,\n tau=tau,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs)\n self.std = std\n\n def __getstate__(\n self\n ):\n # handle pickle actions so the agent can be sent between threads\n state = Distribution.__getstate__(self)\n return dict(std=self.std, **state)\n\n def __setstate__(\n self,\n state\n ):\n # handle pickle actions so the agent can be sent between threads\n Distribution.__setstate__(self, state)\n self.std = state[\"std\"]\n\n def get_parameters(\n self,\n *inputs\n ):\n # get the mean and the log standard deviation of the distribution\n x = self.model(tf.concat(inputs, (-1)))\n if self.std is None:\n return tf.split(x, 2, axis=(-1))\n else:\n return x, tf.math.log(tf.fill(tf.shape(x), self.std))\n\n def sample(\n self,\n *inputs\n ):\n # get the mean and the log standard deviation of the distribution\n mean, log_std = self.get_parameters(*inputs)\n std = tf.math.exp(log_std)\n\n # re parameterized sample from the distribution\n gaussian_samples = mean + tf.random.normal(tf.shape(mean)) * std\n\n # compute the log probability density of the samples\n return gaussian_samples, tf.reduce_sum(\n - 0.5 * ((gaussian_samples - mean) / std) ** 2\n - log_std\n - 0.5 * tf.math.log(2 * math.pi), axis=(-1))\n\n def expected_value(\n self,\n *inputs\n ):\n # get the mean and the log standard deviation of the distribution\n mean, log_std = self.get_parameters(*inputs)\n\n # compute the log probability density of the mean\n return mean, tf.reduce_sum(\n - log_std\n - 0.5 * tf.math.log(2 * math.pi), axis=(-1))\n\n def log_prob(\n self,\n gaussian_samples,\n *inputs\n ):\n # get the mean and the log standard deviation of the distribution\n mean, log_std = self.get_parameters(*inputs)\n std = tf.math.exp(log_std)\n\n # compute the log probability density of the samples\n return tf.reduce_sum(\n - 0.5 * ((gaussian_samples - mean) / std) ** 2\n - log_std\n - 0.5 * tf.math.log(2 * math.pi), axis=(-1))\n"
] |
[
[
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.keras.losses.logcosh",
"tensorflow.stop_gradient",
"tensorflow.GradientTape"
],
[
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.math.log",
"tensorflow.math.exp",
"tensorflow.split"
]
] |
scrime-u-bordeaux/dereverberation-ml
|
[
"ba335d400bf2235afabd4151fdae4906c0cd87a8"
] |
[
"src/datagen/utils.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"Set and get audio properties\"\"\"\n\nimport src.utils.path as pth\nimport src.utils.logger as log\n\nimport numpy as np\nimport fleep\n\n\n## Useful to avoid picking non audio files ##\n\"\"\"\nfrom mimetypes import guess_type\n\ndef __is_audio_file(fpath):\n \\\"\"\"Return True if <fpath> is an audio file.\n Otherwise return False.\n \\\"\"\"\n if not pth.__is_file(fpath):\n return False\n\n info = guess_type(fpath)[0]\n \n return info.split('/')[0] == 'audio'\n\"\"\"\ndef __is_audio_file(fpath):\n \"\"\"Return True if <fpath> is an audio file.\n Otherwise return False.\n \"\"\"\n if not pth.__is_file(fpath):\n return False\n\n f = pth.__open_file(fpath, _mode='rb')\n info = fleep.get(f.read(128))\n pth.__close_file(f)\n \n return info.type_matches('audio')\n\ndef __list_audio_files(path, recursively=True):\n \"\"\"Return a list of audio files at <path>.\n If <recursively> is set to True, look for files recursively in-depth.\n \"\"\"\n return list(filter(__is_audio_file, pth.__list_files(path, recursively)))\n\n## Various functions based on audio properties ##\n \ndef __mono(audio_segment):\n \"\"\"Return a mono version of <audio_segment>.\"\"\"\n return audio_segment.set_channels(1)\n\ndef __with_sample_rate(audio_segment, sample_rate):\n \"\"\"Return a version of <audio_segment> with updated sample rate.\"\"\"\n return audio_segment.set_frame_rate(sample_rate)\n\ndef __with_bit_depth(audio_segment, bit_depth):\n \"\"\"Return a version of <audio_segment> with updated bit depth.\"\"\"\n if bit_depth % 8 != 0:\n log.error(\"Bit depth should be a multiple of 8, used value here is {0}\".format(bit_depth))\n \n return audio_segment.set_sample_width(bit_depth // 8)\n\ndef __convert(audio_segment, _type=None):\n \"\"\"Convert <audio_segment> into numpy array with dtype <_type>.\n If <_type> is None, by default 'float64' dtype is used.\n \"\"\"\n return np.array(audio_segment.get_array_of_samples(), dtype=_type)\n\ndef __normalize(npy_array):\n \"\"\"Normalize <npy_array> by its maximum in absolute value.\n If maximum is null, return the same array.\n \"\"\"\n M = max(abs(npy_array))\n if M:\n return npy_array / M\n \n return npy_array\n\ndef __float2pcm(npy_array, _type='int16'):\n \"\"\"Convert <npy_array> from float to pcm.\n Default conversion type is 'int16'.\n \"\"\"\n info = np.iinfo(_type)\n amp = 2**(info.bits - 1)\n offset = info.min + amp\n \n npy_array = npy_array * amp + offset\n\n return npy_array.clip(info.min, info.max).astype(_type)\n\ndef __pcm2float(npy_array, _type='float64'):\n \"\"\"Convert <npy_array> from pcm to float.\n Default conversion type is 'float64'.\n \"\"\"\n if npy_array.dtype.kind != 'i':\n log.error(\"\\'__pcm2float\\' takes an array of integers, forcing conversion to int16\")\n npy_array = npy_array.astype('int16')\n \n info = np.iinfo(npy_array.dtype)\n amp = 2**(info.bits - 1)\n offset = info.min + amp\n \n npy_array = (npy_array - offset) / amp\n\n return npy_array.clip(-1., 1.).astype(_type)\n"
] |
[
[
"numpy.iinfo"
]
] |
fatiando/v0.1
|
[
"1ab9876b247c67834b8e1c874d5b1d86f82802e2"
] |
[
"_static/cookbook/seismic_srtomo_sparse.py"
] |
[
"\"\"\"\nSeismic: 2D straight-ray tomography of large data sets and models using\nsparse matrices\n\nUses synthetic data and a model generated from an image file.\n\nSince the image is big, use sparse matrices and a steepest descent solver\n(it doesn't require Hessians).\n\nWARNING: may take a long time to calculate.\n\n\"\"\"\nimport urllib\nimport time\nfrom os import path\nimport numpy\nfrom fatiando import logger, mesher, utils, seismic, vis, inversion\n\nlog = logger.get()\nlog.info(logger.header())\nlog.info(__doc__)\n\narea = (0, 100000, 0, 100000)\nshape = (100, 100)\nmodel = mesher.SquareMesh(area, shape)\n# Fetch the image from the online docs\nurllib.urlretrieve(\n 'http://fatiando.readthedocs.org/en/latest/_static/logo.png', 'logo.png')\nmodel.img2prop('logo.png', 4000, 10000, 'vp')\n\n# Make some travel time data and add noise\nlog.info(\"Generating synthetic travel-time data\")\nsrc_loc = utils.random_points(area, 200)\nrec_loc = utils.circular_points(area, 80, random=True)\nsrcs, recs = utils.connect_points(src_loc, rec_loc)\nstart = time.time()\nttimes = seismic.ttime2d.straight(model, 'vp', srcs, recs, par=True)\nlog.info(\" time: %s\" % (utils.sec2hms(time.time() - start)))\nttimes, error = utils.contaminate(ttimes, 0.01, percent=True,\n return_stddev=True)\n# Make the mesh\nmesh = mesher.SquareMesh(area, shape)\n# Since the matrices are big, use the Steepest Descent solver to avoid dealing\n# with Hessian matrices. It needs a starting guess, so start with 1000\ninversion.gradient.use_sparse()\nsolver = inversion.gradient.steepest(1000*numpy.ones(mesh.size))\n# and run the inversion\nestimate, residuals = seismic.srtomo.run(ttimes, srcs, recs, mesh, sparse=True,\n solver=solver, smooth=0.01)\n# Convert the slowness estimate to velocities and add it the mesh\nmesh.addprop('vp', seismic.srtomo.slowness2vel(estimate))\n\n# Calculate and print the standard deviation of the residuals\n# it should be close to the data error if the inversion was able to fit the data\nlog.info(\"Assumed error: %f\" % (error))\nlog.info(\"Standard deviation of residuals: %f\" % (numpy.std(residuals)))\n\nvis.mpl.figure(figsize=(14, 5))\nvis.mpl.subplot(1, 2, 1)\nvis.mpl.axis('scaled')\nvis.mpl.title('Vp synthetic model of the Earth')\nvis.mpl.squaremesh(model, prop='vp', vmin=4000, vmax=10000,\n cmap=vis.mpl.cm.seismic)\ncb = vis.mpl.colorbar()\ncb.set_label('Velocity')\nvis.mpl.points(src_loc, '*y', label=\"Sources\")\nvis.mpl.points(rec_loc, '^r', label=\"Receivers\")\nvis.mpl.legend(loc='lower left', shadow=True, numpoints=1, prop={'size':10})\nvis.mpl.subplot(1, 2, 2)\nvis.mpl.axis('scaled')\nvis.mpl.title('Tomography result')\nvis.mpl.squaremesh(mesh, prop='vp', vmin=4000, vmax=10000,\n cmap=vis.mpl.cm.seismic)\ncb = vis.mpl.colorbar()\ncb.set_label('Velocity')\nvis.mpl.figure()\nvis.mpl.grid()\nvis.mpl.title('Residuals (data with %.4f s error)' % (error))\nvis.mpl.hist(residuals, color='gray', bins=15)\nvis.mpl.xlabel(\"seconds\")\nvis.mpl.show()\nvis.mpl.show()\n"
] |
[
[
"numpy.std",
"numpy.ones"
]
] |
varun-jois/KAIR
|
[
"90c04671c6eb32a6765edfec94f7db3ba1f53f1e"
] |
[
"main_test_bsrgan.py"
] |
[
"import os.path\nimport logging\nimport torch\n\nfrom utils import utils_logger\nfrom utils import utils_image as util\n# from utils import utils_model\nfrom models.network_rrdbnet import RRDBNet as net\n\n\n\"\"\"\nSpyder (Python 3.6-3.7)\nPyTorch 1.4.0-1.8.1\nWindows 10 or Linux\nKai Zhang (cskaizhang@gmail.com)\ngithub: https://github.com/cszn/BSRGAN\n https://github.com/cszn/KAIR\nIf you have any question, please feel free to contact with me.\nKai Zhang (e-mail: cskaizhang@gmail.com)\nby Kai Zhang ( March/2020 --> March/2021 --> )\nThis work was previously submitted to CVPR2021.\n\n# --------------------------------------------\n@inproceedings{zhang2021designing,\n title={Designing a Practical Degradation Model for Deep Blind Image Super-Resolution},\n author={Zhang, Kai and Liang, Jingyun and Van Gool, Luc and Timofte, Radu},\n booktitle={arxiv},\n year={2021}\n}\n# --------------------------------------------\n\n\"\"\"\n\n\ndef main():\n\n utils_logger.logger_info('blind_sr_log', log_path='blind_sr_log.log')\n logger = logging.getLogger('blind_sr_log')\n\n# print(torch.__version__) # pytorch version\n# print(torch.version.cuda) # cuda version\n# print(torch.backends.cudnn.version()) # cudnn version\n\n testsets = 'testsets' # fixed, set path of testsets\n testset_Ls = ['RealSRSet'] # ['RealSRSet','DPED']\n\n model_names = ['RRDB','ESRGAN','FSSR_DPED','FSSR_JPEG','RealSR_DPED','RealSR_JPEG']\n model_names = ['BSRGAN'] # 'BSRGANx2' for scale factor 2\n\n\n\n save_results = True\n sf = 4\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n for model_name in model_names:\n if model_name in ['BSRGANx2']:\n sf = 2\n model_path = os.path.join('model_zoo', model_name+'.pth') # set model path\n logger.info('{:>16s} : {:s}'.format('Model Name', model_name))\n\n # torch.cuda.set_device(0) # set GPU ID\n logger.info('{:>16s} : {:<d}'.format('GPU ID', torch.cuda.current_device()))\n torch.cuda.empty_cache()\n\n # --------------------------------\n # define network and load model\n # --------------------------------\n model = net(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=sf) # define network\n\n# model_old = torch.load(model_path)\n# state_dict = model.state_dict()\n# for ((key, param),(key2, param2)) in zip(model_old.items(), state_dict.items()):\n# state_dict[key2] = param\n# model.load_state_dict(state_dict, strict=True)\n\n model.load_state_dict(torch.load(model_path), strict=True)\n model.eval()\n for k, v in model.named_parameters():\n v.requires_grad = False\n model = model.to(device)\n torch.cuda.empty_cache()\n\n for testset_L in testset_Ls:\n\n L_path = os.path.join(testsets, testset_L)\n #E_path = os.path.join(testsets, testset_L+'_'+model_name)\n E_path = os.path.join(testsets, testset_L+'_results_x'+str(sf))\n util.mkdir(E_path)\n\n logger.info('{:>16s} : {:s}'.format('Input Path', L_path))\n logger.info('{:>16s} : {:s}'.format('Output Path', E_path))\n idx = 0\n\n for img in util.get_image_paths(L_path):\n\n # --------------------------------\n # (1) img_L\n # --------------------------------\n idx += 1\n img_name, ext = os.path.splitext(os.path.basename(img))\n logger.info('{:->4d} --> {:<s} --> x{:<d}--> {:<s}'.format(idx, model_name, sf, img_name+ext))\n\n img_L = util.imread_uint(img, n_channels=3)\n img_L = util.uint2tensor4(img_L)\n img_L = img_L.to(device)\n\n # --------------------------------\n # (2) inference\n # --------------------------------\n img_E = model(img_L)\n\n # --------------------------------\n # (3) img_E\n # 
--------------------------------\n img_E = util.tensor2uint(img_E)\n if save_results:\n util.imsave(img_E, os.path.join(E_path, img_name+'_'+model_name+'.png'))\n\n\nif __name__ == '__main__':\n\n main()\n"
] |
[
[
"torch.cuda.current_device",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.load"
]
] |
eridgd/texar
|
[
"9c699e8143fd8ecb5d65a41ceef09c45832b9258"
] |
[
"examples/transformer/bleu_tool.py"
] |
[
"# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Modifications copyright (C) 2018 Texar\n# ==============================================================================\n\"\"\"BLEU metric utililities used for MT eval.\n\nUsage: python bleu_tool.py --translation=my-wmt13.de --reference=wmt13_deen.de\n\"\"\"\n# This also:\n# Put compounds in ATAT format (comparable to papers like GNMT, ConvS2S).\n# See https://nlp.stanford.edu/projects/nmt/ :\n# 'Also, for historical reasons, we split compound words, e.g.,\n# \"rich-text format\" --> rich ##AT##-##AT## text format.\"'\n# BLEU score will be similar to the one obtained using: mteval-v14.pl\n# Note:compound splitting is not implemented in this module\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom argparse import ArgumentParser\nfrom io import open\nimport collections\nimport math\nimport re\nimport sys\nimport unicodedata\n\n# Dependency imports\n\nimport numpy as np\nimport six\n# pylint: disable=redefined-builtin\nfrom six.moves import xrange\nfrom six.moves import zip\n\n\n# pylint: enable=redefined-builtin\n\n\ndef _get_ngrams(segment, max_order):\n \"\"\"Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n \"\"\"\n ngram_counts = collections.Counter()\n for order in xrange(1, max_order + 1):\n for i in xrange(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts\n\n\ndef compute_bleu(reference_corpus,\n translation_corpus,\n max_order=4,\n use_bp=True):\n \"\"\"Computes BLEU score of translated segments against references.\n\n Args:\n reference_corpus: list of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. 
Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n use_bp: boolean, whether to apply brevity penalty.\n Returns:\n BLEU score.\n \"\"\"\n\n reference_length = 0\n translation_length = 0\n bp = 1.0\n geo_mean = 0\n\n matches_by_order = [0] * max_order\n possible_matches_by_order = [0] * max_order\n precisions = []\n\n for (references, translations) in zip(reference_corpus, translation_corpus):\n reference_length += len(references)\n translation_length += len(translations)\n ref_ngram_counts = _get_ngrams(references, max_order)\n translation_ngram_counts = _get_ngrams(translations, max_order)\n\n overlap = dict((ngram,\n min(count, translation_ngram_counts[ngram]))\n for ngram, count in ref_ngram_counts.items())\n\n for ngram in overlap:\n matches_by_order[len(ngram) - 1] += overlap[ngram]\n for ngram in translation_ngram_counts:\n possible_matches_by_order[len(ngram) - 1] += \\\n translation_ngram_counts[ngram]\n precisions = [0] * max_order\n smooth = 1.0\n for i in xrange(0, max_order):\n if possible_matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n if matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / \\\n possible_matches_by_order[i]\n else:\n smooth *= 2\n precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])\n else:\n precisions[i] = 0.0\n\n if max(precisions) > 0:\n p_log_sum = sum(math.log(p) for p in precisions if p)\n geo_mean = math.exp(p_log_sum / max_order)\n\n if use_bp:\n ratio = translation_length / reference_length\n bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0\n bleu = geo_mean * bp\n return np.float32(bleu)\n\n\nclass UnicodeRegex(object):\n \"\"\"Ad-hoc hack to recognize all punctuation and symbols.\"\"\"\n # pylint:disable=too-few-public-methods\n def __init__(self):\n punctuation = self.property_chars(\"P\")\n self.nondigit_punct_re = re.compile(r\"([^\\d])([\" + punctuation + r\"])\")\n self.punct_nondigit_re = re.compile(r\"([\" + punctuation + r\"])([^\\d])\")\n self.symbol_re = re.compile(\"([\" + self.property_chars(\"S\") + \"])\")\n\n def property_chars(self, prefix):\n #pylint:disable=no-self-use\n return \"\".join(six.unichr(x) for x in range(sys.maxunicode) \\\n if unicodedata.category(six.unichr(x)).startswith(prefix))\n\n\nuregex = UnicodeRegex()\n\n\ndef bleu_tokenize(string):\n r\"\"\"Tokenize a string following the official BLEU implementation.\n\n See https://github.com/moses-smt/mosesdecoder/\"\n \"blob/master/scripts/generic/mteval-v14.pl#L954-L983\n In our case, the input string is expected to be just one line\n and no HTML entities de-escaping is needed.\n So we just tokenize on punctuation and symbols,\n except when a punctuation is preceded and followed by a digit\n (e.g. a comma/dot as a thousand/decimal separator).\n\n Note that a numer (e.g. a year) followed by a dot at the end of sentence\n is NOT tokenized,\n i.e. 
the dot stays with the number because `s/(\\p{P})(\\P{N})/ $1 $2/g`\n does not match this case (unless we add a space after each sentence).\n However, this error is already in the original mteval-v14.pl\n and we want to be consistent with it.\n\n Args:\n string: the input string\n\n Returns:\n a list of tokens\n \"\"\"\n string = uregex.nondigit_punct_re.sub(r\"\\1 \\2 \", string)\n string = uregex.punct_nondigit_re.sub(r\" \\1 \\2\", string)\n string = uregex.symbol_re.sub(r\" \\1 \", string)\n return string.split()\n\n\ndef bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):\n \"\"\"Compute BLEU for two files (reference and hypothesis translation).\"\"\"\n ref_lines = open(ref_filename, encoding='utf-8').read().splitlines()\n hyp_lines = open(hyp_filename, encoding='utf-8').read().splitlines()\n assert len(ref_lines) == len(hyp_lines)\n if not case_sensitive:\n ref_lines = [x.lower() for x in ref_lines]\n hyp_lines = [x.lower() for x in hyp_lines]\n ref_tokens = [bleu_tokenize(x) for x in ref_lines]\n hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]\n return compute_bleu(ref_tokens, hyp_tokens)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description='Compute BLEU score. \\\n Usage: t2t-bleu --translation=my-wmt13.de --reference=wmt13_deen.de')\n\n parser.add_argument('--translation', type=str)\n parser.add_argument('--reference', type=str)\n args = parser.parse_args()\n\n bleu = 100 * bleu_wrapper(args.reference,\n args.translation,\n case_sensitive=False)\n print(\"BLEU_uncased = %6.2f\" % bleu)\n bleu = 100 * bleu_wrapper(args.reference,\n args.translation,\n case_sensitive=True)\n print(\"BLEU_cased = %6.2f\" % bleu)\n"
] |
[
[
"numpy.float32"
]
] |
shelizi/GPT2-Chinese
|
[
"8d4989e058453caf06e2a6ef5173b258d7fc3336"
] |
[
"generate.py"
] |
[
"import torch\nimport torch.nn.functional as F\nimport os\nimport argparse\nfrom tqdm import trange\nfrom transformers import GPT2LMHeadModel\nimport transformers\n\ndef is_word(word):\n for item in list(word):\n if item not in 'qwertyuiopasdfghjklzxcvbnm':\n return False\n return True\n\n\ndef _is_chinese_char(char):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n cp = ord(char)\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n\ndef top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n \"\"\"\n assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear\n top_k = min(top_k, logits.size(-1)) # Safety check\n if top_k > 0:\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n return logits\n\n\ndef sample_sequence(model, context, length, n_ctx, tokenizer, temperature=1.0, top_k=30, top_p=0.0, repitition_penalty=1.0,\n device='cpu'):\n context = torch.tensor(context, dtype=torch.long, device=device)\n context = context.unsqueeze(0)\n generated = context\n with torch.no_grad():\n for _ in trange(length):\n inputs = {'input_ids': generated[0][-(n_ctx - 1):].unsqueeze(0)}\n outputs = model(\n **inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)\n next_token_logits = outputs[0][0, -1, :]\n for id in set(generated):\n next_token_logits[id] /= repitition_penalty\n next_token_logits = next_token_logits / temperature\n 
next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')\n filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)\n return generated.tolist()[0]\n\n\ndef fast_sample_sequence(model, context, length, temperature=1.0, top_k=30, top_p=0.0, device='cpu'):\n inputs = torch.LongTensor(context).view(1, -1).to(device)\n if len(context) > 1:\n _, past = model(inputs[:, :-1], None)[:2]\n prev = inputs[:, -1].view(1, -1)\n else:\n past = None\n prev = inputs\n generate = [] + context\n with torch.no_grad():\n for i in trange(length):\n output = model(prev, past=past)\n output, past = output[:2]\n output = output[-1].squeeze(0) / temperature\n filtered_logits = top_k_top_p_filtering(output, top_k=top_k, top_p=top_p)\n next_token = torch.multinomial(torch.softmax(filtered_logits, dim=-1), num_samples=1)\n generate.append(next_token.item())\n prev = next_token.view(1, 1)\n return generate\n\n\n# 通过命令行参数--fast_pattern,指定模式\ndef generate(n_ctx, model, context, length, tokenizer, temperature=1, top_k=0, top_p=0.0, repitition_penalty=1.0, device='cpu',\n is_fast_pattern=False):\n if is_fast_pattern:\n return fast_sample_sequence(model, context, length, temperature=temperature, top_k=top_k, top_p=top_p,\n device=device)\n else:\n return sample_sequence(model, context, length, n_ctx, tokenizer=tokenizer, temperature=temperature, top_k=top_k, top_p=top_p,\n repitition_penalty=repitition_penalty, device=device)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备')\n parser.add_argument('--length', default=-1, type=int, required=False, help='生成长度')\n parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size')\n parser.add_argument('--nsamples', default=10, type=int, required=False, help='生成几个样本')\n parser.add_argument('--temperature', default=1, type=float, required=False, help='生成温度')\n parser.add_argument('--topk', default=8, type=int, required=False, help='最高几选一')\n parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')\n parser.add_argument('--model_config', default='config/model_config.json', type=str, required=False,\n help='模型参数')\n parser.add_argument('--tokenizer_path', default='cache/vocab.txt', type=str, required=False, help='词表路径')\n parser.add_argument('--model_path', default='model/final_model', type=str, required=False, help='模型路径')\n parser.add_argument('--prefix', default='萧炎', type=str, required=False, help='生成文章的开头')\n parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词')\n parser.add_argument('--segment', action='store_true', help='中文以词为单位')\n parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本')\n parser.add_argument('--save_samples', action='store_true', help='保存产生的样本')\n parser.add_argument('--save_samples_path', default='.', type=str, required=False, help=\"保存样本的路径\")\n parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False)\n\n args = parser.parse_args()\n print('args:\\n' + args.__repr__())\n\n if args.segment:\n from tokenizations import tokenization_bert_word_level as tokenization_bert\n else:\n from tokenizations import tokenization_bert\n\n model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)\n 
os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device # 此处设置程序使用哪些显卡\n length = args.length\n batch_size = args.batch_size\n nsamples = args.nsamples\n temperature = args.temperature\n topk = args.topk\n topp = args.topp\n repetition_penalty = args.repetition_penalty\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)\n # model = GPT2LMHeadModel.from_pretrained(args.model_path)\n model = GPT2LMHeadModel(config=model_config)\n model.load_state_dict(torch.load(args.model_path + 'final_model'))\n model.to(device)\n model.eval()\n\n n_ctx = model.config.n_ctx\n\n if length == -1:\n length = model.config.n_ctx\n if args.save_samples:\n if not os.path.exists(args.save_samples_path):\n os.makedirs(args.save_samples_path)\n samples_file = open(args.save_samples_path + '/samples.txt', 'w', encoding='utf8')\n while True:\n raw_text = args.prefix\n context_tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(raw_text))\n generated = 0\n for _ in range(nsamples // batch_size):\n out = generate(\n n_ctx=n_ctx,\n model=model,\n context=context_tokens,\n length=length,\n is_fast_pattern=args.fast_pattern, tokenizer=tokenizer,\n temperature=temperature, top_k=topk, top_p=topp, repitition_penalty=repetition_penalty, device=device\n )\n for i in range(batch_size):\n generated += 1\n text = tokenizer.convert_ids_to_tokens(out)\n for i, item in enumerate(text[:-1]): # 确保英文前后有空格\n if is_word(item) and is_word(text[i + 1]):\n text[i] = item + ' '\n for i, item in enumerate(text):\n if item == '[MASK]':\n text[i] = ''\n elif item == '[CLS]':\n text[i] = '\\n\\n'\n elif item == '[SEP]':\n text[i] = '\\n'\n info = \"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40 + \"\\n\"\n print(info)\n text = ''.join(text).replace('##', '').strip()\n print(text)\n if args.save_samples:\n samples_file.write(info)\n samples_file.write(text)\n samples_file.write('\\n')\n samples_file.write('=' * 90)\n samples_file.write('\\n' * 2)\n print(\"=\" * 80)\n if generated == nsamples:\n # close file when finish writing.\n if args.save_samples:\n samples_file.close()\n break\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.softmax",
"torch.LongTensor",
"torch.load",
"torch.tensor",
"torch.no_grad",
"torch.sort",
"torch.cuda.is_available",
"torch.topk"
]
] |
eonu/sigment
|
[
"926237642f1fa5b63921ddf82341a4bbd7243394"
] |
[
"lib/sigment/transforms.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np, librosa\nfrom itertools import chain\nfrom math import ceil\nfrom copy import copy\nfrom .base import _Base\nfrom .internals import _Validator\n\n__all__ = [\n 'Transform', 'Identity',\n 'GaussianWhiteNoise',\n 'TimeStretch', 'PitchShift',\n 'EdgeCrop', 'RandomCrop',\n 'LinearFade',\n 'Normalize', 'PreEmphasize', 'ExtractLoudestSection',\n 'MedianFilter',\n 'Reverb', 'ClipDistort'\n]\n\nclass Transform(_Base):\n \"\"\"Base class representing a single transformation or augmentation.\n\n .. note::\n As this is a base class, it should **not** be directly instantiated.\n\n You can however, use it to `create your own transformations <https://nbviewer.jupyter.org/github/eonu/sigment/blob/master/notebooks/Custom%20Transformations.ipynb>`_, following the\n implementation of the pre-defined transformations in Sigment.\n\n Parameters\n ----------\n p: float [0 ≤ p ≤ 1]\n The probability of executing the transformation.\n\n random_state: numpy.RandomState, int, optional\n A random state object or seed for reproducible randomness.\n \"\"\"\n\n def __init__(self, p, random_state):\n if self.__class__ == Transform:\n raise NotImplementedError('Transform is a base class for creating augmentations as a subclass - ' \\\n 'you cannot directly instantiate it')\n self._val = _Validator()\n self.p = self._val.restricted_float(\n p, 'p (probability)',\n lambda x: 0. <= x <= 1., 'between zero and one')\n self.random_state = self._val.random_state(random_state)\n\n def __call__(self, X, sr=None):\n \"\"\"Runs the transformation on a provided input signal.\n\n Parameters\n ----------\n X: numpy.ndarray [shape (T,) or (1xT) for mono, (2xT) for stereo]\n The input signal to transform.\n\n sr: int [sr > 0], optional\n The sample rate for the input signal.\n\n .. note::\n Not required if using transformations that **do not** require a sample rate.\n\n Returns\n -------\n transformed: numpy.ndarray [shape (T,) for mono, (2xT) for stereo]\n The transformed signal, clipped so that it fits into the :math:`[-1,1]` range required for 32-bit floating point WAVs.\n\n .. note::\n If a mono signal `X` of shape `(1xT)` was used, the output is reshaped to `(T,)`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sigment.transforms import PitchShift\n >>> # Create an example stereo signal.\n >>> X = np.array([\n >>> [0.325, 0.53 , 0.393, 0.211],\n >>> [0.21 , 0.834, 0.022, 0.38 ]\n >>> ])\n >>> # Create the pitch-shifting transformation object.\n >>> shift = PitchShift(n_steps=(-1., 1.))\n >>> # Run the __call__ method on the transformation object to transform X.\n >>> # NOTE: Pitch shifting requires a sample rate when called.\n >>> X_shift = shift(X, sr=10)\n \"\"\"\n return self._flatten(self._transform(copy(X), sr) if self._apply() else copy(X)).clip(min=-1., max=1.)\n\n def generate(self, X, n, sr=None):\n \"\"\"Runs the transformation on a provided input signal, producing multiple augmented copies of the input signal.\n\n Parameters\n ----------\n X: numpy.ndarray [shape (T,) or (1xT) for mono, (2xT) for stereo]\n The input signal to transform.\n\n n: int [n > 0]\n Number of augmented copies of `X` to generate.\n\n sr: int [sr > 0], optional\n The sample rate for the input signal.\n\n .. 
note::\n Not required if using transformations that **do not** require a sample rate.\n\n Returns\n -------\n augmented: List[numpy.ndarray] or numpy.ndarray\n The augmented copies (or copy if `n=1`) of the signal `X`, clipped so that they fit into the :math:`[-1,1]` range required for 32-bit floating point WAVs.\n\n .. note::\n If a mono signal `X` of shape `(1xT)` was used, the output is reshaped to `(T,)`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sigment.transforms import GaussianWhiteNoise\n >>> # Create an example stereo signal.\n >>> X = np.array([\n >>> [0.325, 0.53 , 0.393, 0.211],\n >>> [0.21 , 0.834, 0.022, 0.38 ]\n >>> ])\n >>> # Create the Gaussian white noise transformation object.\n >>> add_noise = GaussianWhiteNoise(scale=(0.05, 0.15))\n >>> # Generate 5 augmented versions of X, using the noise transformation.\n >>> Xs_noisy = add_noise.generate(X, n=5)\n \"\"\"\n X = self._val.signal(X)\n n = self._val.restricted_integer(\n n, 'n (number of augmented copies)',\n lambda x: x > 0, 'positive')\n sr = sr if sr is None else self._val.restricted_integer(\n sr, 'sr (sample rate)',\n lambda x: x > 0, 'positive')\n X = [self.__call__(X, sr) for _ in range(n)]\n return X[0] if n == 1 else X\n\n def _transform(self, X, sr):\n raise NotImplementedError\n\n def __repr__(self, indent=4, level=0):\n module = self.__class__.__module__\n attrs = [(k, v) for k, v in self.__dict__.items() if\n k not in ['p', 'random_state'] and not k.startswith('_')]\n return (' ' * indent * level) + '{}{}({}{})'.format(\n '' if module == '__main__' else '{}.'.format(module),\n self.__class__.__name__,\n '' if len(attrs) == 0 else (', '.join('{}={}'.format(k, v) for k, v in attrs) + ', '),\n 'p={}'.format(self.p)\n )\n\nclass Identity(Transform):\n \"\"\"Applies an identity transformation to a signal.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self):\n super().__init__(p=1., random_state=None)\n\n def __call__(self, X, sr=None):\n return self._flatten(self._val.signal(copy(X)))\n\nclass GaussianWhiteNoise(Transform):\n \"\"\"Applies additive Gaussian white noise to the signal.\n\n Parameters\n ----------\n scale: float [scale > 0] or (float, float)\n | Amount to scale the value sampled from the standard normal distribution.\n | Essentially the variance :math:`\\sigma^2`.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, scale, p=1., random_state=None):\n super().__init__(p, random_state)\n self.scale = self._val.float_value(\n scale, 'scale (scale parameter)',\n lambda a, b: 0. 
< a <= b, 'positive')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n scale = self.random_state.uniform(*self.scale)\n\n # Generate the additive Gaussian white signal noise\n noise = self.random_state.normal(loc=0, scale=scale, size=X.shape)\n\n # Return the signal with added noise\n return X + noise\n\nclass TimeStretch(Transform):\n \"\"\"Stretches the duration or speed of the signal without affecting its pitch.\n\n Parameters\n ----------\n rate: float [rate > 0] or (float, float)\n Stretch rate.\n\n - If `rate < 1`, the signal is slowed down.\n - If `rate > 1`, the signal is sped up.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, rate, p=1., random_state=None):\n super().__init__(p, random_state)\n self.rate = self._val.float_value(\n rate, 'rate (stretch rate)',\n lambda a, b: 0. < a <= b, 'positive')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n rate = self.random_state.uniform(*self.rate)\n\n # Return the signal with time stretching applied to each channel independently\n return np.apply_along_axis(librosa.effects.time_stretch, 1, np.asfortranarray(X.T).T, rate=rate)\n\nclass PitchShift(Transform):\n \"\"\"Shifts the pitch of the signal without changing its duration or speed.\n\n Parameters\n ----------\n n_steps: float [-12 ≤ n_steps ≤ 12] or (float, float)\n Number of semitones to shift.\n\n Notes\n -----\n - A sampling rate **is** required when applying this transformation.\n \"\"\"\n\n def __init__(self, n_steps, p=1., random_state=None):\n super().__init__(p, random_state)\n self.n_steps = self._val.float_value(\n n_steps, 'n_steps (number of semitones to shift)',\n lambda a, b: -12. <= a <= b <= 12., 'between -12 and 12')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n sr = self._val.restricted_integer(\n sr, 'sr (sample rate)',\n lambda x: x > 0, 'positive')\n n_steps = self.random_state.uniform(*self.n_steps)\n\n # Return the signal with pitch shifting applied to each channel independently\n return np.apply_along_axis(librosa.effects.pitch_shift, 1, np.asfortranarray(X.T).T, sr=sr, n_steps=n_steps)\n\nclass EdgeCrop(Transform):\n \"\"\"Crops a section from the start or end of the signal.\n\n Parameters\n ----------\n side: {'start', 'end'}\n The side of the signal to crop.\n\n crop_size: float [0 < crop_size ≤ 0.5] or (float, float)\n The fraction of the signal duration to crop from the chosen `side`.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, side, crop_size, p=1., random_state=None):\n super().__init__(p, random_state)\n self.side = self._val.one_of(\n side, 'side (side to crop)',\n ['start', 'end'])\n self.crop_size = self._val.float_value(\n crop_size, 'crop_size (fraction of signal duration)',\n lambda a, b: 0. 
< a <= b <= 0.5, 'between zero and a half')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n crop_size = self.random_state.uniform(*self.crop_size)\n\n # Calculate the number of frames to crop\n crop_frames = int(crop_size * X.shape[1])\n\n # Remove the frames from the start or end of the signal\n return X[:, crop_frames:] if self.side == 'start' else X[:, :-crop_frames]\n\nclass RandomCrop(Transform):\n \"\"\"Randomly crops multiple sections from the signal.\n\n Parameters\n ----------\n crop_size: float [0 < crop_size < 1] or (float, float)\n The fraction of the signal duration to crop.\n\n n_crops: int [n_crops > 0] or (int, int)\n The number of random crops of size `crop_size` to make.\n\n Notes\n -----\n - Chunking is done according to the algorithm defined at [1]_.\n - `crop_size` :math:`\\\\times` `n_crops` must not exceed 1.\n - A sampling rate **is not** required when applying this transformation.\n\n References\n ----------\n .. [1] https://stackoverflow.com/a/49944026\n \"\"\"\n\n def __init__(self, crop_size, n_crops, p=1., random_state=None):\n super().__init__(p, random_state)\n self.crop_size = self._val.float_value(\n crop_size, 'crop_size (fraction of signal duration)',\n lambda a, b: 0. < a <= b < 1., 'between zero and one')\n self.n_crops = self._val.integer_value(\n n_crops, 'n_crops (number of crops)',\n lambda a, b: b >= a > 0, 'positive')\n if self.crop_size[1] * self.n_crops[1] >= 1.:\n raise ValueError('Expected maximum possible crop_size * n_crops to be less than one')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n n_crops = self.random_state.randint(self.n_crops[0], self.n_crops[1] + 1)\n\n # Convert crop_size fraction to number of frames\n length = X.shape[1]\n lower_crop_size, upper_crop_size = int(self.crop_size[0] * length), int(self.crop_size[1] * length)\n\n # Get at least enough random chunk sizes in the specified range (i.e. lower <= n <= upper)\n ns = self.random_state.randint(lower_crop_size, upper_crop_size + 1, size=length//lower_crop_size)\n # Add up the chunk sizes to get the indices at which we'll slice up the input array\n idxs = np.add.accumulate(ns)\n # Truncate idxs so that its contents are all valid indices with respect to signal\n idxs = idxs[:np.searchsorted(idxs, length)]\n # Retrieve chunks from the signal using idxs\n chunks = [X[:, start:end] for start, end in zip(chain([None], idxs), chain(idxs, [None]))]\n\n # Return signal with chunks removed\n remove_idxs = self.random_state.choice(range(len(chunks)), n_crops, replace=False)\n return np.hstack([c for i, c in enumerate(chunks) if i not in remove_idxs])\n\nclass LinearFade(Transform):\n \"\"\"Linearly fades the signal in or out.\n\n Parameters\n ----------\n direction: {'in', 'out'}\n The direction to fade the signal.\n\n fade_size: float [0 < fade_size ≤ 0.5] or (float, float)\n The fraction of the signal to fade in the chosen `direction`.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, direction, fade_size, p=1., random_state=None):\n super().__init__(p, random_state)\n self.direction = self._val.one_of(\n direction, 'direction (direction to fade)',\n ['in', 'out'])\n self.fade_size = self._val.float_value(\n fade_size, 'fade_size (fraction of signal duration)',\n lambda a, b: 0. 
< a <= b <= 0.5, 'between zero and a half')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n fade_size = self.random_state.uniform(*self.fade_size)\n\n # Calculate the number of frames to fade\n fade_frames = int(fade_size * X.shape[1])\n # Generate scalars for fading\n scalars = np.arange(1, fade_frames + 1).reshape(1, -1) / float(fade_frames)\n # Fade the signal from the start or end with the scalars\n if self.direction == 'in':\n X[:, :fade_frames] *= scalars\n else:\n X[:, -fade_frames:] *= np.flip(scalars)\n\n # Return the faded signal\n return X\n\nclass Normalize(Transform):\n \"\"\"Normalizes the signal by dividing each sample by the maximum absolute sample amplitude.\n\n Parameters\n ----------\n independent: bool\n Whether or not to normalize each channel independently.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, independent=True, p=1., random_state=None):\n super().__init__(p, random_state)\n self.independent = self._val.boolean(\n independent, 'independent (whether to independently normalize channels)')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n\n # Return the normalized signal (treat each channel separately if independent=True)\n return X / (np.max(np.abs(X), axis=1, keepdims=True) if self.independent else np.max(np.abs(X)))\n\nclass PreEmphasize(Transform):\n \"\"\"Pre-emphasizes the signal by applying a first-order high-pass filter.\n\n .. math::\n x'[t] = \\\\begin{cases}\n x[t] & \\\\text{if $t=0$} \\\\\\\\\n x[t] - \\\\alpha x[t-1] & \\\\text{otherwise}\n \\\\end{cases}\n\n Parameters\n ----------\n alpha: float [0 < alpha ≤ 1] or (float, float)\n Pre-emphasis coefficient.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, alpha=0.95, p=1., random_state=None):\n super().__init__(p, random_state)\n self.alpha = self._val.float_value(\n alpha, 'alpha (pre-emphasis coefficient)',\n lambda a, b: 0. < a <= b <= 1., 'between zero and one')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n alpha = self.random_state.uniform(*self.alpha)\n\n # Return the pre-emphasized signal\n return np.append(X[:, 0][:, None], X[:, 1:] - alpha * X[:, :-1], axis=1)\n\nclass ExtractLoudestSection(Transform):\n \"\"\"Extracts the loudest section from the signal using sliding window aggregation over amplitudes.\n\n Parameters\n ----------\n duration: float [0 < duration ≤ 1] or (float, float)\n The duration of the section to extract, as a fraction of the original signal duration.\n\n Notes\n -----\n - See [2]_ for more details on the implementation.\n - A sampling rate **is not** required when applying this transformation.\n\n References\n ----------\n .. [2] https://github.com/petewarden/extract_loudest_section\n \"\"\"\n\n def __init__(self, duration, p=1., random_state=None):\n super().__init__(p, random_state)\n self.duration = self._val.float_value(\n duration, 'duration (fraction of signal duration)',\n lambda a, b: 0. 
< a <= b <= 1., 'between zero and one')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n duration = self.random_state.uniform(*self.duration)\n\n # Convert stereo signals to mono and take the absolute value\n mono_amp = np.abs(librosa.to_mono(X))\n # Calculate the length of the section in terms of frames\n total_frames = len(mono_amp)\n frames = ceil(total_frames * duration)\n # Initialize variables for keeping track of loudest section\n previous_amp, section_amp = None, 0\n start, end = 0, frames\n loudest_amp, loudest_idx = -1, (start, end)\n\n # Slide the moving section window\n while end < total_frames:\n # Calculate volume for current section\n section_amp += mono_amp[start:end].sum() if previous_amp is None else mono_amp[end] - previous_amp\n # Update loudest section indices if current section is loudest\n if section_amp > loudest_amp:\n loudest_amp, loudest_idx = section_amp, (start, end)\n # Store volume of the frame leaving the moving window\n previous_amp = mono_amp[start]\n # Update section indices\n start, end = start + 1, end + 1\n\n # Return section of the original signal which was the loudest\n return X[:, loudest_idx[0]:loudest_idx[1]]\n\nclass MedianFilter(Transform):\n \"\"\"Applies a median filter to the signal.\n\n .. math::\n x'[t] = \\\\mathrm{median}\n \\\\underbrace{\\\\Big[\n \\\\ldots, x[t-1], x[t], x[t+1], \\\\ldots\n \\\\Big]}_\\\\text{window size}\n\n Parameters\n ----------\n window_size: int [window_size > 1] or (int, int)\n The size of the window of neighbouring samples.\n\n Notes\n -----\n - A sampling rate **is not** required when applying this transformation.\n \"\"\"\n\n def __init__(self, window_size, p=1., random_state=None):\n super().__init__(p, random_state)\n self.window_size = self._val.integer_value(\n window_size, 'window_size (filter window size)',\n lambda a, b: 0 < a <= b, 'positive')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n window_size = self.random_state.randint(self.window_size[0], self.window_size[1] + 1)\n\n # Create array to store filtered samples\n filtered = np.zeros(X.shape)\n # Calculate number of elements to the right and left\n right = window_size // 2\n left = (window_size - 1) - right\n # Slide the moving window and store filtered samples\n for i in range(X.shape[1]):\n l, m, r = X[:, ((i - left) * (left < i)):i], X[:, i][:, None], X[:, (i + 1):(i + 1 + right)]\n filtered[:, i] = np.median(np.hstack((l, m, r)), axis=1)\n\n # Return the filtered signal\n return filtered\n\nclass Reverb(Transform):\n \"\"\"Applies reverb to the signal.\n\n Parameters\n ----------\n delay: float [0 < delay ≤ 1] or (float, float)\n Fraction of signal diration to delay reverberated samples by.\n\n decay: float [0 < decay ≤ 1] or (float, float)\n Scalar to decay reverberated samples by.\n\n Notes\n -----\n - See [3]_ for more details on the implementation.\n\n References\n ----------\n .. [3] https://stackoverflow.com/a/1117249\n \"\"\"\n\n def __init__(self, delay, decay, p=1., random_state=None):\n super().__init__(p, random_state)\n self.delay = self._val.float_value(\n delay, 'delay (fraction of signal duration)',\n lambda a, b: 0. < a <= b <= 1., 'between zero and one')\n self.decay = self._val.float_value(\n decay, 'decay (scalar to decay samples by)',\n lambda a, b: 0. 
< a <= b <= 1., 'between zero and one')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n delay = self.random_state.uniform(*self.delay)\n decay = self.random_state.uniform(*self.decay)\n\n # Calculate the number of frames to delay\n C, T = X.shape\n delay_frames = int(delay * T)\n\n # Decay and delay the signal\n out = np.zeros((C, T))\n for t in range(T):\n out[:, t] += X[:, t]\n if t < T - delay_frames:\n out[:, t + delay_frames] += X[:, t] * decay\n\n # Return the reverberated signal\n return out\n\nclass ClipDistort(Transform):\n \"\"\"Applies clipping distortion to the signal according to a percentile clipping threshold.\n\n Parameters\n ----------\n percentile: int [0 < percentile ≤ 100]\n Percentile of sample amplitudes to use as a clipping threshold.\n\n independent: boolean\n Whether or not to independently distort channels by calculating individual percentiles.\n \"\"\"\n\n def __init__(self, percentile, independent=False, p=1., random_state=None):\n super().__init__(p, random_state)\n self.percentile = self._val.integer_value(\n percentile, 'percentile (clipping threshold)',\n lambda a, b: 0 < a <= b <= 100, 'between zero and 100')\n self.independent = self._val.boolean(\n independent, 'independent (whether to independently distort channels)')\n\n def _transform(self, X, sr):\n X = self._val.signal(X)\n percentile = self.random_state.randint(self.percentile[0], self.percentile[1] + 1)\n\n # Return the distorted signal by clipping at the percentile threshold\n clip = lambda signal, percentile: signal.clip(max=np.percentile(signal, percentile))\n return np.apply_along_axis(clip, 1, X, percentile) if self.independent else clip(X, percentile)"
] |
[
[
"numpy.hstack",
"numpy.abs",
"numpy.arange",
"numpy.asfortranarray",
"numpy.percentile",
"numpy.append",
"numpy.apply_along_axis",
"numpy.searchsorted",
"numpy.flip",
"numpy.add.accumulate",
"numpy.zeros"
]
] |
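The GaussianNoise and Normalize transforms in the entry above boil down to two numpy operations: additive noise drawn with `normal(loc=0, scale=scale, size=X.shape)`, and a per-channel divide by the peak absolute amplitude. Below is a minimal standalone sketch of that pair under the same assumed `(channels, frames)` array layout; the helper name and the synthetic test tone are invented for the example and are not part of the library above.

```python
import numpy as np

def add_noise_and_normalize(X, scale=0.01, seed=0):
    """Add Gaussian white noise, then peak-normalize each channel independently."""
    rng = np.random.default_rng(seed)
    noisy = X + rng.normal(loc=0.0, scale=scale, size=X.shape)
    # independent=True behaviour: divide each channel by its own max |amplitude|
    return noisy / np.max(np.abs(noisy), axis=1, keepdims=True)

# Example: a 2-channel, 1-second 440 Hz tone sampled at 16 kHz
t = np.linspace(0, 1, 16000, endpoint=False)
X = np.vstack([np.sin(2 * np.pi * 440 * t)] * 2)
Y = add_noise_and_normalize(X, scale=0.05)
print(Y.shape, float(np.max(np.abs(Y))))  # (2, 16000) 1.0
```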
visiope/simpeg
|
[
"94295102afc664c001f77c88f902772e06a467c0"
] |
[
"SimPEG/VRM/ProblemVRM.py"
] |
[
"from SimPEG import Problem, mkvc, Maps, Props, Survey\nfrom SimPEG.VRM.SurveyVRM import SurveyVRM\nimport numpy as np\nimport scipy.sparse as sp\n\n############################################\n# BASE VRM PROBLEM CLASS\n############################################\n\n\nclass Problem_BaseVRM(Problem.BaseProblem):\n \"\"\"\n\n \"\"\"\n\n # SET CLASS ATTRIBUTES\n _refFact = None\n _refRadius = None\n _indActive = None\n _AisSet = False\n\n def __init__(self, mesh, **kwargs):\n\n # **kwargs\n self._refFact = kwargs.get('refFact', 3)\n self._refRadius = kwargs.get('refRadius', list(1.25*np.mean(np.r_[np.min(mesh.h[0]), np.min(mesh.h[1]), np.min(mesh.h[2])])*np.arange(1, self.refFact+1)))\n self._indActive = kwargs.get('indActive', np.ones(mesh.nC, dtype=bool))\n\n # Assertions\n assert len(mesh.h) == 3, 'Problem requires 3D tensor or OcTree mesh'\n assert isinstance(self._refFact, int), \"Refinement factor must be set as an integer\"\n assert isinstance(self._refRadius, list), \"Refinement radii must be a list with at least 1 entry\"\n assert len(self._refRadius) >= self._refFact, 'Number of refinement radii must equal or greater than refinement factor'\n assert list(self._indActive).count(True) + list(self._indActive).count(False) == len(self._indActive), \"indActive must be a boolean array\"\n\n if self.refFact > 4:\n print(\"Refinement factor larger than 4 may result in computations which exceed memory limits\")\n\n super(Problem_BaseVRM, self).__init__(mesh, **kwargs)\n\n @property\n def refFact(self):\n return self._refFact\n\n @refFact.setter\n def refFact(self, Val):\n\n assert isinstance(Val, int) and Val > -1, \"Refinement factor must be an integer value equal or larger than 0\"\n\n if Val != len(self._refRadius):\n print(\"Refinement factor no longer matches length of refinement radii array. Please ensure refinement factor is equal or less to number of elements in refinement radii\")\n\n if Val > 4:\n print(\"Refinement factor larger than 4 may result in computations which exceed memory limits\")\n\n self._refFact = Val\n\n @property\n def refRadius(self):\n return self._refRadius\n\n @refRadius.setter\n def refRadius(self, radList):\n assert isinstance(radList, (list, tuple)), \"Array must be a numpy array\"\n\n if self._refFact != len(radList):\n print(\"Refinement factor no longer matches length of refinement radii array. Please ensure that the number of elements in refinement radii is equal or greater than the refinement factor\")\n\n self._refRadius = radList\n\n @property\n def indActive(self):\n return self._indActive\n\n @indActive.setter\n def indActive(self, Vec):\n\n assert list(self._indActive).count(True) + list(self._indActive).count(False) == len(self._indActive), \"indActive must be a boolean array\"\n self._AisSet = False\n self._indActive = Vec\n\n def _getH0matrix(self, xyz, pp):\n\n \"\"\"\n Creates sparse matrix containing inducing field components\n for source pp\n\n.. REQUIRED ARGUMENTS:\n..\n.. xyz: N X 3 array of locations to predict field\n..\n.. pp: Source index\n..\n.. OUTPUTS:\n..\n.. 
H0: A 3N X N sparse array containing Hx, Hy and Hz at all locations\n..\n \"\"\"\n\n SrcObj = self.survey.srcList[pp]\n\n H0 = SrcObj.getH0(xyz)\n\n Hx0 = sp.diags(H0[:, 0], format=\"csr\")\n Hy0 = sp.diags(H0[:, 1], format=\"csr\")\n Hz0 = sp.diags(H0[:, 2], format=\"csr\")\n\n H0 = sp.vstack([Hx0, Hy0, Hz0])\n\n return H0\n\n def _getGeometryMatrix(self, xyzc, xyzh, pp):\n\n \"\"\"\n Creates the dense geometry matrix which maps from the magnetized voxel\n cells to the receiver locations for source pp\n..\n.. REQUIRED ARGUMENTS:\n..\n.. xyzc: N by 3 numpy array containing cell center locations [xc,yc,zc]\n..\n.. xyzh: N by 3 numpy array containing cell dimensions [hx,hy,hz]\n..\n.. pp: Source index\n..\n.. OUTPUTS:\n..\n.. G: Linear geometry operator\n\n \"\"\"\n\n srcObj = self.survey.srcList[pp]\n\n N = np.shape(xyzc)[0] # Number of cells\n K = srcObj.nRx # Number of receiver in all rxList\n\n ax = np.reshape(xyzc[:, 0] - xyzh[:, 0]/2, (1, N))\n bx = np.reshape(xyzc[:, 0] + xyzh[:, 0]/2, (1, N))\n ay = np.reshape(xyzc[:, 1] - xyzh[:, 1]/2, (1, N))\n by = np.reshape(xyzc[:, 1] + xyzh[:, 1]/2, (1, N))\n az = np.reshape(xyzc[:, 2] - xyzh[:, 2]/2, (1, N))\n bz = np.reshape(xyzc[:, 2] + xyzh[:, 2]/2, (1, N))\n\n G = np.zeros((K, 3*N))\n C = -(1/(4*np.pi))\n tol = 1e-10 # Tolerance constant for numerical stability\n tol2 = 1000. # Tolerance constant for numerical stability\n\n COUNT = 0\n\n for qq in range(0, len(srcObj.rxList)):\n\n rxObj = srcObj.rxList[qq]\n dComp = rxObj.fieldComp\n locs = rxObj.locs\n M = np.shape(locs)[0]\n\n if dComp is 'x':\n for rr in range(0, M):\n u1 = locs[rr, 0] - ax\n u1[np.abs(u1) < tol] = np.min(xyzh[:, 0])/tol2\n u2 = locs[rr, 0] - bx\n u2[np.abs(u2) < tol] = -np.min(xyzh[:, 0])/tol2\n v1 = locs[rr, 1] - ay\n v1[np.abs(v1) < tol] = np.min(xyzh[:, 1])/tol2\n v2 = locs[rr, 1] - by\n v2[np.abs(v2) < tol] = -np.min(xyzh[:, 1])/tol2\n w1 = locs[rr, 2] - az\n w1[np.abs(w1) < tol] = np.min(xyzh[:, 2])/tol2\n w2 = locs[rr, 2] - bz\n w2[np.abs(w2) < tol] = -np.min(xyzh[:, 2])/tol2\n\n Gxx = (\n np.arctan((v1*w1)/(u1*np.sqrt(u1**2+v1**2+w1**2)+tol)) -\n np.arctan((v1*w1)/(u2*np.sqrt(u2**2+v1**2+w1**2)+tol)) +\n np.arctan((v2*w1)/(u2*np.sqrt(u2**2+v2**2+w1**2)+tol)) -\n np.arctan((v2*w1)/(u1*np.sqrt(u1**2+v2**2+w1**2)+tol)) +\n np.arctan((v2*w2)/(u1*np.sqrt(u1**2+v2**2+w2**2)+tol)) -\n np.arctan((v1*w2)/(u1*np.sqrt(u1**2+v1**2+w2**2)+tol)) +\n np.arctan((v1*w2)/(u2*np.sqrt(u2**2+v1**2+w2**2)+tol)) -\n np.arctan((v2*w2)/(u2*np.sqrt(u2**2+v2**2+w2**2)+tol))\n )\n\n Gyx = (\n np.log(np.sqrt(u1**2+v1**2+w1**2)-w1) -\n np.log(np.sqrt(u2**2+v1**2+w1**2)-w1) +\n np.log(np.sqrt(u2**2+v2**2+w1**2)-w1) -\n np.log(np.sqrt(u1**2+v2**2+w1**2)-w1) +\n np.log(np.sqrt(u1**2+v2**2+w2**2)-w2) -\n np.log(np.sqrt(u1**2+v1**2+w2**2)-w2) +\n np.log(np.sqrt(u2**2+v1**2+w2**2)-w2) -\n np.log(np.sqrt(u2**2+v2**2+w2**2)-w2)\n )\n\n Gzx = (\n np.log(np.sqrt(u1**2+v1**2+w1**2)-v1) -\n np.log(np.sqrt(u2**2+v1**2+w1**2)-v1) +\n np.log(np.sqrt(u2**2+v2**2+w1**2)-v2) -\n np.log(np.sqrt(u1**2+v2**2+w1**2)-v2) +\n np.log(np.sqrt(u1**2+v2**2+w2**2)-v2) -\n np.log(np.sqrt(u1**2+v1**2+w2**2)-v1) +\n np.log(np.sqrt(u2**2+v1**2+w2**2)-v1) -\n np.log(np.sqrt(u2**2+v2**2+w2**2)-v2)\n )\n\n G[COUNT, :] = C*np.c_[Gxx, Gyx, Gzx]\n COUNT = COUNT + 1\n\n elif dComp is 'y':\n for rr in range(0, M):\n u1 = locs[rr, 0] - ax\n u1[np.abs(u1) < tol] = np.min(xyzh[:, 0])/tol2\n u2 = locs[rr, 0] - bx\n u2[np.abs(u2) < tol] = -np.min(xyzh[:, 0])/tol2\n v1 = locs[rr, 1] - ay\n v1[np.abs(v1) < tol] = np.min(xyzh[:, 1])/tol2\n 
v2 = locs[rr, 1] - by\n v2[np.abs(v2) < tol] = -np.min(xyzh[:, 1])/tol2\n w1 = locs[rr, 2] - az\n w1[np.abs(w1) < tol] = np.min(xyzh[:, 2])/tol2\n w2 = locs[rr, 2] - bz\n w2[np.abs(w2) < tol] = -np.min(xyzh[:, 2])/tol2\n\n Gxy = (\n np.log(np.sqrt(u1**2+v1**2+w1**2)-w1) -\n np.log(np.sqrt(u2**2+v1**2+w1**2)-w1) +\n np.log(np.sqrt(u2**2+v2**2+w1**2)-w1) -\n np.log(np.sqrt(u1**2+v2**2+w1**2)-w1) +\n np.log(np.sqrt(u1**2+v2**2+w2**2)-w2) -\n np.log(np.sqrt(u1**2+v1**2+w2**2)-w2) +\n np.log(np.sqrt(u2**2+v1**2+w2**2)-w2) -\n np.log(np.sqrt(u2**2+v2**2+w2**2)-w2)\n )\n\n Gyy = (\n np.arctan((u1*w1)/(v1*np.sqrt(u1**2+v1**2+w1**2)+tol)) -\n np.arctan((u2*w1)/(v1*np.sqrt(u2**2+v1**2+w1**2)+tol)) +\n np.arctan((u2*w1)/(v2*np.sqrt(u2**2+v2**2+w1**2)+tol)) -\n np.arctan((u1*w1)/(v2*np.sqrt(u1**2+v2**2+w1**2)+tol)) +\n np.arctan((u1*w2)/(v2*np.sqrt(u1**2+v2**2+w2**2)+tol)) -\n np.arctan((u1*w2)/(v1*np.sqrt(u1**2+v1**2+w2**2)+tol)) +\n np.arctan((u2*w2)/(v1*np.sqrt(u2**2+v1**2+w2**2)+tol)) -\n np.arctan((u2*w2)/(v2*np.sqrt(u2**2+v2**2+w2**2)+tol))\n )\n\n Gzy = (\n np.log(np.sqrt(u1**2+v1**2+w1**2)-u1) -\n np.log(np.sqrt(u2**2+v1**2+w1**2)-u2) +\n np.log(np.sqrt(u2**2+v2**2+w1**2)-u2) -\n np.log(np.sqrt(u1**2+v2**2+w1**2)-u1) +\n np.log(np.sqrt(u1**2+v2**2+w2**2)-u1) -\n np.log(np.sqrt(u1**2+v1**2+w2**2)-u1) +\n np.log(np.sqrt(u2**2+v1**2+w2**2)-u2) -\n np.log(np.sqrt(u2**2+v2**2+w2**2)-u2)\n )\n\n G[COUNT, :] = C*np.c_[Gxy, Gyy, Gzy]\n COUNT = COUNT + 1\n\n elif dComp is 'z':\n for rr in range(0, M):\n u1 = locs[rr, 0] - ax\n u1[np.abs(u1) < tol] = np.min(xyzh[:, 0])/tol2\n u2 = locs[rr, 0] - bx\n u2[np.abs(u2) < tol] = -np.min(xyzh[:, 0])/tol2\n v1 = locs[rr, 1] - ay\n v1[np.abs(v1) < tol] = np.min(xyzh[:, 1])/tol2\n v2 = locs[rr, 1] - by\n v2[np.abs(v2) < tol] = -np.min(xyzh[:, 1])/tol2\n w1 = locs[rr, 2] - az\n w1[np.abs(w1) < tol] = np.min(xyzh[:, 2])/tol2\n w2 = locs[rr, 2] - bz\n w2[np.abs(w2) < tol] = -np.min(xyzh[:, 2])/tol2\n\n Gxz = (\n np.log(np.sqrt(u1**2+v1**2+w1**2)-v1) -\n np.log(np.sqrt(u2**2+v1**2+w1**2)-v1) +\n np.log(np.sqrt(u2**2+v2**2+w1**2)-v2) -\n np.log(np.sqrt(u1**2+v2**2+w1**2)-v2) +\n np.log(np.sqrt(u1**2+v2**2+w2**2)-v2) -\n np.log(np.sqrt(u1**2+v1**2+w2**2)-v1) +\n np.log(np.sqrt(u2**2+v1**2+w2**2)-v1) -\n np.log(np.sqrt(u2**2+v2**2+w2**2)-v2)\n )\n\n Gyz = (\n np.log(np.sqrt(u1**2+v1**2+w1**2)-u1) -\n np.log(np.sqrt(u2**2+v1**2+w1**2)-u2) +\n np.log(np.sqrt(u2**2+v2**2+w1**2)-u2) -\n np.log(np.sqrt(u1**2+v2**2+w1**2)-u1) +\n np.log(np.sqrt(u1**2+v2**2+w2**2)-u1) -\n np.log(np.sqrt(u1**2+v1**2+w2**2)-u1) +\n np.log(np.sqrt(u2**2+v1**2+w2**2)-u2) -\n np.log(np.sqrt(u2**2+v2**2+w2**2)-u2)\n )\n\n Gzz = (\n - np.arctan((v1*w1)/(u1*np.sqrt(u1**2+v1**2+w1**2)+tol)) +\n np.arctan((v1*w1)/(u2*np.sqrt(u2**2+v1**2+w1**2)+tol)) -\n np.arctan((v2*w1)/(u2*np.sqrt(u2**2+v2**2+w1**2)+tol)) +\n np.arctan((v2*w1)/(u1*np.sqrt(u1**2+v2**2+w1**2)+tol)) -\n np.arctan((v2*w2)/(u1*np.sqrt(u1**2+v2**2+w2**2)+tol)) +\n np.arctan((v1*w2)/(u1*np.sqrt(u1**2+v1**2+w2**2)+tol)) -\n np.arctan((v1*w2)/(u2*np.sqrt(u2**2+v1**2+w2**2)+tol)) +\n np.arctan((v2*w2)/(u2*np.sqrt(u2**2+v2**2+w2**2)+tol))\n )\n\n Gzz = (\n Gzz -\n np.arctan((u1*w1)/(v1*np.sqrt(u1**2+v1**2+w1**2)+tol)) +\n np.arctan((u2*w1)/(v1*np.sqrt(u2**2+v1**2+w1**2)+tol)) -\n np.arctan((u2*w1)/(v2*np.sqrt(u2**2+v2**2+w1**2)+tol)) +\n np.arctan((u1*w1)/(v2*np.sqrt(u1**2+v2**2+w1**2)+tol)) -\n np.arctan((u1*w2)/(v2*np.sqrt(u1**2+v2**2+w2**2)+tol)) +\n np.arctan((u1*w2)/(v1*np.sqrt(u1**2+v1**2+w2**2)+tol)) -\n 
np.arctan((u2*w2)/(v1*np.sqrt(u2**2+v1**2+w2**2)+tol)) +\n np.arctan((u2*w2)/(v2*np.sqrt(u2**2+v2**2+w2**2)+tol))\n )\n\n G[COUNT, :] = C*np.c_[Gxz, Gyz, Gzz]\n COUNT = COUNT + 1\n\n return np.matrix(G)\n\n def _getAMatricies(self):\n\n \"\"\"Returns the full geometric operator\"\"\"\n\n indActive = self.indActive\n\n # GET CELL INFORMATION FOR FORWARD MODELING\n meshObj = self.mesh\n xyzc = meshObj.gridCC[indActive, :]\n xyzh = meshObj.h_gridded[indActive, :]\n\n # GET LIST OF A MATRICIES\n A = []\n for pp in range(0, self.survey.nSrc):\n\n # Create initial A matrix\n G = self._getGeometryMatrix(xyzc, xyzh, pp)\n H0 = self._getH0matrix(xyzc, pp)\n A.append(G*H0)\n\n # Refine A matrix\n refFact = self.refFact\n refRadius = self.refRadius\n\n if refFact > 0:\n\n srcObj = self.survey.srcList[pp]\n refFlag = srcObj._getRefineFlags(xyzc, refFact, refRadius)\n\n for qq in range(1, refFact+1):\n if len(refFlag[refFlag == qq]) != 0:\n A[pp][:, refFlag == qq] = self._getSubsetAcolumns(xyzc, xyzh, pp, qq, refFlag)\n\n return A\n\n def _getSubsetAcolumns(self, xyzc, xyzh, pp, qq, refFlag):\n\n \"\"\"\n This method returns the refined sensitivities for columns that will be\n replaced in the A matrix for source pp and refinement factor qq.\n..\n.. INPUTS:\n..\n.. xyzc -- Cell centers of topo mesh cells N X 3 array\n..\n.. xyzh -- Cell widths of topo mesh cells N X 3 array\n..\n.. pp -- Source ID\n..\n.. qq -- Mesh refinement factor\n..\n.. refFlag -- refinement factors for all topo mesh cells\n..\n.. OUTPUTS:\n..\n.. Acols -- Columns containing replacement sensitivities\n\n \"\"\"\n\n # GET SUBMESH GRID\n n = 2**qq\n [nx, ny, nz] = np.meshgrid(np.linspace(1, n, n)-0.5, np.linspace(1, n, n)-0.5, np.linspace(1, n, n)-0.5)\n nxyz_sub = np.c_[mkvc(nx), mkvc(ny), mkvc(nz)]\n\n xyzh_sub = xyzh[refFlag == qq, :] # Get widths of cells to be refined\n xyzc_sub = xyzc[refFlag == qq, :] - xyzh[refFlag == qq, :]/2 # Get bottom southwest corners of cells to be refined\n m = np.shape(xyzc_sub)[0]\n xyzc_sub = np.kron(xyzc_sub, np.ones((n**3, 1))) # Kron for n**3 refined cells\n xyzh_sub = np.kron(xyzh_sub/n, np.ones((n**3, 1))) # Kron for n**3 refined cells with widths h/n\n nxyz_sub = np.kron(np.ones((m, 1)), nxyz_sub) # Kron for n**3 refined cells\n xyzc_sub = xyzc_sub + xyzh_sub*nxyz_sub\n\n # GET SUBMESH A MATRIX AND COLLAPSE TO COLUMNS\n G = self._getGeometryMatrix(xyzc_sub, xyzh_sub, pp)\n H0 = self._getH0matrix(xyzc_sub, pp)\n Acols = (G*H0)*sp.kron(sp.diags(np.ones(m)), np.ones((n**3, 1)))\n\n return Acols\n\n\n#############################################################################\n# VRM CHARACTERISTIC DECAY FORMULATION (SINGLE MODEL PARAMETER AND INVERSION)\n#############################################################################\n\n\nclass Problem_Linear(Problem_BaseVRM):\n\n \"\"\"\n\n \"\"\"\n\n _A = None\n _T = None\n _TisSet = False\n _xiMap = None\n\n surveyPair = SurveyVRM # Only linear problem can have survey and be inverted\n\n # xi = Props.PhysicalProperty(\"Amalgamated Viscous Remanent Magnetization Parameter xi = dchi/ln(tau2/tau1)\")\n xi, xiMap, xiDeriv = Props.Invertible(\"Amalgamated Viscous Remanent Magnetization Parameter xi = dchi/ln(tau2/tau1)\")\n\n def __init__(self, mesh, **kwargs):\n\n super(Problem_Linear, self).__init__(mesh, **kwargs)\n\n nAct = list(self._indActive).count(True)\n if self.xiMap is None:\n self.xiMap = Maps.IdentityMap(nP=nAct)\n\n @property\n def A(self):\n\n \"\"\"\n This function constructs the geometric sensitivity matrix for the\n linear VRM 
problem. This function requires that the problem be paired\n with a survey object.\n\n \"\"\"\n\n if self._AisSet is False:\n\n assert self.ispaired, \"Problem must be paired with survey to generate A matrix\"\n\n # Remove any previously stored A matrix\n if self._A is not None:\n self._A = None\n\n print('CREATING A MATRIX')\n\n # COLLAPSE ALL A MATRICIES INTO SINGLE OPERATOR\n self._A = np.vstack(self._getAMatricies())\n self._AisSet = True\n\n return self._A\n\n elif self._AisSet is True:\n\n return self._A\n\n @property\n def T(self):\n\n \"\"\"\n This function returns the characteristic decay matrix. This function\n requires that the problem has been paired with a survey object.\n\n \"\"\"\n\n if self._TisSet is False:\n\n assert self.ispaired, \"Problem must be paired with survey to generate T matrix\"\n\n # Remove any previously stored T matrix\n if self._T is not None:\n self._T = None\n\n print('CREATING T MATRIX')\n\n srcList = self.survey.srcList\n nSrc = len(srcList)\n T = []\n\n for pp in range(0, nSrc):\n\n rxList = srcList[pp].rxList\n nRx = len(rxList)\n waveObj = srcList[pp].waveform\n\n for qq in range(0, nRx):\n\n times = rxList[qq].times\n nLoc = np.shape(rxList[qq].locs)[0]\n\n I = sp.diags(np.ones(nLoc))\n eta = waveObj.getCharDecay(rxList[qq].fieldType, times)\n eta = np.matrix(eta).T\n\n T.append(sp.kron(I, eta))\n\n self._T = sp.block_diag(T)\n self._TisSet = True\n\n return self._T\n\n elif self._TisSet is True:\n\n return self._T\n\n def fields(self, m):\n\n \"\"\"Computes the fields d = T*A*m\"\"\"\n\n assert self.ispaired, \"Problem must be paired with survey to predict data\"\n\n self.model = m # Initiates/updates model and initiates mapping\n\n # Project to active mesh cells\n # m = np.matrix(self.xiMap * m).T\n m = np.matrix(self.xi).T\n\n # Must return as a numpy array\n return mkvc(sp.coo_matrix.dot(self.T, np.dot(self.A, m)))\n\n def Jvec(self, m, v, f=None):\n\n \"\"\"Compute Pd*T*A*dxidm*v\"\"\"\n\n assert self.ispaired, \"Problem must be paired with survey to predict data\"\n\n # Jacobian of xi wrt model\n dxidm = self.xiMap.deriv(m)\n\n # dxidm*v\n v = np.matrix(dxidm*v).T\n\n # Dot product with A\n v = self.A*v\n\n # Get active time rows of T\n T = self.T.tocsr()[self.survey.tActive, :]\n\n # Must return an array\n return mkvc(sp.csr_matrix.dot(T, v))\n\n def Jtvec(self, m, v, f=None):\n\n \"\"\"Compute (Pd*T*A*dxidm)^T * v\"\"\"\n\n assert self.ispaired, \"Problem must be paired with survey to predict data\"\n\n # Define v as a column vector\n v = np.matrix(v).T\n\n # Get T'*Pd'*v\n T = self.T.tocsr()[self.survey.tActive, :]\n v = sp.csc_matrix.dot(T.transpose(), v)\n\n # Multiply by A'\n v = (np.dot(v.T, self.A)).T\n\n # Jacobian of xi wrt model\n dxidm = self.xiMap.deriv(m)\n\n # Must return an array\n return mkvc(dxidm.T*v)\n\n def unpair(self):\n \"\"\"Unbind a survey from this problem instance.\"\"\"\n if not self.ispaired:\n return\n self.survey._prob = None\n self._survey = None\n self._A = None\n self._T = None\n self._AisSet = False\n self._TisSet = False\n\n\nclass Problem_LogUniform(Problem_BaseVRM):\n\n \"\"\"\n\n \"\"\"\n\n _A = None\n _T = None\n _TisSet = False\n # _xiMap = None\n\n surveyPair = Survey.BaseSurvey\n\n chi0 = Props.PhysicalProperty(\"DC susceptibility\")\n dchi = Props.PhysicalProperty(\"Frequency dependence\")\n tau1 = Props.PhysicalProperty(\"Low bound time-relaxation constant\")\n tau2 = Props.PhysicalProperty(\"Upper bound time-relaxation constant\")\n\n def __init__(self, mesh, **kwargs):\n\n 
super(Problem_LogUniform, self).__init__(mesh, **kwargs)\n\n @property\n def A(self):\n\n \"\"\"\n This function constructs the geometric sensitivity matrix for the VRM\n problem. This function requires that the problem be paired with a\n survey object.\n\n \"\"\"\n\n if self._AisSet is False:\n\n assert self.ispaired, \"Problem must be paired with survey to generate A matrix\"\n\n # Remove any previously stored A matrix\n if self._A is not None:\n self._A = None\n\n print('CREATING A MATRIX')\n\n # COLLAPSE ALL A MATRICIES INTO SINGLE OPERATOR\n self._A = self._getAMatricies()\n self._AisSet = True\n\n return self._A\n\n elif self._AisSet is True:\n\n return self._A\n\n def fields(self, m=None):\n\n \"\"\"Computes the fields at every time d(t) = G*M(t)\"\"\"\n\n assert self.ispaired, \"Problem must be paired with survey to predict data\"\n\n # Fields from each source\n srcList = self.survey.srcList\n nSrc = len(srcList)\n f = []\n\n for pp in range(0, nSrc):\n\n rxList = srcList[pp].rxList\n nRx = len(rxList)\n waveObj = srcList[pp].waveform\n\n for qq in range(0, nRx):\n\n times = rxList[qq].times\n eta = waveObj.getLogUniformDecay(rxList[qq].fieldType, times, self.chi0, self.dchi, self.tau1, self.tau2)\n\n f.append(mkvc((self.A[qq] * np.matrix(eta)).T))\n\n return np.array(np.hstack(f))\n"
] |
[
[
"numpy.matrix",
"numpy.dot",
"numpy.hstack",
"numpy.abs",
"numpy.linspace",
"numpy.min",
"numpy.reshape",
"numpy.arange",
"numpy.sqrt",
"scipy.sparse.block_diag",
"scipy.sparse.diags",
"numpy.ones",
"scipy.sparse.csr_matrix.dot",
"numpy.shape",
"scipy.sparse.vstack",
"scipy.sparse.kron",
"numpy.zeros"
]
] |
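In the linear VRM problem above, predicted data take the form `d = T * A * m`: `T` is assembled per receiver as `kron(I, eta)` (one copy of the characteristic decay per receiver location) and the per-source pieces are stacked with `block_diag`. The sketch below reproduces only that sparse block structure, with made-up sizes and random stand-ins for the geometric sensitivity `A` and model `m`; it is an illustration, not SimPEG code.

```python
import numpy as np
import scipy.sparse as sp

# Hypothetical sizes: 2 receivers, 3 locations each, 4 time channels per location
n_loc, n_times = 3, 4
eta = np.linspace(1.0, 0.25, n_times).reshape(-1, 1)   # characteristic decay as a column

# Each receiver contributes kron(I_nLoc, eta): one decay curve per location
blocks = [sp.kron(sp.identity(n_loc), sp.csr_matrix(eta)) for _ in range(2)]
T = sp.block_diag(blocks)                # shape (2*n_loc*n_times, 2*n_loc)

A = np.random.rand(2 * n_loc, 5)         # stand-in geometric sensitivity
m = np.random.rand(5)                    # stand-in model vector
d = T @ (A @ m)                          # predicted data, analogous to fields()
print(T.shape, d.shape)                  # (24, 6) (24,)
```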
zhangrj91/DarkPose
|
[
"dd8403633b64936e73a3d8d44d4b34f422d6a6a0"
] |
[
"src/architecture/model_prune.py"
] |
[
"# Does human brain prune the useless neural cells?\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Empty_Cell(nn.Module):\n r\"\"\"\n This class is used to replace the useless `Cell` object whose output has no computing contributions\n for the next layer's cells (i.e. all the beta parameters associated with it are zero values).\n\n Args: like the `__init__` of the `Cell` Module\n \n pos_i : determine the spatial size of the cell\n pos_j : determine the layer depth of the cell\n c : the channel of hidden states in the current cell\n\n Input: imitate the input of the `forward` function of the `Cell` Module\n\n Return: [N, c, H, W]\n \"\"\"\n def __init__(self,pos_i,pos_j,c):\n super(Empty_Cell, self).__init__()\n self.output_channel = c\n self.pos=(pos_i, pos_j)\n\n def forward(self, prev_paral, prev_above, prev_below , prev_prev, alphas, betas, other_input=None):\n N, H, W = prev_paral.size(0), prev_paral.size(2), prev_paral.size(3)\n output = torch.zeros(\n size=(N, self.output_channel, H, W), \n device=prev_paral.device,\n dtype=betas.dtype)\n return output\n\ndef associated_cell_is_useful(a_i, a_j, type_c, pos_i, pos_j, cell_id, useful_cell_positions, useful_cell_ids, betas):\n r\"\"\"\n `Input`:\n `a_i`: the i-position of the associated cell of the current cell(pos_i, pos_j)\n `a_j`: the j-position of the associated cell of the current cell(pos_i, pos_j)\n `type_c`: the type-id of the connected association [0,1,2] (direct_connect, reduce, upsampling)\n\n `pos_i, pos_j, cell_id --> the curret cell`\n\n `useful_cell_positions`: a list recording the position (i,j) of cells which have been regarded as useful.\n `useful_cell_ids`: a list recording the cell-id (betas index) of cells which have been regarded as useful.\n\n `betas`: `[cell_num, 3]`\n\n `Output`:\n if the associated cell is existing and useful:\n return `True`\n else:\n return `False`\n \"\"\"\n if (a_i, a_j) in useful_cell_positions:\n _c_id = useful_cell_ids[ useful_cell_positions.index((a_i, a_j)) ]\n else:\n return False\n associated_beta = betas[_c_id][type_c] \n if associated_beta > 0:\n useful_cell_positions.append((pos_i, pos_j))\n useful_cell_ids.append(cell_id)\n return True\n else:\n return False\n\nfrom architecture.meta_arch import Meta_Arch\n\ndef Prune_the_Useless_Cells(arch):\n\n for name, cnf in arch.named_modules():\n # hasattr(cnf, 'betas'): # prune all the useless cells in the `Sub_Arch` module (subnetworks, CNFs)\n if not isinstance(cnf, Meta_Arch): \n continue\n depth = cnf.arch_depth\n betas = F.softmax(cnf.betas,dim=-1) # [cells_num, 3]\n Num = cnf.Num.copy() # not change the original num\n cells_num = cnf.cells_num\n if sum(Num) != cells_num:\n # in part_representation, we add the number of backbone feature pyrmiads in the cut layer position\n Num[cnf.cut_layers_num - 1] = 0 \n useful_cell_positions = []\n useful_cell_ids = []\n id = 0\n logger.info(betas)\n # if a cell is useful for comptuing, \n # we must find one path from it to the final cell with non-zero associated beta values.\n # so we need to make judgement from the final layer to the previous layers by the reverse 'cell_id' order\n # if the cell is connected by a useful cell in the next layer, it will be useful\n for pos_j_, num in enumerate(Num[::-1]): # Reverse order\n # num==0: the layer has no cells\n if num ==0: \n continue\n\n pos_j = (depth-1) - pos_j_ # absolute position of layer\n for pos_i_ in range(num): \n pos_i = (num - 1) - pos_i_ # Reverse 
order\n id +=1\n cell_id = cells_num - id # cell_id: index in `betas`\n\n if pos_i ==0 and pos_j == (depth-1): # num==1: the cell in the final layer will always be preserved.\n useful_cell_positions.append((pos_i, pos_j))\n useful_cell_ids.append(cell_id)\n else:\n # associated cells array: (pos_i, pos_j, beta_type_id)\n associations = [(pos_i-1, pos_j+1, 2), (pos_i, pos_j+1, 0), (pos_i+1, pos_j+1, 1)] \n # (pos_i, pos_j) is the prev_below the (pos_i-1, pos_j+1) tpec_c = 2\n # (pos_i, pos_j) is the prev_paral the (pos_i, pos_j+1) type_c = 0\n # (pos_i, pos_j) is the prev_above the (pos_i+1, pos_j+1) type_c =1\n \n prune_the_cell = True \n \n for (x,y,t) in associations:\n if associated_cell_is_useful(x, y, t, \n pos_i, pos_j, cell_id, \n useful_cell_positions, useful_cell_ids, betas):\n # once the cell associated it is useful, then this cell is regarded as useful\n # so do not prune this cell\n prune_the_cell = False\n break\n \n if prune_the_cell:\n if hasattr(cnf, 'cell_{}_{}'.format(pos_j, pos_i)):\n output_channel = eval('cnf.'+'cell_{}_{}'.format(pos_j, pos_i)).cell_inner_channel\n setattr(cnf, 'cell_{}_{}'.format(pos_j, pos_i), Empty_Cell(pos_i, pos_j, output_channel))\n logger.info('Cell Pruning... xxx===>000: replace the [{}-CNF]-[cell_{}_{}] by a empty cell'.format(name, pos_j, pos_i))\n\n return arch\n\nfrom architecture.operators import *\nfrom architecture.meta_cell import Cell\n\ndef Prune_the_Useless_Operations(Arch):\n for n, m in Arch.named_modules():\n # find all the 'Sub_Arch'\n #if hasattr(m, 'alphas'):\n if isinstance(m, Meta_Arch):\n # one-shot-search, \n # alphas = [h(h+1)/2, the candidate operation numbers]; \n # h: the number of hidden nodes\n alphas = F.softmax(m.alphas, dim=-1)\n \n for nn, mm in m.named_modules():\n # find all the existing useful `Cell` modules in the `Sub_Arch`\n if hasattr(mm, 'cell_arch'):\n for e, alpha_operations in enumerate(alphas):\n for o, alpha in enumerate(alpha_operations):\n if not isinstance(mm.cell_arch[e].ops[o], Zero) and alpha==0:\n mm.cell_arch[e].ops[o] = Zero()\n logger.info(\n \"Operation Pruning... &&&-->000: replace the [{}-CNF]-[{}]-[edge{}]-[{}]-th operation by a Zero operation\"\n .format(n,nn,e,o))\n \n return Arch\n\ndef Prune(Arch, prune_cells=True, prune_operations=True):\n if prune_cells:\n Arch = Prune_the_Useless_Cells(Arch)\n if prune_operations:\n Arch = Prune_the_Useless_Operations(Arch)\n return Arch\n\n \n\n \n\n\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.zeros"
]
] |
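The cell-pruning pass above hinges on one mechanism: look a submodule up by its attribute name and `setattr` a zero-output placeholder (`Empty_Cell`) in its place, so the forward pass keeps its shapes while the pruned branch contributes nothing. A minimal sketch of that mechanism follows; `TinyNet` and `ZeroBranch` are hypothetical stand-ins invented for the example, not classes from the repository above.

```python
import torch
import torch.nn as nn

class ZeroBranch(nn.Module):
    """Placeholder for a pruned branch: same output shape, all zeros."""
    def __init__(self, out_channels):
        super().__init__()
        self.out_channels = out_channels

    def forward(self, x):
        n, _, h, w = x.shape
        return torch.zeros(n, self.out_channels, h, w, device=x.device, dtype=x.dtype)

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.branch_a = nn.Conv2d(3, 8, 3, padding=1)
        self.branch_b = nn.Conv2d(3, 8, 3, padding=1)

    def forward(self, x):
        return self.branch_a(x) + self.branch_b(x)

net = TinyNet()
# Replace a branch judged useless by name, as the pruning pass does with setattr
setattr(net, 'branch_b', ZeroBranch(8))
out = net(torch.randn(2, 3, 16, 16))
print(out.shape)  # torch.Size([2, 8, 16, 16])
```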
yashpatel5400/dl-playground
|
[
"acf71dab5bb29b253bb28b966115d72d18b76a8e"
] |
[
"tf_tutorial.py"
] |
[
"import nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nimport numpy as np\nimport random\nimport pickle\nfrom collections import Counter\n\nimport tensorflow as tf\n\nlemmatizer = WordNetLemmatizer()\nnum_lines = 10000000\n\ndef create_lexicon(pos, neg):\n lexicon = []\n for fn in (pos, neg):\n with open(fn, \"r\") as f:\n contents = f.readlines()\n for line in contents:\n lexicon += list(word_tokenize(line.lower()))\n lexicon = [lemmatizer.lemmatize(word) for word in lexicon]\n word_counts = Counter(lexicon)\n LOW_THRESH = 25\n HIGH_THRESH = 1000\n filtered_words = [word for word in word_counts if\n LOW_THRESH < word_counts[word] < HIGH_THRESH]\n return filtered_words\n\ndef sample_handling(sample, lexicon, classification):\n feature_set = []\n with open(sample, \"r\") as f:\n contents = f.readlines()\n for l in contents:\n words = word_tokenize(l.lower())\n feature = np.zeros(len(lexicon))\n for word in words:\n if word in lexicon:\n ind = lexicon.index(word)\n feature[ind] += 1\n feature = list(feature)\n feature_set.append([feature, classification])\n return feature_set\n\ndef create_feature_sets_and_labels(pos,neg,test_prop=0.1):\n lexicon = create_lexicon(pos,neg)\n samples = []\n samples += sample_handling(pos, lexicon, [1,0])\n samples += sample_handling(neg, lexicon, [0,1])\n random.shuffle(samples)\n samples = np.array(samples)\n test_len = int(len(samples) * test_prop)\n trainX, trainY = list(samples[:,0][:-test_len]), list(samples[:,1][:-test_len])\n testX, testY = list(samples[:,0][-test_len:]), list(samples[:,1][-test_len:])\n return trainX, trainY, testX, testY\n\ndef dcreate_feature_sets_and_labels(pos,neg,test_size = 0.1):\n lexicon = create_lexicon(pos,neg)\n features = []\n features += sample_handling('pos.txt',lexicon,[1,0])\n features += sample_handling('neg.txt',lexicon,[0,1])\n random.shuffle(features)\n features = np.array(features)\n\n testing_size = int(test_size*len(features))\n\n train_x = list(features[:,0][:-testing_size])\n train_y = list(features[:,1][:-testing_size])\n test_x = list(features[:,0][-testing_size:])\n test_y = list(features[:,1][-testing_size:])\n\n return train_x,train_y,test_x,test_y\n\n\ndef create_model(lexicon_size, output_size):\n x = tf.placeholder(tf.float32, shape=[None, lexicon_size])\n y = tf.placeholder(tf.float32, shape=[None, output_size])\n\n h1_size = 64\n h2_size = 128\n h3_size = 256\n\n h1 = {\n \"weights\": tf.Variable(tf.random_normal(([lexicon_size, h1_size]))),\n \"biases\": tf.Variable(tf.random_normal(([h1_size])))\n }\n\n h2 = {\n \"weights\": tf.Variable(tf.random_normal(([h1_size, h2_size]))),\n \"biases\": tf.Variable(tf.random_normal(([h2_size])))\n }\n\n h3 = {\n \"weights\": tf.Variable(tf.random_normal(([h2_size, h3_size]))),\n \"biases\": tf.Variable(tf.random_normal(([h3_size])))\n }\n\n output = {\n \"weights\": tf.Variable(tf.random_normal(([h3_size, output_size]))),\n \"biases\": tf.Variable(tf.random_normal(([output_size])))\n }\n\n l1 = tf.add(tf.matmul(x, h1[\"weights\"]), h1[\"biases\"])\n u1 = tf.nn.relu(l1)\n\n l2 = tf.add(tf.matmul(u1, h2[\"weights\"]), h2[\"biases\"])\n u2 = tf.nn.relu(l2)\n\n l3 = tf.add(tf.matmul(u2, h3[\"weights\"]), h3[\"biases\"])\n u3 = tf.nn.relu(l3)\n prediction = tf.add(tf.matmul(u3, output[\"weights\"]), output[\"biases\"])\n return x, y, prediction\n\ndef run():\n trainX, trainY, testX, testY = pickle.load(open(\"sentiment_set.pickle\", \"rb\"))\n # trainX, trainY, testX, testY = create_feature_sets_and_labels(\"pos.txt\", 
\"neg.txt\")\n lexicon_size = len(trainX[0])\n output_size = 2\n\n x, y, prediction = create_model(lexicon_size, output_size)\n cost = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y)\n avg_cost = tf.reduce_mean(cost)\n optimizer = tf.train.AdamOptimizer().minimize(avg_cost)\n\n num_epochs = 30\n batch_size = 100\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for epoch in range(num_epochs):\n epoch_cost = 0\n for batch_num in range(int(len(trainX)/batch_size)):\n start_batch = batch_num * batch_size\n sampleX = trainX[start_batch:start_batch+batch_size]\n sampleY = trainY[start_batch:start_batch + batch_size]\n _, c = sess.run([optimizer, avg_cost], feed_dict={\n y: np.array(sampleY),\n x: np.array(sampleX)\n })\n epoch_cost += c\n print(\"Epoch {} completed w/ cost: {}\".format(epoch, epoch_cost))\n\n accuracy = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n acc_cost = tf.reduce_mean(tf.cast(accuracy, tf.float32))\n acc_val = acc_cost.eval({\n x: testX,\n y: testY\n })\n print(\"Completed w/ accuracy: {}\".format(acc_val))\n\nif __name__ == \"__main__\":\n trainX, trainY, testX, testY = \\\n create_feature_sets_and_labels(\"pos.txt\", \"neg.txt\")\n with open(\"sentiment_set.pickle\", \"wb\") as f:\n pickle.dump([trainX, trainY, testX, testY], f)\n run()"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.matmul",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.argmax",
"numpy.array",
"tensorflow.random_normal"
]
] |
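The featurization step in the tutorial above turns each line of text into a bag-of-words count vector indexed by the lexicon. The stripped-down sketch below shows just that step, using plain `str.split` instead of NLTK tokenization and lemmatization; the mini-lexicon and sentence are invented for the example.

```python
import numpy as np
from collections import Counter

# Hypothetical mini-lexicon; the real one is built from pos.txt / neg.txt
lexicon = ['good', 'bad', 'movie', 'great']

def featurize(sentence, lexicon):
    """Count lexicon hits per word, mirroring sample_handling() above."""
    counts = Counter(sentence.lower().split())
    return np.array([counts.get(word, 0) for word in lexicon], dtype=np.float32)

print(featurize("A great great movie", lexicon))  # [0. 0. 1. 2.]
```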
fratambot/api-template-2
|
[
"1bbb67c21298835e24328169cedb862dae3cf481"
] |
[
"app/models/algebra/array.py"
] |
[
"import numpy as np\n\n\ndef get_random(dim=5):\n arr = np.random.rand(dim)\n return arr\n"
] |
[
[
"numpy.random.rand"
]
] |
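For comparison, here is a hypothetical seeded variant of `get_random` built on numpy's newer `Generator` API, which makes the output reproducible instead of depending on the legacy global `np.random` state; the function name and default seed are illustrative only.

```python
import numpy as np

def get_random_seeded(dim=5, seed=42):
    """Reproducible counterpart of get_random(): seeded Generator instead of global state."""
    rng = np.random.default_rng(seed)
    return rng.random(dim)

print(get_random_seeded())  # deterministic 5-element array of values in [0, 1)
```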
hdert/2018.py
|
[
"66fc5afc853af2ed5d6b2fc5f280e73be200a542",
"66fc5afc853af2ed5d6b2fc5f280e73be200a542"
] |
[
"classes/Examples/graphing6.py",
"classes/Plotty.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nh = [77.1, 75.5, 76.9, 77.4, 77.9, 78.9, 79.2, 80.5, 81.9, 82.3, 83.5, 84.1, 84.4, 84.6,\n 85.1, 85.3 ,85.7, 85.5, 85.2, 84.6, 83.9, 83.5, 83.0, 82.4, 81.8, 81.5, 81.3, 81.9,\n 82.1, 83.5, 83.5, 85.1, 85.8, 86.5, 86.5, 86.0, 85.9, 85.4, 85.2, 85.1, 85.0, 84.8] \n\nhealth = np.array(h)\n\ne = [74.53532758, 74.14920369, 73.55451996, 73.50181443, 73.33681143, 72.80185448,\n 72.16652471, 72.45639481, 73.19282134, 73.82114234, 74.98103152, 75.84512345,\n 75.84364914, 75.95060123, 75.86911601, 75.92343971, 76.14301516, 76.96309168,\n 77.62766177, 78.11191872, 78.86685859, 78.99124597, 78.43518191, 77.26731199,\n 75.81493264, 75.12525621, 75.03519921, 75.1637013, 75.48616027, 75.83816206,\n 76.69214284, 77.37522931, 78.64424394, 78.54494815, 78.65074774, 79.06712173,\n 78.68630551, 78.72141311, 79.19632674, 79.5329087, 79.61862451, 79.43281184]\n\neducation = np.array(e)\n\ncs = [13.6, 13.6, 14.9, 16.4, 18.9, 19.8, 23.9, 25.7, 28.1, 30.2, 32.5, 34.8, 36.3, 37.1,\n 36.8, 35.7, 34.7, 32.4, 30.8, 29.9, 29.4, 28.7, 28.2, 28.5, 28.5, 27.5, 27.1, 26.8,\n 27.0, 28.1, 27.7, 27.6, 27.0, 25.1, 22.2, 20.6, 18.6, 17.6, 17.8, 18.1, 17.6, 18.2]\n\ncomputer_science = np.array(cs)\n\ny = [1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983,\n 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997,\n 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011]\n\nyear = np.array(y)\nps = [13.8, 14.9, 14.8, 16.5, 18.2, 19.1, 20.0, 21.3, 22.5, 23.7, 24.6, 25.7, 27.3, 27.6,\n 28.0, 27.5, 28.4, 30.4, 29.7, 31.3, 31.6, 32.6, 32.6, 33.6, 34.8, 35.9, 37.3, 38.3,\n 39.7, 40.2, 41.0, 42.2, 41.1, 41.7, 42.1, 41.6, 40.8, 40.7, 40.7, 40.7, 40.2, 40.1]\n\nphysical_sciences = np.array(ps)\n\n\n\n# Create a figure with 2x2 subplot layout and make the top left subplot active\nplt.subplot(2,2,1)\n\n# Plot in blue the % of degrees awarded to women in the Physical Sciences\nplt.plot(year, physical_sciences, color='blue')\nplt.title('Physical Sciences')\n\n# Make the top right subplot active in the current 2x2 subplot grid \nplt.subplot(2,2,2)\n\n# Plot in red the % of degrees awarded to women in Computer Science\nplt.plot(year, computer_science, color='red')\nplt.title('Computer Science')\n\n# Make the bottom left subplot active in the current 2x2 subplot grid\nplt.subplot(2,2,3)\n\n# Plot in green the % of degrees awarded to women in Health Professions\nplt.plot(year, health, color='green')\nplt.title('Health Professions')\n\n# Make the bottom right subplot active in the current 2x2 subplot grid\nplt.subplot(2,2,4)\n\n# Plot in yellow the % of degrees awarded to women in Education\nplt.plot(year, education, color='yellow')\nplt.title('Education')\n\n# Improve the spacing between subplots and display them\nplt.tight_layout()\nplt.show()\n\n",
"import matplotlib.pyplot as plt\r\n\r\nyear = [1950, 1970, 1990, 2010, 2020]\r\npop = [2.519, 3.692, 5.263, 6.972, 7.123]\r\n\r\nplt.scatter(year, pop)\r\n\r\nplt.show()"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter"
]
] |
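The 2x2 grid in graphing6.py above is laid out with repeated `plt.subplot(2, 2, k)` calls; the object-oriented `plt.subplots` API expresses the same layout as a single loop over axes. The sketch below uses placeholder series standing in for the degree-percentage arrays above, purely to show the layout pattern.

```python
import numpy as np
import matplotlib.pyplot as plt

# Placeholder data; the real series are the year/percentage arrays above
x = np.linspace(0, 10, 100)
series = {'Physical Sciences': np.sin(x), 'Computer Science': np.cos(x),
          'Health Professions': np.sqrt(x), 'Education': np.log1p(x)}

fig, axes = plt.subplots(2, 2)
for ax, (title, y) in zip(axes.flat, series.items()):
    ax.plot(x, y)
    ax.set_title(title)
plt.tight_layout()
plt.show()
```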
opconty/Transformer_STR
|
[
"f6c7521618d9640cc78135e38ed003075c686753"
] |
[
"utils/model_util.py"
] |
[
"#-*- coding: utf-8 -*-\n#'''\n# @date: 2020/5/18 下午6:06\n#\n# @author: laygin\n#\n#'''\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn.init import xavier_uniform_\nfrom torch.nn.init import constant_\nfrom torch.nn.init import xavier_normal_\nimport copy\nimport torch.nn.functional as F\nfrom config import logger\n\n\nclass TPS_STN(nn.Module):\n \"\"\" Rectification Network of RARE, namely TPS based STN \"\"\"\n\n def __init__(self, F, I_size, I_r_size, device, I_channel_num=1):\n \"\"\" Based on RARE TPS\n input:\n batch_I: Batch Input Image [batch_size x I_channel_num x I_height x I_width]\n I_size : (height, width) of the input image I\n I_r_size : (height, width) of the rectified image I_r\n I_channel_num : the number of channels of the input image I\n output:\n batch_I_r: rectified image [batch_size x I_channel_num x I_r_height x I_r_width]\n \"\"\"\n super(TPS_STN, self).__init__()\n self.F = F\n self.I_size = I_size\n self.I_r_size = I_r_size # = (I_r_height, I_r_width)\n self.I_channel_num = I_channel_num\n self.LocalizationNetwork = LocalizationNetwork(self.F, self.I_channel_num)\n self.GridGenerator = GridGenerator(self.F, self.I_r_size, device)\n\n def forward(self, batch_I):\n batch_C_prime = self.LocalizationNetwork(batch_I) # batch_size x K x 2\n build_P_prime = self.GridGenerator.build_P_prime(batch_C_prime) # batch_size x n (= I_r_width x I_r_height) x 2\n build_P_prime_reshape = build_P_prime.reshape([build_P_prime.size(0), self.I_r_size[0], self.I_r_size[1], 2])\n batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border')\n\n return batch_I_r\n\n\nclass LocalizationNetwork(nn.Module):\n \"\"\" Localization Network of RARE, which predicts C' (K x 2) from I (I_width x I_height) \"\"\"\n\n def __init__(self, F, I_channel_num):\n super(LocalizationNetwork, self).__init__()\n self.F = F\n self.I_channel_num = I_channel_num\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels=self.I_channel_num, out_channels=64, kernel_size=3, stride=1, padding=1,\n bias=False), nn.BatchNorm2d(64), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 64 x I_height/2 x I_width/2\n nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 128 x I_height/4 x I_width/4\n nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 256 x I_height/8 x I_width/8\n nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(True),\n nn.AdaptiveAvgPool2d(1) # batch_size x 512\n )\n\n self.localization_fc1 = nn.Sequential(nn.Linear(512, 256), nn.ReLU(True))\n self.localization_fc2 = nn.Linear(256, self.F * 2)\n\n # Init fc2 in LocalizationNetwork\n self.localization_fc2.weight.data.fill_(0)\n \"\"\" see RARE paper Fig. 
6 (a) \"\"\"\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))\n ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))\n ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)\n self.localization_fc2.bias.data = torch.from_numpy(initial_bias).float().view(-1)\n\n def forward(self, batch_I):\n \"\"\"\n input: batch_I : Batch Input Image [batch_size x I_channel_num x I_height x I_width]\n output: batch_C_prime : Predicted coordinates of fiducial points for input batch [batch_size x F x 2]\n \"\"\"\n batch_size = batch_I.size(0)\n features = self.conv(batch_I).view(batch_size, -1)\n batch_C_prime = self.localization_fc2(self.localization_fc1(features)).view(batch_size, self.F, 2)\n return batch_C_prime\n\n\nclass GridGenerator(nn.Module):\n \"\"\" Grid Generator of RARE, which produces P_prime by multipling T with P \"\"\"\n\n def __init__(self, F, I_r_size, device):\n \"\"\" Generate P_hat and inv_delta_C for later \"\"\"\n super(GridGenerator, self).__init__()\n self.device = device\n self.eps = 1e-6\n self.I_r_height, self.I_r_width = I_r_size\n self.F = F\n self.C = self._build_C(self.F) # F x 2\n self.P = self._build_P(self.I_r_width, self.I_r_height)\n ## for multi-gpu, you need register buffer\n self.register_buffer(\"inv_delta_C\", torch.tensor(self._build_inv_delta_C(self.F, self.C)).float()) # F+3 x F+3\n self.register_buffer(\"P_hat\", torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float()) # n x F+3\n ## for fine-tuning with different image width, you may use below instead of self.register_buffer\n #self.inv_delta_C = torch.tensor(self._build_inv_delta_C(self.F, self.C)).float().cuda() # F+3 x F+3\n #self.P_hat = torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float().cuda() # n x F+3\n\n def _build_C(self, F):\n \"\"\" Return coordinates of fiducial points in I_r; C \"\"\"\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))\n ctrl_pts_y_top = -1 * np.ones(int(F / 2))\n ctrl_pts_y_bottom = np.ones(int(F / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)\n return C # F x 2\n\n def _build_inv_delta_C(self, F, C):\n \"\"\" Return inv_delta_C which is needed to calculate T \"\"\"\n hat_C = np.zeros((F, F), dtype=float) # F x F\n for i in range(0, F):\n for j in range(i, F):\n r = np.linalg.norm(C[i] - C[j])\n hat_C[i, j] = r\n hat_C[j, i] = r\n np.fill_diagonal(hat_C, 1)\n hat_C = (hat_C ** 2) * np.log(hat_C)\n # print(C.shape, hat_C.shape)\n delta_C = np.concatenate( # F+3 x F+3\n [\n np.concatenate([np.ones((F, 1)), C, hat_C], axis=1), # F x F+3\n np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1), # 2 x F+3\n np.concatenate([np.zeros((1, 3)), np.ones((1, F))], axis=1) # 1 x F+3\n ],\n axis=0\n )\n inv_delta_C = np.linalg.inv(delta_C)\n return inv_delta_C # F+3 x F+3\n\n def _build_P(self, I_r_width, I_r_height):\n I_r_grid_x = (np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width # self.I_r_width\n I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height # self.I_r_height\n P = np.stack( # self.I_r_width x self.I_r_height x 2\n np.meshgrid(I_r_grid_x, I_r_grid_y),\n axis=2\n )\n return P.reshape([-1, 2]) # n (= self.I_r_width x self.I_r_height) x 2\n\n def _build_P_hat(self, F, C, 
P):\n n = P.shape[0] # n (= self.I_r_width x self.I_r_height)\n P_tile = np.tile(np.expand_dims(P, axis=1), (1, F, 1)) # n x 2 -> n x 1 x 2 -> n x F x 2\n C_tile = np.expand_dims(C, axis=0) # 1 x F x 2\n P_diff = P_tile - C_tile # n x F x 2\n rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False) # n x F\n rbf = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps)) # n x F\n P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)\n return P_hat # n x F+3\n\n def build_P_prime(self, batch_C_prime):\n \"\"\" Generate Grid from batch_C_prime [batch_size x F x 2] \"\"\"\n batch_size = batch_C_prime.size(0)\n batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)\n batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)\n batch_C_prime_with_zeros = torch.cat((batch_C_prime, torch.zeros(\n batch_size, 3, 2).float().to(self.device)), dim=1) # batch_size x F+3 x 2\n batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros) # batch_size x F+3 x 2\n batch_P_prime = torch.bmm(batch_P_hat, batch_T) # batch_size x n x 2\n return batch_P_prime # batch_size x n x 2\n\n\nclass BasicBlockRes(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlockRes, self).__init__()\n self.conv1 = self._conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = self._conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def _conv3x3(self, in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, input_channel, output_channel, block, layers):\n super(ResNet, self).__init__()\n\n self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]\n\n self.inplanes = int(output_channel / 8)\n self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),\n kernel_size=3, stride=1, padding=1, bias=False)\n self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))\n self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,\n kernel_size=3, stride=1, padding=1, bias=False)\n self.bn0_2 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])\n self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[\n 0], kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)\n self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[\n 1], kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))\n self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)\n self.conv3 = 
nn.Conv2d(self.output_channel_block[2], self.output_channel_block[\n 2], kernel_size=3, stride=1, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])\n\n self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)\n self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[\n 3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)\n self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])\n self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[\n 3], kernel_size=2, stride=1, padding=0, bias=False)\n self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv0_1(x)\n x = self.bn0_1(x)\n x = self.relu(x)\n x = self.conv0_2(x)\n x = self.bn0_2(x)\n x = self.relu(x)\n\n x = self.maxpool1(x)\n x = self.layer1(x)\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.maxpool2(x)\n x = self.layer2(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = self.maxpool3(x)\n x = self.layer3(x)\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n\n x = self.layer4(x)\n x = self.conv4_1(x)\n x = self.bn4_1(x)\n x = self.relu(x)\n x = self.conv4_2(x)\n x = self.bn4_2(x)\n x = self.relu(x)\n\n return x\n\n\nclass ResNet50(nn.Module):\n\n def __init__(self, input_channel, output_channel=512):\n super(ResNet50, self).__init__()\n self.ConvNet = ResNet(input_channel, output_channel, BasicBlockRes, [1, 2, 5, 3])\n\n def forward(self, input):\n return self.ConvNet(input)\n\n\nclass BiLSTM(nn.Module):\n\n def __init__(self, input_size, hidden_size, output_size):\n super(BiLSTM, self).__init__()\n self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)\n self.linear = nn.Linear(hidden_size * 2, output_size)\n\n def forward(self, input):\n \"\"\"\n input : visual feature [batch_size x T x input_size]\n output : contextual feature [batch_size x T x output_size]\n \"\"\"\n self.rnn.flatten_parameters()\n recurrent, _ = self.rnn(input) # batch_size x T x input_size -> batch_size x T x (2*hidden_size)\n output = self.linear(recurrent) # batch_size x T x output_size\n return output\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\nclass TransformerEncoder(nn.Module):\n __constants__ = ['norm']\n\n def __init__(self, encoder_layer, num_layers, norm=None):\n super(TransformerEncoder, self).__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, src, mask=None, src_key_padding_mask=None):\n # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor\n r\"\"\"Pass the input through the encoder layers in turn.\n Args:\n src: the sequence to the encoder (required).\n mask: the mask for the src sequence (optional).\n src_key_padding_mask: the mask for the src keys per batch (optional).\n Shape:\n see the docs in Transformer class.\n 
\"\"\"\n output = src\n\n for mod in self.layers:\n output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n\n\ndef _get_activation_fn(activation):\n if activation == \"relu\":\n return F.relu\n elif activation == \"gelu\":\n return F.gelu\n\n raise RuntimeError(\"activation should be relu/gelu, not {}\".format(activation))\n\n\nclass MultiheadAttention(nn.Module):\n __annotations__ = {\n 'bias_k': torch._jit_internal.Optional[torch.Tensor],\n 'bias_v': torch._jit_internal.Optional[torch.Tensor],\n }\n __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']\n\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):\n super(MultiheadAttention, self).__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n\n if self._qkv_same_embed_dim is False:\n self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim))\n self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self.kdim))\n self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self.vdim))\n self.register_parameter('in_proj_weight', None)\n else:\n self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim))\n self.register_parameter('q_proj_weight', None)\n self.register_parameter('k_proj_weight', None)\n self.register_parameter('v_proj_weight', None)\n\n if bias:\n self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = nn.Parameter(torch.empty(1, 1, embed_dim))\n self.bias_v = nn.Parameter(torch.empty(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n if self._qkv_same_embed_dim:\n xavier_uniform_(self.in_proj_weight)\n else:\n xavier_uniform_(self.q_proj_weight)\n xavier_uniform_(self.k_proj_weight)\n xavier_uniform_(self.v_proj_weight)\n\n if self.in_proj_bias is not None:\n constant_(self.in_proj_bias, 0.)\n constant_(self.out_proj.bias, 0.)\n if self.bias_k is not None:\n xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n xavier_normal_(self.bias_v)\n\n def __setstate__(self, state):\n # Support loading old MultiheadAttention checkpoints generated by v1.1.0\n if '_qkv_same_embed_dim' not in state:\n state['_qkv_same_embed_dim'] = True\n\n super(MultiheadAttention, self).__setstate__(state)\n\n def forward(self, query, key, value, key_padding_mask=None,\n need_weights=True, attn_mask=None):\n if not self._qkv_same_embed_dim:\n return F.multi_head_attention_forward(\n query, key, value, self.embed_dim, self.num_heads,\n self.in_proj_weight, self.in_proj_bias,\n self.bias_k, self.bias_v, self.add_zero_attn,\n self.dropout, self.out_proj.weight, self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask, need_weights=need_weights,\n attn_mask=attn_mask, use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_weight, 
k_proj_weight=self.k_proj_weight,\n v_proj_weight=self.v_proj_weight)\n else:\n return F.multi_head_attention_forward(\n query, key, value, self.embed_dim, self.num_heads,\n self.in_proj_weight, self.in_proj_bias,\n self.bias_k, self.bias_v, self.add_zero_attn,\n self.dropout, self.out_proj.weight, self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask, need_weights=need_weights,\n attn_mask=attn_mask)\n\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n super(TransformerEncoderLayer, self).__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerEncoderLayer, self).__setstate__(state)\n\n def forward(self, src, src_mask=None, src_key_padding_mask=None):\n src2 = self.self_attn(src, src, src, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass Transformer(nn.Module):\n\n def __init__(self, ntoken, ninp, nhid=256, nhead=2, nlayers=2, dropout=0.2):\n super(Transformer, self).__init__()\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src):\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n mask = self._generate_square_subsequent_mask(len(src)).to(src.device)\n self.src_mask = mask\n\n src = src * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, self.src_mask)\n output = self.decoder(output)\n return output\n"
] |
[
[
"numpy.expand_dims",
"torch.zeros",
"torch.sin",
"numpy.concatenate",
"numpy.fill_diagonal",
"numpy.square",
"torch.nn.Dropout",
"torch.ones",
"numpy.arange",
"torch.from_numpy",
"numpy.stack",
"torch.bmm",
"torch.arange",
"numpy.zeros",
"torch.cos",
"torch.nn.Sequential",
"numpy.log",
"torch.empty",
"numpy.linalg.inv",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.multi_head_attention_forward",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"numpy.transpose",
"numpy.meshgrid",
"torch.Tensor",
"torch.nn.LSTM",
"numpy.linalg.norm",
"torch.nn.LayerNorm",
"numpy.ones",
"torch.nn.MaxPool2d",
"torch.nn.functional.grid_sample",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU"
]
] |
tmarkn/covid-twitter
|
[
"741661d1440663f4fce5c436f314e0b0c90cc027"
] |
[
"graphs.py"
] |
[
"import json\nimport datetime\nimport numpy as np\nimport dateutil.parser\nimport operator\nimport pytz\nimport matplotlib.pyplot as plt\n\ndef deEmojify(inputString):\n return inputString.encode('ascii', 'ignore').decode('ascii')\n\n# turn save to True to save the graphs as .png images\nsave = False\nfilePath='data/twitter.json'\nlocalTweetList = []\nglobalTweetCounter = 0\nfrequencyMap = {}\npeople = {}\ntimeFormat = \"%a %b %d %H:%M:%S +0000 %Y\"\nwith open(filePath, 'r') as f:\n tweets = json.loads(f.readline())\n for tweet in tweets:\n # Try to extract the time of the tweet\n currentTime = dateutil.parser.parse(tweet['created_at'])\n currentTime = currentTime.replace(hour=0, minute=0, second=0)\n\n # print(currentTime)\n # Increment tweet count\n globalTweetCounter += 1\n \n # If our frequency map already has this time, use it, otherwise add\n if currentTime in frequencyMap.keys():\n timeMap = frequencyMap[currentTime]\n timeMap[\"count\"] += 1\n timeMap[\"list\"].append(tweet)\n else:\n frequencyMap[currentTime] = {\"count\":1, \"list\":[tweet]}\n\n # If our user is already added, use, otherwise add\n if tweet['user']['screen_name'] in people:\n people[tweet['user']['screen_name']].append(tweet)\n else:\n people[tweet['user']['screen_name']] = [tweet]\n\n# sort words before first reported case of Covid-19 2020-01-21\n# adjust times to utc\nutc = pytz.UTC\ntargetTime = utc.localize(dateutil.parser.parse('2020-01-21'))\nwordsBefore = {}\nwordsAfter = {}\nfor person in people:\n print(person, len(people[person]))\n # count number of words used\n wordsBefore[person] = {}\n wordsAfter[person] = {}\n for tweet in people[person]:\n # split text and clean words\n tempWords = tweet['text'].split()\n tempWords = [deEmojify(i).strip().lower() for i in tempWords]\n # check if before or after first case of covid and add to dictionary\n timeCreated = dateutil.parser.parse(tweet['created_at'])\n timeCreated = timeCreated.replace(hour=0, minute=0, second=0)\n\n # before first case\n if timeCreated < targetTime:\n for word in tempWords:\n if not word:\n continue\n if word in wordsBefore[person]:\n wordsBefore[person][word] += 1\n else:\n wordsBefore[person][word] = 1\n # after first case \n else:\n for word in tempWords:\n if not word:\n continue\n if word in wordsAfter[person]:\n wordsAfter[person][word] += 1\n else:\n wordsAfter[person][word] = 1\n\n # remove noisey words\n entriesToRemove = ('the', 'and', 'to', 'of', 'a', 'an', 'are', 'in', 'is', 'on')\n \n adjustedBefore = wordsBefore\n adjustedAfter = wordsAfter\n\n for k in entriesToRemove:\n adjustedBefore[person].pop(k, None)\n adjustedAfter[person].pop(k, None)\n\n # sort words\n adjustedBefore[person] = sorted(adjustedBefore[person].items(), key=lambda item: item[1], reverse=True)\n adjustedAfter[person] = sorted(adjustedAfter[person].items(), key=lambda item: item[1], reverse=True)\n\n print('\\t', adjustedBefore[person][:10])\n print('\\t', adjustedAfter[person][:10])\n\n # plot graphs\n fig, ax = plt.subplots()\n fig.set_size_inches(18,6)\n fig.tight_layout()\n fig.subplots_adjust(bottom=0.15)\n plt.subplot(1, 2, 1)\n \n plt.title(\"Word Frequency: \" + person + \" (Before First Case of Covid: 2020-01-21)\")\n plt.xticks(rotation=45, ha=\"right\") \n plt.bar([x[0] for x in adjustedBefore[person]][:30], [x[1] for x in adjustedBefore[person]][:30], color='blue', label='Word Frequency')\n \n\n plt.subplot(1, 2, 2)\n plt.title(\"Word Frequency: \" + person + \" (After First Case of Covid: 2020-01-21)\")\n plt.xticks(rotation=45, 
ha=\"right\") \n plt.bar([x[0] for x in adjustedAfter[person]][:30], [x[1] for x in adjustedAfter[person]][:30], color='red', label='Word Frequency')\n ax.legend()\n if save == True:\n plt.savefig(\"graphs/\" + person + \".png\", bbox_inches='tight')\n plt.show()\n\n# Fill in any gaps\ntimes = sorted(frequencyMap.keys())\nfirstTime = times[0]\nlastTime = times[-1]\nthisTime = firstTime\n\ntimeIntervalStep = datetime.timedelta(hours=24)\nwhile ( thisTime <= lastTime ):\n if ( thisTime not in frequencyMap.keys() ):\n frequencyMap[thisTime] = {\"count\":0, \"list\":[]}\n \n thisTime = thisTime + timeIntervalStep\n\nprint (\"Processed Tweet Count:\", globalTweetCounter)\n\nfig, ax = plt.subplots()\nfig.set_size_inches(10,4)\nfig.tight_layout()\nfig.subplots_adjust(bottom=0.3)\n\nplt.title(\"Tweet Frequency (First case of COVID-19 at 2020-01-21)\")\n\n# Sort the times into an array for future use\nsortedTimes = sorted(frequencyMap.keys())\n\n# What time span do these tweets cover?\nprint (\"Time Frame:\", sortedTimes[0], sortedTimes[-1])\n\n# Get a count of tweets per minute\npostFreqList = [frequencyMap[x][\"count\"] for x in sortedTimes]\n\n# We'll have ticks every 10 days\nsmallerXTicks = range(0, len(sortedTimes), 10)\nplt.xticks(smallerXTicks, [sortedTimes[x].strftime('%Y:%m:%d') for x in smallerXTicks], rotation=45, ha=\"right\")\n\n# Plot the post frequency\nax.plot(range(len(frequencyMap)), [x if x > 0 else 0 for x in postFreqList], color=\"blue\", label=\"Posts\")\nax.grid(b=True, which=u'major')\nax.legend()\nif save == True:\n plt.savefig(\"graphs/tweetFreq.png\", bbox_inches='tight')\nplt.show()\n\n# covid data\ncovid = np.loadtxt(\"data/us-counties.csv\", delimiter=\",\", dtype='str')\ncovid = np.delete(covid, (0), axis=0)\ncases = {}\nfor time in sortedTimes:\n cases[time] = 0\n\ncount = 0\nfor reported in covid:\n # print(reported)\n time = utc.localize(dateutil.parser.parse(reported[0]))\n count += int(reported[4])\n # print(reported[4])\n cases[time] = count\n # print(cases[time])\n\n# print(len(cases))\n# print(len(frequencyMap))\n# print(count)\n# for k in cases:\n# print(k, cases[k])\n\npostCaseList = np.array([cases[x] for x in cases])\n# print(postCaseList)\n\nfig2, ax2 = plt.subplots()\nfig2.set_size_inches(10,4)\nfig2.tight_layout()\nfig2.subplots_adjust(left=0.1, bottom=0.3)\nplt.title(\"Reported Cases of COVID-19 (First case at 2020-01-21)\")\nplt.xticks(smallerXTicks, [sortedTimes[x].strftime('%Y:%m:%d') for x in smallerXTicks], rotation=45, ha=\"right\")\nax2.plot(range(len(sorted([k for k in cases]))), postCaseList, color=\"red\", label='Reported Cases')\nax2.grid(b=True, which=u'major')\nax2.legend()\nif save == True:\n plt.savefig(\"graphs/reportedCases.png\", bbox_inches='tight')\nplt.show()\n\n# A map for hashtag counts\nhashtagCounter = {}\n\n# For each minute, pull the list of hashtags and add to the counter\nfor t in sortedTimes:\n timeObj = frequencyMap[t]\n \n for tweet in timeObj[\"list\"]:\n hashtagList = tweet[\"entities\"][\"hashtags\"]\n \n for hashtagObj in hashtagList:\n \n # We lowercase the hashtag to avoid duplicates (e.g., #MikeBrown vs. 
#mikebrown)\n hashtagString = hashtagObj[\"text\"].lower()\n \n if ( hashtagString not in hashtagCounter ):\n hashtagCounter[hashtagString] = 1\n else:\n hashtagCounter[hashtagString] += 1\n\nprint (\"Unique Hashtags:\", len(hashtagCounter.keys()))\nsortedHashtags = sorted(hashtagCounter, key=hashtagCounter.get, reverse=True)\n# print (\"Top Twenty Hashtags:\")\n# for ht in sortedHashtags[:20]:\n# print (\"\\t\", \"#\" + ht, hashtagCounter[ht])\n\nfig, ax = plt.subplots()\nfig.set_size_inches(10,4)\nfig.subplots_adjust(bottom=0.3)\nplt.title(\"Hashtag Count\")\nplt.xticks(range(20), [ht for ht in sortedHashtags][:20], rotation=45, ha=\"right\")\nax.bar(range(20), [hashtagCounter[ht] for ht in sortedHashtags][:20], color=\"red\", label='Hashtags')\nax.grid(b=True, which=u'major')\nax.legend()\nif save == True:\n plt.savefig(\"graphs/hashtags.png\", bbox_inches='tight')\nplt.show()"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"numpy.delete",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.bar",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.loadtxt"
]
] |
physimals/quantiphyse
|
[
"34f40424941414ce139c4612a903de3f24883576"
] |
[
"quantiphyse/packages/core/smoothing/process.py"
] |
[
"\"\"\"\nQuantiphyse - Analysis processes for data smoothing\n\nCopyright (c) 2013-2020 University of Oxford\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nimport scipy.ndimage.filters\n\nfrom quantiphyse.processes import Process\nfrom quantiphyse.data import NumpyData\n\nclass SmoothingProcess(Process):\n \"\"\"\n Simple process for Gaussian smoothing\n \"\"\"\n PROCESS_NAME = \"Smooth\"\n\n def __init__(self, ivm, **kwargs):\n Process.__init__(self, ivm, **kwargs)\n\n def run(self, options):\n data = self.get_data(options)\n\n output_name = options.pop(\"output-name\", \"%s_smoothed\" % data.name)\n #kernel = options.pop(\"kernel\", \"gaussian\")\n order = options.pop(\"order\", 0)\n mode = options.pop(\"boundary-mode\", \"reflect\")\n sigma = options.pop(\"sigma\", 1.0)\n\n # Sigma is in mm so scale with data voxel sizes\n if isinstance(sigma, (int, float)):\n sigmas = [float(sigma) / size for size in data.grid.spacing]\n else:\n sigmas = [float(sig) / size for sig, size in zip(sigma, data.grid.spacing)]\n\n # Smooth multiple volumes independently\n if data.nvols > 1:\n sigmas += [0, ]\n\n output = self._norm_conv(data.raw(), sigmas, order=order, mode=mode)\n self.ivm.add(NumpyData(output, grid=data.grid, name=output_name), make_current=True)\n\n def _norm_conv(self, data, sigma, **kwargs):\n \"\"\"\n Normalized convolution\n\n This is a way to compensate for data having nan/infinite values.\n Taken from stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python\n \"\"\"\n v = data.copy()\n v[~np.isfinite(data)] = 0\n vv = scipy.ndimage.filters.gaussian_filter(v, sigma, **kwargs)\n\n w = 0*data.copy()+1\n w[~np.isfinite(data)] = 0\n ww = scipy.ndimage.filters.gaussian_filter(w, sigma, **kwargs)\n\n return vv/ww\n"
] |
[
[
"numpy.isfinite"
]
] |
Vergenter/familyWealth
|
[
"eefbc8d3e7cbe6e59add97dffb35ac3a19f17ea9"
] |
[
"web_scraper/every_item_by_country_in_usd/web_scraper.py"
] |
[
"# source: https://www.thepythoncode.com/article/convert-html-tables-into-csv-files-in-python\n\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\n\nUSER_AGENT = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36\"\n# US english\nLANGUAGE = \"en-US,en;q=0.5\"\n\ndef get_soup(url):\n \"\"\"Constructs and returns a soup using the HTML content of `url` passed\"\"\"\n # initialize a session\n session = requests.Session()\n # set the User-Agent as a regular browser\n session.headers['User-Agent'] = USER_AGENT\n # request for english content (optional)\n session.headers['Accept-Language'] = LANGUAGE\n session.headers['Content-Language'] = LANGUAGE\n # make the request\n html = session.get(url)\n # return the soup\n return bs(html.content, \"html.parser\")\n\ndef get_all_tables(soup):\n \"\"\"Extracts and returns all tables in a soup object\"\"\"\n return soup.find_all(\"table\")\n\ndef get_table_headers(table):\n \"\"\"Given a table soup, returns all the headers\"\"\"\n headers = []\n for th in table.find(\"tr\").find_all(\"th\"):\n headers.append(th.text.strip())\n return headers\n\ndef get_table_rows(table):\n \"\"\"Given a table, returns all its rows\"\"\"\n rows = []\n for tr in table.find_all(\"tr\")[1:]:\n cells = []\n # grab all td tags in this table row\n tds = tr.find_all(\"td\")\n if len(tds) == 0:\n # if no td tags, search for th tags\n # can be found especially in wikipedia tables below the table\n ths = tr.find_all(\"th\")\n for th in ths:\n cells.append(th.text.strip())\n else:\n # use regular td tags\n for td in tds:\n cells.append(td.text.strip())\n rows.append(cells)\n return rows\n\ndef save_as_csv(table_name, headers, rows):\n pd.DataFrame(rows, columns=headers).to_csv(f\"{table_name}.csv\")\n\ndef main(url, url_args, item_id):\n # itemId = 24 -> gasonline\n url = url + str(url_args) + \"&itemId=\" + str(item_id)\n # get the soup\n soup = get_soup(url)\n # extract all the tables from the web page\n tables = get_all_tables(soup)\n print(f\"[+] Found a total of {len(tables)} tables.\")\n # iterate over all tables\n for i, table in enumerate(tables, start=1):\n # get the table headers\n headers = get_table_headers(table)\n # get all the rows of the table\n rows = get_table_rows(table)\n # save table as csv file\n table_name = f\"table-{i}\"\n print(f\"[+] Saving {table_name}\")\n save_as_csv(table_name + '-' + str(url_args) + '-' + str(item_id), headers, rows)\n\nif __name__ == \"__main__\":\n url = \"https://www.numbeo.com/cost-of-living/historical-prices-by-country?displayCurrency=USD&year=\"\n url_args = range(2010, 2021)\n item_ids = [101, 100, 228, 224, 60, 66, 64, 62, 110, 118, 121, 14, 19, 17, 15, 11, 16, \\\n 113, 9, 12, 8, 119, 111, 112, 115, 116, 13, 27, 26, 29, 28, 114, \\\n 6, 4, 5, 3, 2, 1, 7, 105, 106, 44, 40, 42, 24, 20, 18, 109, 108, \\\n 107, 206, 25, 32, 30, 33]\n for i in url_args:\n for j in item_ids:\n main(url, i, j)"
] |
[
[
"pandas.DataFrame"
]
] |
MIC-Surgery-Heidelberg/HyperGUI_1.0
|
[
"0ee8e0da85049076bb22a542d15d6c3adf6ea106"
] |
[
"HyperGuiModules/csv_saver.py"
] |
[
"from HyperGuiModules.utility import *\nimport numpy as np\nimport os\n\n\nclass CSVSaver:\n def __init__(self, csv_frame, listener):\n self.root = csv_frame\n\n # Listener\n self.listener = listener\n\n self.ogr_butt = None\n self.ogrp_butt = None\n self.normr_butt = None\n self.normrp_butt = None\n self.oga_butt = None\n self.ogap_butt = None\n self.norma_butt = None\n self.normap_butt = None\n self.rec_butt = None\n self.norm_rec_butt = None\n self.new_butt = None\n self.norm_new_butt = None\n self.og_butt = None\n self.norm_og_butt = None\n self.all_butt = None\n self.reflectance_text = None\n self.absorbance_text = None\n self.image_text = None\n\n self.info_label = None\n\n self._init_widget()\n\n # ------------------------------------------------ INITIALIZATION ------------------------------------------------\n\n def _init_widget(self):\n self._build_og_reflectance()\n self._build_og_reflectance_positive()\n self._build_norm_reflectance()\n self._build_norm_reflectance_positive()\n self._build_og_absorbance()\n self._build_og_absorbance_positive()\n self._build_norm_absorbance()\n self._build_norm_absorbance_positive()\n self._build_rec()\n self._build_norm_rec()\n self._build_new()\n self._build_norm_new()\n self._build_og()\n self._build_norm_og()\n self._build_all()\n self._build_text()\n self._build_info_label()\n\n # --------------------------------------------------- BUILDERS ---------------------------------------------------\n\n def _build_og_reflectance(self):\n self.ogr_butt = make_button(self.root, text=\"1. Original to CSV (Original Data Cube)\",\n command=self.__ogr_to_csv, row=2, column=0, outer_pady=(0, 5), outer_padx=15,\n width=32)\n\n def _build_og_reflectance_positive(self):\n self.ogrp_butt = make_button(self.root, text=\"2. Original without Negative Values to CSV\",\n command=self.__ogrp_to_csv, row=3, column=0, outer_pady=(0, 5), outer_padx=15,\n width=32)\n\n def _build_norm_reflectance(self):\n self.normr_butt = make_button(self.root, text=\"3. Normalised to CSV\", command=self.__normr_to_csv, row=4,\n column=0, outer_pady=(0, 5), outer_padx=15, width=32)\n\n def _build_norm_reflectance_positive(self):\n self.normrp_butt = make_button(self.root, text=\"4. Normalised without Negative Values to CSV\",\n command=self.__normrp_to_csv, row=5, column=0, outer_pady=(0, 5), outer_padx=15,\n width=32)\n\n def _build_og_absorbance(self):\n self.oga_butt = make_button(self.root, text=\"5. Original to CSV\", command=self.__oga_to_csv, row=7, column=0,\n outer_pady=(0, 5), outer_padx=15, width=32)\n\n def _build_og_absorbance_positive(self):\n self.ogap_butt = make_button(self.root, text=\"6. Original without Negative Values to CSV\",\n command=self.__ogap_to_csv, row=8, column=0, outer_pady=(0, 5), outer_padx=15,\n width=32)\n\n def _build_norm_absorbance(self):\n self.norma_butt = make_button(self.root, text=\"7. Normalised to CSV\", command=self.__norma_to_csv, row=9,\n column=0, outer_pady=(0, 5), outer_padx=15, width=32)\n\n def _build_norm_absorbance_positive(self):\n self.normap_butt = make_button(self.root, text=\"8. Normalised without Negative Values to CSV\",\n command=self.__normap_to_csv, row=10, column=0, outer_pady=(0, 5), outer_padx=15,\n width=32)\n\n def _build_og(self):\n self.og_butt = make_button(self.root, text=\"9. Original Image to CSV\", command=self.__og_to_csv, row=12,\n column=0, outer_pady=(0, 5), outer_padx=15, width=32)\n\n def _build_norm_og(self):\n self.og_new_butt = make_button(self.root, text=\"10. 
Normalised Original Image to CSV\",\n command=self.__norm_og_to_csv, row=13, column=0, outer_pady=(0, 5),\n outer_padx=15, width=32)\n\n def _build_rec(self):\n self.rec_butt = make_button(self.root, text=\"11. Recreated Image to CSV\", command=self.__rec_to_csv, row=14,\n column=0, outer_pady=(0, 5), outer_padx=15, width=32)\n\n def _build_norm_rec(self):\n self.norm_rec_butt = make_button(self.root, text=\"12. Normalised Recreated Image to CSV\",\n command=self.__norm_rec_to_csv, row=15, column=0, outer_pady=(0, 5),\n outer_padx=15, width=32)\n\n def _build_new(self):\n self.new_butt = make_button(self.root, text=\"13. New Image to CSV\", command=self.__new_to_csv, row=16, column=0,\n outer_pady=(0, 5), outer_padx=15, width=32)\n\n def _build_norm_new(self):\n self.norm_new_butt = make_button(self.root, text=\"14. Normalised New Image to CSV\",\n command=self.__norm_new_to_csv, row=17, column=0, outer_pady=(0, 5),\n outer_padx=15, width=32)\n\n def _build_all(self):\n self.all_butt = make_button(self.root, text=\"All to CSV\", command=self.__all_to_csv, row=18, column=0,\n outer_pady=(5, 15), outer_padx=15, width=32)\n\n def _build_text(self):\n self.reflectance_text = make_text(self.root, content=\"Reflectance:\", bg=tkcolour_from_rgb(BACKGROUND),\n column=0, row=1, width=12, pady=(0, 5))\n self.absorbance_text = make_text(self.root, content=\"Absorbance:\", bg=tkcolour_from_rgb(BACKGROUND), column=0,\n row=6, width=11, pady=(5, 5))\n self.image_text = make_text(self.root, content=\"Images:\", bg=tkcolour_from_rgb(BACKGROUND), column=0, row=11,\n width=7, pady=(5, 5))\n\n def _build_info_label(self):\n self.info_label = make_label_button(self.root, text='Data to CSV', command=self.__info, width=9)\n self.info_label.grid(padx=(0, 200))\n\n # ----------------------------------------------------- MISC -----------------------------------------------------\n\n def new_info(self):\n image_mode = self.listener.modules[NEW_COLOUR].displayed_image_mode\n if image_mode == WL:\n return self.listener.get_csv_new_info(mode='WL')\n elif image_mode == IDX:\n return self.listener.get_csv_new_info(mode='IDX')\n\n @staticmethod\n def _make_direc(direc):\n if not os.path.isdir(direc):\n os.mkdir(direc)\n\n # -------------------------------------------------- CALLBACKS ---------------------------------------------------\n\n def __info(self):\n info = self.listener.modules[INFO].csv_info\n title = \"Data to CSV Information\"\n make_info(title=title, info=info)\n\n def __all_to_csv(self):\n self.__ogr_to_csv()\n self.__ogrp_to_csv()\n self.__oga_to_csv()\n self.__ogap_to_csv()\n self.__normr_to_csv()\n self.__normrp_to_csv()\n self.__norma_to_csv()\n self.__normap_to_csv()\n self.__rec_to_csv()\n self.__norm_rec_to_csv()\n self.__new_to_csv()\n self.__norm_new_to_csv()\n self.__og_to_csv()\n self.__norm_og_to_csv()\n\n def __ogr_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ref_data_cube(path)))\n direc = os.path.dirname(path) + '/01_Reflectance_Original'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '01_refl_og_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __ogrp_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = 
np.flipud(np.rot90(self.listener.ref_non_neg_cube(path)))\n direc = os.path.dirname(path) + '/02_Reflectance_Original_without_Negative_Values'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '02_refl_og_wo_neg_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __normr_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ref_norm_cube(path)))\n direc = os.path.dirname(path) + '/03_Reflectance_Normalised'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '03_refl_norm_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __normrp_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ref_norm_non_neg_cube(path)))\n direc = os.path.dirname(path) + '/04_Reflectance_Normalised_without_Negative_Values'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '04_refl_norm_wo_neg_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __oga_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ab_data_cube(path)))\n direc = os.path.dirname(path) + '/05_Absorbance_Original'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '05_abs_og_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __ogap_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ab_non_neg_cube(path)))\n direc = os.path.dirname(path) + '/06_Absorbance_Original_without_Negative_Values'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '06_abs_og_wo_neg_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __norma_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ab_norm_cube(path)))\n direc = os.path.dirname(path) + '/07_Absorbance_Normalised'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '07_abs_norm_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", fmt='%s')\n\n def __normap_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = np.flipud(np.rot90(self.listener.ab_norm_non_neg_cube(path)))\n direc = os.path.dirname(path) + '/08_Absorbance_Normalised_without_Negative_Values'\n self._make_direc(direc)\n for i in range(100):\n num = i * 5 + 500\n progress(i, 100)\n big_path = direc + '/' + '08_abs_norm_wo_neg_slice_' + str(num) + '.csv'\n np.savetxt(big_path, np.flipud(data[:, :, i]), delimiter=\",\", 
fmt='%s')\n\n def __og_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = self.listener.get_current_original_data()\n arr = []\n val = np.ma.is_masked(data)\n mask = [[False for _ in range(640)] for _ in range(480)]\n if val:\n mask = data.mask\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] == '--' or data[i][j] is None or mask[i][j]:\n arr.append(str(''))\n else:\n arr.append(str(float(data[i][j])))\n data = np.asarray(arr).reshape((480, 640))\n info = self.listener.get_csv_og_info()\n direc = os.path.dirname(path) + '/9_Original_Image'\n self._make_direc(direc)\n big_path = direc + '/' + '9_original_image' + info + '.csv'\n np.savetxt(big_path, data, delimiter=\",\", fmt='%s')\n\n def __norm_og_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = self.listener.get_current_norm_original_data()\n arr = []\n val = np.ma.is_masked(data)\n mask = [[False for _ in range(640)] for _ in range(480)]\n if val:\n mask = data.mask\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] == '--' or data[i][j] is None or mask[i][j]:\n arr.append(str(''))\n else:\n arr.append(str(float(data[i][j])))\n data = np.asarray(arr).reshape((480, 640))\n info = self.listener.get_csv_og_info()\n direc = os.path.dirname(path) + '/10_Normalised_Original_Image'\n self._make_direc(direc)\n big_path = direc + '/' + '10_norm_original_image' + info + '.csv'\n np.savetxt(big_path, data, delimiter=\",\", fmt='%s')\n\n def __rec_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = self.listener.get_current_rec_data().T\n arr = []\n val = np.ma.is_masked(data)\n mask = [[False for _ in range(640)] for _ in range(480)]\n if val:\n mask = data.mask\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] == '--' or data[i][j] is None or mask[i][j]:\n arr.append(str(''))\n else:\n arr.append(str(float(data[i][j])))\n data = np.asarray(arr).reshape((480, 640))\n info = self.listener.get_csv_rec_info()\n direc = os.path.dirname(path) + '/11_Recreated_Image'\n self._make_direc(direc)\n big_path = direc + '/' + '11_recreated_image' + info + '.csv'\n np.savetxt(big_path, np.flipud(data), delimiter=\",\", fmt='%s')\n\n def __norm_rec_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = self.listener.get_current_norm_rec_data().T\n arr = []\n val = np.ma.is_masked(data)\n mask = [[False for _ in range(640)] for _ in range(480)]\n if val:\n mask = data.mask\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] == '--' or data[i][j] is None or mask[i][j]:\n arr.append(str(''))\n else:\n arr.append(str(float(data[i][j])))\n data = np.asarray(arr).reshape((480, 640))\n info = self.listener.get_csv_rec_info()\n direc = os.path.dirname(path) + '/12_Normalised_Recreated_Image'\n self._make_direc(direc)\n big_path = direc + '/' + '12_norm_recreated_image' + info + '.csv'\n np.savetxt(big_path, np.flipud(data), delimiter=\",\", fmt='%s')\n\n def __new_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = self.listener.get_current_new_data().T\n arr = []\n val = 
np.ma.is_masked(data)\n mask = [[False for _ in range(640)] for _ in range(480)]\n if val:\n mask = data.mask\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] == '--' or data[i][j] is None or mask[i][j]:\n arr.append(str(''))\n else:\n arr.append(str(float(data[i][j])))\n data = np.asarray(arr).reshape((480, 640))\n info = self.new_info()\n direc = os.path.dirname(path) + '/13_New_Image'\n self._make_direc(direc)\n big_path = direc + '/' + '13_new_image' + info + '.csv'\n np.savetxt(big_path, np.flipud(data), delimiter=\",\", fmt='%s')\n\n def __norm_new_to_csv(self):\n for path, _ in self.listener.results.items():\n selected_paths = self.listener.selected_paths\n if path in selected_paths:\n data = self.listener.get_current_norm_new_data().T\n arr = []\n val = np.ma.is_masked(data)\n mask = [[False for _ in range(640)] for _ in range(480)]\n if val:\n mask = data.mask\n for i in range(len(data)):\n for j in range(len(data[i])):\n if data[i][j] == '--' or data[i][j] is None or mask[i][j]:\n arr.append(str(''))\n else:\n arr.append(str(float(data[i][j])))\n data = np.asarray(arr).reshape((480, 640))\n info = self.new_info()\n direc = os.path.dirname(path) + '/14_Normalised_New_Image'\n self._make_direc(direc)\n big_path = direc + '/' + '14_norm_new_image' + info + '.csv'\n np.savetxt(big_path, np.flipud(data), delimiter=\",\", fmt='%s')\n"
] |
[
[
"numpy.savetxt",
"numpy.flipud",
"numpy.ma.is_masked",
"numpy.asarray"
]
] |
Wi11iamDing/toad
|
[
"0b6973e910c337b779b6c95087f6d24b89a20eed"
] |
[
"toad/stats_test.py"
] |
[
"import pytest\nimport numpy as np\nimport pandas as pd\n\nfrom .stats import IV, WOE, gini, gini_cond, entropy_cond, quality, _IV, VIF\n\n\nnp.random.seed(1)\n\nfeature = np.random.rand(500)\ntarget = np.random.randint(2, size = 500)\nA = np.random.randint(100, size = 500)\nB = np.random.randint(100, size = 500)\nmask = np.random.randint(8, size = 500)\n\ndf = pd.DataFrame({\n 'feature': feature,\n 'target': target,\n 'A': A,\n 'B': B,\n})\n\n\ndef test_woe():\n value = WOE(0.2, 0.3)\n assert value == -0.4054651081081643\n\ndef test_iv_priv():\n value, _ = _IV(df['feature'], df['target'])\n assert value == 0.010385942643745403\n\ndef test_iv():\n value = IV(df['feature'], df['target'], n_bins = 10, method = 'dt')\n assert value == 0.2735917707743619\n\ndef test_iv_return_sub():\n _, sub = IV(mask, df['target'], return_sub = True, n_bins = 10, method = 'dt')\n assert len(sub) == 8\n assert sub[4] == 0.006449386778057019\n\ndef test_iv_frame():\n res = IV(df, 'target', n_bins = 10, method = 'chi')\n assert res.loc[0, 'A'] == 0.226363832867123\n\ndef test_gini():\n value = gini(df['target'])\n assert value == 0.499352\n\ndef test_gini_cond():\n value = gini_cond(df['feature'], df['target'])\n assert value == 0.4970162601626016\n\ndef test_entropy_cond():\n value = entropy_cond(df['feature'], df['target'])\n assert value == 0.6924990371522171\n\ndef test_quality():\n result = quality(df, 'target')\n assert result.loc['feature', 'iv'] == 0.2735917707743619\n assert result.loc['A', 'gini'] == 0.49284164671885444\n assert result.loc['B', 'entropy'] == 0.6924956879070063\n assert result.loc['feature', 'unique'] == 500\n\ndef test_quality_iv_only():\n result = quality(df, 'target', iv_only = True)\n assert np.isnan(result.loc['feature', 'gini'])\n\ndef test_quality_with_merge():\n result = quality(df, 'target', n_bins = 5, method = 'chi')\n assert result.loc['feature', 'iv'] == 0.13367825777558\n\ndef test_quality_object_type_array_with_nan():\n feature = np.array([np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G'], dtype = 'O')[mask]\n\n df = pd.DataFrame({\n 'feature': feature,\n 'target': target,\n })\n result = quality(df)\n assert result.loc['feature', 'iv'] == 0.016379338180530334\n\ndef test_vif():\n vif = VIF(df)\n assert vif['A'] == 2.969336442640111\n"
] |
[
[
"numpy.random.seed",
"numpy.isnan",
"pandas.DataFrame",
"numpy.random.rand",
"numpy.array",
"numpy.random.randint"
]
] |
lulongfei-luffy/once-for-all
|
[
"d3c5f5f613bb3454fd18043e1d217f583db9f4b0"
] |
[
"ofa/elastic_nn/networks/ofa_mbv3.py"
] |
[
"# Once for All: Train One Network and Specialize it for Efficient Deployment\n# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han\n# International Conference on Learning Representations (ICLR), 2020.\n\nimport copy\nimport random\n\nimport torch\n\nfrom ofa.elastic_nn.modules.dynamic_layers import DynamicMBConvLayer, DynamicConvLayer, DynamicLinearLayer\nfrom ofa.layers import ConvLayer, IdentityLayer, LinearLayer, MBInvertedConvLayer\nfrom ofa.imagenet_codebase.networks.mobilenet_v3 import MobileNetV3, MobileInvertedResidualBlock\nfrom ofa.imagenet_codebase.utils import make_divisible, int2list\n\n\nclass OFAMobileNetV3(MobileNetV3):\n \"\"\"\n et = OFAMobileNetV3(\n dropout_rate=0, width_mult_list=1.2, ks_list=[3, 5, 7], expand_ratio_list=[3, 4, 6], depth_list=[2, 3, 4],\n )\n \"\"\"\n def __init__(self, n_classes=1000, bn_param=(0.1, 1e-5), dropout_rate=0.1, base_stage_width=None,\n width_mult_list=1.0, ks_list=3, expand_ratio_list=6, depth_list=4):\n\n self.width_mult_list = int2list(width_mult_list, 1)\n self.ks_list = int2list(ks_list, 1)\n self.expand_ratio_list = int2list(expand_ratio_list, 1)\n self.depth_list = int2list(depth_list, 1)\n self.base_stage_width = base_stage_width\n\n self.width_mult_list.sort()\n self.ks_list.sort()\n self.expand_ratio_list.sort()\n self.depth_list.sort()\n\n # base_stage_width = [16, 24, 40, 80, 112, 160, 960, 1280]\n base_stage_width = [16, 24, 40, 80, 112, 160, 24, 40, 80, 112, 160, 960, 1280]\n final_expand_width = [\n make_divisible(base_stage_width[-2] * max(self.width_mult_list), 8) for _ in self.width_mult_list\n ]\n last_channel = [\n make_divisible(base_stage_width[-1] * max(self.width_mult_list), 8) for _ in self.width_mult_list\n ]\n\n # stride_stages = [1, 2, 2, 2, 1, 2]\n # act_stages = ['relu', 'relu', 'relu', 'h_swish', 'h_swish', 'h_swish']\n # se_stages = [False, False, True, False, True, True]\n\n stride_stages = [1, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2]\n act_stages = ['relu', 'relu', 'relu', 'h_swish', 'h_swish', 'h_swish','relu', 'relu', 'h_swish', 'h_swish', 'h_swish']\n se_stages = [False, False, True, False, True, True, False, True, False, True, True]\n if depth_list is None:\n n_block_list = [1, 2, 3, 4, 2, 3]\n self.depth_list = [4, 4]\n print('Use MobileNetV3 Depth Setting')\n else:\n n_block_list = [1] + [max(self.depth_list)] * 10 # depth_list = [2,3,4]\n # [1, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4]\n width_list = []\n for base_width in base_stage_width[:-2]:\n width = [make_divisible(base_width * width_mult, 8) for width_mult in self.width_mult_list]\n width_list.append(width)\n\n input_channel = width_list[0]\n # first conv layer\n if len(set(input_channel)) == 1:\n first_conv = ConvLayer(3, max(input_channel), kernel_size=3, stride=2, act_func='h_swish')\n first_block_conv = MBInvertedConvLayer(\n in_channels=max(input_channel), out_channels=max(input_channel), kernel_size=3, stride=stride_stages[0],\n expand_ratio=1, act_func=act_stages[0], use_se=se_stages[0],\n )\n else:\n first_conv = DynamicConvLayer(\n in_channel_list=int2list(3, len(input_channel)), out_channel_list=input_channel, kernel_size=3,\n stride=2, act_func='h_swish',\n )\n first_block_conv = DynamicMBConvLayer(\n in_channel_list=input_channel, out_channel_list=input_channel, kernel_size_list=3, expand_ratio_list=1,\n stride=stride_stages[0], act_func=act_stages[0], use_se=se_stages[0],\n )\n first_block = MobileInvertedResidualBlock(first_block_conv, IdentityLayer(input_channel, input_channel))\n\n # inverted residual blocks\n 
self.block_group_info = []\n blocks = [first_block]\n _block_index = 1\n feature_dim = input_channel\n\n for width, n_block, s, act_func, use_se in zip(width_list[1:], n_block_list[1:],\n stride_stages[1:], act_stages[1:], se_stages[1:]):\n self.block_group_info.append([_block_index + i for i in range(n_block)])\n _block_index += n_block\n\n output_channel = width\n for i in range(n_block):\n if i == 0:\n stride = s\n else:\n stride = 1\n mobile_inverted_conv = DynamicMBConvLayer(\n in_channel_list=feature_dim, out_channel_list=output_channel, kernel_size_list=ks_list,\n expand_ratio_list=expand_ratio_list, stride=stride, act_func=act_func, use_se=use_se,\n )\n if stride == 1 and feature_dim == output_channel:\n shortcut = IdentityLayer(feature_dim, feature_dim)\n else:\n shortcut = None\n blocks.append(MobileInvertedResidualBlock(mobile_inverted_conv, shortcut))\n feature_dim = output_channel\n # final expand layer, feature mix layer & classifier\n if len(final_expand_width) == 1:\n final_expand_layer = ConvLayer(max(feature_dim), max(final_expand_width), kernel_size=1, act_func='h_swish')\n feature_mix_layer = ConvLayer(\n max(final_expand_width), max(last_channel), kernel_size=1, bias=False, use_bn=False, act_func='h_swish',\n )\n else:\n final_expand_layer = DynamicConvLayer(\n in_channel_list=feature_dim, out_channel_list=final_expand_width, kernel_size=1, act_func='h_swish'\n )\n feature_mix_layer = DynamicConvLayer(\n in_channel_list=final_expand_width, out_channel_list=last_channel, kernel_size=1,\n use_bn=False, act_func='h_swish',\n )\n if len(set(last_channel)) == 1:\n classifier = LinearLayer(max(last_channel), n_classes, dropout_rate=dropout_rate)\n else:\n classifier = DynamicLinearLayer(\n in_features_list=last_channel, out_features=n_classes, bias=True, dropout_rate=dropout_rate\n )\n super(OFAMobileNetV3, self).__init__(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n\n # set bn param\n self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])\n\n # runtime_depth\n self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]\n\n \"\"\" MyNetwork required methods \"\"\"\n\n @staticmethod\n def name():\n return 'OFAMobileNetV3'\n\n def forward(self, x):\n # first conv\n x = self.first_conv(x)\n # first block\n x = self.blocks[0](x)\n\n # blocks\n for stage_id, block_idx in enumerate(self.block_group_info):\n depth = self.runtime_depth[stage_id]\n active_idx = block_idx[:depth]\n for idx in active_idx:\n x = self.blocks[idx](x)\n\n x = self.final_expand_layer(x)\n x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling\n x = self.feature_mix_layer(x)\n x = torch.squeeze(x)\n x = self.classifier(x)\n return x\n\n @property\n def module_str(self):\n _str = self.first_conv.module_str + '\\n'\n _str += self.blocks[0].module_str + '\\n'\n\n for stage_id, block_idx in enumerate(self.block_group_info):\n depth = self.runtime_depth[stage_id]\n active_idx = block_idx[:depth]\n for idx in active_idx:\n _str += self.blocks[idx].module_str + '\\n'\n\n _str += self.final_expand_layer.module_str + '\\n'\n _str += self.feature_mix_layer.module_str + '\\n'\n _str += self.classifier.module_str + '\\n'\n return _str\n\n @property\n def config(self):\n return {\n 'name': OFAMobileNetV3.__name__,\n 'bn': self.get_bn_param(),\n 'first_conv': self.first_conv.config,\n 'blocks': [\n block.config for block in self.blocks\n ],\n 'final_expand_layer': self.final_expand_layer.config,\n 'feature_mix_layer': self.feature_mix_layer.config,\n 
'classifier': self.classifier.config,\n }\n\n @staticmethod\n def build_from_config(config):\n raise ValueError('do not support this function')\n\n def load_weights_from_net(self, src_model_dict):\n model_dict = self.state_dict()\n for key in src_model_dict:\n if key in model_dict:\n new_key = key\n elif '.bn.bn.' in key:\n new_key = key.replace('.bn.bn.', '.bn.')\n elif '.conv.conv.weight' in key:\n new_key = key.replace('.conv.conv.weight', '.conv.weight')\n elif '.linear.linear.' in key:\n new_key = key.replace('.linear.linear.', '.linear.')\n ##############################################################################\n elif '.linear.' in key:\n new_key = key.replace('.linear.', '.linear.linear.')\n elif 'bn.' in key:\n new_key = key.replace('bn.', 'bn.bn.')\n elif 'conv.weight' in key:\n new_key = key.replace('conv.weight', 'conv.conv.weight')\n else:\n raise ValueError(key)\n assert new_key in model_dict, '%s' % new_key\n model_dict[new_key] = src_model_dict[key]\n self.load_state_dict(model_dict)\n\n \"\"\" set, sample and get active sub-networks \"\"\"\n\n def set_active_subnet(self, wid=None, ks=None, e=None, d=None):\n width_mult_id = int2list(wid, 4 + len(self.block_group_info))\n ks = int2list(ks, len(self.blocks) - 1)\n expand_ratio = int2list(e, len(self.blocks) - 1)\n depth = int2list(d, len(self.block_group_info))\n\n for block, k, e in zip(self.blocks[1:], ks, expand_ratio):\n if k is not None:\n block.mobile_inverted_conv.active_kernel_size = k\n if e is not None:\n block.mobile_inverted_conv.active_expand_ratio = e\n\n for i, d in enumerate(depth):\n if d is not None:\n self.runtime_depth[i] = min(len(self.block_group_info[i]), d)\n\n def set_constraint(self, include_list, constraint_type='depth'):\n if constraint_type == 'depth':\n self.__dict__['_depth_include_list'] = include_list.copy()\n elif constraint_type == 'expand_ratio':\n self.__dict__['_expand_include_list'] = include_list.copy()\n elif constraint_type == 'kernel_size':\n self.__dict__['_ks_include_list'] = include_list.copy()\n elif constraint_type == 'width_mult':\n self.__dict__['_widthMult_include_list'] = include_list.copy()\n else:\n raise NotImplementedError\n\n def clear_constraint(self):\n self.__dict__['_depth_include_list'] = None\n self.__dict__['_expand_include_list'] = None\n self.__dict__['_ks_include_list'] = None\n self.__dict__['_widthMult_include_list'] = None\n\n def sample_active_subnet(self):\n ks_candidates = self.ks_list if self.__dict__.get('_ks_include_list', None) is None \\\n else self.__dict__['_ks_include_list']\n expand_candidates = self.expand_ratio_list if self.__dict__.get('_expand_include_list', None) is None \\\n else self.__dict__['_expand_include_list']\n depth_candidates = self.depth_list if self.__dict__.get('_depth_include_list', None) is None else \\\n self.__dict__['_depth_include_list']\n\n # sample width_mult\n width_mult_setting = None\n\n # sample kernel size\n ks_setting = []\n if not isinstance(ks_candidates[0], list):\n ks_candidates = [ks_candidates for _ in range(len(self.blocks) - 1)]\n for k_set in ks_candidates:\n k = random.choice(k_set)\n ks_setting.append(k)\n\n # sample expand ratio\n expand_setting = []\n if not isinstance(expand_candidates[0], list):\n expand_candidates = [expand_candidates for _ in range(len(self.blocks) - 1)]\n for e_set in expand_candidates:\n e = random.choice(e_set)\n expand_setting.append(e)\n\n # sample depth\n depth_setting = []\n if not isinstance(depth_candidates[0], list):\n depth_candidates = [depth_candidates for _ 
in range(len(self.block_group_info))]\n for d_set in depth_candidates:\n d = random.choice(d_set)\n depth_setting.append(d)\n\n self.set_active_subnet(width_mult_setting, ks_setting, expand_setting, depth_setting)\n\n return {\n 'wid': width_mult_setting,\n 'ks': ks_setting,\n 'e': expand_setting,\n 'd': depth_setting,\n }\n\n def get_active_subnet(self, preserve_weight=True):\n first_conv = copy.deepcopy(self.first_conv)\n blocks = [copy.deepcopy(self.blocks[0])]\n\n final_expand_layer = copy.deepcopy(self.final_expand_layer)\n feature_mix_layer = copy.deepcopy(self.feature_mix_layer)\n classifier = copy.deepcopy(self.classifier)\n\n input_channel = blocks[0].mobile_inverted_conv.out_channels\n # blocks\n for stage_id, block_idx in enumerate(self.block_group_info):\n depth = self.runtime_depth[stage_id]\n active_idx = block_idx[:depth]\n stage_blocks = []\n for idx in active_idx:\n stage_blocks.append(MobileInvertedResidualBlock(\n self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),\n copy.deepcopy(self.blocks[idx].shortcut)\n ))\n input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels\n blocks += stage_blocks\n\n _subnet = MobileNetV3(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)\n _subnet.set_bn_param(**self.get_bn_param())\n return _subnet\n\n def get_active_net_config(self):\n # first conv\n first_conv_config = self.first_conv.config\n first_block_config = self.blocks[0].config\n if isinstance(self.first_conv, DynamicConvLayer):\n first_conv_config = self.first_conv.get_active_subnet_config(3)\n first_block_config = {\n 'name': MobileInvertedResidualBlock.__name__,\n 'mobile_inverted_conv': self.blocks[0].mobile_inverted_conv.get_active_subnet_config(\n first_conv_config['out_channels']\n ),\n 'shortcut': self.blocks[0].shortcut.config if self.blocks[0].shortcut is not None else None,\n }\n final_expand_config = self.final_expand_layer.config\n feature_mix_layer_config = self.feature_mix_layer.config\n if isinstance(self.final_expand_layer, DynamicConvLayer):\n final_expand_config = self.final_expand_layer.get_active_subnet_config(\n self.blocks[-1].mobile_inverted_conv.active_out_channel)\n feature_mix_layer_config = self.feature_mix_layer.get_active_subnet_config(\n final_expand_config['out_channels'])\n classifier_config = self.classifier.config\n if isinstance(self.classifier, DynamicLinearLayer):\n classifier_config = self.classifier.get_active_subnet_config(self.feature_mix_layer.active_out_channel)\n\n block_config_list = [first_block_config]\n input_channel = first_block_config['mobile_inverted_conv']['out_channels']\n for stage_id, block_idx in enumerate(self.block_group_info):\n depth = self.runtime_depth[stage_id]\n active_idx = block_idx[:depth]\n stage_blocks = []\n for idx in active_idx:\n middle_channel = make_divisible(round(input_channel *\n self.blocks[idx].mobile_inverted_conv.active_expand_ratio), 8)\n stage_blocks.append({\n 'name': MobileInvertedResidualBlock.__name__,\n 'mobile_inverted_conv': {\n 'name': MBInvertedConvLayer.__name__,\n 'in_channels': input_channel,\n 'out_channels': self.blocks[idx].mobile_inverted_conv.active_out_channel,\n 'kernel_size': self.blocks[idx].mobile_inverted_conv.active_kernel_size,\n 'stride': self.blocks[idx].mobile_inverted_conv.stride,\n 'expand_ratio': self.blocks[idx].mobile_inverted_conv.active_expand_ratio,\n 'mid_channels': middle_channel,\n 'act_func': self.blocks[idx].mobile_inverted_conv.act_func,\n 'use_se': 
self.blocks[idx].mobile_inverted_conv.use_se,\n },\n 'shortcut': self.blocks[idx].shortcut.config if self.blocks[idx].shortcut is not None else None,\n })\n input_channel = self.blocks[idx].mobile_inverted_conv.active_out_channel\n block_config_list += stage_blocks\n\n return {\n 'name': MobileNetV3.__name__,\n 'bn': self.get_bn_param(),\n 'first_conv': first_conv_config,\n 'blocks': block_config_list,\n 'final_expand_layer': final_expand_config,\n 'feature_mix_layer': feature_mix_layer_config,\n 'classifier': classifier_config,\n }\n\n \"\"\" Width Related Methods \"\"\"\n\n def re_organize_middle_weights(self, expand_ratio_stage=0):\n for block in self.blocks[1:]:\n block.mobile_inverted_conv.re_organize_middle_weights(expand_ratio_stage)\n\n"
] |
[
[
"torch.squeeze"
]
] |
AutumnWormSun/pyGAT
|
[
"9bd7c5c738b8919153694c9390d92b4c9d99a33b"
] |
[
"layers.py"
] |
[
 "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GraphAttentionLayer(nn.Module):\n \"\"\"\n Simple GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttentionLayer, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\n self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n\n def forward(self, h, adj):\n Wh = torch.mm(h, self.W) # h.shape: (N, in_features), Wh.shape: (N, out_features) i.e. (num nodes, input feature dim); (num nodes, output feature dim)\n a_input = self._prepare_attentional_mechanism_input(Wh) # input matrix for the a(.) mapping, of shape (N, N, 2*out_features)\n e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2)) # eij are the correlation coefficients (attention scores), normalised below; shape (N, N, 1).squeeze(2)\n\n zero_vec = -9e15*torch.ones_like(e)\n # masked attention is implemented via the adjacency matrix; the attention matrix has the same shape as adj,\n # non-zero entries of adj are replaced by eij, which only takes effect for nodes j adjacent to node i\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1) # row-wise softmax over the attention weight matrix\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.matmul(attention, Wh) # weighted sum of the hidden representations using the attention weights\n\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def _prepare_attentional_mechanism_input(self, Wh):\n N = Wh.size()[0] # number of nodes\n\n # Below, two matrices are created that contain embeddings in their rows in different orders.\n # (e stands for embedding)\n # These are the rows of the first matrix (Wh_repeated_in_chunks):\n # e1, e1, ..., e1, e2, e2, ..., e2, ..., eN, eN, ..., eN\n # '-------------' -> N times '-------------' -> N times '-------------' -> N times\n # \n # These are the rows of the second matrix (Wh_repeated_alternating): \n # e1, e2, ..., eN, e1, e2, ..., eN, ..., e1, e2, ..., eN \n # '----------------------------------------------------' -> N times\n \n # repeat each row N times consecutively; every row keeps the same number of elements,\n # but the number of rows becomes N*N, i.e. the result has shape (N*N, out_features)\n Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0) # Whi\n # tile the whole matrix N times along the rows; every row keeps the same number of elements,\n # but the number of rows becomes N*N, i.e. the result has shape (N*N, out_features)\n Wh_repeated_alternating = Wh.repeat(N, 1) # Whj\n # Wh_repeated_in_chunks.shape == Wh_repeated_alternating.shape == (N * N, out_features)\n\n # The all_combinations_matrix, created below, will look like this (|| denotes concatenation):\n # e1 || e1\n # e1 || e2\n # e1 || e3\n # ...\n # e1 || eN\n # e2 || e1\n # e2 || e2\n # e2 || e3\n # ...\n # e2 || eN\n # ...\n # eN || e1\n # eN || e2\n # eN || e3\n # ...\n # eN || eN\n\n all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=1) # this actually computes the concatenated hidden representations Whi||Whj\n # all_combinations_matrix.shape == (N * N, 2 * out_features)\n\n return all_combinations_matrix.view(N, N, 2 * self.out_features) # [N, :, :] stores the Whi||Whj hidden representation of every node i with its neighbour j\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nclass SpecialSpmmFunction(torch.autograd.Function):\n \"\"\"Special function for only sparse region backpropagation layer.\"\"\"\n @staticmethod\n def forward(ctx, indices, values, shape, b):\n assert indices.requires_grad == False\n a = torch.sparse_coo_tensor(indices, values, shape)\n 
ctx.save_for_backward(a, b)\n ctx.N = shape[0]\n return torch.matmul(a, b)\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_values = grad_b = None\n if ctx.needs_input_grad[1]:\n grad_a_dense = grad_output.matmul(b.t())\n edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]\n grad_values = grad_a_dense.view(-1)[edge_idx]\n if ctx.needs_input_grad[3]:\n grad_b = a.t().matmul(grad_output)\n return None, grad_values, None, grad_b\n\n\nclass SpecialSpmm(nn.Module):\n def forward(self, indices, values, shape, b):\n return SpecialSpmmFunction.apply(indices, values, shape, b)\n\n \nclass SpGraphAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(SpGraphAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_normal_(self.W.data, gain=1.414)\n \n self.a = nn.Parameter(torch.zeros(size=(1, 2*out_features)))\n nn.init.xavier_normal_(self.a.data, gain=1.414)\n\n self.dropout = nn.Dropout(dropout)\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n self.special_spmm = SpecialSpmm()\n\n def forward(self, input, adj):\n dv = 'cuda' if input.is_cuda else 'cpu'\n\n N = input.size()[0]\n edge = adj.nonzero().t()\n\n h = torch.mm(input, self.W)\n # h: N x out\n assert not torch.isnan(h).any()\n\n # Self-attention on the nodes - Shared attention mechanism\n edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()\n # edge: 2*D x E\n\n edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))\n assert not torch.isnan(edge_e).any()\n # edge_e: E\n\n e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N,1), device=dv))\n # e_rowsum: N x 1\n\n edge_e = self.dropout(edge_e)\n # edge_e: E\n\n h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)\n assert not torch.isnan(h_prime).any()\n # h_prime: N x out\n \n h_prime = h_prime.div(e_rowsum)\n # h_prime: N x out\n assert not torch.isnan(h_prime).any()\n\n if self.concat:\n # if this layer is not last layer,\n return F.elu(h_prime)\n else:\n # if this layer is last layer,\n return h_prime\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.mm",
"torch.nn.Dropout",
"torch.empty",
"torch.Size",
"torch.nn.functional.dropout",
"torch.cat",
"torch.zeros",
"torch.ones",
"torch.isnan",
"torch.nn.init.xavier_normal_",
"torch.sparse_coo_tensor",
"torch.matmul",
"torch.nn.LeakyReLU",
"torch.where",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.elu",
"torch.ones_like"
]
] |
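The GraphAttentionLayer in the row above materialises an (N, N, 2*out_features) tensor just to score every node pair. The same scores can be obtained by splitting the attention vector `a`, which avoids the N*N intermediate rows; the sketch below assumes the same shapes as that layer (`Wh` of shape (N, out_features), `a` of shape (2*out_features, 1)) and is only an illustration, not code from the repository.

```python
import torch
import torch.nn as nn

def attention_scores(Wh: torch.Tensor, a: torch.Tensor, alpha: float = 0.2) -> torch.Tensor:
    # a^T [Wh_i || Wh_j] = Wh_i . a[:F'] + Wh_j . a[F':], so the (N, N) score matrix
    # is just a column vector plus a row vector, broadcast together.
    f_out = Wh.size(1)
    e_src = Wh @ a[:f_out]      # (N, 1): contribution of node i
    e_dst = Wh @ a[f_out:]      # (N, 1): contribution of node j
    return nn.functional.leaky_relu(e_src + e_dst.T, negative_slope=alpha)

# Usage (hypothetical layer instance): attention_scores(Wh, layer.a, layer.alpha)
# should match layer.leakyrelu(torch.matmul(a_input, layer.a).squeeze(2)) above.
```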
kazuto1011/dusty-gan
|
[
"63ea1757660806cd04976b24fc7733ab26b2a3a1",
"63ea1757660806cd04976b24fc7733ab26b2a3a1"
] |
[
"models/gans/dcgan_eqlr.py",
"evaluate_reconstruction.py"
] |
[
"import models.ops.common as ops\nimport torch\nfrom torch import nn\n\n\nclass Proj(nn.Sequential):\n def __init__(self, in_ch, out_ch, kernel=(4, 16)):\n super().__init__(\n ops.EqualLR(nn.ConvTranspose2d(in_ch, out_ch, kernel, 1, 0, bias=False)),\n ops.FusedLeakyReLU(out_ch),\n )\n\n def forward(self, x):\n h = x[..., None, None]\n h = super().forward(h)\n return h\n\n\nclass Up(nn.Sequential):\n def __init__(self, in_ch, out_ch, ring=True):\n horizontal = \"circular\" if ring else \"reflect\"\n super().__init__(\n ops.Pad(padding=1, horizontal=horizontal, vertical=\"reflect\"),\n ops.EqualLR(nn.ConvTranspose2d(in_ch, out_ch, 4, 2, 1 + 2, bias=False)),\n ops.FusedLeakyReLU(out_ch),\n )\n\n\nclass Head(nn.Module):\n def __init__(self, in_ch, out_ch={\"rgb\": 3}, ring=True):\n super().__init__()\n assert isinstance(out_ch, dict)\n self.in_ch = in_ch\n self.heads = nn.ModuleDict()\n horizontal = \"circular\" if ring else \"reflect\"\n for name, ch in out_ch.items():\n self.heads[name] = nn.Sequential(\n ops.Pad(padding=1, horizontal=horizontal, vertical=\"reflect\"),\n ops.EqualLR(nn.ConvTranspose2d(in_ch, ch, 4, 2, 1 + 2, bias=True)),\n )\n\n def forward(self, x):\n h = {}\n for name, head in self.heads.items():\n h[name] = head(x)\n return h\n\n\nclass Generator(nn.Sequential):\n def __init__(\n self,\n in_ch,\n out_ch,\n ch_base=64,\n ch_max=512,\n shape=(64, 256),\n ring=True,\n ):\n shape_in = (shape[0] >> 4, shape[1] >> 4)\n ch = lambda i: min(ch_base << i, ch_max)\n super().__init__(\n Proj(in_ch, ch(3), shape_in),\n Up(ch(3), ch(2), ring),\n Up(ch(2), ch(1), ring),\n Up(ch(1), ch(0), ring),\n Head(ch(0), out_ch, ring),\n )\n\n def forward(self, latent):\n h = super().forward(latent)\n h[\"depth\"] = torch.tanh(h[\"depth\"])\n return h\n\n\nclass Down(nn.Sequential):\n def __init__(self, in_ch, out_ch, ring=True):\n horizontal = \"circular\" if ring else \"reflect\"\n super().__init__(\n ops.Pad(padding=1, horizontal=horizontal, vertical=\"reflect\"),\n ops.EqualLR(nn.Conv2d(in_ch, out_ch, 4, 2, 0, bias=False)),\n ops.FusedLeakyReLU(out_ch),\n )\n\n\nclass Discriminator(nn.Sequential):\n def __init__(self, in_ch, ch_base=64, ch_max=512, shape=(64, 256), ring=True):\n shape_out = (shape[0] >> 4, shape[1] >> 4)\n ch = lambda i: min(ch_base << i, ch_max)\n super().__init__(\n ops.BlurVH(ring),\n Down(in_ch * 2, ch(0), ring),\n Down(ch(0), ch(1), ring),\n Down(ch(1), ch(2), ring),\n Down(ch(2), ch(3), ring),\n ops.EqualLR(nn.Conv2d(ch(3), 1, shape_out, 1, 0)),\n )\n\n\nif __name__ == \"__main__\":\n d = Discriminator(1)\n x = torch.randn(5, 1, 64, 256)\n y = d(x)\n print(x.shape)\n print(y.shape)\n\n g = Generator(100, {\"depth\": 1, \"confidence\": 2})\n x = torch.randn(5, 100)\n y = g(x)\n print(x.shape)\n for k, v in y.items():\n print(k, v.shape)\n",
"import argparse\nimport datetime\nimport os\nimport os.path as osp\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.nn.parallel import DataParallel as DP\nfrom tqdm import tqdm\n\nimport utils\nfrom datasets import define_dataset\nfrom utils.metrics.cov_mmd_1nna import compute_cd\nfrom utils.metrics.depth import compute_depth_accuracy, compute_depth_error\n\nif __name__ == \"__main__\":\n\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-path\", type=str, required=True)\n parser.add_argument(\"--config-path\", type=str, required=True)\n parser.add_argument(\"--save-dir-path\", type=str, default=\".\")\n parser.add_argument(\"--tol\", type=float, default=0)\n parser.add_argument(\"--batch-size\", type=int, default=512)\n parser.add_argument(\"--distance\", default=\"l1\", choices=[\"l1\", \"l2\"])\n args = parser.parse_args()\n\n cfg, G, lidar, device = utils.setup(\n args.model_path,\n args.config_path,\n ema=True,\n fix_noise=True,\n )\n\n utils.set_requires_grad(G, False)\n G = DP(G)\n\n # hyperparameters\n num_step = 1000\n perturb_latent = True\n noise_ratio = 0.75\n noise_sigma = 1.0\n lr_rampup_ratio = 0.05\n lr_rampdown_ratio = 0.25\n\n # prepare reference\n dataset = define_dataset(cfg.dataset, phase=\"test\")\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=cfg.num_workers,\n drop_last=False,\n )\n\n # -------------------------------------------------------------------\n # utilities\n # -------------------------------------------------------------------\n def preprocess_reals(raw_batch):\n xyz = raw_batch[\"xyz\"].to(device)\n depth = raw_batch[\"depth\"].to(device)\n mask = raw_batch[\"mask\"].to(device).float()\n inv = lidar.invert_depth(depth)\n inv = mask * inv + (1 - mask) * 0.0\n return inv, mask, xyz\n\n # stylegan2's schedule\n def lr_schedule(iteration):\n t = iteration / num_step\n gamma = min(1.0, (1.0 - t) / lr_rampdown_ratio)\n gamma = 0.5 - 0.5 * np.cos(gamma * np.pi)\n gamma = gamma * min(1.0, t / lr_rampup_ratio)\n return gamma\n\n # -------------------------------------------------------------------\n # run inversion\n # -------------------------------------------------------------------\n\n n = 0\n results = defaultdict(list)\n for i, item in enumerate(tqdm(loader)):\n inv_ref, mask_ref, xyz_ref = preprocess_reals(item)\n batch_size_i = len(inv_ref)\n\n # trainable latent code\n latent = torch.randn(batch_size_i, cfg.model.gen.in_ch, device=device)\n latent.div_(latent.pow(2).mean(dim=1, keepdim=True).add(1e-9).sqrt())\n latent = torch.nn.Parameter(latent).requires_grad_()\n\n optim = utils.SphericalOptimizer(params=[latent], lr=0.1)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optim, lr_lambda=lr_schedule)\n\n # optimize the latent\n for current_step in tqdm(range(num_step), leave=False):\n progress = current_step / num_step\n\n # noise\n w = max(0.0, 1.0 - progress / noise_ratio)\n noise_strength = 0.05 * noise_sigma * w ** 2\n noise = noise_strength * torch.randn_like(latent)\n\n # forward G\n out = G(latent + noise if perturb_latent else latent)\n\n if \"dusty\" in cfg.model.gen.arch:\n inv_gen = utils.tanh_to_sigmoid(out[\"depth_orig\"])\n else:\n inv_gen = utils.tanh_to_sigmoid(out[\"depth\"])\n\n # loss\n loss = utils.masked_loss(inv_ref, inv_gen, mask_ref, args.distance)\n\n # per-sample gradients\n optim.zero_grad()\n 
loss.backward(gradient=torch.ones_like(loss))\n optim.step()\n scheduler.step()\n\n # post-processing\n out = utils.postprocess(out, lidar, tol=args.tol)\n points_gen = utils.flatten(out[\"points\"])\n points_ref = utils.flatten(xyz_ref)\n depth_gen = lidar.revert_depth(inv_gen, norm=False)\n depth_ref = lidar.revert_depth(inv_ref, norm=False)\n\n # evaluation\n cd = compute_cd(points_ref, points_gen)\n results[\"cd\"] += cd.tolist()\n accuracies = compute_depth_accuracy(depth_ref, depth_gen, mask_ref)\n results[\"accuracy_1\"] += accuracies[\"accuracy_1\"].tolist()\n results[\"accuracy_2\"] += accuracies[\"accuracy_2\"].tolist()\n results[\"accuracy_3\"] += accuracies[\"accuracy_3\"].tolist()\n errors = compute_depth_error(depth_ref, depth_gen, mask_ref)\n results[\"rmse\"] += errors[\"rmse\"].tolist()\n results[\"rmse_log\"] += errors[\"rmse_log\"].tolist()\n results[\"abs_rel\"] += errors[\"abs_rel\"].tolist()\n results[\"sq_rel\"] += errors[\"sq_rel\"].tolist()\n results[\"tol\"] += [args.tol] * batch_size_i\n\n _, _, H, W = out[\"depth\"].shape\n if \"dusty\" in cfg.model.gen.arch:\n total_drop = (1 - out[\"mask\"]).sum(dim=[1, 2, 3]) / (H * W)\n results[\"drop_gen\"] += total_drop.tolist()\n else:\n mask = (torch.abs(out[\"depth\"] - 0.0) > args.tol).float()\n total_drop = (1 - mask).sum(dim=[1, 2, 3]) / (H * W)\n results[\"drop_gen\"] += total_drop.tolist()\n\n _, _, H, W = mask_ref.shape\n total_drop = (1 - mask_ref).sum(dim=[1, 2, 3]) / (H * W)\n results[\"drop_ref\"] += total_drop.tolist()\n\n n += batch_size_i\n\n # save results\n os.makedirs(args.save_dir_path, exist_ok=True)\n timestamp = datetime.datetime.now().isoformat()\n save_path = osp.join(args.save_dir_path, f\"{timestamp}.csv\")\n df = pd.DataFrame(results)\n df.to_csv(save_path)\n print(f\"Saved: {save_path}\")\n"
] |
[
[
"torch.nn.ConvTranspose2d",
"torch.randn",
"torch.nn.ModuleDict",
"torch.nn.Conv2d",
"torch.tanh"
],
[
"torch.randn_like",
"torch.abs",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.Parameter",
"torch.nn.parallel.DataParallel",
"torch.randn",
"torch.utils.data.DataLoader",
"numpy.cos",
"pandas.DataFrame",
"torch.ones_like"
]
] |
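The inversion script above anneals both the learning rate (StyleGAN2-style ramp) and the latent perturbation noise. The sketch below pulls those two schedules out as standalone functions, with the same constants as the script, so their shapes can be inspected in isolation; it is an illustration, not part of the repository.

```python
import numpy as np

def lr_gamma(step, num_step=1000, rampup_ratio=0.05, rampdown_ratio=0.25):
    # linear ramp-up over the first 5% of steps, cosine ramp-down over the last 25%
    t = step / num_step
    gamma = min(1.0, (1.0 - t) / rampdown_ratio)
    gamma = 0.5 - 0.5 * np.cos(gamma * np.pi)
    return gamma * min(1.0, t / rampup_ratio)

def noise_strength(step, num_step=1000, noise_ratio=0.75, noise_sigma=1.0):
    # quadratic decay of the latent noise, reaching zero after 75% of the steps
    w = max(0.0, 1.0 - (step / num_step) / noise_ratio)
    return 0.05 * noise_sigma * w ** 2

if __name__ == "__main__":
    for s in (0, 50, 500, 900, 999):
        print(s, round(lr_gamma(s), 4), round(noise_strength(s), 4))
```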
dinaatia/gender_novels
|
[
"35158916967fc0f748ce601e1453af6e4eeff7fa"
] |
[
"gender_novels/analysis/visualizations/datagraphs_functions.py"
] |
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom gender_novels.corpus import Corpus\n\n\ndef plt_pubyears(pub_years,corpus_name):\n '''\n Creates a histogram displaying the frequency of books that were published within a 20 year \n period\n :param years: list\n RETURNS a pyplot histogram\n '''\n sns.set_style('ticks')\n sns.color_palette('colorblind')\n ax1=plt.subplot2grid((1,1),(0,0))\n plt.figure(figsize=(10,6))\n bins=[num for num in range(min(pub_years),max(pub_years)+4,5)]\n plt.hist(pub_years,bins,histtype='bar',rwidth=.8,color='c')\n plt.xlabel('Year', size=15,weight='bold',color='k')\n plt.ylabel('Frequency',size=15,weight='bold',color='k')\n plt.title('Publication Year Concentration for '+corpus_name.title(),size=18,weight='bold',\n color='k')\n plt.yticks(size=15,color='k')\n plt.xticks([i for i in range(min(pub_years),max(pub_years)+9,10)],size=15,color='k')\n for label in ax1.xaxis.get_ticklabels():\n label.set_rotation(60)\n plt.subplots_adjust(left=.1,bottom=.18,right=.95,top=.9)\n plt.savefig('date_of_pub_for_'+corpus_name+'.png')\n\ndef plt_pubcountries(pub_country,corpus_name):\n '''\n Creates a bar graph displaying the frequency of books that were published in each country\n :param pub_country: list\n RETURNS a pyplot bargraph\n '''\n sns.set_style('ticks')\n sns.color_palette('colorblind')\n plt.figure(figsize=(10,6))\n ax1=plt.subplot2grid((1,1),(0,0))\n country_counter={}\n totalbooks=0\n for country in pub_country:\n country_counter[country]=country_counter.setdefault(country,0)+1\n totalbooks+=1\n country_counter2={}\n for country in country_counter:\n if country=='':\n pass\n elif country_counter[country]>(.001*totalbooks): #must be higher than .1% of the total books\n # to have its own country name otherwise it is classified under others\n country_counter2[country]=country_counter[country]\n x=[country for country in country_counter2]\n y=[country_counter2[country] for country in country_counter2]\n for label in ax1.xaxis.get_ticklabels():\n label.set_rotation(15)\n plt.bar(x,y,color='c')\n plt.xlabel('Countries',size=15,weight='bold',color='k')\n plt.ylabel('Frequency',size=15,weight='bold',color='k')\n plt.title('Country of Publication for '+corpus_name.title(),size=18,color='k',\n weight='bold')\n plt.xticks(color='k',size=15)\n plt.yticks(color='k',size=15)\n plt.subplots_adjust(left=.1,bottom=.18,right=.95,top=.9)\n plt.savefig('country_of_pub_for_'+corpus_name+'.png')\n\ndef plt_gender_breakdown(pub_gender,corpus_name):\n '''\n Creates a pie chart displaying the composition of male and female writers in the data\n :param pub_gender: list\n :param name_of_data: str\n RETURNS a pie chart\n '''\n sns.set_color_codes('colorblind')\n gendercount={}\n for i in pub_gender:\n if i=='both' or i=='unknown' or i=='Both' or i=='Unknown':\n gendercount['Unknown']=gendercount.setdefault('Unknown',0)+1\n else:\n gendercount[i]=gendercount.setdefault(i,0)+1\n total=0\n for i in gendercount:\n total+=gendercount[i]\n slices=[gendercount[i]/total for i in gendercount]\n genders=[i for i in gendercount]\n labelgenders=[]\n for i in range(len(genders)):\n labelgenders.append((genders[i]+': ' + str(int(round(slices[i],2)*100))+'%').title())\n colors=['c','b','g']\n plt.figure(figsize=(10,6))\n plt.pie(slices,colors=colors,labels=labelgenders,textprops={'fontsize':15})\n plt.title('Gender Breakdown for '+corpus_name.title(),size=18,color='k',weight='bold')\n plt.legend()\n plt.subplots_adjust(left=.1,bottom=.1,right=.9,top=.9)\n 
plt.savefig('gender_breakdown_for_'+corpus_name+'.png')\n\ndef create_corpus_summary_visualizations(corpus_name):\n '''\n Runs through all plt functions given a corpus name\n :param corpus_name: str\n '''\n c = Corpus(corpus_name)\n pubyears=[novel.date for novel in c.novels]\n pubgender=[novel.author_gender for novel in c.novels]\n pubcountry=[novel.country_publication for novel in c.novels]\n corpus_name=corpus_name.replace('_',' ')\n plt_gender_breakdown(pubgender, corpus_name)\n plt_pubyears(pubyears,corpus_name)\n plt_pubcountries(pubcountry,corpus_name)\n\nif __name__=='__main__':\n create_corpus_summary_visualizations('gutenberg')\n create_corpus_summary_visualizations('sample_novels')\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |
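In plt_gender_breakdown above, the percentage labels are assembled by hand with round() and int(), which can drift from the true shares. If only the chart matters, matplotlib's autopct argument computes the labels from the wedge sizes directly; the helper below is a hedged sketch of that variant, not code from the repository.

```python
import matplotlib.pyplot as plt

def gender_pie(gendercount, title, out_path):
    # gendercount: mapping like {'Female': 120, 'Male': 300, 'Unknown': 12}
    labels = [g.title() for g in gendercount]
    sizes = list(gendercount.values())
    plt.figure(figsize=(10, 6))
    plt.pie(sizes, labels=labels, autopct='%1.0f%%', textprops={'fontsize': 15})
    plt.title(title, size=18, weight='bold')
    plt.legend()
    plt.savefig(out_path)
```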
amzn/image-to-recipe-transformers
|
[
"96e257e910c79a5411c3f65f598dd818f72fc262"
] |
[
"src/eval.py"
] |
[
"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nfrom config import get_eval_args\nimport random\nrandom.seed(1234)\nimport os\nimport pickle\nfrom utils.metrics import compute_metrics\nimport argparse\n\n\ndef computeAverageMetrics(imfeats, recipefeats, k, t, forceorder=False):\n \"\"\"Computes retrieval metrics for two sets of features\n\n Parameters\n ----------\n imfeats : np.ndarray [n x d]\n The image features..\n recipefeats : np.ndarray [n x d]\n The recipe features.\n k : int\n Ranking size.\n t : int\n Number of evaluations to run (function returns the average).\n forceorder : bool\n Whether to force a particular order instead of picking random samples\n\n Returns\n -------\n dict\n Dictionary with metric values for all t runs.\n\n \"\"\"\n\n glob_metrics = {}\n i = 0\n for _ in range(t):\n\n if forceorder:\n # pick the same samples in the same order for evaluation\n # forceorder is only True when the function is used during training\n sub_ids = np.array(range(i, i + k))\n i += k\n else:\n sub_ids = random.sample(range(0, len(imfeats)), k)\n imfeats_sub = imfeats[sub_ids, :]\n recipefeats_sub = recipefeats[sub_ids, :]\n\n metrics = compute_metrics(imfeats_sub, recipefeats_sub,\n recall_klist=(1, 5, 10))\n\n for metric_name, metric_value in metrics.items():\n if metric_name not in glob_metrics:\n glob_metrics[metric_name] = []\n glob_metrics[metric_name].append(metric_value)\n return glob_metrics\n\n\ndef eval(args):\n\n # Load embeddings\n with open(args.embeddings_file, 'rb') as f:\n imfeats = pickle.load(f)\n recipefeats = pickle.load(f)\n ids = pickle.load(f)\n ids = np.array(ids)\n\n # sort by name so that we always pick the same samples\n idxs = np.argsort(ids)\n ids = ids[idxs]\n recipefeats = recipefeats[idxs]\n imfeats = imfeats[idxs]\n\n if args.retrieval_mode == 'image2recipe':\n glob_metrics = computeAverageMetrics(imfeats, recipefeats, args.medr_N, args.ntimes)\n else:\n glob_metrics = computeAverageMetrics(recipefeats, imfeats, args.medr_N, args.ntimes)\n\n for k, v in glob_metrics.items():\n print (k + ':', np.mean(v))\n\nif __name__ == \"__main__\":\n\n args = get_eval_args()\n eval(args)\n"
] |
[
[
"numpy.argsort",
"numpy.array",
"numpy.mean"
]
] |
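computeAverageMetrics above delegates the actual ranking to utils.metrics.compute_metrics, which is not shown in this row. As a rough illustration of what such a function typically returns for one sampled subset (median rank and recall@K over cosine similarities), here is a minimal stand-in; the real implementation may differ.

```python
import numpy as np

def retrieval_metrics(queries, targets, recall_klist=(1, 5, 10)):
    # queries, targets: (k, d) arrays where row i of each side is a matching pair
    q = queries / np.linalg.norm(queries, axis=1, keepdims=True)
    t = targets / np.linalg.norm(targets, axis=1, keepdims=True)
    sims = q @ t.T                                    # (k, k) cosine similarities
    order = np.argsort(-sims, axis=1)                 # best candidate first
    ranks = np.argmax(order == np.arange(len(q))[:, None], axis=1) + 1
    out = {'medr': float(np.median(ranks))}
    for k in recall_klist:
        out[f'recall_{k}'] = float(np.mean(ranks <= k))
    return out
```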
zouxlin3/StockDataAnalysis
|
[
"65a8d76a148150b85883c096938ff315a6a4df1b"
] |
[
"StockData.py"
] |
[
"from typing import List\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport calendar\nfrom math import pow\n\n\nclass StockData:\n def __init__(self, path: str): # path为csv数据文件所在路径\n self.path = os.path.normpath(path)\n\n self.filenames = {}\n\n def add_filename(): # 遍历文件夹将csv文件路径加入到filenames\n for filename in os.listdir(self.path):\n filepath = os.path.join(self.path, filename)\n if os.path.isfile(filepath):\n self.filenames[filename[:6]] = filepath\n else:\n add_filename(filepath)\n add_filename()\n\n self.dataframes = {}\n\n def read(self, symbols: List[str]): # dict结构\n for i in symbols:\n filepath = self.__get_filepath(i)\n if filepath:\n self.dataframes[i] = pd.read_csv(filepath)\n self.format_date(i)\n else:\n self.dataframes[i] = pd.DataFrame(columns=('', 'OBJECT_ID', 'S_INFO_WINDCODE', 'TRADE_DT', 'CRNCY_CODE',\n 'S_DQ_PRECLOSE', 'S_DQ_OPEN', 'S_DQ_HIGH', 'S_DQ_LOW', 'S_DQ_CLOSE',\n 'S_DQ_CHANGE', 'S_DQ_PCTCHANGE', 'S_DQ_VOLUME', 'S_DQ_AMOUNT',\n 'S_DQ_ADJPRECLOSE', 'S_DQ_ADJOPEN', 'S_DQ_ADJHIGH', 'S_DQ_ADJLOW',\n 'S_DQ_ADJCLOSE', 'S_DQ_ADJFACTOR', 'S_DQ_AVGPRICE', 'S_DQ_TRADESTATUS'\n , 'OPDATE', 'OPMODE')) # 没有该股票csv时添加空dataframe\n\n def get_data_by_symbol(self, symbol: str, start_date: str, end_date: str):\n data = self.dataframes[symbol]\n\n timedelta = pd.Timedelta(days=1)\n start_date = self.__str2timestamp(start_date)\n end_date = self.__str2timestamp(end_date)\n\n while len(data[(data['TRADE_DT'] == start_date)].index) == 0: # 起始日期没有数据时,向后延\n start_date = start_date + timedelta\n while len(data[(data['TRADE_DT'] == end_date)].index) == 0: # 终止日期没有数据时,向前延\n end_date = end_date - timedelta\n\n start_index = data[(data['TRADE_DT'] == start_date)].index[0] # index返回int64index类型,取[0]即为int\n end_index = data[(data['TRADE_DT'] == end_date)].index[0]\n\n output = data.iloc[start_index:end_index+1]\n output = output.loc(axis=1)['TRADE_DT', 'S_DQ_OPEN', 'S_DQ_HIGH', 'S_DQ_LOW', 'S_DQ_CLOSE']\n output.columns = ['date', 'open', 'high', 'low', 'close']\n\n return output\n\n def get_data_by_date(self, adate: str, symbols: List[str]):\n output = pd.DataFrame(columns=('symbols', 'open', 'high', 'low', 'close'))\n adate = self.__str2timestamp(adate)\n\n for i in symbols:\n data = self.dataframes[i]\n\n date_index = data[(data['TRADE_DT'] == adate)].index\n if len(date_index) == 0: # 该日期没有数据时\n output.append(pd.Series({'symbols': i}), ignore_index=True)\n continue\n\n astock = data.iloc[date_index[0]] # 取该股票日频数据中的一行\n astock = astock.loc(axis=0)['S_INFO_WINDCODE', 'S_DQ_OPEN', 'S_DQ_HIGH', 'S_DQ_LOW', 'S_DQ_CLOSE']\n astock.index = ['symbols', 'open', 'high', 'low', 'close']\n output = output.append(astock) # 不能直接output.append\n\n output['symbols'] = output['symbols'].apply(lambda x: x[:6]) # 取S_INFO_WINDCODE的前6位,即股票代码\n output = output.reset_index(drop=True)\n return output\n\n def get_data_by_field(self, field: str, symbols: List[str]):\n output = pd.DataFrame(columns=['date']+symbols)\n\n for i in symbols:\n data = self.dataframes[i]\n afield = data.loc(axis=1)['TRADE_DT', self.__field_change(field)] # 读取股票i的date和field列数据\n afield.columns = ['date', i]\n\n for j in range(afield.shape[0]): # 遍历afield所有行\n date_index = output[(output['date'] == afield['date'][j])].index\n if len(date_index) == 0: # 判断output中date列有无该时间戳,没有的话添加新的一行\n output = output.append(afield.iloc[j])\n else:\n output.loc[date_index, i] = afield.loc[j, i]\n\n output = output.sort_values(by='date')\n output = output.reset_index(drop=True)\n return output\n\n def plot(self, 
symbol: str, field: str):\n data = self.dataframes[symbol]\n changed_field = self.__field_change(field)\n\n data[changed_field] = data[changed_field].apply(self.__int_remove_nan) # y轴及直方图数据\n y = data[changed_field].values.tolist()\n plt.ylabel(field)\n\n labels = data['TRADE_DT'] # x轴\n x = range(labels.shape[0])\n plt.xlabel('date')\n # plt.xticks(x, labels)\n\n plt.title(symbol)\n\n if field == 'volume' or field == 'turnover':\n plt.bar(x, y, width=0.35, linewidth=0.8, facecolor='tomato', edgecolor='orangered')\n else:\n plt.plot(x, y, c='tomato', linewidth=0.8)\n\n plt.savefig(os.path.join('figures', 'E2.2.jpg'), dpi=200)\n\n def format_date(self, symbol: str):\n self.dataframes[symbol]['TRADE_DT'] = self.dataframes[symbol]['TRADE_DT'].apply(self.__str2timestamp)\n self.dataframes[symbol] = self.dataframes[symbol].sort_values(by='TRADE_DT') # 根据时间排序\n self.dataframes[symbol] = self.dataframes[symbol].reset_index(drop=True)\n\n def adjust_data(self, symbol: str):\n data = self.dataframes[symbol]\n\n rows = data.shape[0]\n data.loc[rows-1, 'forward_af'] = 1.0 # 计算前复权因子\n for i in range(rows-1):\n data.loc[rows-2-i, 'forward_af'] = self.__get_forward_af(data.loc[rows - 2 - i, 'S_DQ_CLOSE'],\n data.loc[rows-1-i, 'S_DQ_PRECLOSE'],\n data.loc[rows-1-i, 'forward_af'])\n\n for i in ['open', 'high', 'low', 'close']: # 对代表价格计算前复权价\n data.loc[rows-1, 'forward_adjust_'+i] = data.loc[rows-1, self.__field_change(i)] # 最新日期的前复权价取原值\n for j in range(rows-1):\n data.loc[rows-2-j, 'forward_adjust_'+i] = self.__forward_adjust_price(data.loc[rows - 2 - j, self.__field_change(i)],\n data.loc[rows-2-j, 'forward_af'],\n data.loc[rows-1-j, 'forward_af'])\n\n self.dataframes[symbol] = data\n\n def resample(self, symbol: str, freq: int):\n data = self.dataframes[symbol]\n output = pd.DataFrame(columns=('date', 'open', 'close', 'high', 'low', 'volume', 'turnover', 'vwap'))\n\n rows = data.shape[0]\n for i in range(rows): # 遍历data每一行\n if (i+1) % freq == 0: # 行数达到freq倍数时进行一次重采样\n clip = data[i+1-freq:i+1] # 选取前freq行\n clip = clip.reset_index(drop=True)\n aline = pd.Series({\n 'date': clip.loc[freq-1, 'TRADE_DT'],\n 'open': clip.loc[0, 'S_DQ_OPEN'],\n 'close': clip.loc[freq-1, 'S_DQ_CLOSE'],\n 'high': max(clip.loc(axis=1)['S_DQ_HIGH'].values.tolist()),\n 'low': min(clip.loc(axis=1)['S_DQ_LOW'].values.tolist()),\n 'volume': round(sum(clip.loc(axis=1)['S_DQ_VOLUME'].values.tolist()), 0),\n 'turnover': sum(clip.loc(axis=1)['S_DQ_AMOUNT'].values.tolist()),\n })\n if aline['volume'] != 0:\n aline['vwap'] = round(aline['turnover']/aline['volume'], 4) # 保留4位小数\n output = output.append(aline, ignore_index=True)\n\n return output\n\n def moving_average(self, symbol: str, field: str, window: int): # 先使用adjust_data\n data = self.dataframes[symbol]\n\n output = data['forward_adjust_'+field].rolling(window).mean()\n output.index = data['TRADE_DT']\n return output\n\n '''\n EMA Calculation\n There are three steps to calculate the EMA. Here is the formula for a 5 Period EMA\n\n 1. Calculate the SMA # values为S_DQ_AVGPRICE\n (Period Values / Number of Periods)\n \n 2. Calculate the Multiplier\n 2 / (Number of Periods + 1)\n\n 3. 
Calculate the EMA\n For the first EMA, we use the SMA(previous day) instead of EMA(previous day).\n EMA = {Close - EMA(previous day)} x multiplier + EMA(previous day)\n '''\n def ema(self, symbol: str, periods: int): # 先使用adjust_data\n data = self.dataframes[symbol]\n\n data['sma_'+str(periods)] = data['S_DQ_AVGPRICE'].rolling(periods).mean() # 计算sma\n multiplier = 2/(periods+1)\n\n data.loc[periods-1, 'ema_'+str(periods)] = data.loc[periods-1, 'sma_'+str(periods)] # 将第一个ema设为同日的sma\n for i in range(data.shape[0]-periods): # 从第二个ema开始计算\n data.loc[i+periods, 'ema_'+str(periods)] = (data.loc[i+periods, 'forward_adjust_close']-data.loc[i+periods-1, 'ema_'+str(periods)])\\\n * multiplier+data.loc[i+periods-1, 'ema_'+str(periods)]\n\n output = data['ema_'+str(periods)]\n output.index = data['TRADE_DT']\n return output\n\n def atr(self, symbol: str, periods: int): # 先使用adjust_data\n data = self.dataframes[symbol]\n\n for i in range(data.shape[0]): # 遍历每一行,计算mtr\n data.loc[i, 'mtr'] = self.__calculate_mtr(data.loc[i, 'forward_adjust_high'], data.loc[i, 'forward_adjust_low'], data.loc[i, 'S_DQ_PRECLOSE'])\n\n data['atr_'+str(periods)] = data['mtr'].rolling(periods).mean() # 过去periods天的mtr平均值\n output = data.loc(axis=1)['atr_'+str(periods)]\n output.index = data[\"TRADE_DT\"]\n return output\n\n def rsi(self, symbol: str, periods: int): # 先使用adjust_data\n data = self.dataframes[symbol]\n\n for i in range(data.shape[0]-periods): # 从第periods行开始遍历每一行\n pctchanges = data.loc[i:i+periods, 'S_DQ_PCTCHANGE'].values.tolist() # 将一个periosd内的pctchange放入列表\n data.loc[i+periods, 'rsi_'+str(periods)] = self.__calculate_rsi(pctchanges) # 计算该periods内的rsi\n\n output = data['rsi_'+str(periods)]\n output.index = data['TRADE_DT']\n return output\n\n '''\n DIF=EMA(short)-EMA(long)\n DEA为DIF移动平均\n MACD=(DIF-DEA)*2\n '''\n def macd(self, symbol: str, long: int, short: int, dea_periods: int): # 先使用adjust_data\n data = self.dataframes[symbol]\n\n self.ema(symbol, long)\n self.ema(symbol, short)\n\n for i in range(data.shape[0]): # 计算dif\n data.loc[i, 'dif'] = data.loc[i, 'ema_'+str(short)] - data.loc[i, 'ema_'+str(long)]\n\n data['dea_'+str(dea_periods)] = data['dif'].rolling(dea_periods).mean() # 计算dea\n\n for i in range(data.shape[0]): # 计算macd\n data.loc[i, 'macd'] = (data.loc[i, 'dif'] - data.loc[i, 'dea_'+str(dea_periods)])\n\n output = data['macd']\n output.index = data['TRADE_DT']\n return output\n\n def calc_return(self, symbol: str, freq: str): # 先使用adjust_data\n dates = self.__get_date_by_freq(symbol, freq) # 该freq下所有时间段的起始终止时间\n data = self.dataframes[symbol]\n output = pd.DataFrame(columns=('date', 'return_'+freq))\n\n for i in range(dates.shape[0]):\n output = output.append(pd.Series({'date': dates.loc[i, 'end']}), ignore_index=True)\n\n start_index = data[(data['TRADE_DT'] == dates.loc[i, 'start'])].index[0] # 用close计算return\n end_index = data[(data['TRADE_DT'] == dates.loc[i, 'end'])].index[0]\n start_close = data.loc[start_index, 'forward_adjust_close']\n end_close = data.loc[end_index, 'forward_adjust_close']\n ret = ((end_close - start_close)/start_close)*100 # 百分比\n\n output.loc[i, 'return'] = round(ret, 4) # 保留4位小数\n return output\n\n def calc_sharpe_ratio(self, symbol: str, freq: str): # 先使用adjust_data\n return_df = self.calc_return(symbol, freq)\n\n std = return_df.std(axis=0)['return']\n mean = return_df.mean(axis=0)['return']\n rfr_year = 0.03\n rfrs = {\n 'm': pow(1+rfr_year, 1/12)-1,\n 'q': pow(1+rfr_year, 1/4)-1,\n 'h': pow(1+rfr_year, 1/2)-1,\n 'y': rfr_year\n } # 不同freq的无风险利率(%)\n\n return 
(mean-rfrs[freq]*100)/std # 夏普比率=(收益率-无风险利率)/收益率方差\n\n def calc_max_drawdown_ratio(self, symbol: str): # 先使用adjust_data\n data = self.dataframes[symbol]\n\n pre_max = 0\n max_drawdown_ratio = 0\n\n for i in range(data.shape[0]): # 遍历data每一行\n close = data.loc[i, 'forward_adjust_close']\n if close > pre_max: # 当日之前的最大值\n pre_max = close\n\n drawdown_ratio = (1-close/pre_max)*100 # 计算当日回撤率\n if drawdown_ratio > max_drawdown_ratio: # 判断最大回撤率\n max_drawdown_ratio = drawdown_ratio\n\n return max_drawdown_ratio\n\n def __get_filepath(self, stock: str): # 判断该股票csv是否存在,返回csv路径\n filepath = self.filenames.get(stock)\n if filepath:\n return filepath\n else:\n return False\n\n def __str2timestamp(self, date: str):\n date = int(date)\n year = date//10000\n month = (date % 10000)//100\n day = date % 100\n return pd.Timestamp(year, month, day)\n\n def __field_change(self, field):\n fields = {\n 'open': 'S_DQ_OPEN',\n 'high': 'S_DQ_HIGH',\n 'low': 'S_DQ_LOW',\n 'close': 'S_DQ_CLOSE',\n 'vwap': 'S_DQ_VWAP',\n 'volume': 'S_DQ_VOLUME',\n 'turnover': 'S_DQ_AMOUNT',\n 'preclose': 'S_DQ_PRECLOSE'\n }\n\n changed_field = fields.get(field)\n if changed_field:\n return changed_field\n else:\n return field\n\n def __int_remove_nan(self, value: str): # str2int, 将NaN设为0\n if value != value:\n return 0\n else:\n return int(value)\n\n # close:当天收盘价 preclose:明天前收 foraf:明天复权因子\n def __get_forward_af(self, close: float, preclose: float, foraf: float):\n return round((preclose/close)*foraf, 5) # 最多5位小数\n\n # price:当天价格\n def __forward_adjust_price(self, price: float, today_af: float, torm_af: float):\n return price*today_af/torm_af\n\n def __calculate_mtr(self, high: float, low: float, preclose: float):\n return max(high-low, abs(preclose-low), abs(high-preclose))\n\n '''\n RSI指标的计算公式:RSI = [上升平均数÷(上升平均数+下跌平均数)]×100\n (其中,上升平均数是某一时期内升幅数的平均;而下跌平均数则是同一时期内跌幅数的平均)\n '''\n def __calculate_rsi(self, pctchanges: List[float]):\n down_sum = 0.0\n up_sum = 0.0\n down_num = 0\n up_num = 0\n\n for i in pctchanges:\n if i > 0:\n up_num = up_num + 1\n up_sum = up_sum + i\n if i < 0:\n down_num = down_num + 1\n down_sum = down_sum + i\n\n if up_num == 0:\n if down_num == 0:\n return np.nan\n return 0\n else:\n up_avg = up_sum/up_num\n if down_num == 0:\n return 100\n else:\n down_avg = down_sum/down_num\n\n return (up_avg/(up_avg+abs(down_avg)))*100\n\n def __get_end_date(self, symbol: str, freq: str, start_date): # 获取不同freq下时间段的最后一天\n if freq == 'm':\n days_of_month = calendar.monthrange(start_date.year, start_date.month)[1]\n end_date = pd.Timestamp(start_date.year, start_date.month, days_of_month)\n\n if freq == 'q':\n end_month = start_date.month\n while end_month != 3 and end_month != 6 and end_month != 9 and end_month != 12: # 判断开始日期所属季节的最后一月\n end_month = end_month + 1\n days_of_month = calendar.monthrange(start_date.year, end_month)[1]\n end_date = pd.Timestamp(start_date.year, end_month, days_of_month)\n\n if freq == 'h':\n if start_date.month <= 6: # 判断开始日期属于上半年还是下半年\n end_date = pd.Timestamp(start_date.year, 6, 30)\n else:\n end_date = pd.Timestamp(start_date.year, 12, 31)\n\n if freq == 'y':\n end_date = pd.Timestamp(start_date.year, 12, 31)\n\n data = self.dataframes[symbol]\n timedelta = pd.Timedelta(days=1)\n while len(data[(data['TRADE_DT'] == end_date)].index) == 0: # 该日期没有数据时,往前延一天\n end_date = end_date - timedelta\n\n return end_date\n\n def __get_date_by_freq(self, symbol: str, freq: str): # 返回该freq下所有时间段的start_date和end_date\n output = pd.DataFrame(columns=('start', 'end'))\n data = self.dataframes[symbol]\n\n 
output.loc[0, 'start'] = data.loc[0, 'TRADE_DT']\n output.loc[0, 'end'] = self.__get_end_date(symbol, freq, output.loc[0, 'start'])\n\n data_rows = data.shape[0]\n output_rows = output.shape[0]\n\n while output.loc[output_rows-1, 'end'] != data.loc[data_rows-1, 'TRADE_DT']:\n last_end_index = data[(data['TRADE_DT'] == output.loc[output_rows-1, 'end'])].index[0] # 上一个enddate即下一个startdate\n output = output.append(pd.Series({'start': output.loc[output_rows-1, 'end']}), ignore_index=True)\n output_rows = output.shape[0]\n output.loc[output_rows-1, 'end'] = self.__get_end_date(symbol, freq, data.loc[last_end_index+1, 'TRADE_DT']) # 用startdate的下一天计算该freq下的enddate\n\n return output\n"
] |
[
[
"pandas.read_csv",
"pandas.Series",
"matplotlib.pyplot.title",
"pandas.Timedelta",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"pandas.Timestamp",
"matplotlib.pyplot.ylabel"
]
] |
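The EMA docstring in StockData.ema above describes the recurrence with multiplier 2/(N+1). That is exactly what pandas implements via ewm(span=N, adjust=False); the one difference is that the class seeds the first EMA with an SMA, so the earliest values differ slightly. A minimal sketch for comparison (assumed column name, not repository code):

```python
import pandas as pd

def ema(close: pd.Series, periods: int) -> pd.Series:
    # alpha = 2 / (periods + 1), i.e. the "multiplier" from the docstring above;
    # pandas seeds the recursion with the first observation rather than the SMA.
    return close.ewm(span=periods, adjust=False).mean()

# Usage: ema(df['forward_adjust_close'], 12) versus StockData.ema(symbol, 12)
```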
yxgeee/BAKE
|
[
"07c4f668ea19311d5b50121026e73d2f035d5765"
] |
[
"small_scale/train.py"
] |
[
"from __future__ import print_function\r\n\r\nimport argparse\r\nimport csv\r\nimport os, logging\r\nimport random\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch.autograd import Variable, grad\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport torchvision.transforms as transforms\r\n\r\nimport models\r\nfrom utils import progress_bar, set_logging_defaults\r\nfrom datasets import load_dataset\r\n\r\nparser = argparse.ArgumentParser(description='Small-scale Datasets Training')\r\n\r\nparser.add_argument('--name', default='cifar_res18_train', type=str, help='name of experiment')\r\nparser.add_argument('--seed', default=0, type=int, help='random seed')\r\nparser.add_argument('--arch', '-a', default=\"CIFAR_ResNet18\", type=str, help='model type (32x32: CIFAR_ResNet18, CIFAR_DenseNet121, 224x224: resnet18, densenet121)')\r\nparser.add_argument('--resume', '-r', default=\"\", help='resume from checkpoint')\r\nparser.add_argument('--eval', action='store_true', help='only evaluate')\r\nparser.add_argument('--sgpu', default=0, type=int, help='gpu index (start)')\r\nparser.add_argument('--ngpu', default=1, type=int, help='number of gpu')\r\nparser.add_argument('--dataroot', default='./data', type=str, help='data directory')\r\nparser.add_argument('--saveroot', default='./results', type=str, help='save directory')\r\nparser.add_argument('--dataset', '-d', default='cifar100', type=str, help='the name for dataset cifar100 | tinyimagenet | CUB200 | STANFORD120 | MIT67')\r\n\r\nparser.add_argument('--epoch', default=200, type=int, help='total epochs to run')\r\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\r\nparser.add_argument('--decay', default=1e-4, type=float, help='weight decay')\r\nparser.add_argument('--batch-size', '-n', default=128, type=int, help='batch size, N')\r\nparser.add_argument('--intra-imgs', '-m', default=3, type=int, help='intra-class images, M')\r\n\r\nparser.add_argument('--temp', default=4.0, type=float, help='temperature scaling')\r\nparser.add_argument('--lamda', default=1.0, type=float, help='kd loss weight ratio')\r\nparser.add_argument('--omega', default=0.5, type=float, help='ensembling weight')\r\n\r\nargs = parser.parse_args()\r\nuse_cuda = torch.cuda.is_available()\r\nargs.num_instances = args.intra_imgs + 1\r\nargs.batch_size = args.batch_size // args.num_instances\r\n\r\nbest_val = 0 # best validation accuracy\r\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\r\n\r\nrandom.seed(args.seed)\r\nnp.random.seed(args.seed)\r\ntorch.manual_seed(args.seed)\r\ntorch.cuda.manual_seed_all(args.seed)\r\n\r\ncudnn.benchmark = True\r\n\r\n# Data\r\nprint('==> Preparing dataset: {}'.format(args.dataset))\r\ntrainloader, valloader = load_dataset(args.dataset, args.dataroot,\r\n\t\t\t\t\t\t\t\t\tbatch_size=args.batch_size,\r\n\t\t\t\t\t\t\t\t\tnum_instances=args.num_instances)\r\n\r\nnum_class = trainloader.dataset.num_classes\r\nprint('Number of train dataset: ' ,len(trainloader.dataset))\r\nprint('Number of validation dataset: ' ,len(valloader.dataset))\r\n\r\n# Model\r\nprint('==> Building model: {}'.format(args.arch))\r\nnet = models.load_model(args.arch, num_class)\r\n\r\nif use_cuda:\r\n torch.cuda.set_device(args.sgpu)\r\n net.cuda()\r\n print(torch.cuda.device_count())\r\n print('Using CUDA..')\r\n\r\nif args.ngpu > 1:\r\n net = torch.nn.DataParallel(net, device_ids=list(range(args.sgpu, args.sgpu + args.ngpu)))\r\n\r\noptimizer = 
optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.decay)\r\n\r\nlogdir = os.path.join(args.saveroot, args.dataset, args.arch, args.name)\r\nset_logging_defaults(logdir, args)\r\nlogger = logging.getLogger('main')\r\nlogname = os.path.join(logdir, 'log.csv')\r\n\r\n# Resume\r\nif args.resume:\r\n # Load checkpoint.\r\n print('==> Resuming from checkpoint..')\r\n checkpoint = torch.load(args.resume)\r\n net.load_state_dict(checkpoint['net'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n best_acc = checkpoint['acc']\r\n start_epoch = checkpoint['epoch'] + 1\r\n rng_state = checkpoint['rng_state']\r\n torch.set_rng_state(rng_state)\r\n\r\ncriterion = nn.CrossEntropyLoss()\r\n\r\nclass KDLoss(nn.Module):\r\n def __init__(self, temp_factor):\r\n super(KDLoss, self).__init__()\r\n self.temp_factor = temp_factor\r\n self.kl_div = nn.KLDivLoss(reduction=\"sum\")\r\n\r\n def forward(self, input, target):\r\n log_p = torch.log_softmax(input/self.temp_factor, dim=1)\r\n loss = self.kl_div(log_p, target)*(self.temp_factor**2)/input.size(0)\r\n return loss\r\n\r\nkdloss = KDLoss(args.temp)\r\nsoftmax = nn.Softmax(dim=1)\r\n\r\ndef knowledge_ensemble(feats, logits):\r\n batch_size = logits.size(0)\r\n masks = torch.eye(batch_size)\r\n if use_cuda:\r\n masks = masks.cuda()\r\n feats = nn.functional.normalize(feats, p=2, dim=1)\r\n logits = nn.functional.softmax(logits/args.temp, dim=1)\r\n W = torch.matmul(feats, feats.permute(1, 0)) - masks * 1e9\r\n W = softmax(W)\r\n W = (1 - args.omega) * torch.inverse(masks - args.omega * W)\r\n return torch.matmul(W, logits)\r\n\r\ndef train(epoch):\r\n\tprint('\\nEpoch: %d' % epoch)\r\n\tnet.train()\r\n\ttrain_ce_loss = 0\r\n\tcorrect = 0\r\n\ttotal = 0\r\n\ttrain_kd_loss = 0\r\n\tfor batch_idx, (inputs, targets) in enumerate(trainloader):\r\n\t\tif use_cuda:\r\n\t\t\tinputs, targets = inputs.cuda(), targets.cuda()\r\n\r\n\t\tbatch_size = inputs.size(0)\r\n\r\n\t\tfeatures, outputs = net(inputs)\r\n\t\tloss = criterion(outputs, targets)\r\n\t\ttrain_ce_loss += loss.item()\r\n\r\n\t\t############\r\n\t\twith torch.no_grad():\r\n\t\t\tkd_targets = knowledge_ensemble(features.detach(), outputs.detach())\r\n\t\tkd_loss = kdloss(outputs, kd_targets.detach())\r\n\t\tloss += args.lamda * kd_loss\r\n\t\ttrain_kd_loss += kd_loss.item()\r\n\t\t############\r\n\r\n\t\t_, predicted = torch.max(outputs, 1)\r\n\t\ttotal += targets.size(0)\r\n\t\tcorrect += predicted.eq(targets.data).sum().float().cpu()\r\n\r\n\t\toptimizer.zero_grad()\r\n\t\tloss.backward()\r\n\t\toptimizer.step()\r\n\t\tprogress_bar(batch_idx, len(trainloader),\r\n\t\t\t'CE loss: %.3f | KD loss: %.3f | Acc: %.3f%% (%d/%d)'\r\n\t\t % (train_ce_loss/(batch_idx+1), train_kd_loss/(batch_idx+1), 100.*correct/total, correct, total))\r\n\r\n\tlogger = logging.getLogger('train')\r\n\tlogger.info('[Epoch {}] [CE loss {:.3f}] [KD loss {:.3f}] [Acc {:.3f}]'.format(\r\n\t\t\t\tepoch,\r\n\t\t\t\ttrain_ce_loss/(batch_idx+1),\r\n\t\t\t\ttrain_kd_loss/(batch_idx+1),\r\n\t\t\t\t100.*correct/total))\r\n\r\n\treturn 100.*correct/total\r\n\r\ndef val(epoch):\r\n global best_val\r\n net.eval()\r\n val_loss = 0.0\r\n correct = 0.0\r\n total = 0.0\r\n\r\n # Define a data loader for evaluating\r\n loader = valloader\r\n\r\n with torch.no_grad():\r\n for batch_idx, (inputs, targets) in enumerate(loader):\r\n if use_cuda:\r\n inputs, targets = inputs.cuda(), targets.cuda()\r\n\r\n _, outputs = net(inputs)\r\n loss = torch.mean(criterion(outputs, targets))\r\n\r\n val_loss += loss.item()\r\n _, 
predicted = torch.max(outputs, 1)\r\n total += targets.size(0)\r\n correct += predicted.eq(targets.data).cpu().sum().float()\r\n\r\n progress_bar(batch_idx, len(loader),\r\n 'Loss: %.3f | Acc: %.3f%% (%d/%d) '\r\n % (val_loss/(batch_idx+1), 100.*correct/total, correct, total))\r\n\r\n acc = 100.*correct/total\r\n if acc > best_val:\r\n best_val = acc\r\n checkpoint(acc, epoch)\r\n logger = logging.getLogger('val')\r\n logger.info('[Epoch {}] [Loss {:.3f}] [Acc {:.3f}] [Best Acc {:.3f}]'.format(\r\n epoch,\r\n val_loss/(batch_idx+1),\r\n acc, best_val))\r\n\r\n return (val_loss/(batch_idx+1), acc)\r\n\r\n\r\ndef checkpoint(acc, epoch):\r\n # Save checkpoint.\r\n print('Saving..')\r\n state = {\r\n 'net': net.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n 'rng_state': torch.get_rng_state()\r\n }\r\n torch.save(state, os.path.join(logdir, 'ckpt.t7'))\r\n\r\n\r\ndef adjust_learning_rate(optimizer, epoch):\r\n \"\"\"decrease the learning rate at 100 and 150 epoch\"\"\"\r\n lr = args.lr\r\n if epoch >= 0.5 * args.epoch:\r\n lr /= 10\r\n if epoch >= 0.75 * args.epoch:\r\n lr /= 10\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\nif (not args.eval):\r\n\t# Logs\r\n\tfor epoch in range(start_epoch, args.epoch):\r\n\t train_acc = train(epoch)\r\n\t val_loss, val_acc = val(epoch)\r\n\t adjust_learning_rate(optimizer, epoch)\r\nelse:\r\n\tval_loss, val_acc = val(0)\r\n\r\nprint(\"Best Accuracy : {}\".format(best_val))\r\nlogger = logging.getLogger('best')\r\nlogger.info('[Acc {:.3f}]'.format(best_val))\r\n"
] |
[
[
"torch.set_rng_state",
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.max",
"torch.load",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.nn.CrossEntropyLoss",
"torch.eye",
"torch.inverse",
"torch.get_rng_state",
"torch.cuda.device_count",
"torch.nn.functional.normalize",
"torch.nn.KLDivLoss",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.log_softmax",
"torch.matmul"
]
] |
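The knowledge_ensemble function above builds soft targets as (1 - omega) * (I - omega * W)^-1 @ q, where W is the intra-batch affinity matrix and q the softened predictions. That closed form equals the geometric series (1 - omega) * sum_k omega^k W^k @ q, i.e. predictions repeatedly propagated through the batch affinities. A small numerical check of that equivalence (toy sizes, not repository code):

```python
import torch

torch.manual_seed(0)
B, C, omega = 8, 5, 0.5
W = torch.softmax(torch.randn(B, B).fill_diagonal_(-1e9), dim=1)  # row-stochastic, no self-affinity
q = torch.softmax(torch.randn(B, C), dim=1)                       # softened predictions

closed = (1 - omega) * torch.inverse(torch.eye(B) - omega * W) @ q

series = torch.zeros_like(q)
P = torch.eye(B)
for k in range(200):                      # truncate the geometric series
    series += (1 - omega) * (omega ** k) * (P @ q)
    P = P @ W

print(torch.allclose(closed, series, atol=1e-5))  # expected: True
```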
spacecataz/HydroQuebecRemix
|
[
"5dc0a88a55def420728029255e241f13fb8c8d38"
] |
[
"code/swtools.py"
] |
[
"# Tools for working with IMP8/ISEE data\nimport os\nimport datetime as dt\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport spacepy.datamodel as dm\nimport spacepy.time as spt\nimport spacepy.datamanager as dman\nimport spacepy.plot as splot\n\n\ndef readIMP8plasmafile(fname):\n \"\"\"\n ftp://space.mit.edu/pub/plasma/imp/fine_res/1989/\n\n\n yr doy hh mm ss sc decimal yr rg md xse yse zse ysm zsm speed thermal speed density E/W angle N/S angle\n mom nonlin mom nonlin mom nonlin mom best thresh threshs\n \"\"\"\n header = []\n with open(fname, 'r') as fh:\n while True:\n pos = fh.tell()\n line = fh.readline().strip()\n if not line:\n # empty line, skip\n continue\n if not line[0].isdigit():\n # this is header, save it\n header.append(line)\n else:\n # first line of data, roll back to start and pass to numpy\n fh.seek(pos)\n break\n data = np.loadtxt(fh)\n\n def toDate(yr, doy, hh, mm, ss):\n MM, DD = spt.doy2date(yr, doy)\n dates = dm.dmfilled(len(yr), fillval=None, dtype=object)\n for idx, (mon, day) in enumerate(zip(MM, DD)):\n dates[idx] = dt.datetime(yr[idx], mon, day, hh[idx], mm[idx], ss[idx])\n return dates\n\n region = data[:, 7]\n outdata = dm.SpaceData(attrs={'header': header, 'fname': fname})\n outdata['time'] = toDate(data[:, 0].astype(int), data[:, 1].astype(int),\n data[:, 2].astype(int), data[:, 3].astype(int),\n data[:, 4].astype(int))\n outdata['region'] = dm.dmarray(region)\n outdata['pos_gse'] = dm.dmarray(data[:, 9:12], attrs={'coord_sys': 'gse'})\n outdata['pos_gsm'] = dm.dmfilled(outdata['pos_gse'].shape, fillval=0,\n dtype=float, attrs={'coord_sys': 'gsm'})\n outdata['pos_gsm'][:, 0] = data[:, 9]\n outdata['pos_gsm'][:, 1:] = data[:, 12:14]\n outdata['speed'] = dm.dmarray(data[:, 14],\n attrs={'description': 'speed from moments'})\n # outdata['speed'][region > 2] = np.nan # region 3 is sheath\n outdata['speed_nl'] = dm.dmarray(data[:, 15])\n vmask = outdata['speed_nl'] >= 9000\n # outdata['speed_nl'][region > 2] = np.nan # region 3 is sheath\n outdata['speed_nl'][vmask] = np.nan # region 3 is sheath\n outdata['n_dens'] = dm.dmarray(data[:, 18],\n attrs={'description': 'number density from moments'})\n outdata['n_dens_nl'] = dm.dmarray(data[:, 19])\n outdata['temp'] = 60.5*dm.dmarray(data[:, 16])**2\n outdata['temp_nl'] = 60.5*dm.dmarray(data[:, 17])**2\n outdata['data'] = data\n return outdata\n\n\ndef readIMPplasmaLANL(fname):\n \"\"\"\n https://spdf.gsfc.nasa.gov/pub/data/imp/imp8/plasma_lanl/solarwind_2min/data/\n 13 Flow Speed AP E11.4 Relative Flow Speed km/sec\n space X1\n 14 Flow Azimuth AP E11.4 Relative Flow Angle deg\n space X1\n 15 TRatio E11.4 Alpha Temp/Proton Temp\n space X1\n 16 Temp E11.4 Alpha Temp Anisotropy\n space X1\n 17 PSIT E11.4 Alpha Pressure deg\n Asymmetry Axis\n \"\"\"\n with open(fname, 'r') as fh:\n # first line of data, roll back to start and read all\n data = fh.readlines()\n\n def toDate(ymd, hms):\n dates = dm.dmfilled(len(ymd), fillval=None, dtype=object)\n for idx, (p1, p2) in enumerate(zip(ymd, hms)):\n yr = p1//10000\n mon = (p1 - yr*10000)//100\n day = p1 - (yr*10000 + mon*100)\n hh = p2//10000\n mm = (p2 - hh*10000)//100\n ss = int(p2 - (hh*10000 + mm*100))\n dates[idx] = dt.datetime(int(yr)+1900, int(mon), int(day)) +\\\n dt.timedelta(hours=int(hh), minutes=int(mm), seconds=ss)\n return dates\n data = np.array([line.strip().split()[1:] for line in data], dtype=float)\n outdata = dm.SpaceData()\n outdata['time'] = toDate(data[:, 0], data[:, 2])\n outdata['n_dens'] = dm.dmarray(data[:, 6])\n outdata['speed'] = 
dm.dmarray(data[:, 7])\n outdata['speed_rel'] = dm.dmarray(data[:, 13])\n outdata['alpha_proton_ratio'] = dm.dmarray(data[:, 12])\n return outdata\n\n\ndef readISEEmag(fname):\n \"\"\"Read an ISEE magnetometer data file\n\n Data source: https://spdf.sci.gsfc.nasa.gov/pub/data/isee/isee3/magnetic_fields/1min_ascii_extracted/1min_hgi_1984_1990/\n \"\"\"\n data = np.loadtxt(fname)\n\n def toDate(yr, doy, hh, mm):\n MM, DD = spt.doy2date(yr, doy)\n dates = dm.dmfilled(len(yr), fillval=None, dtype=object)\n for idx, (mon, day) in enumerate(zip(MM, DD)):\n dates[idx] = dt.datetime(yr[idx], mon, day, hh[idx], mm[idx])\n return dates\n outdata = dm.SpaceData()\n outdata['time'] = dm.dmarray(toDate(data[:, 0].astype(int), data[:, 1].astype(int),\n data[:, 2].astype(int), data[:, 3].astype(int)))\n outdata['B'] = dm.dmarray(data[:, 4:7], attrs={'coord_sys': 'GSE'})\n # replace bad values with NaN fill\n outdata['B'][outdata['B'] == 999.9] = np.nan\n return outdata\n\n\ndef plotEvent(st=dt.datetime(1989, 3, 11), en=dt.datetime(1989, 3, 15)):\n datapath = os.path.abspath(os.path.join('..', 'ref_data', '1989'))\n # When plotting, use spacepy.datamanager to insert fill between contiguous regions\n fig, ax = plt.subplots(5, sharex=True, figsize=(12, 8))\n # Do MIT plasma\n mit_pl = readIMP8plasmafile(os.path.join(datapath, 'imp8.data.1989.060.090'))\n mit_t, mit_sp = dman.insert_fill(np.asarray(mit_pl['time']), np.asarray(mit_pl['speed']))\n ax[0].plot(mit_t, mit_sp, label='MIT plasma')\n mit_t, mit_nd = dman.insert_fill(np.asarray(mit_pl['time']), np.asarray(mit_pl['n_dens']))\n ax[1].plot(mit_t, mit_nd, label='MIT plasma')\n\n # Do LANL plasma\n lanl_pl = readIMPplasmaLANL(os.path.join(datapath, '198903_imp8_lanl_sw_2min.asc'))\n lanl_t, lanl_sp = dman.insert_fill(np.asarray(lanl_pl['time']), np.asarray(lanl_pl['speed']))\n ax[0].plot(lanl_t, lanl_sp, label='LANL plasma')\n lanl_t, lanl_nd = dman.insert_fill(np.asarray(lanl_pl['time']), np.asarray(lanl_pl['n_dens']))\n ax[1].plot(lanl_t, lanl_nd, label='LANL plasma')\n\n # Do IMF from ISEE-3\n isee_ma = readISEEmag(os.path.join(datapath, '198903_isee3_mag03_1min.asc'))\n ax[2].plot(isee_ma['time'], isee_ma['B'][:, 0], label='ISEE-3 Bx')\n ax[2].plot(isee_ma['time'], isee_ma['B'][:, 1], label='ISEE-3 By')\n ax[2].plot(isee_ma['time'], isee_ma['B'][:, 2], label='ISEE-3 Bz')\n ax[2].axhline(linestyle='--', color=(0.3, 0.3, 0.3))\n\n # Add stuff from burton_test\n from burton import invert_example\n invert_example(axes=[ax[3], ax[4]], show=False)\n\n # Finalize\n ax[0].legend(loc='upper left', fancybox=True, framealpha=0.5)\n ax[0].set_ylabel('Speed\\n[km/s]')\n ax[1].legend(loc='upper left', fancybox=True, framealpha=0.5)\n ax[1].set_ylabel('Number Density\\n[cm$^{-3}$]')\n ax[2].legend(loc='upper left', fancybox=True, framealpha=0.5)\n ax[2].set_ylabel('ISEE3\\nIMF [nT]')\n ax[2].set_ylim([-15, 15])\n splot.applySmartTimeTicks(ax[2], [st, en], dolabel=False)\n ax[2].set_xlim([st, en])\n plt.show()\n\n\ndef plotOrbits(st=dt.datetime(1989, 3, 11), en=dt.datetime(1989, 3, 15)):\n # Need to show orbits of IMP-8 and ISEE-3, along with estimated MP and bow shock locations...\n pass\n"
] |
[
[
"numpy.asarray",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.loadtxt"
]
] |
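readIMP8plasmafile above converts the reported thermal speed (km/s) to a temperature with temp = 60.5 * w**2. Assuming the usual definition T = m_p * w^2 / (2 * k_B) for the proton thermal speed, the 60.5 factor falls out of the unit conversion; the short check below only illustrates that assumption.

```python
# proton mass and Boltzmann constant (SI)
m_p = 1.67262192e-27   # kg
k_B = 1.380649e-23     # J/K

factor = m_p / (2 * k_B) * 1e6   # 1e6 converts (km/s)^2 to (m/s)^2
print(round(factor, 1))          # ~60.6 K per (km/s)^2, close to the 60.5 used above
```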
xinyuhuang97/LU3IN003-Projet
|
[
"c0b80752a6e4454108d0e300a344e2b985c325d2"
] |
[
"projet/methode_generalise.py"
] |
[
"import numpy as np\nM=4\nN=4\n\ngrille=np.full((M,N), -1)\ngrille[1][0]=0\nsequence1=[1,1]\nsequence2=[2,1]\n# non-colore -1\n# blanche 0\n# noire 1\n\n\"\"\"def annalyse_ligne(grille ,i):\n j=0\n case_j=-1\n nb_block=0\n while( j<N and grille[i][j]!=-1 ):\n if grille[i][j]==1:\n case_j=j\n else:\n if grille[i][j]==0 and case_j!=-1:\n nb_block=nb_block+1\n j=j+1\n if case_j!=-1 and nb_block==0:\n nb_block=1\n return [case_j, nb_block]\n\"\"\"\n\ndef compare_block(grille, i, j, sl):\n while(grille[i][j]!=-1 and sl>0):\n if grille[i][j]==0:\n return False\n j=j+1\n sl=sl-1\n return True\n\ndef coloriage_possible(grille, sequence, i, j, l):\n # problem de syntaxe\n # cas a: si l depasse le nb d'element de la sequence, inviolement de syntaxe\n # cas b, i n'est pas compris entre 0 et N-1, inviolement de syntaxe\n # cas c, j < 0 , inviolement de syntaxe\n\n if (len(sequence)<l) or (i<0) or (i>N-1) or(j<0):\n return False\n\n # cas 1 : l=0:\n # -si j=0, vrai\n # -sinon faux\n if (l==0):\n if (j==0):\n return True\n return False\n else:\n val=sequence[l-1]\n # cas 2a : si j < sl -1\n if (j<(sequence[l-1]-1)):\n return False\n # cas 2b : si j == sl-1\n # -si l == 1, vrai\n # -sinon faux\n elif (j==(sequence[l-1]-1)):\n cpt=j\n bool=0\n while(j>=0):\n if grille[i][j]!=1:\n bool=1\n break\n if l==1 and bool==0:\n return True\n return False\n else:\n #cas 2c\n return coloriage_possible_rec(grille, sequence, i, j, l, -1 )#, case_j ,nb_block)\n\ndef coloriage_possible_rec(grille, sequence, i, j, l, check):#, case_j ,nb_block):\n if (l==0) and (j>=0):\n return True\n if j<0:\n return False\n # Pour la premiere iteration, on ne sait pas si c'est une case blanche ou noire\n compare_1=compare_block(grille, i, j-sequence[l-1]-1, sequence[l-1])\n compare_2=compare_block(grille, i, j-sequence[l-1], sequence[l-1])\n print(grille)\n if check ==-1:\n if grille[i][j]==-1:\n if grille[i][j-sequence[l-1]-1]==1 and compare_1:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1, 0)\n elif grille[i][j-sequence[l-1]]==1 and compare_2:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0)\n elif not (compare_1 or compare_2):\n return False\n else:\n if grille[i][j-sequence[l-1]-1]==0:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0)\n else:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0) or coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1, 0)\n elif grille[i][j]==1:\n if compare_2:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0)\n else:\n return False\n elif grille[i][j]==0:\n if compare_1:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]-1), l-1 ,0)\n else:\n return False\n else:\n print(\"Syntaxe erreur valeur different que -1 0 1\")\n exit()\n else:\n if grille[i][j]==-1:\n if grille[i][j-sequence[l-1]-1]==1 and compare_1:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1, 0)\n elif grille[i][j-sequence[l-1]]==1 and compare_2:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0)\n elif not (compare_1 or compare_2):\n return False\n else:\n if grille[i][j-sequence[l-1]-1]==0:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0)\n else:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0) or coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1, 0)\n elif grille[i][j]==1:\n if compare_2:\n return 
coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0)\n else:\n return False\n elif grille[i][j]==0:\n if compare_1:\n return coloriage_possible_rec(grille, sequence, i ,j-(sequence[l-1]-1), l-1 ,0)\n else:\n return False\n else:\n print(\"Syntaxe erreur valeur different que -1 0 1\")\n exit()\n\nprint(coloriage_possible(grille, sequence1, 1, 1, 2))\nprint(coloriage_possible(grille, sequence2, 1, 3, 2))\n"
] |
[
[
"numpy.full"
]
] |
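coloriage_possible above is an attempt at the classic nonogram feasibility test: can the first j cells of a row hold the first l blocks of the sequence, given the cells already coloured? For reference, a compact memoised version of that standard recurrence is sketched below, using the same cell convention (-1 unknown, 0 white, 1 black); it is an independent illustration, not the project's own solution.

```python
from functools import lru_cache

def row_feasible(row, sequence):
    """True if `sequence` of black blocks fits in `row` (cells coded -1/0/1)."""
    def can_be_white(j):
        return row[j] != 1

    def can_be_black(j):
        return row[j] != 0

    @lru_cache(maxsize=None)
    def T(j, l):
        # first j cells, first l blocks
        if l == 0:
            return all(can_be_white(x) for x in range(j))
        s = sequence[l - 1]
        if j < s:
            return False
        # case 1: cell j-1 stays white
        if can_be_white(j - 1) and T(j - 1, l):
            return True
        # case 2: block l occupies cells j-s .. j-1
        if all(can_be_black(x) for x in range(j - s, j)):
            if l == 1:
                return T(j - s, 0)
            if j - s - 1 >= 0 and can_be_white(j - s - 1):   # white separator
                return T(j - s - 1, l - 1)
        return False

    return T(len(row), len(sequence))

# row_feasible([0, -1, -1, -1], [1, 1]) -> True (e.g. colouring "0 1 0 1")
```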
ultrons/t5x
|
[
"e684a307fe62e4a088f457cc592c299cfb070794"
] |
[
"t5x/models.py"
] |
[
"# Copyright 2021 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"T5X Models.\n\nThis module uses layers.py to build a higher-level model structure and define\nmethods for the loss computation as well as a train, prediction, and evaluation\nsteps.\n\"\"\"\n\nimport abc\nimport functools\nfrom typing import Any, Mapping, MutableMapping, Optional, Tuple, Type, Union\n\nfrom flax import core as flax_core\nfrom flax import linen as nn\nfrom flax import optim\nfrom flax.core import scope as flax_scope\nfrom flax.training import common_utils\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport seqio\nfrom t5x import decoding\nimport tensorflow as tf\nimport typing_extensions\n\nArray = Union[np.ndarray, jnp.ndarray, jax.pxla.ShardedDeviceArray, tf.Tensor]\nMetricsMap = MutableMapping[str, jnp.ndarray]\nOptimizer = optim.Optimizer\nMetricMapType = Mapping[str, jnp.ndarray]\nPyTreeDef = type(jax.tree_structure(None))\n\n\nclass TokensIdsToLogitsCallable(typing_extensions.Protocol):\n \"\"\"Token ids to logits mapping call signature.\"\"\"\n\n def __call__(\n self, token_ids: jnp.ndarray, cache: Mapping[str, jnp.ndarray]\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Performs forward pass to convert token ids to logits.\n\n Args:\n token_ids: [batch_size, 1] int32 tokens for single position used during\n incremental decoding. Non-0 prefix tokens to be used as a forced prompt.\n cache: flax attention cache.\n\n Returns:\n a tuple of logits with a shape [batch_size, vocab_size] and an updated\n cache.\n \"\"\"\n ...\n\n\nclass DecodeFnCallable(typing_extensions.Protocol):\n \"\"\"Decoding function call signature.\"\"\"\n\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jnp.ndarray],\n **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Decoding function interface.\n\n Args:\n inputs: [batch_size, max_decode_len] int32 sequence of tokens, with non-0\n prefix tokens to be used as a forced prompt.\n cache: flax attention cache.\n tokens_to_logits: fast autoregressive decoder function taking single token\n slices and cache and returning next-token logits and updated cache.\n eos_id: end-of-sentence token for target vocabulary.\n num_decodes: number of decoded sequences to be returned.\n decode_rng: an optional JAX PRNG Key for stochastic sampling routines.\n **kwargs: an optional kwargs. One common usecase of this is passing\n decoding parameters at the callsite.\n\n Returns:\n decodes: Array of sequences: [batch_size, num_decodes, max_decode_len].\n The `num_decodes` dimension is expected to be sorted by the `scores`,\n i.e., `decodes[:, -1, :] has the highest scores among `num_decodes`\n decoded sequences.\n scores: Array of log likelihood scores: [batch_size, num_decodes]\n \"\"\"\n ...\n\n\nclass BaseModel(abc.ABC):\n \"\"\"Abstract base class for models.\n\n Subclasses must implement the abstract methods. 
Any additional arguments added\n to these methods must have defaults or be bound at run time to fit the\n interface expected by the standard training, inference, and evaluation\n functions.\n \"\"\"\n\n FEATURE_CONVERTER_CLS: Type[seqio.FeatureConverter]\n\n def __init__(self, optimizer_def: optim.OptimizerDef):\n # TODO(jbulian): Move the optimizer out of the model and make it a training\n # parameter.\n self.optimizer_def = optimizer_def\n\n @abc.abstractmethod\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jnp.ndarray],\n ) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, MetricsMap]]:\n \"\"\"Computes loss and metrics.\n\n Args:\n params: model parameters.\n batch: a batch of inputs.\n dropout_rng: rng to use for dropout, or None for deterministic mode.\n\n Returns:\n loss: the loss computed for the given inputs and parameters.\n aux:\n weight_sum: sum of the per-token weights applied to the loss.\n metrics: a mapping of metrics computed for this batch.\n \"\"\"\n pass\n\n def predict_batch(self, params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray]) -> jnp.ndarray:\n \"\"\"Thin wrapper around `self.predict_batch_with_aux`.\"\"\"\n # The first element of the return value is the predicted sequences.\n return self.predict_batch_with_aux(params, batch)[0]\n\n @abc.abstractmethod\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Predicts batch with auxiliary outputs.\"\"\"\n pass\n\n @abc.abstractmethod\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n \"\"\"Computes scores for batch.\"\"\"\n pass\n\n @abc.abstractmethod\n def get_initial_variables(\n self,\n rng: jnp.ndarray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n \"\"\"Returns the initial variables of the model.\"\"\"\n pass\n\n @abc.abstractmethod\n def get_initial_metrics(self) -> Mapping[str, Array]:\n \"\"\"Dictionary of metrics and initial values.\"\"\"\n pass\n\n @abc.abstractmethod\n def summarize_metrics_fn(self, metrics: Mapping[str, Array], duration: float,\n num_steps: int) -> Mapping[str, Array]:\n \"\"\"Converts metrics into tensorboard-friendly summary.\n\n Args:\n metrics: Metrics obtained from `loss_fn`, summed across multiple batches.\n duration: The duration of the run being summarized.\n num_steps: The number of steps the metrics are summed across.\n\n Returns:\n summary: Metrics in tensorboard friendly format.\n \"\"\"\n pass\n\n\nclass BaseTransformerModel(BaseModel):\n \"\"\"Abstract base class for Transformer models using.\n\n Subclasses must implement `predict_batch_with_aux`, `score_batch`,\n `get_initial_variables` from `BaseModel` as well as `_compute_logits`.\n \"\"\"\n\n def __init__(self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optim.OptimizerDef,\n decode_fn: Optional[DecodeFnCallable] = None):\n self.module = module\n self._input_vocabulary = input_vocabulary\n self._output_vocabulary = output_vocabulary\n self._decode_fn = decode_fn\n super().__init__(optimizer_def=optimizer_def)\n\n @property\n def input_vocabulary(self):\n return self._input_vocabulary\n\n @property\n def output_vocabulary(self):\n return self._output_vocabulary\n\n @property\n def decode_fn(self):\n return self._decode_fn\n\n 
@abc.abstractmethod\n def _compute_logits(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jnp.ndarray] = None) -> jnp.ndarray:\n \"\"\"Computes logits via a forward pass of the model.\"\"\"\n pass\n\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jnp.ndarray],\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None\n ) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, MetricsMap]]:\n \"\"\"Loss function used for training with a cross-entropy loss.\"\"\"\n logits = self._compute_logits(params, batch, dropout_rng)\n loss, total_z_loss, weight_sum = compute_weighted_cross_entropy(\n logits,\n targets=batch['decoder_target_tokens'],\n weights=batch.get('decoder_loss_weights', None),\n label_smoothing=label_smoothing,\n z_loss=z_loss,\n loss_normalizing_factor=loss_normalizing_factor)\n metrics = self._compute_metrics(batch, logits, loss, total_z_loss,\n weight_sum)\n return loss, (weight_sum, metrics)\n\n def _compute_metrics(\n self,\n batch: Mapping[str, jnp.ndarray],\n logits: jnp.ndarray,\n loss: jnp.ndarray,\n total_z_loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n ) -> MetricsMap:\n \"\"\"Compute metrics given the logits, targets and loss.\"\"\"\n additional_metrics = {\n 'z_loss': total_z_loss,\n 'cross_ent_loss': loss - total_z_loss\n }\n return compute_metrics(\n logits=logits,\n targets=batch['decoder_target_tokens'],\n weights=batch.get('decoder_loss_weights', None),\n loss=loss,\n weight_sum=weight_sum,\n additional_metrics=additional_metrics)\n\n def get_initial_metrics(self):\n return {\n 'loss': 0.0,\n 'accuracy': 0.0,\n 'weight_sum': 0.0,\n 'z_loss': 0.0,\n 'cross_ent_loss': 0.0,\n 'num_tokens': 0.0,\n 'num_examples': 0.0\n }\n\n def summarize_metrics_fn(self, metrics: Mapping[str, Array], duration: float,\n num_steps: int) -> Mapping[str, Array]:\n \"\"\"Convert metrics into tensorboard-friendly summary.\"\"\"\n summary = {\n 'accuracy':\n metrics['accuracy'] / metrics['weight_sum'],\n 'loss':\n metrics['loss'] / num_steps,\n 'loss_per_nonpadding_target_token':\n metrics['loss'] / metrics['weight_sum'],\n 'loss_per_all_target_tokens':\n metrics['loss'] / metrics['num_tokens'],\n 'timing/seqs_per_second':\n metrics['num_examples'] / duration,\n 'timing/steps_per_second':\n num_steps / duration,\n 'timing/seconds':\n duration,\n 'timing/seqs':\n metrics['num_examples'],\n }\n\n if 'z_loss' in metrics:\n summary['z_loss'] = metrics['z_loss'] / num_steps\n summary['cross_ent_loss'] = metrics['cross_ent_loss'] / num_steps\n summary['cross_ent_loss_per_all_target_tokens'] = (\n metrics['cross_ent_loss'] / metrics['num_tokens'])\n summary['z_loss_per_all_target_tokens'] = (\n metrics['z_loss'] / metrics['num_tokens'])\n return summary\n\n\nclass EncoderDecoderModel(BaseTransformerModel):\n \"\"\"Wrapper class for the models.Transformer nn.module.\"\"\"\n\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n\n def __init__(self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optim.OptimizerDef,\n decode_fn: DecodeFnCallable = decoding.beam_search):\n super().__init__(\n module=module,\n input_vocabulary=input_vocabulary,\n output_vocabulary=output_vocabulary,\n optimizer_def=optimizer_def,\n decode_fn=decode_fn)\n\n # Adds explicit loss method for proper configuration.\n # TODO(b/194404217): Remove once gin correctly handles child class configs.\n def loss_fn(\n self,\n params: 
PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jnp.ndarray],\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None\n ) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, MetricsMap]]:\n\n return super().loss_fn(\n params=params,\n batch=batch,\n dropout_rng=dropout_rng,\n label_smoothing=label_smoothing,\n z_loss=z_loss,\n loss_normalizing_factor=loss_normalizing_factor)\n\n def get_initial_variables(\n self,\n rng: jnp.ndarray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n \"\"\"Get the initial variables for an encoder-decoder model.\"\"\"\n input_types = {} if input_types is None else input_types\n encoder_shape = input_shapes['encoder_input_tokens']\n encoder_type = input_types.get('encoder_input_tokens', jnp.float32)\n decoder_shape = input_shapes['decoder_input_tokens']\n decoder_type = input_types.get('decoder_input_tokens', jnp.float32)\n initial_variables = self.module.init(\n rng,\n jnp.ones(encoder_shape, encoder_type),\n jnp.ones(decoder_shape, decoder_type),\n jnp.ones(decoder_shape, decoder_type),\n decode=False,\n enable_dropout=False)\n return initial_variables\n\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jnp.ndarray] = None,\n mutable: flax_scope.CollectionFilter = False\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n \"\"\"Computes logits via a forward pass of `self.module_cls`.\"\"\"\n # Dropout is provided only for the training mode.\n rngs = {'dropout': dropout_rng} if dropout_rng is not None else None\n\n return self.module.apply(\n {'params': params},\n batch['encoder_input_tokens'],\n batch['decoder_input_tokens'],\n batch['decoder_target_tokens'],\n encoder_segment_ids=batch.get('encoder_segment_ids', None),\n decoder_segment_ids=batch.get('decoder_segment_ids', None),\n encoder_positions=batch.get('encoder_positions', None),\n decoder_positions=batch.get('decoder_positions', None),\n decode=False,\n enable_dropout=rngs is not None,\n rngs=rngs,\n mutable=mutable)\n\n def _compute_logits_from_slice(\n self, flat_ids: jnp.ndarray, flat_cache: Mapping[str, jnp.ndarray],\n params: PyTreeDef, encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # flat_ids: [batch * beam, seq_len=1]\n # cache is expanded inside beam_search to become flat_cache\n # flat_cache: [batch * beam, num_heads, depth_per_head, max_decode_len]\n # flat_logits: [batch * beam, seq_len=1, vocab]\n flat_logits, new_vars = self.module.apply(\n {\n 'params': params,\n 'cache': flat_cache\n },\n encoded_inputs,\n raw_inputs, # only needed for encoder padding mask\n flat_ids,\n flat_ids,\n enable_dropout=False,\n decode=True,\n max_decode_length=max_decode_length,\n mutable=['cache'],\n method=self.module.decode)\n # Remove sequence length dimension since it's always 1 during decoding.\n flat_logits = jnp.squeeze(flat_logits, axis=1)\n new_flat_cache = new_vars['cache']\n return flat_logits, new_flat_cache\n\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Predict with fast decoding beam search on a 
batch.\n\n Here we refer to \"parameters\" for values that can be compiled into the\n model dynamically, as opposed to static configuration settings that require\n a recompile. For example, the model weights and the decoder brevity-penalty\n are parameters and can be modified without requiring a recompile. The number\n of layers, the batch size and the decoder beam size are configuration\n options that require recompilation if changed.\n\n This method can be used with a customizable decoding function as long as it\n follows the signature of `DecodeFnCallable`. In order to provide a unified\n interface for the decoding functions, we use a generic names. For example a\n beam size is a concept unique to beam search. Conceptually, it corresponds\n to the number of sequences returned by the beam search. Therefore, the\n generic argument `num_decodes` corresponds to the beam size if\n `self._decode_fn` is a beam search. For temperature sampling, `num_decodes`\n corresponds to the number of indepedent sequences to be sampled. Typically\n `num_decodes = 1` is used for tempeature sampling.\n\n If `return_all_decodes = True`, the return tuple contains the predictions\n with a shape [batch, num_decodes, max_decode_len] and the scores (i.e., log\n probability of the generated sequence) with a shape [batch, num_decodes].\n\n If `return_all_decodes = False`, the return tuple contains the predictions\n with a shape [batch, max_decode_len] and the scores with a shape [batch].\n\n `decoder_params` can be used to pass dynamic configurations to\n `self.decode_fn`. An example usage is to pass different random seed (i.e.,\n `jax.random.PRNGKey(seed)` with different `seed` value). This can be done by\n setting `decoder_params['decode_rng'] = jax.random.PRNGKey(seed)`.\n\n Args:\n params: model parameters.\n batch: a batch of inputs.\n decoder_params: additional (model-independent) parameters for the decoder.\n return_all_decodes: whether to return the entire beam or just the top-1.\n num_decodes: the number of beams to use in beam search.\n\n Returns:\n A tuple containing:\n the batch of predictions, with the entire beam if requested\n an auxiliary dictionary of decoder scores\n \"\"\"\n # Prepare zeroed-out autoregressive cache.\n # [batch, input_len]\n inputs = batch['encoder_input_tokens']\n # [batch, target_len]\n target_shape = batch['decoder_input_tokens'].shape\n target_type = batch['decoder_input_tokens'].dtype\n _, variables_with_cache = self.module.apply(\n {'params': params},\n jnp.ones(inputs.shape, inputs.dtype),\n jnp.ones(target_shape, target_type),\n jnp.ones(target_shape, target_type),\n decode=True,\n enable_dropout=False,\n mutable=['cache'])\n\n cache = variables_with_cache['cache']\n\n # Prepare transformer fast-decoder call for beam search: for beam search, we\n # need to set up our decoder model to handle a batch size equal to\n # batch_size * num_decodes, where each batch item's data is expanded\n # in-place rather than tiled.\n # i.e. 
if we denote each batch element subtensor as el[n]:\n # [el0, el1, el2] --> beamsize=2 --> [el0,el0,el1,el1,el2,el2]\n # [batch * num_decodes, input_len, emb_dim]\n encoded_inputs = decoding.flat_batch_beam_expand(\n self.module.apply({'params': params},\n inputs,\n enable_dropout=False,\n method=self.module.encode), num_decodes)\n\n # [batch * num_decodes, input_len]\n raw_inputs = decoding.flat_batch_beam_expand(inputs, num_decodes)\n\n tokens_ids_to_logits = functools.partial(\n self._compute_logits_from_slice,\n params=params,\n encoded_inputs=encoded_inputs,\n raw_inputs=raw_inputs,\n max_decode_length=target_shape[1])\n\n if decoder_params is None:\n decoder_params = {}\n\n # For beam search, `decoder_prompt_inputs` is only used to obtain batch size\n # and max decode length information. For temperature sampling,\n # `decod_prompt_inputs` will be filled with the sampled ids.\n decoder_prompt_inputs = jnp.zeros_like(batch['decoder_input_tokens'])\n\n # TODO(hwchung): rename the returned value names to more generic ones.\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n # decodes: [batch, num_decodes, max_decode_len + 1]\n # scores: [batch, num_decodes]\n scanned = hasattr(self.module, 'scan_layers') and self.module.scan_layers\n decodes, scores = self._decode_fn(\n inputs=decoder_prompt_inputs,\n cache=cache,\n tokens_to_logits=tokens_ids_to_logits,\n eos_id=self.output_vocabulary.eos_id,\n num_decodes=num_decodes,\n cache_offset=1 if scanned else 0,\n **decoder_params)\n\n # Beam search returns [n_batch, n_beam, n_length] with beam dimension sorted\n # in increasing order of log-probability.\n # Return the highest scoring beam sequence.\n if return_all_decodes:\n return decodes, {'scores': scores}\n else:\n return decodes[:, -1, :], {'scores': scores[:, -1]}\n\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:\n \"\"\"Compute log likelihood score on a batch.\"\"\"\n weights = batch['decoder_loss_weights']\n target_tokens = batch['decoder_target_tokens']\n\n if return_intermediates:\n logits, modified_variables = self._compute_logits(\n params=params, batch=batch, mutable=['intermediates'])\n\n # Inside self.module, we called nn.Module.sow to track various\n # intermediate values. We extract them here.\n intermediates = flax_core.unfreeze(\n modified_variables.get('intermediates', {}))\n\n # Track per-token labels and loss weights as well. These are not\n # intermediate values of logit computation, so we manually add them here.\n intermediates.setdefault('decoder', {})\n intermediates['decoder']['target_tokens'] = (target_tokens,)\n intermediates['decoder']['loss_weights'] = (weights,)\n # Note that the values are singleton tuples. 
This is because values inside\n # `intermediates` should be tuples tracking all instantiations of a value.\n # These values each have just one instantiation, hence singletons.\n else:\n logits = self._compute_logits(params, batch) # type: jnp.ndarray\n\n # Purposefully don't use config.z_loss because that term is for training\n # stability and shouldn't affect our reported scores.\n token_scores = -cross_entropy_with_logits(\n logits,\n common_utils.onehot(\n target_tokens, logits.shape[-1], on_value=1, off_value=0),\n z_loss=0.0)[0] * weights\n\n sequence_scores = token_scores.sum(-1)\n\n if return_intermediates:\n return sequence_scores, intermediates\n\n return sequence_scores\n\n\nclass DecoderOnlyModel(BaseTransformerModel):\n \"\"\"Wrapper class for the layers.DecoderOnly nn.module.\n\n It accepts inputs made out of only 'targets' or both 'inputs'\n and 'targets'. If both 'inputs' and 'targets' are present, the loss will\n be computed only on 'targets'.\n \"\"\"\n\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter\n\n def __init__(self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optim.OptimizerDef,\n decode_fn: DecodeFnCallable = decoding.temperature_sample):\n super().__init__(\n module,\n input_vocabulary=vocabulary,\n output_vocabulary=vocabulary,\n optimizer_def=optimizer_def,\n decode_fn=decode_fn)\n\n def get_initial_variables(\n self,\n rng: jnp.ndarray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n \"\"\"Get the initial variables.\"\"\"\n input_types = {} if input_types is None else input_types\n decoder_shape = input_shapes['decoder_input_tokens']\n decoder_type = input_types.get('decoder_input_tokens', jnp.float32)\n initial_variables = self.module.init(\n rng,\n jnp.ones(decoder_shape, decoder_type),\n jnp.ones(decoder_shape, decoder_type),\n enable_dropout=False)\n return initial_variables\n\n def _compute_logits(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jnp.ndarray] = None) -> jnp.ndarray:\n \"\"\"Computes logits via a forward pass of `self.module`.\"\"\"\n rngs = {'dropout': dropout_rng} if dropout_rng is not None else None\n\n return self.module.apply(\n {'params': params},\n batch['decoder_input_tokens'],\n batch['decoder_target_tokens'],\n decoder_segment_ids=batch.get('decoder_segment_ids', None),\n decoder_positions=batch.get('decoder_positions', None),\n rngs=rngs,\n decode=False,\n enable_dropout=rngs is not None)\n\n def _compute_logits_from_slice(\n self,\n flat_ids: jnp.ndarray,\n flat_cache: Mapping[str, jnp.ndarray],\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # flat_ids: [batch, seq_len=1]\n # flat_cache['cached_(keys|values)']:\n # [batch, num_heads, depth_per_head, max_decode_length]\n # flat_cache['cache_index']: [batch]\n # flat_logits: [batch, seq_len=1, vocab]\n flat_logits, new_vars = self.module.apply(\n {\n 'params': params,\n 'cache': flat_cache\n },\n flat_ids,\n flat_ids,\n enable_dropout=False,\n decode=True,\n max_decode_length=max_decode_length,\n mutable=['cache'])\n # Remove sequence length dimension since it's always 1 during decoding.\n flat_logits = jnp.squeeze(flat_logits, axis=1)\n new_flat_cache = new_vars['cache']\n return flat_logits, new_flat_cache\n\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool 
= False) -> jnp.ndarray:\n \"\"\"Compute log likelihood score on a batch.\"\"\"\n\n if return_intermediates: # TODO(jaaustin) add this\n raise NotImplementedError(\n 'return_intermediates is not yet supported for DecoderOnlyModel.')\n\n decoder_target_tokens = batch['decoder_target_tokens']\n weights = batch['decoder_loss_weights']\n\n logits = self._compute_logits(params=params, batch=batch, dropout_rng=None)\n\n token_scores = -cross_entropy_with_logits(\n logits,\n common_utils.onehot(\n decoder_target_tokens, logits.shape[-1], on_value=1, off_value=0),\n z_loss=0.0)[0] * weights\n sequence_scores = token_scores.sum(-1)\n return sequence_scores\n\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n \"\"\"Predict with prefix.\n\n `decoder_params` can be used to pass dynamic configurations to\n `self.decode_fn`. An example usage is to pass different random seed (i.e.,\n `jax.random.PRNGKey(seed)` with different `seed` value). This can be done by\n setting `decoder_params['decode_rng'] = jax.random.PRNGKey(seed)`.\n\n Although this method is short, there are a few subtle points that. We use a\n running example to make these points clear.\n\n ```\n Example\n inputs = [9, 4, 6, 1]\n targets = [3, 9, 1]\n\n seqio.PrefixLMFeatureConverter will generate these set of features\n\n decoder_target_tokens = [9, 4, 6, 1, 3, 9, 1, 0, 0]\n decoder_input_tokens = [0, 9, 4, 6, 1, 3, 9, 1, 0]\n decoder_causal_attention = [1, 1, 1, 1, 1, 0, 0, 0, 0]\n\n The output of this function is (a` through `e` are the sampled token ids):\n\n sampled_sequences = [9, 4, 6, 1, a, b, c, d, e].\n ```\n\n Given these set of features, we make a few important observation.\n\n 1) When a decoder-only model is used for a supervised learning with \"inputs\"\n and \"targets\", one way to handle this is to concatenate the \"inputs\" and\n \"targets\". For training, we use teacher forcing for the entire\n concatenated sequence. For inference, on the other hand, we don't have\n the targets. This requires that we use teacher forcing on the \"inputs\"\n portion while using the generated token as the input token for the next\n decoding step. For evaluation, we do have \"targets\" but we only want to\n use them for computing metrics, i.e., by comparing to the sequence\n generated by the model.\n\n This function is currently used for evaluation mode, but by ignoring\n \"targets\", it can be extended for the inference mode.\n\n 2) During evaluation mode, the targets portion is zeroed out and they are\n filled with the sampled token ids. The inputs portion is kept intact.\n\n 3) Note that `decoder_causal_attention` has an additional 1 after the final\n \"inputs\" token. This is because the position where the last \"inputs\"\n token (in this case 1) is input and the output is the first \"target\"\n token (in this case 3) can be included in the non-causal attention\n region.\n\n This results in an alignment between `decoder_input_tokens` and\n `decoder_causal_attention` because the former is shifted to the right by\n one position. 
So we use `decoder_causal_attention` as a binary mask to\n zero out the target tokens in `decoder_input_tokens`.\n\n Currently, this function only supports fully-causal attention, even in the\n prefix.\n\n Note:\n In order to use a custom self._decode_fn with this model it must support:\n\n 1) Decoding from a partially decoded state by accepting a vector of\n `initial_indices` that specify where in the input to start decoding\n from.\n 2) Using a vector as the loop counter to support different examples being\n a different number of steps into their decoding loop.\n 3) Be able to handle one batch element reaching `max_decode_length`\n before the others without it causing the model to prematurely stop\n decoding.\n\n Args:\n params: model parameters.\n batch: batch element with the model features specified in\n seqio.PrefixLMFeatureConverter.\n return_all_decodes: if True, will return all batch_size * num_decodes\n samples from the model as an array of shape [batch_size, num_decodes,\n sequence_length]. Otherwise returns only the most likely samples as an\n array of shape [batch_size, sequence_length].\n num_decodes: number of decoded sequences to be returned.\n decoder_params: additional (model-independent) parameters for the decoder.\n\n Returns:\n sampled_sequences: an array of shape [batch, max_decode_length].\n \"\"\"\n if 'decoder_causal_attention' not in batch:\n raise ValueError(\n 'Batch does not have the right format for text generation: probably '\n 'because `task_feature_lengths` passed to the feature converter does '\n 'not have both `inputs` and `targets`.')\n # Prepare zeroed-out autoregressive cache. The shape will be\n # [batch, ..., max_decode_length]\n target_shape = batch['decoder_input_tokens'].shape\n target_type = batch['decoder_input_tokens'].dtype\n max_decode_length = target_shape[1]\n\n _, variables_with_cache = self.module.apply(\n {'params': params},\n jnp.ones(target_shape, target_type),\n jnp.ones(target_shape, target_type),\n enable_dropout=False,\n decode=True,\n mutable=['cache'])\n cache = variables_with_cache['cache']\n\n # We can use the decoder causal attention mask to tell how long the inputs\n # are. The causal mask has a 1 for all the input tokens (and one more to\n # cover the original BOS token, created by shifting the inputs one to the\n # right) so we need to delete one.\n inputs_lengths = jnp.sum(batch['decoder_causal_attention'], axis=1) - 1\n\n # since decoder_input_tokens is shifted to the right and\n # `decoder_causal_attention` has one more 1 than the number of inputs\n # tokens, this masks out targets portion of the decoder_input_tokens.\n inputs = batch['decoder_input_tokens'] * batch['decoder_causal_attention']\n\n # Prefill our cache with all the inputs. `inputs_lengths` is the index of\n # the last input token. The cache will be filled for all the input\n # positions, save the last input token. The cache index will point to the\n # index of this last input token which is considered during prefilling but\n # not cached. 
This re-computation is required as the logits for this\n # position are required for selecting the first output token.\n #\n # The cache is still `[B, ..., max_decode_len]` but any position less than\n # the `inputs_length` will be non-zero, that is\n # `cached_key[b, ..., i < inputs_lengths[b]] != 0`.\n #\n # The cache index is now a vector of size [B] = input_lengths\n _, variables_with_cache = self.module.apply(\n {\n 'params': params,\n 'cache': cache\n },\n inputs,\n # Use the `decoder_causal_attention` mask, which has 1 for all input\n # positions, including the BOS token, as the targets so when the\n # decoder attention mask is built, it will correctly cover the whole\n # input, Using something like the inputs will cause the first input\n # token (the 0 for BOS) will not be included in the mask. This also\n # restricts the mask to not include any target positions like it would\n # if you used `decoder_target_tokens`.\n batch['decoder_causal_attention'],\n mutable=['cache'],\n enable_dropout=False,\n prefill=True,\n prefill_lengths=inputs_lengths)\n prefilled_cache = variables_with_cache['cache']\n\n tokens_ids_to_logits = functools.partial(\n self._compute_logits_from_slice,\n params=params,\n max_decode_length=max_decode_length)\n\n if decoder_params is None:\n decoder_params = {}\n\n # Using the above-defined single-step decoder function, run temperature\n # sampling with the prefix.\n # [batch, max_decode_length]\n decoded_sequences, scores = self._decode_fn(\n inputs=inputs,\n cache=prefilled_cache,\n tokens_to_logits=tokens_ids_to_logits,\n eos_id=self.output_vocabulary.eos_id,\n num_decodes=num_decodes,\n initial_index=inputs_lengths,\n **decoder_params)\n\n if not return_all_decodes:\n # Search returns [n_batch, n_beam/decodes, n_length] with the beam/decode\n # dimension sorted in increasing order of log-probability.\n # `scores` is [batch, beam/decode_size]\n # We take the highest scoring sequence (-1) and its score\n decoded_sequences = decoded_sequences[:, -1, :]\n # Beam search returns []\n aux = {'scores': scores[:, -1]}\n else:\n # We return all samples and scores, rather than just the top ones.\n aux = {'scores': scores}\n\n return remove_prefix(decoded_sequences, inputs_lengths), aux\n\n\n@jax.vmap\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Remove the prefix portion and shift to the left by the prefix length.\n\n The example below uses non-decorated function definition, i.e., arrays do not\n have batch dimension. `jax.vmap` internally inserts the batch dimension at\n axis=0. The shape annotations do not include the batch dimension either.\n\n Example:\n ```python\n sequence = [1, 2, 3, 4, 5, 6, 7, 0]\n prefix_length = 2\n remove_prefix(sequence, prefix_length) = [3, 4, 5, 6, 7, 0, 0, 0]\n ```\n\n Note that this function assumes that the padding token has an id of 0.\n\n Args:\n sequence: [length] array.\n prefix_length: scalar, i.e., rank 0 array.\n\n Returns:\n [length] array with the prefix removed and the suffix shifted.\n \"\"\"\n length = sequence.shape[-1]\n # A binary mask with 1 at inputs.\n inputs_mask = (jnp.arange(length) < prefix_length)\n # A binary mask with 1 at the targets and padding positions.\n targets_and_padding_mask = jnp.logical_not(inputs_mask).astype(sequence.dtype)\n # Since padding id = 0, the padding mask is zeroed out.\n targets = sequence * targets_and_padding_mask\n # Shift to the left by prefix length. 
Wrapped elements are already zeroed.\n return jnp.roll(targets, -prefix_length, axis=-1)\n\n\n@jax.custom_vjp\ndef cross_entropy_with_logits(logits: jnp.ndarray, targets: jnp.ndarray,\n z_loss: float) -> jnp.ndarray:\n \"\"\"Computes cross entropy loss with stable custom gradient.\n\n Computes a stabilized-gradient version of:\n -jnp.sum(targets * nn.log_softmax(logits), axis=-1)\n\n If z_loss > 0, then an auxiliary loss equal to z_loss*log(z)^2\n will be added to the cross entropy loss (z = softmax normalization constant).\n The two uses of z_loss are:\n 1. To keep the logits from drifting too far from zero, which can cause\n unacceptable roundoff errors in bfloat16.\n 2. To encourage the logits to be normalized log-probabilities.\n\n Args:\n logits: [batch, length, num_classes] float array.\n targets: categorical one-hot targets [batch, length, num_classes] float\n array.\n z_loss: coefficient for auxilliary z-loss loss term.\n\n Returns:\n tuple with the total loss and the z_loss, both\n float arrays with shape [batch, length].\n \"\"\"\n logits_sum = jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True)\n log_softmax = logits - logits_sum\n loss = -jnp.sum(targets * log_softmax, axis=-1)\n # Add auxilliary z-loss term.\n log_z = jnp.squeeze(logits_sum, axis=-1)\n total_z_loss = z_loss * jax.lax.square(log_z)\n loss += total_z_loss\n return loss, total_z_loss\n\n\ndef _cross_entropy_with_logits_fwd(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n z_loss: float = 0.0\n) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp\n .ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]]:\n \"\"\"Forward-mode of `cross_entropy_with_logits`.\"\"\"\n max_logit = logits.max(axis=-1, keepdims=True)\n shifted = logits - max_logit\n exp_shifted = jnp.exp(shifted)\n sum_exp = jnp.sum(exp_shifted, axis=-1, keepdims=True)\n log_softmax = shifted - jnp.log(sum_exp)\n loss = -jnp.sum(targets * log_softmax, axis=-1)\n # Add auxilliary z-loss term.\n log_z = jnp.squeeze(jnp.log(sum_exp) + max_logit, axis=-1)\n total_z_loss = z_loss * jax.lax.square(log_z)\n loss += total_z_loss\n return (loss, total_z_loss), (logits, targets, z_loss, exp_shifted, sum_exp,\n log_softmax, log_z)\n\n\ndef _cross_entropy_with_logits_bwd(\n res: Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray,\n jnp.ndarray, jnp.ndarray], g: Tuple[jnp.ndarray, jnp.ndarray]\n) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Backward-mode of `cross_entropy_with_logits`.\"\"\"\n g = g[0] # Ignore z_loss component as that is only used for logging.\n logits, targets, z_loss, exp_shifted, sum_exp, log_softmax, log_z = res\n # z-loss term adds the (2 * z_loss * log_z) factor.\n deriv = (\n jnp.expand_dims(1 + 2 * z_loss * log_z, -1) * exp_shifted / sum_exp -\n targets)\n g_logits = jnp.expand_dims(g, axis=-1) * deriv\n g_targets = -jnp.expand_dims(g, axis=-1) * log_softmax\n return (jnp.asarray(g_logits,\n logits.dtype), jnp.asarray(g_targets, targets.dtype),\n jnp.array(0.0)) # sets z-loss coeff gradient to 0\n\n\ncross_entropy_with_logits.defvjp(_cross_entropy_with_logits_fwd,\n _cross_entropy_with_logits_bwd)\n\n\ndef compute_weighted_cross_entropy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None\n) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Compute weighted cross entropy and entropy for log probs and targets.\n\n Args:\n logits: [batch, length, 
num_classes] float array.\n targets: categorical targets [batch, length] int array.\n weights: None or array of shape [batch, length].\n label_smoothing: label smoothing constant, used to determine the on and off\n values.\n z_loss: coefficient for auxilliary z-loss loss term.\n loss_normalizing_factor: Constant to divide loss by. If not specified, loss\n will not be normalized. Intended for backward compatibility with T5-MTF\n training. Should not normally be used.\n\n Returns:\n Tuple of scalar loss, z_loss, and weight sum.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n vocab_size = logits.shape[-1]\n confidence = 1.0 - label_smoothing\n low_confidence = (1.0 - confidence) / (vocab_size - 1)\n normalizing_constant = -(\n confidence * jnp.log(confidence) +\n (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20))\n soft_targets = common_utils.onehot(\n targets, vocab_size, on_value=confidence, off_value=low_confidence)\n total_loss, total_z_loss = cross_entropy_with_logits(\n logits, soft_targets, z_loss=z_loss)\n total_loss = total_loss - normalizing_constant\n\n weight_sum = np.prod(targets.shape)\n if weights is not None:\n total_loss = total_loss * weights\n total_z_loss = total_z_loss * weights\n weight_sum = jnp.sum(weights)\n\n # By default, we do not normalize loss based on anything.\n # We don't normalize based on batch size because the optimizers we use are\n # pretty much scale invariant, so this simplifies things.\n # We don't normalize based on number of non-padding tokens in order to treat\n # each token as equally important regardless of sequence length.\n if loss_normalizing_factor:\n total_loss /= loss_normalizing_factor\n total_z_loss /= loss_normalizing_factor\n return jnp.sum(total_loss), jnp.sum(total_z_loss), weight_sum\n\n\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Compute weighted accuracy for log probs and targets.\n\n Args:\n logits: [batch, length, num_classes] float array.\n targets: categorical one-hot targets [batch, length, category] int array.\n weights: None or array of shape [batch, length]\n\n Returns:\n Scalar accuracy.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n accuracy = jnp.equal(jnp.argmax(logits, axis=-1), targets)\n if weights is not None:\n accuracy = accuracy * weights\n\n return jnp.sum(accuracy)\n\n\ndef compute_metrics(\n logits: jnp.ndarray, targets: jnp.ndarray, weights: jnp.ndarray,\n loss: jnp.ndarray, weight_sum: jnp.ndarray,\n additional_metrics: MutableMapping[str, jnp.ndarray]\n) -> MutableMapping[str, jnp.ndarray]:\n \"\"\"Compute summary metrics.\"\"\"\n accuracy = compute_weighted_accuracy(logits, targets, weights)\n metrics = {\n 'loss': loss,\n 'accuracy': accuracy,\n 'weight_sum': weight_sum,\n 'num_examples': targets.shape[0],\n 'num_tokens': targets.size\n }\n metrics.update(additional_metrics)\n return metrics\n\n\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n return model.input_vocabulary\n\n\ndef get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n return model.output_vocabulary\n"
] |
[
[
"numpy.prod"
]
] |
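The T5X models.py entry above documents a cross-entropy loss with an auxiliary z-loss term, i.e. loss = -sum(targets * log_softmax(logits)) + z_loss * log(Z)^2, where Z is the softmax normalizer. Below is a minimal NumPy sketch of that formula for reference only: it is not the T5X implementation (which is JAX code with a custom VJP), and the function name, toy inputs, and default z_loss value are illustrative assumptions.

import numpy as np

def cross_entropy_with_z_loss(logits, onehot_targets, z_loss=1e-4):
    # Stable log of the softmax normalizer Z = sum_k exp(logit_k).
    max_logit = logits.max(axis=-1, keepdims=True)
    log_z = np.log(np.exp(logits - max_logit).sum(axis=-1, keepdims=True)) + max_logit
    log_softmax = logits - log_z
    # Cross entropy against one-hot targets, plus the auxiliary z_loss * log(Z)^2 term.
    ce = -(onehot_targets * log_softmax).sum(axis=-1)
    return ce + z_loss * np.squeeze(log_z, axis=-1) ** 2

# Toy example: batch=1, length=2, vocab=3; target classes are 0 and 2.
logits = np.array([[[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]]])
targets = np.eye(3)[np.array([[0, 2]])]
print(cross_entropy_with_z_loss(logits, targets))  # -> array of shape (1, 2)

The max-logit shift is the same stable-logsumexp trick used in the file's forward pass; the z-loss term keeps logits from drifting far from zero and nudges them toward normalized log-probabilities.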
saadz-khan/stylegan2
|
[
"9fa83f5213e1077d7e5d0d595a961618f3524156"
] |
[
"run_generator.py"
] |
[
"# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\nimport argparse\nimport numpy as np\nimport PIL.Image\nimport dnnlib\nimport dnnlib.tflib as tflib\nimport re\nimport sys\nfrom tqdm import tqdm_notebook as tqdm\nimport scipy.interpolate as interpolate\nfrom opensimplex import OpenSimplex\nimport os\n\nimport pretrained_networks\n\n# from https://colab.research.google.com/drive/1ShgW6wohEFQtqs_znMna3dzrcVoABKIH\ndef generate_zs_from_seeds(seeds,Gs):\n zs = []\n for seed_idx, seed in enumerate(seeds):\n rnd = np.random.RandomState(seed)\n z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]\n zs.append(z)\n return zs\n\ndef generate_images_from_seeds(seeds, truncation_psi):\n return generate_images(generate_zs_from_seeds(seeds), truncation_psi)\n\ndef convertZtoW(latent, truncation_psi=0.7, truncation_cutoff=9):\n dlatent = Gs.components.mapping.run(latent, None) # [seed, layer, component]\n dlatent_avg = Gs.get_var('dlatent_avg') # [component]\n for i in range(truncation_cutoff):\n dlatent[0][i] = (dlatent[0][i]-dlatent_avg)*truncation_psi + dlatent_avg\n \n return dlatent\n\ndef generate_latent_images(zs, truncation_psi,save_npy,prefix):\n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n if not isinstance(truncation_psi, list):\n truncation_psi = [truncation_psi] * len(zs)\n \n for z_idx, z in enumerate(zs):\n if isinstance(z,list):\n z = np.array(z).reshape(1,512)\n elif isinstance(z,np.ndarray):\n z.reshape(1,512)\n print('Generating image for step %d/%d ...' % (z_idx, len(zs)))\n Gs_kwargs.truncation_psi = truncation_psi[z_idx]\n noise_rnd = np.random.RandomState(1) # fix noise\n tflib.set_vars({var: noise_rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]\n images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]\n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('%s%05d.png' % (prefix,z_idx)))\n if save_npy:\n np.save(dnnlib.make_run_dir_path('%s%05d.npy' % (prefix,z_idx)), z)\n\ndef generate_images_in_w_space(dlatents, truncation_psi,save_npy,prefix):\n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n Gs_kwargs.truncation_psi = truncation_psi\n dlatent_avg = Gs.get_var('dlatent_avg') # [component]\n\n # temp_dir = 'frames%06d'%int(1000000*random.random())\n # os.system('mkdir %s'%temp_dir)\n\n for row, dlatent in enumerate(dlatents):\n print('Generating image for step %d/%d ...' 
% (row, len(dlatents)))\n #row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(truncation_psi, [-1, 1, 1]) + dlatent_avg\n dl = (dlatent-dlatent_avg)*truncation_psi + dlatent_avg\n row_images = Gs.components.synthesis.run(dlatent, **Gs_kwargs)\n PIL.Image.fromarray(row_images[0], 'RGB').save(dnnlib.make_run_dir_path('frame%05d.png' % row))\n if save_npy:\n np.save(dnnlib.make_run_dir_path('%s%05d.npy' % (prefix,row)), dlatent)\n\ndef line_interpolate(zs, steps):\n out = []\n for i in range(len(zs)-1):\n for index in range(steps):\n fraction = index/float(steps) \n out.append(zs[i+1]*fraction + zs[i]*(1-fraction))\n return out\n\ndef truncation_traversal(network_pkl,npys,seed=[0],start=-1.0,stop=1.0,increment=0.1):\n print('Loading networks from \"%s\"...' % network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]\n\n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n\n count = 1\n trunc = start\n\n while trunc <= stop:\n Gs_kwargs.truncation_psi = trunc\n print('Generating truncation %0.2f' % trunc)\n\n rnd = np.random.RandomState(seed)\n z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]\n tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]\n images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel] \n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('frame%05d.png' % count))\n\n trunc+=increment\n count+=1\n\n\n\n#----------------------------------------------------------------------------\n\ndef generate_images(network_pkl, seeds, npy_files, truncation_psi):\n print('Loading networks from \"%s\"...' % network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]\n\n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n if truncation_psi is not None:\n Gs_kwargs.truncation_psi = truncation_psi\n\n if seeds is not None:\n for seed_idx, seed in enumerate(seeds):\n print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx+1, len(seeds)))\n rnd = np.random.RandomState(seed)\n z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]\n tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]\n images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]\n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))\n \n if npy_files is not None:\n npys = npy_files.split(',')\n dlatent_avg = Gs.get_var('dlatent_avg') # [component]\n \n for npy in range(len(npys)):\n print('Generating image from npy (%d/%d) ...' 
% (npy+1, len(npys)))\n w = np.load(npys[npy])\n print(w.shape)\n rnd = np.random.RandomState(1)\n dl = (w-dlatent_avg)*truncation_psi + dlatent_avg\n images = Gs.components.synthesis.run(w, **Gs_kwargs) # [minibatch, height, width, channel]\n name = os.path.basename(npys[npy])\n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('%s.png' % name))\n \n \n \n#----------------------------------------------------------------------------\n\ndef generate_neighbors(network_pkl, seeds, npys, diameter, truncation_psi, num_samples, save_vector):\n global _G, _D, Gs, noise_vars\n print('Loading networks from \"%s\"...' % network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]\n\n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n if truncation_psi is not None:\n Gs_kwargs.truncation_psi = truncation_psi\n\n for seed_idx, seed in enumerate(seeds):\n print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx+1, len(seeds)))\n rnd = np.random.RandomState(seed)\n \n og_z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]\n tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]\n images = Gs.run(og_z, None, **Gs_kwargs) # [minibatch, height, width, channel]\n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))\n \n zs = []\n z_prefix = 'seed%04d_neighbor' % seed\n\n for s in range(num_samples):\n random = np.random.uniform(-diameter,diameter,[1,512])\n# zs.append(np.clip((og_z+random),-1,1))\n new_z = np.clip(np.add(og_z,random),-1,1)\n images = Gs.run(new_z, None, **Gs_kwargs) # [minibatch, height, width, channel]\n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('%s%04d.png' % (z_prefix,s)))\n # generate_latent_images(zs, truncation_psi, save_vector, z_prefix)\n if save_vector:\n np.save(dnnlib.make_run_dir_path('%s%05d.npy' % (z_prefix,s)), new_z)\n\n\n#----------------------------------------------------------------------------\n\ndef valmap(value, istart, istop, ostart, ostop):\n return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))\n\nclass OSN():\n min=-1\n max= 1\n\n def __init__(self,seed,diameter):\n self.tmp = OpenSimplex(seed)\n self.d = diameter\n self.x = 0\n self.y = 0\n\n def get_val(self,angle):\n self.xoff = valmap(np.cos(angle), -1, 1, self.x, self.x + self.d);\n self.yoff = valmap(np.sin(angle), -1, 1, self.y, self.y + self.d);\n return self.tmp.noise2d(self.xoff,self.yoff)\n\ndef get_noiseloop(endpoints, nf, d, start_seed):\n features = []\n zs = []\n for i in range(512):\n features.append(OSN(i+start_seed,d))\n\n inc = (np.pi*2)/nf\n for f in range(nf):\n z = np.random.randn(1, 512)\n for i in range(512):\n z[0,i] = features[i].get_val(inc*f) \n zs.append(z)\n\n return zs\n \ndef get_latent_interpolation_bspline(endpoints, nf, k, s, shuffle):\n if shuffle:\n random.shuffle(endpoints)\n x = np.array(endpoints)\n x = np.append(x, x[0,:].reshape(1, x.shape[1]), axis=0)\n \n nd = x.shape[1]\n latents = np.zeros((nd, nf))\n nss = list(range(1, 10)) + [10]*(nd-19) + list(range(10,0,-1))\n for i in tqdm(range(nd-9)):\n idx = list(range(i,i+10))\n tck, u = interpolate.splprep([x[:,j] for j in range(i,i+10)], k=k, s=s)\n out = interpolate.splev(np.linspace(0, 1, num=nf, endpoint=True), tck)\n latents[i:i+10,:] += 
np.array(out)\n latents = latents / np.array(nss).reshape((512,1))\n return latents.T\n\ndef generate_latent_walk(network_pkl, truncation_psi, walk_type, frames, seeds, npys, save_vector, diameter=2.0, start_seed=0 ):\n global _G, _D, Gs, noise_vars\n print('Loading networks from \"%s\"...' % network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]\n zs = []\n \n if(len(seeds) > 0):\n zs = generate_zs_from_seeds(seeds,Gs)\n elif(len(npys) > 0):\n zs = npys\n \n if(len(zs) > 2 ):\n print('not enough values to generate walk')\n# return false;\n\n walk_type = walk_type.split('-')\n \n if walk_type[0] == 'line':\n number_of_steps = int(frames/(len(zs)-1))+1\n \n if (len(walk_type)>1 and walk_type[1] == 'w'):\n ws = []\n for i in range(len(zs)):\n ws.append(convertZtoW(zs[i]))\n points = line_interpolate(ws,number_of_steps)\n zpoints = line_interpolate(zs,number_of_steps)\n else:\n points = line_interpolate(zs,number_of_steps)\n\n # from Gene Kogan\n elif walk_type[0] == 'bspline':\n # bspline in w doesnt work yet\n # if (len(walk_type)>1 and walk_type[1] == 'w'):\n # ws = []\n # for i in range(len(zs)):\n # ws.append(convertZtoW(zs[i]))\n\n # print(ws[0].shape)\n # w = []\n # for i in range(len(ws)):\n # w.append(np.asarray(ws[i]).reshape(512,18))\n # points = get_latent_interpolation_bspline(ws,frames,3, 20, shuffle=False)\n # else:\n z = []\n for i in range(len(zs)):\n z.append(np.asarray(zs[i]).reshape(512))\n points = get_latent_interpolation_bspline(z,frames,3, 20, shuffle=False)\n\n # from Dan Shiffman: https://editor.p5js.org/dvs/sketches/Gb0xavYAR\n elif walk_type[0] == 'noiseloop':\n points = get_noiseloop(None,frames,diameter,start_seed)\n\n if (walk_type[0] == 'line' and len(walk_type)>1 and walk_type[1] == 'w'):\n # print(points[0][:,:,1])\n # print(zpoints[0][:,1])\n # ws = []\n # for i in enumerate(len(points)):\n # ws.append(convertZtoW(points[i]))\n generate_images_in_w_space(points, truncation_psi,save_vector,'frame')\n elif (len(walk_type)>1 and walk_type[1] == 'w'):\n print('%s is not currently supported in w space, please change your interpolation type' % (walk_type[0]))\n else:\n generate_latent_images(points, truncation_psi,save_vector,'frame')\n\n#----------------------------------------------------------------------------\n\ndef style_mixing_example(network_pkl, row_seeds, col_seeds, truncation_psi, col_styles, minibatch_size=4):\n print('Loading networks from \"%s\"...' 
% network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n w_avg = Gs.get_var('dlatent_avg') # [component]\n\n Gs_syn_kwargs = dnnlib.EasyDict()\n Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_syn_kwargs.randomize_noise = False\n Gs_syn_kwargs.minibatch_size = minibatch_size\n\n print('Generating W vectors...')\n all_seeds = list(set(row_seeds + col_seeds))\n all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in all_seeds]) # [minibatch, component]\n all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]\n all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]\n w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} # [layer, component]\n\n print('Generating images...')\n all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]\n image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}\n\n print('Generating style-mixed images...')\n for row_seed in row_seeds:\n for col_seed in col_seeds:\n w = w_dict[row_seed].copy()\n w[col_styles] = w_dict[col_seed][col_styles]\n image = Gs.components.synthesis.run(w[np.newaxis], **Gs_syn_kwargs)[0]\n image_dict[(row_seed, col_seed)] = image\n\n print('Saving images...')\n for (row_seed, col_seed), image in image_dict.items():\n PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('%d-%d.png' % (row_seed, col_seed)))\n\n print('Saving image grid...')\n _N, _C, H, W = Gs.output_shape\n canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')\n for row_idx, row_seed in enumerate([None] + row_seeds):\n for col_idx, col_seed in enumerate([None] + col_seeds):\n if row_seed is None and col_seed is None:\n continue\n key = (row_seed, col_seed)\n if row_seed is None:\n key = (col_seed, col_seed)\n if col_seed is None:\n key = (row_seed, row_seed)\n canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))\n canvas.save(dnnlib.make_run_dir_path('grid.png'))\n\n#----------------------------------------------------------------------------\n\ndef _parse_num_range(s):\n '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''\n\n range_re = re.compile(r'^(\\d+)-(\\d+)$')\n m = range_re.match(s)\n if m:\n return range(int(m.group(1)), int(m.group(2))+1)\n vals = s.split(',')\n return [int(x) for x in vals]\n\n\n#----------------------------------------------------------------------------\n\ndef _parse_npy_files(files):\n '''Accept a comma separated list of npy files and return a list of z vectors.'''\n print(files)\n zs =[]\n \n for f in files:\n zs.append(np.load(files[f]))\n \n return zs\n \n#----------------------------------------------------------------------------\n\n_examples = '''examples:\n\n # Generate ffhq uncurated images (matches paper Figure 12)\n python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=6600-6625 --truncation-psi=0.5\n\n # Generate ffhq curated images (matches paper Figure 11)\n python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=66,230,389,1518 --truncation-psi=1.0\n\n # Generate uncurated car images (matches paper Figure 12)\n python %(prog)s generate-images --network=gdrive:networks/stylegan2-car-config-f.pkl --seeds=6000-6025 --truncation-psi=0.5\n\n # Generate style mixing example (matches style 
mixing video clip)\n python %(prog)s style-mixing-example --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seeds=85,100,75,458,1500 --col-seeds=55,821,1789,293 --truncation-psi=1.0\n\n # Generate truncation animation from one seed\n python %(prog)s truncation_traversal --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seed=6600 --start=-1.0 --stop=1.0 --increment=0.1\n\n'''\n\n#----------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(\n description='''StyleGAN2 generator.\n\nRun 'python %(prog)s <subcommand> --help' for subcommand help.''',\n epilog=_examples,\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n\n subparsers = parser.add_subparsers(help='Sub-commands', dest='command')\n\n parser_truncation_traversal = subparsers.add_parser('truncation-traversal', help='Generate truncation walk')\n parser_truncation_traversal.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_truncation_traversal.add_argument('--seed', type=_parse_num_range, help='Singular seed value')\n parser_truncation_traversal.add_argument('--npys', type=_parse_npy_files, help='List of .npy files')\n parser_truncation_traversal.add_argument('--start', type=float, help='Starting value')\n parser_truncation_traversal.add_argument('--stop', type=float, help='Stopping value')\n parser_truncation_traversal.add_argument('--increment', type=float, help='Incrementing value')\n parser_truncation_traversal.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n\n parser_generate_latent_walk = subparsers.add_parser('generate-latent-walk', help='Generate latent walk')\n parser_generate_latent_walk.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_generate_latent_walk.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)\n parser_generate_latent_walk.add_argument('--walk-type', help='Type of walk (default: %(default)s)', default='line')\n parser_generate_latent_walk.add_argument('--frames', type=int, help='Frame count (default: %(default)s', default=240)\n parser_generate_latent_walk.add_argument('--seeds', type=_parse_num_range, help='List of random seeds')\n parser_generate_latent_walk.add_argument('--npys', type=_parse_npy_files, help='List of .npy files')\n parser_generate_latent_walk.add_argument('--save_vector', dest='save_vector', action='store_true', help='also save vector in .npy format')\n parser_generate_latent_walk.add_argument('--diameter', type=float, help='diameter of noise loop', default=2.0)\n parser_generate_latent_walk.add_argument('--start_seed', type=int, help='random seed to start noise loop from', default=0)\n parser_generate_latent_walk.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n\n parser_generate_neighbors = subparsers.add_parser('generate-neighbors', help='Generate random neighbors of a seed')\n parser_generate_neighbors.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_generate_neighbors.add_argument('--seeds', type=_parse_num_range, help='List of random seeds')\n parser_generate_neighbors.add_argument('--npys', type=_parse_npy_files, help='List of .npy files')\n parser_generate_neighbors.add_argument('--diameter', type=float, help='distance around seed to 
sample from', default=0.1)\n parser_generate_neighbors.add_argument('--save_vector', dest='save_vector', action='store_true', help='also save vector in .npy format')\n parser_generate_neighbors.add_argument('--num_samples', type=int, help='How many neighbors to generate (default: %(default)s', default=25)\n parser_generate_neighbors.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)\n parser_generate_neighbors.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n \n parser_generate_images = subparsers.add_parser('generate-images', help='Generate images')\n parser_generate_images.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_generate_images.add_argument('--seeds', type=_parse_num_range, help='List of random seeds')\n parser_generate_images.add_argument('--npys', help='List of .npy files', dest='npy_files')\n parser_generate_images.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)\n parser_generate_images.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n\n parser_style_mixing_example = subparsers.add_parser('style-mixing-example', help='Generate style mixing video')\n parser_style_mixing_example.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_style_mixing_example.add_argument('--row-seeds', type=_parse_num_range, help='Random seeds to use for image rows', required=True)\n parser_style_mixing_example.add_argument('--col-seeds', type=_parse_num_range, help='Random seeds to use for image columns', required=True)\n parser_style_mixing_example.add_argument('--col-styles', type=_parse_num_range, help='Style layer range (default: %(default)s)', default='0-6')\n parser_style_mixing_example.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)\n parser_style_mixing_example.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n\n args = parser.parse_args()\n kwargs = vars(args)\n subcmd = kwargs.pop('command')\n\n if subcmd is None:\n print ('Error: missing subcommand. Re-run with --help for usage.')\n sys.exit(1)\n\n sc = dnnlib.SubmitConfig()\n sc.num_gpus = 1\n sc.submit_target = dnnlib.SubmitTarget.LOCAL\n sc.local.do_not_copy_source_files = True\n sc.run_dir_root = kwargs.pop('result_dir')\n sc.run_desc = subcmd\n\n func_name_map = {\n 'truncation-traversal': 'run_generator.truncation_traversal',\n 'generate-images': 'run_generator.generate_images',\n 'generate-neighbors': 'run_generator.generate_neighbors',\n 'generate-latent-walk': 'run_generator.generate_latent_walk',\n 'style-mixing-example': 'run_generator.style_mixing_example'\n }\n dnnlib.submit_run(sc, func_name_map[subcmd], **kwargs)\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n\n#----------------------------------------------------------------------------\n"
] |
[
[
"numpy.linspace",
"numpy.asarray",
"numpy.cos",
"numpy.sin",
"numpy.random.uniform",
"numpy.add",
"numpy.random.randn",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] |
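The run_generator.py entry above builds latent walks by mapping seeds to z vectors and interpolating linearly between them (generate_zs_from_seeds and line_interpolate). The following is a standalone NumPy sketch of that idea, assuming the 512-dimensional z implied by the file's reshape(1, 512) calls; the helper names and toy seeds are illustrative, not part of the original script.

import numpy as np

def zs_from_seeds(seeds, z_dim=512):
    # One latent per seed, mirroring np.random.RandomState(seed).randn(1, z_dim).
    return [np.random.RandomState(seed).randn(1, z_dim) for seed in seeds]

def line_interpolate(zs, steps):
    # Evenly spaced points on the segment between each pair of consecutive latents.
    out = []
    for a, b in zip(zs[:-1], zs[1:]):
        for i in range(steps):
            t = i / float(steps)
            out.append(a * (1.0 - t) + b * t)
    return out

frames = line_interpolate(zs_from_seeds([0, 42]), steps=4)
print(len(frames), frames[0].shape)  # 4 (1, 512)

In the script, each interpolated z would then be fed through the generator (Gs.run) to render one frame of the walk.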
nlinc1905/dsilt-ml-code
|
[
"d51fffd16e83f93ea7d49f65102e731abd3ba70c"
] |
[
"03 Rule Learners/03_association_rules.py"
] |
[
"\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori, association_rules\n\n#-------------------------------------------------------------------------------------------------#\n#---------------------------------------EDA and Cleaning------------------------------------------#\n#-------------------------------------------------------------------------------------------------#\n\nraw_d = pd.read_excel(\"Online Retail.xlsx\", sheet_name=\"Online Retail\")\nprint(raw_d.head())\n\ndef naCol(df, print_result=True):\n \"\"\"\n Checks for null or missing values in all columns of a Pandas dataframe\n\n Arguments:\n df: A Pandas dataframe\n print_result: indicates whether or not the output should be printed to the console\n\n Returns:\n dict: (key:value) = (column_name:number_missing_values) \n \"\"\"\n y = dict.fromkeys(df.columns)\n for idx, key in enumerate(y.keys()):\n if df.dtypes[list(y.keys())[idx]] == 'object':\n y[key] = pd.isnull(df[list(y.keys())[idx]]).sum() + (df[list(y.keys())[idx]]=='').sum()\n else:\n y[key] = pd.isnull(df[list(y.keys())[idx]]).sum()\n if print_result:\n print(\"Number of nulls by column:\")\n for k, v in y.items():\n print(k, v)\n return y\n\nnaCol(raw_d)\n# Documentation says InvoiceNo starting with \"C\" is cancellation\nprint(len(raw_d[raw_d['InvoiceNo'].astype('str').apply(lambda x: x[:1])=='C']))\nprint(set(raw_d['InvoiceNo'].astype('str').apply(lambda x: x[:1]))) # What does \"A\" mean?\nprint(raw_d[raw_d['InvoiceNo'].astype('str').apply(lambda x: x[:1])=='A'][:6])\n\n# Are there any transactions with unusual quantities?\nprint(raw_d.Quantity.value_counts())\nprint(raw_d[raw_d['Quantity']<0][:6]) # Possibly returns\nprint(raw_d[raw_d['Quantity']>1000][:6]) # Cheap items in bulk\n\n# Are there any unusual unit prices?\nsns.distplot(raw_d['UnitPrice'])\nplt.show()\nprint(raw_d[raw_d['UnitPrice']<0][:6]) # Adjustments\nprint(raw_d[raw_d['UnitPrice']>1000][:6]) # Postage and Amazon Fees\n# Consider removing StockCodes in ['AmazonFee', 'M' , 'Post', 'Dot', B']\n\n# Can a customer have multiple invoices?\nprint(raw_d[['InvoiceNo', 'CustomerID']].groupby('CustomerID').agg('count'))\n\n# Does an invoice contain multiple items?\nitems_by_invoice = raw_d[['StockCode', 'InvoiceNo']].groupby('InvoiceNo').agg('count').reset_index()\nprint(items_by_invoice)\n# Are there invoices with only 1 item? 
These can be removed.\ninvoices_to_remove = items_by_invoice[items_by_invoice['StockCode']<2]['InvoiceNo'].tolist()\n\n# Remove data that could produce meaningless or useless rules\nclean_d = raw_d[(~raw_d['InvoiceNo'].str.slice(0,1).isin(['C', 'A'])) \\\n & (raw_d['Quantity']>0) \\\n & (raw_d['UnitPrice']>0) \\\n & (~raw_d['StockCode'].isin(['AMAZONFEE', 'M', 'POST', 'DOT', 'B'])) \\\n & (~raw_d['InvoiceNo'].isin(invoices_to_remove))] \\\n [['InvoiceNo', 'StockCode', 'Description']]\n\nprint(clean_d.info())\nclean_d['InvoiceNo'] = clean_d['InvoiceNo'].astype('int')\nclean_d['Description'] = clean_d['Description'].str.strip()\n\n#clean_d.to_csv('online_retail_clean.csv', index=False)\n\n#-------------------------------------------------------------------------------------------------#\n#------------------------------------Apriori Association Rules------------------------------------#\n#-------------------------------------------------------------------------------------------------#\n\ntrxn_d = clean_d[['InvoiceNo', 'Description']].groupby('InvoiceNo')['Description'].apply(list).reset_index()\nte = TransactionEncoder()\nte_ary = te.fit(trxn_d['Description']).transform(trxn_d['Description'])\nd = pd.DataFrame(te_ary, columns=te.columns_)\n\n# Sense checks\nprint(len(set(clean_d['Description']))) #should match nbr rows\nprint(len(set(clean_d['InvoiceNo']))) #should match nbr columns\nprint(clean_d.info())\nprint(d.info())\nprint(d.sum()[d.sum()<1])\nprint(d.sum()[d.sum()==np.inf])\nprint(d.sum(axis=1)[d.sum(axis=1)<1])\nprint(d.sum(axis=1)[d.sum(axis=1)==np.inf])\n\nfrequent_itemsets = apriori(d, min_support=0.03,\n use_colnames=False,\n max_len=None)\nprint(frequent_itemsets.head())\na_rules = association_rules(frequent_itemsets,\n metric=\"confidence\",\n min_threshold=0.7)\nprint(a_rules.head())\n#a_rules.to_csv('online_retail_learned_rules.csv', index=False)\n"
] |
[
[
"pandas.read_excel",
"matplotlib.pyplot.show",
"pandas.DataFrame"
]
] |
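As a quick, self-contained check of the TransactionEncoder → apriori → association_rules chain used in 03_association_rules.py, the sketch below runs the same calls on a hand-made basket list; the baskets and thresholds are illustrative only.

```python
# Toy version of the one-hot -> frequent itemsets -> rules pipeline above.
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

baskets = [
    ["bread", "milk"],
    ["bread", "milk", "butter"],
    ["milk", "butter"],
    ["bread", "milk"],
]

te = TransactionEncoder()
onehot = pd.DataFrame(te.fit(baskets).transform(baskets), columns=te.columns_)

frequent = apriori(onehot, min_support=0.5, use_colnames=True)
rules = association_rules(frequent, metric="confidence", min_threshold=0.7)
print(rules[["antecedents", "consequents", "support", "confidence", "lift"]])
```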
mkiseljov/JONNEE
|
[
"3085493837842f1d10b01f2b269971e731beef16"
] |
[
"DuoGAE/visualization.py"
] |
[
"# Plot results\n\nimport time\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\n\ndef plot_results(results, test_freq, path='results.png', show=False):\n # Init\n plt.close('all')\n fig = plt.figure(figsize=(8, 8))\n\n x_axis_train = range(len(results['train_elbo']))\n x_axis_test = range(0, len(x_axis_train), test_freq)\n x_axis_train_added = range(len(results['train_added']))\n # Elbo and added loss\n ax = fig.add_subplot(2, 2, 1)\n ax.plot(x_axis_train, results['train_elbo'], label=\"ELBO\")\n ax.plot(x_axis_train_added, results['train_added'], label=\"L1+L2+L3\")\n ax.set_ylabel('Loss on train')\n ax.set_title('Loss')\n ax.legend(loc='upper right')\n\n # Accuracy\n ax = fig.add_subplot(2, 2, 2)\n ax.plot(x_axis_train, results['accuracy_train'])\n ax.plot(x_axis_test, results['accuracy_test'])\n ax.set_ylabel('Accuracy')\n ax.set_title('Accuracy')\n ax.legend(['Train', 'Test'], loc='lower right')\n\n # ROC\n ax = fig.add_subplot(2, 2, 3)\n ax.plot(x_axis_train, results['roc_train'])\n ax.plot(x_axis_test, results['roc_test'])\n ax.set_xlabel('Epoch')\n ax.set_ylabel('ROC AUC')\n ax.set_title('ROC AUC')\n ax.legend(['Train', 'Test'], loc='lower right')\n\n # Precision\n ax = fig.add_subplot(2, 2, 4)\n ax.plot(x_axis_train, results['ap_train'])\n ax.plot(x_axis_test, results['ap_test'])\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Precision')\n ax.set_title('Precision')\n ax.legend(['Train', 'Test'], loc='lower right')\n\n # Save\n fig.tight_layout()\n if show:\n plt.show()\n else:\n fig.savefig(path)\n\n\ndef plot_points_tsne(emb, seed=42, labels=None, figsize=(12, 8)):\n tsne = TSNE(n_components=2, verbose=0, perplexity=40,\n \t\t\tn_iter=600, random_state=seed)\n \n tsne_results = tsne.fit_transform(emb)\n\n vis_x = tsne_results[:, 0]\n vis_y = tsne_results[:, 1]\n\n plt.figure(figsize=figsize)\n plt.scatter(vis_x, vis_y)\n plt.show()\n\n\n\ndef plot_points(emb, seed=42, labels=None, figsize=(12, 8)):\n vis_x = emb[:, 0]\n vis_y = emb[:, 1]\n\n plt.figure(figsize=figsize)\n plt.scatter(vis_x, vis_y)\n plt.show()\n\n\n\ndef plot_with_classes(emb, edges, labels, figsize=(14, 10)):\n # dots\n tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=600)\n tsne_results = tsne.fit_transform(emb)\n\n # plotting\n vis_x = tsne_results[:, 0]\n vis_y = tsne_results[:, 1]\n y_data = labels\n\n plt.figure(figsize=figsize)\n plt.scatter(vis_x, vis_y, c=y_data,\n cmap=plt.cm.get_cmap(\"jet\", 39))\n # plt.colorbar(ticks=range(39))\n\n for edge in edges:\n a, b = edge\n a, b = int(a), int(b)\n plt.plot([vis_x[a], vis_x[b]], [vis_y[a], vis_y[b]],\n color='gray', alpha=0.1)\n\n plt.show()\n\n"
] |
[
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.plot",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
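The plot_points_tsne helper above boils down to TSNE(...).fit_transform followed by a scatter plot. Here is a stand-alone version on random embeddings; the array shape and perplexity are arbitrary choices for illustration.

```python
# Project random embeddings to 2-D with t-SNE and scatter-plot them.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

rng = np.random.RandomState(42)
emb = rng.randn(200, 32)              # 200 fake 32-dimensional embeddings

tsne = TSNE(n_components=2, perplexity=40, random_state=42)
xy = tsne.fit_transform(emb)          # shape (200, 2)

plt.figure(figsize=(8, 6))
plt.scatter(xy[:, 0], xy[:, 1], s=10)
plt.title("t-SNE projection of toy embeddings")
plt.show()
```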
doncat99/FinanceDataCenter
|
[
"1538c8347ed5bff9a99a3cca07507a7605108124",
"1538c8347ed5bff9a99a3cca07507a7605108124"
] |
[
"findy/database/plugins/baostock/quotes/bao_china_stock_kdata_recorder.py",
"findy/database/plugins/baostock/meta/bao_china_stock_meta_recorder.py"
] |
[
"# -*- coding: utf-8 -*-\nimport time\n\nimport pandas as pd\n\nfrom findy import findy_config\nfrom findy.interface import Region, Provider, ChnExchange, EntityType\nfrom findy.database.schema import IntervalLevel, AdjustType\nfrom findy.database.schema.meta.stock_meta import Stock\nfrom findy.database.schema.datatype import StockKdataCommon\nfrom findy.database.plugins.recorder import KDataRecorder\nfrom findy.database.plugins.baostock.common import to_bao_trading_level, to_bao_entity_id, \\\n to_bao_trading_field, to_bao_adjust_flag\nfrom findy.database.quote import get_entities\nfrom findy.utils.pd import pd_valid\nfrom findy.utils.time import PD_TIME_FORMAT_DAY, PD_TIME_FORMAT_ISO8601, to_time_str\n\nimport findy.vendor.baostock as bs\ntry:\n bs.login()\nexcept:\n pass\n\n\nclass BaoChinaStockKdataRecorder(KDataRecorder):\n # 数据来自jq\n region = Region.CHN\n provider = Provider.BaoStock\n entity_schema = Stock\n # 只是为了把recorder注册到data_schema\n data_schema = StockKdataCommon\n\n def __init__(self,\n exchanges=[e.value for e in ChnExchange],\n entity_ids=None,\n codes=None,\n batch_size=10,\n force_update=True,\n sleeping_time=0,\n default_size=findy_config['batch_size'],\n real_time=False,\n fix_duplicate_way='ignore',\n start_timestamp=None,\n end_timestamp=None,\n level=IntervalLevel.LEVEL_1WEEK,\n kdata_use_begin_time=False,\n close_hour=15,\n close_minute=0,\n one_day_trading_minutes=4 * 60,\n adjust_type=AdjustType.qfq,\n share_para=None) -> None:\n level = IntervalLevel(level)\n adjust_type = AdjustType(adjust_type)\n self.data_schema = self.get_kdata_schema(entity_type=EntityType.Stock, level=level, adjust_type=adjust_type)\n self.bao_trading_level = to_bao_trading_level(level)\n\n super().__init__(EntityType.Stock, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,\n default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,\n close_minute, level, kdata_use_begin_time, one_day_trading_minutes, share_para=share_para)\n self.adjust_type = adjust_type\n\n async def init_entities(self, db_session):\n # init the entity list\n self.entities, column_names = get_entities(\n region=self.region,\n provider=self.provider,\n db_session=db_session,\n entity_schema=self.entity_schema,\n entity_type=self.entity_type,\n exchanges=self.exchanges,\n entity_ids=self.entity_ids,\n codes=self.codes,\n filters=[Stock.is_active.is_(True)])\n\n def generate_domain_id(self, entity, df, time_fmt=PD_TIME_FORMAT_DAY):\n format = PD_TIME_FORMAT_DAY if self.level >= IntervalLevel.LEVEL_1DAY else PD_TIME_FORMAT_ISO8601\n return df['entity_id'] + '_' + df[self.get_evaluated_time_field()].dt.strftime(format)\n\n def bao_get_bars(self, code, start, end, frequency=\"d\", adjustflag=\"3\",\n fields=\"date, code, open, high, low, close, preclose, volume, amount, adjustflag, turn, tradestatus, pctChg, isST\"):\n\n def _bao_get_bars(code, start, end, frequency, adjustflag, fields):\n k_rs = bs.query_history_k_data_plus(code, start_date=start, end_date=end, frequency=frequency,\n adjustflag=adjustflag, fields=fields)\n return k_rs.get_data()\n\n self.logger.debug(\"HTTP GET: bars, with code={}, unit={}, start={}, end={}\".format(code, frequency, start, end))\n try:\n return _bao_get_bars(code, start, end, frequency, adjustflag, fields)\n except Exception as e:\n self.logger.error(f'bao_get_bars, code: {code}, error: {e}')\n return None\n\n async def record(self, entity, http_session, db_session, para):\n start_point = time.time()\n\n (ref_record, start, end, size, 
timestamps) = para\n\n start = to_time_str(start)\n if self.bao_trading_level in ['d', 'w', 'm']:\n start = max(start, \"1990-12-19\")\n else:\n start = max(start, \"1999-07-26\")\n\n df = self.bao_get_bars(to_bao_entity_id(entity),\n start=start,\n end=end if end is None else to_time_str(end),\n frequency=self.bao_trading_level,\n fields=to_bao_trading_field(self.bao_trading_level),\n adjustflag=to_bao_adjust_flag(self.adjust_type))\n\n if pd_valid(df):\n return False, time.time() - start_point, (ref_record, self.format(entity, df))\n\n return True, time.time() - start_point, None\n\n def format(self, entity, df):\n if self.bao_trading_level == 'd':\n df.rename(columns={'turn': 'turnover', 'date': 'timestamp', 'preclose': 'pre_close', 'pctChg': 'change_pct',\n 'peTTM': 'pe_ttm', 'psTTM': 'ps_ttm', 'pcfNcfTTM': 'pcf_ncf_ttm', 'pbMRQ': 'pb_mrq', 'isST': 'is_st'}, inplace=True)\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n df['is_st'] = df['is_st'].apply(lambda x: 1 if x == '1' else 0)\n\n elif self.bao_trading_level == 'w' or self.bao_trading_level == 'm':\n df.rename(columns={'turn': 'turnover', 'date': 'timestamp', 'pctChg': 'change_pct'}, inplace=True)\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n\n else:\n df.rename(columns={'time': 'timestamp'}, inplace=True)\n df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y%m%d%H%M%S%f')\n\n cols = df.select_dtypes('object').columns.to_list()\n cols.remove('adjustflag')\n df.replace(r'^\\s*$', 0.0, regex=True, inplace=True)\n df[cols] = df[cols].astype(float)\n\n df['entity_id'] = entity.id\n df['provider'] = self.provider.value\n df['name'] = entity.name\n df['code'] = entity.code\n df['level'] = self.level.value\n df.replace({'adjustflag': {'1': 'hfq', '2': 'qfq', '3': 'normal'}}, inplace=True)\n\n df['id'] = self.generate_domain_id(entity, df)\n return df\n\n async def on_finish_entity(self, entity, http_session, db_session, result):\n now = time.time()\n if result == 2 and not entity.is_active:\n try:\n db_session.commit()\n except Exception as e:\n self.logger.error(f'{self.__class__.__name__}, error: {e}')\n db_session.rollback()\n\n return time.time() - now\n\n async def on_finish(self):\n pass\n",
"# -*- coding: utf-8 -*-\nimport pandas as pd\n\nfrom findy.interface import Region, Provider, EntityType\nfrom findy.interface.writer import df_to_db\nfrom findy.database.schema.meta.stock_meta import Stock, StockDetail\nfrom findy.database.plugins.recorder import RecorderForEntities\nfrom findy.database.plugins.baostock.common import to_entity_id, to_bao_entity_type\nfrom findy.database.context import get_db_session\nfrom findy.utils.pd import pd_valid\n\nimport findy.vendor.baostock as bs\ntry:\n bs.login()\nexcept:\n pass\n\n\nclass BaoChinaStockDetailRecorder(RecorderForEntities):\n region = Region.CHN\n provider = Provider.BaoStock\n data_schema = StockDetail\n\n\nclass BaoChinaStockListRecorder(RecorderForEntities):\n region = Region.CHN\n provider = Provider.BaoStock\n data_schema = Stock\n\n def bao_get_all_securities(self, entity_type):\n def _bao_get_all_securities(entity_type):\n k_rs = bs.query_stock_basic()\n df = k_rs.get_data()\n return df[df['type'] == entity_type] if not df.empty else df\n\n try:\n return _bao_get_all_securities(entity_type)\n except Exception as e:\n self.logger.error(f'bao_get_all_securities, error: {e}')\n return None\n\n def to_entity(self, df, entity_type: EntityType, category=None):\n # 上市日期\n df.rename(columns={'ipoDate': 'list_date', 'outDate': 'end_date', 'code_name': 'name'}, inplace=True)\n df['end_date'].replace(r'^\\s*$', '2200-01-01', regex=True, inplace=True)\n\n df['list_date'] = pd.to_datetime(df['list_date'])\n df['end_date'] = pd.to_datetime(df['end_date'])\n df['timestamp'] = df['list_date']\n\n df['entity_id'] = df['code'].apply(lambda x: to_entity_id(entity_type=entity_type, bao_code=x))\n df['id'] = df['entity_id']\n df['entity_type'] = entity_type.value\n df[['exchange', 'code']] = df['code'].str.split('.', expand=True)\n\n if category:\n df['category'] = category\n\n return df\n\n async def run(self):\n # 抓取股票列表\n df_entity = self.bao_get_all_securities(to_bao_entity_type(EntityType.Stock))\n\n if pd_valid(df_entity):\n df_stock = self.to_entity(df_entity, entity_type=EntityType.Stock)\n\n # persist to Stock\n await df_to_db(region=self.region,\n provider=self.provider,\n data_schema=Stock,\n db_session=get_db_session(self.region, self.provider, Stock),\n df=df_stock)\n\n # persist StockDetail too\n await df_to_db(region=self.region,\n provider=self.provider,\n data_schema=StockDetail,\n db_session=get_db_session(self.region, self.provider, StockDetail),\n df=df_stock)\n\n self.logger.info(\"persist stock list success\")\n"
] |
[
[
"pandas.to_datetime"
],
[
"pandas.to_datetime"
]
] |
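Both recorders above lean on pandas.to_datetime, once for plain daily date strings and once for BaoStock's compact intraday time field. The short sketch below shows each call on made-up values.

```python
# pandas.to_datetime as used above: ISO-style dates and the %Y%m%d%H%M%S%f format.
import pandas as pd

daily = pd.DataFrame({"date": ["2023-01-03", "2023-01-04"]})
daily["timestamp"] = pd.to_datetime(daily["date"])

intraday = pd.DataFrame({"time": ["20230103093000000", "20230103093500000"]})
intraday["timestamp"] = pd.to_datetime(intraday["time"], format="%Y%m%d%H%M%S%f")

print(daily["timestamp"].iloc[0], intraday["timestamp"].iloc[0])
```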
owenfeehan/python-visualization-scripts
|
[
"96f14ac39f572cdea3838cde2d44a920a6383ddd"
] |
[
"src/anchor_python_visualization/projection/_tsne.py"
] |
[
"\"\"\"T-SNE projection.\"\"\"\n\n__author__ = \"Owen Feehan\"\n__copyright__ = \"Copyright (C) 2021 Owen Feehan\"\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n\nimport pandas as pd\nfrom sklearn.manifold import TSNE\n\nfrom ._derive_utilities import derive_projected\nfrom ._pca import PCAProjection\nfrom .projector import Projector\n\nMAX_NUMBER_FEATURES_TSNE = 50\n\n\nclass TSNEProjection(Projector):\n \"\"\"Projects to two-dimensions using T-SNE (preceded by a PCA if num(embeddings) > MAX_NUM_FEATURES_TSNE)\n\n It produces embeddings TSNE0 and TSNE1.\n \"\"\"\n\n # Overriding a base class\n def project(self, features: pd.DataFrame) -> pd.DataFrame:\n\n # If there are many embeddings, then use PCA first before T-SNE as per recommendation in documentation\n # https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html\n if len(features.columns) > MAX_NUMBER_FEATURES_TSNE:\n pca = PCAProjection(2)\n features = pca.project(features)\n\n tsne = TSNE(n_components=2, random_state=0, verbose=1)\n projection = tsne.fit_transform(features)\n # Convert back into a data-frame, assigning feature-names for each component\n return derive_projected(features, projection, \"TSNE\")\n"
] |
[
[
"sklearn.manifold.TSNE"
]
] |
blaylockbk/Web-Homepage
|
[
"f5ede8f7ea7c1a8af662069c8feca7a418d01cc7"
] |
[
"HRRR_archive/hrrr_sfc_table.py"
] |
[
"# Brian Blaylock\n# August 14, 2017\n\n\nimport numpy as np\n\ntable = np.genfromtxt('https://api.mesowest.utah.edu/archive/HRRR/GRIB2Table_hrrr_2d.txt',\n delimiter=',',\n skip_header=4,\n names=True,\n dtype=None)\n\nall_headers = table.dtype.names\n\ntable_headers = ['RecordNumber', 'WGrib2Name', 'FieldType', 'VerticalLevels', 'Units']\n\nf = open('hrrr_sfc_table.html', 'w')\nf.write('''\n<html>\n<head>\n<script src=\"https://home.chpc.utah.edu/~u0553130/Brian_Blaylock/js/site/siteopen.js\"></script>\n<title>HRRR Download Page</title>\n</head>\n\n<body>\n<script src=\"js/site/sitemenu.js\"></script>\n\n\n<div class=\"container\">\n <h1 align=\"center\">HRRR Table: SFC</h1>\n<br>\n<table class='table table-hover sortable'>\n<tr>''')\nfor header in table_headers:\n f.write('<th>'+header+'</th>')\nf.write('''</tr>''')\n\nf.write('''<tr>''')\n \nfor row in range(len(table[header])):\n f.write('<tr>')\n for header in table_headers: \n f.write('''\n<td>'''+str(table[header][row])+'''</td>\n''')\n f.write('</tr>')\n\nf.write('''\n</table>\n\n</div>\n\n</body>\n</html>\n''')\nf.close()"
] |
[
[
"numpy.genfromtxt"
]
] |
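The hrrr_sfc_table.py script reads the remote GRIB2 table with numpy.genfromtxt(..., names=True, dtype=None). The sketch below exercises the same call on an in-memory CSV so the structured-array access pattern can be run offline; the column names and values here are made up.

```python
# Structured-array loading with numpy.genfromtxt, mirroring the usage above.
import io
import numpy as np

csv_text = """RecordNumber,WGrib2Name,Units
1,TMP,K
2,UGRD,m/s
"""

table = np.genfromtxt(io.StringIO(csv_text), delimiter=",", names=True,
                      dtype=None, encoding="utf-8")

print(table.dtype.names)            # ('RecordNumber', 'WGrib2Name', 'Units')
for row in table:
    print(row["RecordNumber"], row["WGrib2Name"], row["Units"])
```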
umbc-sanjaylab/DeepPseudo_AAAI2021
|
[
"89d6eaf57212ef77900008927b70a89fbe43b216",
"89d6eaf57212ef77900008927b70a89fbe43b216"
] |
[
"Marginal_DeepPSeudo/get_validation_performance.py",
"Marginal_DeepPSeudo/import_data.py"
] |
[
"''' The code is inspired by the code for DeepHit model. The github link of the code for DeepHit is https://github.com/chl8856/DeepHit. Reference: C. Lee, W. R. Zame, J. Yoon, M. van der Schaar, \"DeepHit: A Deep Learning Approach to Survival Analysis with Competing Risks,\" AAAI Conference on Artificial Intelligence (AAAI), 2018.\n\nThis 'get_validation_performance.py' trains the Marginal DeepPseudo model and give the validation C-index performance for random search.\n'''\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport random\nimport os\nfrom termcolor import colored\n\nfrom import_data import import_data\nfrom Marginal_DeepPseudo import Marginal_DeepPseudo_Model\nfrom utils_eval import c_index, brier_score, weighted_c_index, weighted_brier_score\n\n\n'tensorflow =1.14.0'\n'numpy =1.16.5'\n'pandas =0.24.2'\n\n##### USER-DEFINED FUNCTIONS\ndef log(x):\n return tf.log(x + 1e-8)\n\n\ndef div(x, y):\n return tf.div(x, (y + 1e-8))\n\n\n\ndef f_get_minibatch(mb_size, x, y):\n \"\"\"Get minibatches.\n Arguments:\n - mb_size: size of the minibatch\n - x: covariates\n - y: pseudo values for CIF\n \n Returns:\n - minibatches of covariates and pseudo values\n \n \"\"\"\n idx = range(np.shape(x)[0])\n idx = random.sample(idx, mb_size)\n\n x_mb = x[idx, :].astype(np.float32)\n y_mb = y[idx, :].astype(np.float32) \n return x_mb, y_mb\n\n\ndef get_valid_performance(in_parser, out_itr, evalTime=None, MAX_VALUE = -99, OUT_ITERATION=5):\n \"\"\" Trains the Marginal DeepPseudo model and give the validation C-index performance for random search.\n\n Arguments:\n - in_parser: dictionary of hyperparameters\n - out_itr: indicator of set of 5-fold cross validation datasets\n - evalTime: None or a list(e.g. [12, 60]). Evaluation times at which the validation performance is measured\n - MAX_VALUE: maximum validation value\n - OUT_ITERATION: Total number of the set of cross-validation data\n\n Returns:\n - the validation performance of the trained network\n - save the trained network in the folder directed by \"in_parser['out_path'] + '/itr_' + str(out_itr)\"\n \"\"\"\n \n ## Define a list of continuous columns from the covariates\n continuous_columns=['feature1','feature2','feature3','feature4','feature5','feature6','feature7','feature8','feature9','feature10','feature11','feature12']\n ## If there are categorical variables in the covariates, define a list of the categorical variables\n \n ## Import the attributes \n tr_data, tr_time, tr_label, y_train, va_data, va_time, va_label, y_val, te_data, te_time, te_label, y_test, num_Category, num_Event, num_evalTime, x_dim = import_data(out_itr, evalTime, categorical_columns=None, continuous_columns=continuous_columns)\n\n \n ## Hyper-parameters\n ACTIVATION_FN = {'selu': tf.nn.selu, 'elu': tf.nn.elu, 'tanh': tf.nn.tanh, 'relu':tf.nn.relu} \n mb_size = in_parser['mb_size']\n iteration = in_parser['iteration']\n keep_prob = in_parser['keep_prob']\n lr_train = in_parser['lr_train']\n initial_W = tf.contrib.layers.xavier_initializer()\n\n\n ## Make Dictionaries\n # Input Dimensions\n input_dims = { 'x_dim' : x_dim,\n 'num_Event' : num_Event,\n 'num_Category' : num_Category,\n 'num_evalTime' : len(evalTime)}\n\n # Network hyper-paramters\n network_settings = { 'num_units' : in_parser['num_units'],\n 'num_layers' : in_parser['num_layers'],\n 'activation_fn' : ACTIVATION_FN[in_parser['activation_fn']],\n 'initial_W' : initial_W }\n\n\n file_path_final = in_parser['out_path'] + '/itr_' + str(out_itr)\n\n #change parameters...\n if not 
os.path.exists(file_path_final + '/models/'):\n os.makedirs(file_path_final + '/models/')\n\n\n ## Use GPU\n tf.reset_default_graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n ## Call the Marginal DeepPseudo Model\n model = Marginal_DeepPseudo_Model(sess, \"Marginal_DeepPseudo\", input_dims, network_settings)\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n \n \n max_valid = -99\n stop_flag = 0\n\n ### Training - Main\n print( \"MAIN TRAINING ...\")\n print( \"EVALUATION TIMES: \" + str(evalTime))\n\n avg_loss = 0\n for itr in range(iteration):\n if stop_flag > 10: #for faster early stopping\n break\n else:\n x_mb, y_mb= f_get_minibatch(mb_size, tr_data, y_train) #get the minibatches\n DATA = (x_mb, y_mb)\n _, loss_curr = model.train(DATA, keep_prob, lr_train) #train the model\n avg_loss += loss_curr/1000\n \n if (itr+1)%1000 == 0:\n print('|| ITR: ' + str('%04d' % (itr + 1)) + ' | Loss: ' + colored(str('%.4f' %(avg_loss)), 'yellow' , attrs=['bold']))\n avg_loss = 0\n\n ### Validation based on the average C-index\n if (itr+1)%1000 == 0:\n \n ### Prediction for validation data\n pred = model.predict(va_data)\n \n\n ### Evaluation on validation data\n val_result = np.zeros([num_Event, len(evalTime)])\n\n for t, t_time in enumerate(evalTime):\n eval_horizon = int(t_time)\n if eval_horizon >= num_Category:\n print('ERROR: evaluation horizon is out of range')\n val_result[:, t] = -1\n else:\n risk = pred[:,:,t] #risk score until evalTime\n for k in range(num_Event):\n val_result[k, t] = weighted_c_index(tr_time, (tr_label[:,0] == k+1).astype(int), risk[:,k], va_time, (va_label[:,0] == k+1).astype(int), eval_horizon) #weighted c-index calculation for validation data\n \n tmp_valid = np.mean(val_result) #average weighted C-index\n\n if tmp_valid > max_valid:\n stop_flag = 0\n max_valid = tmp_valid\n print( 'updated.... average c-index = ' + str('%.4f' %(tmp_valid)))\n\n if max_valid > MAX_VALUE:\n saver.save(sess, file_path_final + '/models/model_itr_' + str(out_itr))\n else:\n stop_flag += 1\n\n return max_valid\n\n",
"## Import Packages\nimport numpy as np\nimport pandas as pd\n\n## Import \"rpy2\" to run the \"R\" script in Python\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\nfrom rpy2.robjects.vectors import IntVector, FloatVector\nfrom rpy2.robjects import default_converter\nfrom rpy2.robjects.conversion import Converter, localconverter\nimport rpy2.robjects as robjects\n\n\n## Function for getting pseudo values\ndef pseudoFunction(data, evaltime):\n \"\"\"Calculate the pseudo values for cumulative incidence function.\n Arguments:\n data: A dataframe containing survival time and event status, which will be used to estimate pseudo values for CIF.\n evaltime: Evaluation times at which pseudo values are calcuated\n Returns:\n A dataframe of pseudo values for all subjects for all causes at the evaluation time points. \n \"\"\"\n r=robjects.r\n r.source('Compute_pseudo_values.r')\n ps=r.pseudo(data, evaltime)\n return ps\n\n\n## Function to convert categorical variables into one-hot-encoded dummy variables\ndef to_one_hot(dataframe, columns):\n \"\"\"Convert columns in dataframe to one-hot encoding.\n Arguments:\n dataframe (dataframe): pandas dataframe containing covariates\n columns (list of strings): list categorical column names to one hot encode\n Returns:\n one_hot_df (dataframe): dataframe with categorical columns encoded as binary variables\n \"\"\"\n\n \n one_hot_df = pd.get_dummies(data=dataframe, columns=columns)\n \n return one_hot_df\n\n## Function to standardize the continuous variables\ndef standardized(train_df, test_df, continuous_columns):\n \"\"\"Standardize the continuous columns in dataframe.\n Arguments:\n train_df: training dataframe containing covariates\n test_df: test dataframe whose continuous columns will be standardized\n continuous_columns: A list of name of the continuous columns in the dataframe\n Returns:\n A new test dataframe whose continuous columns ared standardized\n \"\"\"\n mean = train_df.loc[:, continuous_columns].mean()\n stdev = train_df.loc[:, continuous_columns].std()\n test_df.loc[:, continuous_columns] = (test_df.loc[:, continuous_columns] - mean) / (stdev+1e-8)\n return test_df\n\n\n\ndef import_data(out_itr, evalTime, categorical_columns=None, continuous_columns=None):\n \"\"\"Preprocess the data to use them to the model to train, validate and predict\n Arguments: \n out_itr: indicator of set of 5-fold cross validation data out of 5 simulated dataset\n evalTime: Evaluation times \n categorical_columns: A list of name of the categorical columns in the dataframe\n continuous_columns: A list of name of the continuous columns in the dataframe\n Returns:\n All the attributes that will be used in the model to train, validate and predict\n \"\"\"\n\n ### Loading Data from the folder named as the dataset (Synthetic/WIHS/SEER) in the code directory\n train_df = pd.read_csv('Synthetic/train_data_{}.csv'.format(out_itr))\n val_df = pd.read_csv('Synthetic/valid_data_{}.csv'.format(out_itr))\n test_df = pd.read_csv('Synthetic/test_data_{}.csv'.format(out_itr))\n \n \n #Create a column 'train' to trainining, validation and test data and combined them. Then convert the the categorical variables into dummy variables on combined data so that the number of columns in all three dataset remain equal. 
\n train_df['train']=1\n val_df['train']=2\n test_df['train']=3\n df=pd.concat([train_df, val_df, test_df])\n \n #Convert the categorical variables into dummy variables\n if categorical_columns is not None:\n df = to_one_hot(df, categorical_columns)\n train_data=df[df['train']==1] \n val_data=df[df['train']==2]\n test_data=df[df['train']==3]\n \n #Drop the 'train' column from all three datasets.\n train_data=train_data.drop(columns=['train'])\n val_data=val_data.drop(columns=['train'])\n test_data=test_data.drop(columns=['train'])\n \n #Standardize the contunuous columns\n if continuous_columns is not None:\n train_data=standardized(train_data, train_data, continuous_columns)\n val_data=standardized(train_data, val_data, continuous_columns)\n test_data=standardized(train_data, test_data, continuous_columns)\n \n #Full Dataset\n dataset = df.drop(columns=['train'])\n label = np.asarray(dataset[['status']])\n time = np.asarray(dataset[['time']])\n data = np.asarray(dataset.drop(columns=['status', 'time']))\n\n\n num_Category = int(np.max(time) * 1.2) #to have enough time-horizon\n num_Event = int(len(np.unique(label)) - 1) #the number of events (excluding censoring as an event)\n x_dim = np.shape(data)[1] #No. of covarites in the dataset\n num_evalTime = len(evalTime) #No. of evaluation times\n \n \n #Preprocess the Training Data\n train_feat=train_data.drop(columns=['time','status']) \n tr_data = np.asarray(train_feat)\n tr_time=np.asarray(train_data[['time']])\n tr_label=np.asarray(train_data[['status']])\n eval_time=FloatVector(evalTime)\n #Convert the 'Python' dataframe to 'R'\n with localconverter(default_converter + pandas2ri.converter) as cv:\n train_data_pseudo = pandas2ri.py2ri(train_data)\n train_pseudo=pseudoFunction(train_data_pseudo, eval_time) \n y_train=np.reshape(train_pseudo, [-1, num_Event, len(evalTime)])\n\n \n\n #Preprocess the Validation Data\n val_feat=val_data.drop(columns=['time','status'])\n va_data = np.asarray(val_feat)\n va_time=np.asarray(val_data[['time']])\n va_label=np.asarray(val_data[['status']])\n #Convert the 'Python' dataframe to 'R'\n with localconverter(default_converter + pandas2ri.converter) as cv:\n val_data_pseudo = pandas2ri.py2ri(val_data)\n val_pseudo=pseudoFunction(val_data_pseudo, eval_time) \n y_val=np.reshape(val_pseudo, [-1, num_Event, len(evalTime)])\n\n \n \n\n #Preprocess the Test Data\n test_feat=test_data.drop(columns=['time','status'])\n te_data = np.asarray(test_feat)\n te_time=np.asarray(test_data[['time']])\n te_label=np.asarray(test_data[['status']])\n #Convert the 'Python' dataframe to 'R'\n with localconverter(default_converter + pandas2ri.converter) as cv:\n test_data_pseudo = pandas2ri.py2ri(test_data)\n test_pseudo=pseudoFunction(test_data_pseudo, eval_time) \n y_test=np.reshape(test_pseudo, [-1, num_Event, len(evalTime)])\n \n \n \n return tr_data, tr_time, tr_label, y_train, va_data, va_time, va_label, y_val, te_data, te_time, te_label, y_test, num_Category, num_Event, num_evalTime, x_dim\n"
] |
[
[
"tensorflow.div",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"numpy.shape",
"tensorflow.log",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.mean"
],
[
"pandas.concat",
"numpy.unique",
"numpy.asarray",
"numpy.max",
"numpy.shape",
"pandas.get_dummies"
]
] |
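The full pipeline above needs TensorFlow 1.x and an R bridge (rpy2) for the pseudo values, but the pandas preprocessing helpers are easy to exercise on their own. The sketch below re-implements to_one_hot and standardized on a toy survival dataframe; the column names and values are invented for illustration.

```python
# Dummy-encode a categorical column and z-score continuous columns with
# statistics taken from the training split only, as in import_data.py.
import pandas as pd

train_df = pd.DataFrame({"age": [50, 60, 70], "stage": ["I", "II", "I"]})
test_df = pd.DataFrame({"age": [55, 65], "stage": ["II", "I"]})

def to_one_hot(df, columns):
    return pd.get_dummies(data=df, columns=columns)

def standardized(train_df, test_df, continuous_columns):
    mean = train_df.loc[:, continuous_columns].mean()
    stdev = train_df.loc[:, continuous_columns].std()
    out = test_df.copy()
    out.loc[:, continuous_columns] = (out.loc[:, continuous_columns] - mean) / (stdev + 1e-8)
    return out

train_oh = to_one_hot(train_df, ["stage"])
test_oh = to_one_hot(test_df, ["stage"])
print(standardized(train_oh, test_oh, ["age"]))
```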
lazykyama/gpu-minicamp-examples
|
[
"20674b17286ffab727f0d6aa9538ee92218fa855"
] |
[
"pytorch/native/pytorch_distributed_run_example.py"
] |
[
"# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport glob\nimport os\nimport time\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data.distributed import DistributedSampler\n\nimport torchvision\nimport torchvision.models as models\nimport torchvision.transforms as transforms\n\n\ndef _check_pytorch_version():\n version_info = tuple(map(int, torch.__version__.split(\".\")[:2]))\n if version_info[0] < 1:\n # Not supported version because of old major version, 0.x.\n return False\n if version_info[1] < 9:\n # Not supported version because of old minor version, 1.8 or earlier.\n return False\n return True\n\n\ndef main():\n is_expected_version = _check_pytorch_version()\n if not is_expected_version:\n raise RuntimeError(\n (\n \"Your PyTorch version is not expected in this example code.\"\n \"1.9+ is required.\"\n )\n )\n\n args = parse_args()\n\n if not args.use_older_api:\n # Setup distributed process group.\n # NOTE:\n # In PyTorch 1.8 or earlier, the user needs to pass\n # several information like rank.\n # But, after 1.9+, the user no longer gives these values in\n # the typical case.\n # Please see more details at \"Important Notices:\" in the page below.\n # https://pytorch.org/docs/stable/elastic/run.html\n dist.init_process_group(backend=\"nccl\")\n\n # NOTE:\n # Before PyTorch 1.8, `--local_rank` must be added into\n # script argeuments.\n # But, after 1.9, this argument is not necessary.\n # For more details, please read\n # \"Transitioning from torch.distributed.launch to\n # torch.distributed.run\" below.\n # https://pytorch.org/docs/stable/elastic/run.html#transitioning-from-torch-distributed-launch-to-torch-distributed-run\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n global_rank = int(os.environ[\"RANK\"])\n world_size = int(os.environ[\"WORLD_SIZE\"])\n else:\n # NOTE:\n # Due to some reasons, if you need to use older API,\n # torch.distributed.launch, please switch to here.\n local_rank = args.local_rank\n global_rank = int(os.environ[\"RANK\"])\n world_size = int(os.environ[\"WORLD_SIZE\"])\n\n dist.init_process_group(\n backend=\"nccl\",\n init_method=\"env://\",\n rank=global_rank,\n world_size=world_size,\n )\n print(\n (\n \"job information: (local_rank, global_rank, world_size) = \"\n f\"({local_rank}, {global_rank}, {world_size})\"\n )\n )\n\n device = torch.device(\n f\"cuda:{local_rank}\" if torch.cuda.is_available() else \"cpu\"\n )\n\n # Prepare output directory.\n if global_rank == 0:\n if not os.path.exists(args.output_path):\n os.makedirs(args.output_path)\n if not os.path.isdir(args.output_path):\n raise RuntimeError(\n f\"{args.output_path} exists, but it is not a directory.\"\n )\n barrier(device=device, src_rank=0)\n\n trainloader, valloader, sampler, n_classes = prepare_dataset(\n args.input_path,\n 
args.batch_size,\n num_workers=args.num_workers,\n no_validation=args.no_validation,\n )\n\n model = build_model(n_classes)\n model = model.to(device)\n # Make model distributed version.\n model = DDP(model, device_ids=[local_rank])\n\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)\n # NOTE:\n # If you are interested in the acceleration by Tensor Cores,\n # please read the following doc.\n # https://pytorch.org/docs/stable/amp.html\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(args.num_epochs):\n if global_rank == 0:\n print(f\"Epoch {epoch+1}/{args.num_epochs}\")\n\n # NOTE:\n # `sampler.set_epoch()` must be called when just starting each epoch.\n # For more detalis, please see the warning comment of\n # \"DistributedSampler\" in the page below.\n # https://pytorch.org/docs/stable/data.html\n sampler.set_epoch(epoch)\n\n running_loss = 0.0\n # NOTE:\n # This is a simplified way to measure the time.\n # You should use more precise method to know the performance.\n starttime = time.perf_counter()\n for i, data in enumerate(trainloader):\n # NOTE:\n # If you want to overlap the data transferring between CPU-GPU,\n # you need to additionally implement custom dataloader,\n # or use pinned memory.\n # Following pages could help you.\n # https://github.com/NVIDIA/DeepLearningExamples/blob/f24917b3ee73763cfc888ceb7dbb9eb62343c81e/PyTorch/Classification/ConvNets/image_classification/dataloaders.py#L347\n # https://pytorch.org/docs/stable/data.html#memory-pinning\n inputs = data[0].to(device, non_blocking=True)\n labels = data[1].to(device, non_blocking=True)\n\n optimizer.zero_grad()\n\n with torch.cuda.amp.autocast():\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n if global_rank != 0:\n # Skip showing training loss.\n continue\n\n # Calculate and show current average loss.\n running_loss += loss.item()\n if (i % args.logging_interval) == (args.logging_interval - 1):\n print(\n (\n f\"\\t [iter={i+1:05d}] training loss = \"\n f\"{running_loss / (i+1):.3f}\"\n )\n )\n\n # End of each epoch.\n\n # Calculate validation result.\n if not args.no_validation:\n running_valloss = 0.0\n n_valiter = len(valloader)\n model.eval()\n with torch.no_grad():\n for valdata in valloader:\n val_in = valdata[0].to(device, non_blocking=True)\n val_label = valdata[1].to(device, non_blocking=True)\n valout = model(val_in)\n valloss = criterion(valout, val_label)\n running_valloss += valloss.item()\n model.train()\n\n # Sync all validation results.\n valinfo = torch.tensor([running_valloss, n_valiter], device=device)\n dist.all_reduce(valinfo)\n running_valloss = valinfo[0].item()\n n_valiter = valinfo[1].item()\n\n if global_rank != 0:\n # Directly goes to next epoch.\n continue\n\n # Show this epoch time and training&validation losses.\n # NOTE: This time includes vaidation time.\n duration = time.perf_counter() - starttime\n if args.no_validation:\n print(\n (\n f\"\\t [iter={i+1:05d}] \"\n f\"{duration:.3f}s {duration*1000. / i:.3f}ms/step, \"\n f\"loss = {running_loss / (i+1):.3f}\"\n )\n )\n else:\n print(\n (\n f\"\\t [iter={i+1:05d}] \"\n f\"{duration:.3f}s {duration*1000. 
/ i:.3f}ms/step, \"\n f\"loss = {running_loss / (i+1):.3f}, \"\n f\"val_loss = {running_valloss / n_valiter:.3f}\"\n )\n )\n\n # Save model.\n if global_rank == 0:\n model_filepath = os.path.join(args.output_path, \"model.pth\")\n torch.save(model.state_dict(), model_filepath)\n\n # Send a notification.\n print(\n (\n f\"[ranks:{local_rank} / {global_rank}] \"\n \"rank0 is sending a notification.\"\n )\n )\n barrier(device=device, src_rank=0)\n print(\n (\n f\"[ranks:{local_rank} / {global_rank}] \"\n \"notification from rank0 has been sent.\"\n )\n )\n else:\n # Wait for a notification from rank0.\n print(\n (\n f\"[ranks:{local_rank} / {global_rank}] \"\n \"worker rank is waiting for saving model complesion...\"\n )\n )\n barrier(device=device, src_rank=0)\n print(\n (\n f\"[ranks:{local_rank} / {global_rank}] \"\n \"worker rank received a notification from rank0.\"\n )\n )\n\n # Finalize.\n dist.destroy_process_group()\n print(\"done.\")\n\n\ndef barrier(device, src_rank):\n notification = torch.zeros(1, device=device)\n dist.broadcast(notification, src=src_rank)\n torch.cuda.synchronize(device=device)\n\n\ndef prepare_dataset(datadir, batch_size, num_workers=8, no_validation=False):\n n_classes = len(glob.glob(os.path.join(datadir, \"train\", \"cls_*\")))\n\n # Prepare transform ops.\n # Basically, PIL object should be converted into tensor.\n transform = transforms.Compose([transforms.ToTensor()])\n\n # Prepare train dataset.\n # NOTE:\n # ImageFolder assumes that `root` directory contains\n # several class directories like below.\n # root/cls_000, root/cls_001, root/cls_002, ...\n trainset = torchvision.datasets.ImageFolder(\n root=os.path.join(datadir, \"train\"), transform=transform\n )\n\n # NOTE:\n # When using Sampler, `shuffle` on DataLoader must *NOT* be specified.\n # For more details, please read DataLoader API reference.\n # https://pytorch.org/docs/stable/data.html\n sampler = DistributedSampler(trainset)\n trainloader = torch.utils.data.DataLoader(\n trainset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n sampler=sampler,\n )\n\n # Prepare val dataset.\n if no_validation:\n valset = [] # NOTE: To show a message later.\n valloader = None\n else:\n valset = torchvision.datasets.ImageFolder(\n root=os.path.join(datadir, \"val\"), transform=transform\n )\n sampler = DistributedSampler(valset)\n valloader = torch.utils.data.DataLoader(\n valset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n sampler=sampler,\n )\n\n print(f\"trainset.size = {len(trainset)}\")\n print(f\"valset.size = {len(valset)}\")\n\n return trainloader, valloader, sampler, n_classes\n\n\ndef build_model(n_classes):\n model = models.resnet50(pretrained=False)\n n_fc_in_feats = model.fc.in_features\n model.fc = torch.nn.Sequential(\n torch.nn.Linear(n_fc_in_feats, 512), torch.nn.Linear(512, n_classes)\n )\n return model\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"PyTorch torch.distributed.run Example\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--input-path\",\n type=str,\n default=\"./images\",\n help=\"a parent directory path to input image files\",\n )\n\n parser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"input batch size\"\n )\n parser.add_argument(\n \"--lr\", type=float, default=0.001, help=\"learning rate\"\n )\n parser.add_argument(\n \"--num-epochs\", type=int, default=10, help=\"number of epochs\"\n )\n parser.add_argument(\n \"--num-workers\",\n 
type=int,\n default=8,\n help=\"number of workers for data loading\",\n )\n\n parser.add_argument(\n \"--output-path\",\n type=str,\n default=\"./models\",\n help=\"output path to store saved model\",\n )\n\n parser.add_argument(\n \"--no-validation\", action=\"store_true\", help=\"Disable validation.\"\n )\n\n parser.add_argument(\"--use-older-api\", action=\"store_true\")\n\n parser.add_argument(\n \"--logging-interval\", type=int, default=10, help=\"logging interval\"\n )\n\n args, unknown_args = parser.parse_known_args()\n if args.use_older_api:\n older_parser = argparse.ArgumentParser()\n older_parser.add_argument(\n \"--local_rank\", type=int, help=\"local rank info.\"\n )\n args = older_parser.parse_args(unknown_args, namespace=args)\n print(args)\n\n return args\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.broadcast",
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.zeros",
"torch.__version__.split",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.cuda.amp.autocast",
"torch.nn.Linear",
"torch.cuda.amp.GradScaler",
"torch.no_grad",
"torch.cuda.is_available",
"torch.distributed.destroy_process_group",
"torch.distributed.all_reduce",
"torch.nn.parallel.DistributedDataParallel"
]
] |
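Stripped of logging, AMP and checkpointing, the distributed training script above reduces to the wiring sketched below: init_process_group, a DistributedSampler-backed DataLoader, and a DDP-wrapped model. The toy TensorDataset and Linear model are placeholders; this assumes a launch under torchrun so that LOCAL_RANK/RANK/WORLD_SIZE are set.

```python
# Skeleton of the torchrun/DDP wiring used above, reduced to a toy dataset and model.
# Launch with: torchrun --nproc_per_node=N this_file.py
import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

def main():
    dist.init_process_group(backend="nccl")          # env:// rendezvous via torchrun
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")

    dataset = TensorDataset(torch.randn(1024, 16), torch.randint(0, 2, (1024,)))
    sampler = DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=64, shuffle=False, sampler=sampler)

    model = DDP(torch.nn.Linear(16, 2).to(device), device_ids=[local_rank])
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(2):
        sampler.set_epoch(epoch)                     # reshuffle shards each epoch
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()

    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```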
SebastiaanZ/aoc-2019
|
[
"e1fe4630b0f375be0b79398e07e23b9c0196efbb"
] |
[
"solutions/day08/solution.py"
] |
[
"from collections import Counter\nfrom operator import itemgetter\nfrom typing import List, Tuple\n\nimport numpy as np\n\n\ndef part_one(data: str) -> int:\n \"\"\"Find the layer with the least amount of `0`s and return number of `1`s * number of `2`s.\"\"\"\n min_layer = min((Counter(layer) for layer in zip(*[iter(data)]*150)), key=itemgetter(\"0\"))\n return min_layer[\"1\"] * min_layer[\"2\"]\n\n\ndef part_two_numpy(data: List[int]) -> str:\n \"\"\"Reconstruct the image of the password by stacking partially transparent layers.\"\"\"\n image = np.full(150, 2, dtype=np.uint8)\n for layer in zip(*[iter(data)]*150):\n mask = image == 2\n image[mask] = np.array(layer)[mask]\n image = image.reshape((6, 25)).tolist()\n return \"\\n\".join(\"\".join(\"\\u2588\" if pixel == 1 else \" \" for pixel in row) for row in image)\n\n\ndef part_two(data: str) -> str:\n \"\"\"Reconstruct the image of the password by stacking partially transparent layers.\"\"\"\n pixels = [[] for _ in range(6)]\n for i in range(150):\n for pixel in data[i::150]:\n if pixel != \"2\":\n row = i // 25\n pixels[row].append(pixel)\n break\n return \"\\n\".join(\"\".join(\"\\u2588\" if pixel == \"1\" else \" \" for pixel in row) for row in pixels)\n\n\ndef main(data: List[str]) -> Tuple[int, str]:\n \"\"\"The main function taking care of parsing the input data and running the solutions.\"\"\"\n data = data[0]\n answer_one = part_one(data)\n answer_two = part_two(data)\n return answer_one, \"\\n\" + answer_two\n"
] |
[
[
"numpy.array",
"numpy.full"
]
] |
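The core of part_two_numpy above is the boolean-mask update image[mask] = layer[mask]. The sketch below replays it on a 2x2, three-layer toy image so the "first opaque pixel wins" behaviour is easy to trace.

```python
# Layer-stacking with a boolean mask: 2 means transparent, so the first
# non-2 value seen for each pixel is kept.
import numpy as np

layers = [
    [0, 2, 2, 2],
    [1, 1, 2, 2],
    [2, 2, 1, 0],
]

image = np.full(4, 2, dtype=np.uint8)
for layer in layers:
    mask = image == 2                     # still-transparent pixels
    image[mask] = np.array(layer)[mask]

print(image.reshape(2, 2))                # [[0 1], [1 0]]
```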
MarcCoru/manim
|
[
"54ce7a446cc1884b1a9f2cd395aae3d482b24500"
] |
[
"manimlib/mobject/coordinate_systems.py"
] |
[
"import numpy as np\nimport numbers\n\nfrom manimlib.constants import *\nfrom manimlib.mobject.functions import ParametricFunction\nfrom manimlib.mobject.geometry import Arrow\nfrom manimlib.mobject.geometry import Line\nfrom manimlib.mobject.number_line import NumberLine\nfrom manimlib.mobject.svg.tex_mobject import TexMobject\nfrom manimlib.mobject.types.vectorized_mobject import VGroup\nfrom manimlib.utils.config_ops import digest_config\nfrom manimlib.utils.config_ops import merge_dicts_recursively\nfrom manimlib.utils.simple_functions import binary_search\nfrom manimlib.utils.space_ops import angle_of_vector\n\n# TODO: There should be much more code reuse between Axes, NumberPlane and GraphScene\n\n\nclass CoordinateSystem():\n \"\"\"\n Abstract class for Axes and NumberPlane\n \"\"\"\n CONFIG = {\n \"dimension\": 2,\n \"x_min\": -FRAME_X_RADIUS,\n \"x_max\": FRAME_X_RADIUS,\n \"y_min\": -FRAME_Y_RADIUS,\n \"y_max\": FRAME_Y_RADIUS,\n }\n\n def coords_to_point(self, *coords):\n raise Exception(\"Not implemented\")\n\n def point_to_coords(self, point):\n raise Exception(\"Not implemented\")\n\n def c2p(self, *coords):\n \"\"\"Abbreviation for coords_to_point\"\"\"\n return self.coords_to_point(*coords)\n\n def p2c(self, point):\n \"\"\"Abbreviation for point_to_coords\"\"\"\n return self.point_to_coords(point)\n\n def get_axes(self):\n raise Exception(\"Not implemented\")\n\n def get_axis(self, index):\n return self.get_axes()[index]\n\n def get_x_axis(self):\n return self.get_axis(0)\n\n def get_y_axis(self):\n return self.get_axis(1)\n\n def get_z_axis(self):\n return self.get_axis(2)\n\n def get_x_axis_label(self, label_tex, edge=RIGHT, direction=DL, **kwargs):\n return self.get_axis_label(\n label_tex, self.get_x_axis(),\n edge, direction, **kwargs\n )\n\n def get_y_axis_label(self, label_tex, edge=UP, direction=DR, **kwargs):\n return self.get_axis_label(\n label_tex, self.get_y_axis(),\n edge, direction, **kwargs\n )\n\n def get_axis_label(self, label_tex, axis, edge, direction, buff=MED_SMALL_BUFF):\n label = TexMobject(label_tex)\n label.next_to(\n axis.get_edge_center(edge), direction,\n buff=buff\n )\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return label\n\n def get_axis_labels(self, x_label_tex=\"x\", y_label_tex=\"y\"):\n self.axis_labels = VGroup(\n self.get_x_axis_label(x_label_tex),\n self.get_y_axis_label(y_label_tex),\n )\n return self.axis_labels\n\n def get_graph(self, function, **kwargs):\n x_min = kwargs.pop(\"x_min\", self.x_min)\n x_max = kwargs.pop(\"x_max\", self.x_max)\n graph = ParametricFunction(\n lambda t: self.coords_to_point(t, function(t)),\n t_min=x_min,\n t_max=x_max,\n **kwargs\n )\n graph.underlying_function = function\n return graph\n\n def get_parametric_curve(self, function, **kwargs):\n dim = self.dimension\n graph = ParametricFunction(\n lambda t: self.coords_to_point(\n *function(t)[:dim]\n ),\n **kwargs\n )\n graph.underlying_function = function\n return graph\n\n def input_to_graph_point(self, x, graph):\n if hasattr(graph, \"underlying_function\"):\n return self.coords_to_point(x, graph.underlying_function(x))\n else:\n alpha = binary_search(\n function=lambda a: self.point_to_coords(\n graph.point_from_proportion(a)\n )[0],\n target=x,\n lower_bound=self.x_min,\n upper_bound=self.x_max,\n )\n if alpha is not None:\n return graph.point_from_proportion(alpha)\n else:\n return None\n\n\nclass Axes(VGroup, CoordinateSystem):\n CONFIG = {\n \"axis_config\": {\n \"color\": LIGHT_GREY,\n \"include_tip\": True,\n 
\"exclude_zero_from_default_numbers\": True,\n },\n \"x_axis_config\": {},\n \"y_axis_config\": {\n \"label_direction\": LEFT,\n },\n \"center_point\": ORIGIN,\n }\n\n def __init__(self, **kwargs):\n VGroup.__init__(self, **kwargs)\n self.x_axis = self.create_axis(\n self.x_min, self.x_max, self.x_axis_config\n )\n self.y_axis = self.create_axis(\n self.y_min, self.y_max, self.y_axis_config\n )\n self.y_axis.rotate(90 * DEGREES, about_point=ORIGIN)\n # Add as a separate group incase various other\n # mobjects are added to self, as for example in\n # NumberPlane below\n self.axes = VGroup(self.x_axis, self.y_axis)\n self.add(*self.axes)\n self.shift(self.center_point)\n\n def create_axis(self, min_val, max_val, axis_config):\n new_config = merge_dicts_recursively(\n self.axis_config,\n {\"x_min\": min_val, \"x_max\": max_val},\n axis_config,\n )\n return NumberLine(**new_config)\n\n def coords_to_point(self, *coords):\n origin = self.x_axis.number_to_point(0)\n result = np.array(origin)\n for axis, coord in zip(self.get_axes(), coords):\n result += (axis.number_to_point(coord) - origin)\n return result\n\n def c2p(self, *coords):\n return self.coords_to_point(*coords)\n\n def point_to_coords(self, point):\n return tuple([\n axis.point_to_number(point)\n for axis in self.get_axes()\n ])\n\n def p2c(self, point):\n return self.point_to_coords(point)\n\n def get_axes(self):\n return self.axes\n\n def get_coordinate_labels(self, x_vals=None, y_vals=None):\n if x_vals is None:\n x_vals = []\n if y_vals is None:\n y_vals = []\n x_mobs = self.get_x_axis().get_number_mobjects(*x_vals)\n y_mobs = self.get_y_axis().get_number_mobjects(*y_vals)\n\n self.coordinate_labels = VGroup(x_mobs, y_mobs)\n return self.coordinate_labels\n\n def add_coordinates(self, x_vals=None, y_vals=None):\n self.add(self.get_coordinate_labels(x_vals, y_vals))\n return self\n\n\nclass ThreeDAxes(Axes):\n CONFIG = {\n \"dimension\": 3,\n \"x_min\": -5.5,\n \"x_max\": 5.5,\n \"y_min\": -5.5,\n \"y_max\": 5.5,\n \"z_axis_config\": {},\n \"z_min\": -3.5,\n \"z_max\": 3.5,\n \"z_normal\": DOWN,\n \"num_axis_pieces\": 20,\n \"light_source\": 9 * DOWN + 7 * LEFT + 10 * OUT,\n }\n\n def __init__(self, **kwargs):\n Axes.__init__(self, **kwargs)\n z_axis = self.z_axis = self.create_axis(\n self.z_min, self.z_max, self.z_axis_config\n )\n z_axis.rotate(-np.pi / 2, UP, about_point=ORIGIN)\n z_axis.rotate(\n angle_of_vector(self.z_normal), OUT,\n about_point=ORIGIN\n )\n self.axes.add(z_axis)\n self.add(z_axis)\n\n self.add_3d_pieces()\n self.set_axis_shading()\n\n def add_3d_pieces(self):\n for axis in self.axes:\n axis.pieces = VGroup(\n *axis.get_pieces(self.num_axis_pieces)\n )\n axis.add(axis.pieces)\n axis.set_stroke(width=0, family=False)\n axis.set_shade_in_3d(True)\n\n def set_axis_shading(self):\n def make_func(axis):\n vect = self.light_source\n return lambda: (\n axis.get_edge_center(-vect),\n axis.get_edge_center(vect),\n )\n for axis in self:\n for submob in axis.family_members_with_points():\n submob.get_gradient_start_and_end_points = make_func(axis)\n submob.get_unit_normal = lambda a: np.ones(3)\n submob.set_sheen(0.2)\n\n\nclass NumberPlane(Axes):\n CONFIG = {\n \"axis_config\": {\n \"stroke_color\": BLACK,\n \"stroke_width\": 2,\n \"include_ticks\": False,\n \"include_tip\": False,\n \"line_to_number_buff\": SMALL_BUFF,\n \"label_direction\": DR,\n \"number_scale_val\": 0.5,\n },\n \"y_axis_config\": {\n \"label_direction\": DR,\n },\n \"background_line_style\": {\n \"stroke_color\": BLUE_D,\n \"stroke_width\": 2,\n 
\"stroke_opacity\": 1,\n },\n # Defaults to a faded version of line_config\n \"faded_line_style\": None,\n \"x_line_frequency\": 1,\n \"y_line_frequency\": 1,\n \"faded_line_ratio\": 1,\n \"make_smooth_after_applying_functions\": True,\n }\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.init_background_lines()\n\n def init_background_lines(self):\n if self.faded_line_style is None:\n style = dict(self.background_line_style)\n # For anything numerical, like stroke_width\n # and stroke_opacity, chop it in half\n for key in style:\n if isinstance(style[key], numbers.Number):\n style[key] *= 0.5\n self.faded_line_style = style\n\n self.background_lines, self.faded_lines = self.get_lines()\n self.background_lines.set_style(\n **self.background_line_style,\n )\n self.faded_lines.set_style(\n **self.faded_line_style,\n )\n self.add_to_back(\n self.faded_lines,\n self.background_lines,\n )\n\n def get_lines(self):\n x_axis = self.get_x_axis()\n y_axis = self.get_y_axis()\n x_freq = self.x_line_frequency\n y_freq = self.y_line_frequency\n\n x_lines1, x_lines2 = self.get_lines_parallel_to_axis(\n x_axis, y_axis, x_freq,\n self.faded_line_ratio,\n )\n y_lines1, y_lines2 = self.get_lines_parallel_to_axis(\n y_axis, x_axis, y_freq,\n self.faded_line_ratio,\n )\n lines1 = VGroup(*x_lines1, *y_lines1)\n lines2 = VGroup(*x_lines2, *y_lines2)\n return lines1, lines2\n\n def get_lines_parallel_to_axis(self, axis1, axis2, freq, ratio):\n line = Line(axis1.get_start(), axis1.get_end())\n dense_freq = (1 + ratio)\n step = (1 / dense_freq) * freq\n\n lines1 = VGroup()\n lines2 = VGroup()\n ranges = (\n np.arange(0, axis2.x_max, step),\n np.arange(0, axis2.x_min, -step),\n )\n for inputs in ranges:\n for k, x in enumerate(inputs):\n new_line = line.copy()\n new_line.move_to(axis2.number_to_point(x))\n if k % (1 + ratio) == 0:\n lines1.add(new_line)\n else:\n lines2.add(new_line)\n return lines1, lines2\n\n def get_center_point(self):\n return self.coords_to_point(0, 0)\n\n def get_x_unit_size(self):\n return self.get_x_axis().get_unit_size()\n\n def get_y_unit_size(self):\n return self.get_x_axis().get_unit_size()\n\n def get_axes(self):\n return self.axes\n\n def get_vector(self, coords, **kwargs):\n kwargs[\"buff\"] = 0\n return Arrow(\n self.coords_to_point(0, 0),\n self.coords_to_point(*coords),\n **kwargs\n )\n\n def prepare_for_nonlinear_transform(self, num_inserted_curves=50):\n for mob in self.family_members_with_points():\n num_curves = mob.get_num_curves()\n if num_inserted_curves > num_curves:\n mob.insert_n_curves(\n num_inserted_curves - num_curves\n )\n return self\n\n\nclass ComplexPlane(NumberPlane):\n CONFIG = {\n \"color\": BLUE,\n \"line_frequency\": 1,\n }\n\n def number_to_point(self, number):\n number = complex(number)\n return self.coords_to_point(number.real, number.imag)\n\n def n2p(self, number):\n return self.number_to_point(number)\n\n def point_to_number(self, point):\n x, y = self.point_to_coords(point)\n return complex(x, y)\n\n def p2n(self, point):\n return self.point_to_number(point)\n\n def get_default_coordinate_values(self):\n x_numbers = self.get_x_axis().default_numbers_to_display()\n y_numbers = self.get_y_axis().default_numbers_to_display()\n y_numbers = [\n complex(0, y) for y in y_numbers if y != 0\n ]\n return [*x_numbers, *y_numbers]\n\n def get_coordinate_labels(self, *numbers, **kwargs):\n if len(numbers) == 0:\n numbers = self.get_default_coordinate_values()\n\n self.coordinate_labels = VGroup()\n for number in numbers:\n z = complex(number)\n 
if abs(z.imag) > abs(z.real):\n axis = self.get_y_axis()\n value = z.imag\n kwargs = merge_dicts_recursively(\n kwargs,\n {\"number_config\": {\"unit\": \"i\"}},\n )\n else:\n axis = self.get_x_axis()\n value = z.real\n number_mob = axis.get_number_mobject(value, **kwargs)\n self.coordinate_labels.add(number_mob)\n return self.coordinate_labels\n\n def add_coordinates(self, *numbers):\n self.add(self.get_coordinate_labels(*numbers))\n return self\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.ones"
]
] |
fregu856/2D_detection
|
[
"1f22a6d604d39f8f79fe916fcdbf40b5b668a39a"
] |
[
"utilities.py"
] |
[
"import cv2\nimport numpy as np\nimport tensorflow as tf\n\n# function for drawing all ground truth bboxes of an image on the image:\ndef visualize_gt_label(img_path, label_path):\n class_to_color = {\"car\": (255, 191, 0),\n \"cyclist\": (0, 191, 255),\n \"pedestrian\": (255, 0, 191)}\n\n img = cv2.imread(img_path, -1)\n\n with open(label_path) as label_file:\n for line in label_file:\n splitted_line = line.split(\" \")\n bbox_class = splitted_line[0].lower().strip()\n if bbox_class not in [\"car\", \"cyclist\", \"pedestrian\"]:\n break\n x_left = int(float(splitted_line[4]))\n y_bottom = int(float(splitted_line[5]))\n x_right = int(float(splitted_line[6]))\n y_top = int(float(splitted_line[7]))\n\n # draw the bbox:\n cv2.rectangle(img, (x_left, y_top), (x_right, y_bottom),\n class_to_color[bbox_class], 2)\n\n img_with_bboxes = img\n return img_with_bboxes\n\n# function for drawing a set of bboxes in an img:\ndef draw_bboxes(img, bboxes, class_labels, probs=None):\n class_label_to_string = {0: \"car\", 1: \"pedestrian\", 2: \"cyclist\"}\n class_to_color = {\"car\": (255, 191, 0),\n \"cyclist\": (0, 191, 255),\n \"pedestrian\": (255, 0, 191)}\n\n for bbox, class_label, prob in zip(bboxes, class_labels, probs):\n xmin, ymin, xmax, ymax = bbox_transform(bbox)\n\n h = ymax - ymin\n w = xmax - xmin\n\n class_string = class_label_to_string[class_label]\n\n # draw the bbox:\n cv2.rectangle(img, (int(xmin), int(ymax)), (int(xmax), int(ymin)),\n class_to_color[class_string], 2)\n\n if probs is not None:\n # write the detection probability on the bbox:\n # # make the top line of the bbox thicker:\n cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymin-12)),\n class_to_color[class_string], -1)\n # # write the probaility in the top line of the bbox:\n prob_string = \"%.2f\" % prob\n cv2.putText(img, prob_string, (int(xmin)+2, int(ymin)-2), 2, 0.4,\n (255,255,255), 0)\n\n img_with_bboxes = img\n return img_with_bboxes\n\ndef safe_exp(w, thresh):\n # NOTE! this function is taken directly from\n # github.com/BichenWuUCB/squeezeDet\n\n slope = np.exp(thresh)\n\n lin_bool = w > thresh\n lin_region = tf.to_float(lin_bool)\n\n lin_out = slope*(w - thresh + 1.)\n exp_out = tf.exp(tf.where(lin_bool, tf.zeros_like(w), w))\n\n out = lin_region*lin_out + (1.-lin_region)*exp_out\n\n return out\n\n# function for converting a bbox of [cx, cy, w, h] format to\n# [xmin, ymin, xmax, ymax] format:\ndef bbox_transform(bbox):\n cx, cy, w, h = bbox\n\n xmin = cx - w/2\n ymin = cy - h/2\n xmax = cx + w/2\n ymax = cy + h/2\n\n out_box = [xmin, ymin, xmax, ymax]\n\n return out_box\n\n# function for converting a bbox of [xmin, ymin, xmax, ymax] format to\n# [cx, cy, w, h] format:\ndef bbox_transform_inv(bbox):\n xmin, ymin, xmax, ymax = bbox\n\n w = xmax - xmin + 1.0\n h = ymax - ymin + 1.0\n cx = xmin + w/2\n cy = ymin + h/2\n\n out_box = [cx, cy, w, h]\n\n return out_box\n\n# function for performing non-maximum suppression:\ndef nms(boxes, probs, threshold):\n # NOTE! this function is taken directly from\n # github.com/BichenWuUCB/squeezeDet\n\n # get indices in descending order acc. 
to prob:\n order = probs.argsort()[::-1]\n\n keep = [True]*len(order)\n for i in range(len(order)-1):\n ovps = batch_IOU(boxes[order[i+1:]], boxes[order[i]])\n for j, ov in enumerate(ovps):\n if ov > threshold:\n keep[order[j+i+1]] = False\n\n return keep\n\n# function for computing the IOU between a bbox and a batch of bboxes:\ndef batch_IOU(boxes, box):\n intersect_xmax = np.minimum(boxes[:, 0] + 0.5*boxes[:, 2], box[0] + 0.5*box[2])\n intersect_xmin = np.maximum(boxes[:, 0] - 0.5*boxes[:, 2], box[0] - 0.5*box[2])\n intersect_ymax = np.minimum(boxes[:, 1] + 0.5*boxes[:, 3], box[1] + 0.5*box[3])\n intersect_ymin = np.maximum(boxes[:, 1] - 0.5*boxes[:, 3], box[1] - 0.5*box[3])\n\n intersect_w = np.maximum(0.0, intersect_xmax - intersect_xmin)\n intersect_h = np.maximum(0.0, intersect_ymax - intersect_ymin)\n intersection_area = intersect_w*intersect_h\n\n union_area = boxes[:, 2]*boxes[:, 3] + box[2]*box[3] - intersection_area\n\n IOUs = intersection_area/union_area\n\n return IOUs\n\n# function for building a dense matrix from a sparse representation:\ndef sparse_to_dense(indices, output_shape, values, default_value=0):\n # NOTE! this function is a modified version of sparse_to_dense in\n # github.com/BichenWuUCB/squeezeDet\n\n # (indices: list of indices. if indices[i] = [k, l], then array[k,l] should\n # be set to values[i])\n\n # (output_shape: shape of the dense matrix)\n\n # (values: list of values. if indices[i] = [k, l], then array[k,l] should be\n # set to values[i])\n \n # (default_value: values to set for indices not specified in indices)\n\n array = np.ones(output_shape)*default_value\n for idx, value in zip(indices, values):\n array[tuple(idx)] = value\n\n return array\n\n# function for reading all variable values from a caffe model:\ndef get_caffemodel_weights(prototxt_path, caffemodel_path):\n # NOTE! this function is inspired by utils/caffemodel2pkl.py in\n # github.com/BichenWuUCB/squeezeDet\n\n import caffe\n\n net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)\n weights = {}\n no_of_layers = len(net.layers)\n for i in range(no_of_layers):\n layer_name = net._layer_names[i]\n layer = net.layers[i]\n layer_blobs = [o.data for o in layer.blobs]\n weights[layer_name] = layer_blobs\n\n return weights\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"numpy.ones",
"tensorflow.zeros_like",
"tensorflow.to_float",
"numpy.exp"
]
] |
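The utilities in the row above implement the [cx, cy, w, h] box encoding, a batched IoU, and the greedy NMS that SqueezeDet-style detectors apply to their predictions. As a minimal NumPy-only sketch (the function bodies are re-typed here so the snippet is self-contained, and the toy boxes and probabilities are made up for illustration), the IoU-then-suppress flow can be exercised like this:

import numpy as np

def batch_iou(boxes, box):
    # boxes: (N, 4) in [cx, cy, w, h]; box: (4,) in the same format.
    inter_xmax = np.minimum(boxes[:, 0] + 0.5 * boxes[:, 2], box[0] + 0.5 * box[2])
    inter_xmin = np.maximum(boxes[:, 0] - 0.5 * boxes[:, 2], box[0] - 0.5 * box[2])
    inter_ymax = np.minimum(boxes[:, 1] + 0.5 * boxes[:, 3], box[1] + 0.5 * box[3])
    inter_ymin = np.maximum(boxes[:, 1] - 0.5 * boxes[:, 3], box[1] - 0.5 * box[3])
    inter = np.maximum(0.0, inter_xmax - inter_xmin) * np.maximum(0.0, inter_ymax - inter_ymin)
    union = boxes[:, 2] * boxes[:, 3] + box[2] * box[3] - inter
    return inter / union

def nms(boxes, probs, threshold=0.5):
    # Greedy suppression: keep the highest-scoring box, drop boxes that overlap it too much.
    order = probs.argsort()[::-1]
    keep = [True] * len(order)
    for i in range(len(order) - 1):
        overlaps = batch_iou(boxes[order[i + 1:]], boxes[order[i]])
        for j, ov in enumerate(overlaps):
            if ov > threshold:
                keep[order[j + i + 1]] = False
    return keep

boxes = np.array([[50., 50., 20., 20.],    # two heavily overlapping boxes ...
                  [52., 50., 20., 20.],
                  [120., 80., 30., 30.]])  # ... plus one distant box
probs = np.array([0.9, 0.6, 0.8])
print(nms(boxes, probs, threshold=0.5))    # [True, False, True]

Box 1 overlaps box 0 with IoU of roughly 0.82, so only the higher-scoring box 0 and the distant box 2 survive.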
fengyouliang/wheat_detection
|
[
"d056123426a1260c29b486cbb8e44a88a0a3c5bc",
"d056123426a1260c29b486cbb8e44a88a0a3c5bc"
] |
[
"tests/test_ops/test_merge_cells.py",
"mmdet/models/roi_heads/mask_heads/fused_semantic_head.py"
] |
[
"\"\"\"\r\nCommandLine:\r\n pytest tests/test_merge_cells.py\r\n\"\"\"\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\nfrom mmdet.ops.merge_cells import (BaseMergeCell, ConcatCell,\r\n GlobalPoolingCell, SumCell)\r\n\r\n\r\ndef test_sum_cell():\r\n inputs_x = torch.randn([2, 256, 32, 32])\r\n inputs_y = torch.randn([2, 256, 16, 16])\r\n sum_cell = SumCell(256, 256)\r\n output = sum_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])\r\n assert output.size() == inputs_x.size()\r\n output = sum_cell(inputs_x, inputs_y, out_size=inputs_y.shape[-2:])\r\n assert output.size() == inputs_y.size()\r\n output = sum_cell(inputs_x, inputs_y)\r\n assert output.size() == inputs_x.size()\r\n\r\n\r\ndef test_concat_cell():\r\n inputs_x = torch.randn([2, 256, 32, 32])\r\n inputs_y = torch.randn([2, 256, 16, 16])\r\n concat_cell = ConcatCell(256, 256)\r\n output = concat_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])\r\n assert output.size() == inputs_x.size()\r\n output = concat_cell(inputs_x, inputs_y, out_size=inputs_y.shape[-2:])\r\n assert output.size() == inputs_y.size()\r\n output = concat_cell(inputs_x, inputs_y)\r\n assert output.size() == inputs_x.size()\r\n\r\n\r\ndef test_global_pool_cell():\r\n inputs_x = torch.randn([2, 256, 32, 32])\r\n inputs_y = torch.randn([2, 256, 32, 32])\r\n gp_cell = GlobalPoolingCell(with_out_conv=False)\r\n gp_cell_out = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])\r\n assert (gp_cell_out.size() == inputs_x.size())\r\n gp_cell = GlobalPoolingCell(256, 256)\r\n gp_cell_out = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])\r\n assert (gp_cell_out.size() == inputs_x.size())\r\n\r\n\r\ndef test_resize_methods():\r\n inputs_x = torch.randn([2, 256, 128, 128])\r\n target_resize_sizes = [(128, 128), (256, 256)]\r\n resize_methods_list = ['nearest', 'bilinear']\r\n\r\n for method in resize_methods_list:\r\n merge_cell = BaseMergeCell(upsample_mode=method)\r\n for target_size in target_resize_sizes:\r\n merge_cell_out = merge_cell._resize(inputs_x, target_size)\r\n gt_out = F.interpolate(inputs_x, size=target_size, mode=method)\r\n assert merge_cell_out.equal(gt_out)\r\n\r\n target_size = (64, 64) # resize to a smaller size\r\n merge_cell = BaseMergeCell()\r\n merge_cell_out = merge_cell._resize(inputs_x, target_size)\r\n kernel_size = inputs_x.shape[-1] // target_size[-1]\r\n gt_out = F.max_pool2d(\r\n inputs_x, kernel_size=kernel_size, stride=kernel_size)\r\n assert (merge_cell_out == gt_out).all()\r\n",
"import torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom mmcv.cnn import ConvModule, kaiming_init\r\n\r\nfrom mmdet.core import auto_fp16, force_fp32\r\nfrom mmdet.models.builder import HEADS\r\n\r\n\r\n@HEADS.register_module()\r\nclass FusedSemanticHead(nn.Module):\r\n r\"\"\"Multi-level fused semantic segmentation head.\r\n\r\n .. code-block:: none\r\n\r\n in_1 -> 1x1 conv ---\r\n |\r\n in_2 -> 1x1 conv -- |\r\n ||\r\n in_3 -> 1x1 conv - ||\r\n ||| /-> 1x1 conv (mask prediction)\r\n in_4 -> 1x1 conv -----> 3x3 convs (*4)\r\n | \\-> 1x1 conv (feature)\r\n in_5 -> 1x1 conv ---\r\n \"\"\" # noqa: W605\r\n\r\n def __init__(self,\r\n num_ins,\r\n fusion_level,\r\n num_convs=4,\r\n in_channels=256,\r\n conv_out_channels=256,\r\n num_classes=183,\r\n ignore_label=255,\r\n loss_weight=0.2,\r\n conv_cfg=None,\r\n norm_cfg=None):\r\n super(FusedSemanticHead, self).__init__()\r\n self.num_ins = num_ins\r\n self.fusion_level = fusion_level\r\n self.num_convs = num_convs\r\n self.in_channels = in_channels\r\n self.conv_out_channels = conv_out_channels\r\n self.num_classes = num_classes\r\n self.ignore_label = ignore_label\r\n self.loss_weight = loss_weight\r\n self.conv_cfg = conv_cfg\r\n self.norm_cfg = norm_cfg\r\n self.fp16_enabled = False\r\n\r\n self.lateral_convs = nn.ModuleList()\r\n for i in range(self.num_ins):\r\n self.lateral_convs.append(\r\n ConvModule(\r\n self.in_channels,\r\n self.in_channels,\r\n 1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg,\r\n inplace=False))\r\n\r\n self.convs = nn.ModuleList()\r\n for i in range(self.num_convs):\r\n in_channels = self.in_channels if i == 0 else conv_out_channels\r\n self.convs.append(\r\n ConvModule(\r\n in_channels,\r\n conv_out_channels,\r\n 3,\r\n padding=1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg))\r\n self.conv_embedding = ConvModule(\r\n conv_out_channels,\r\n conv_out_channels,\r\n 1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg)\r\n self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)\r\n\r\n self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)\r\n\r\n def init_weights(self):\r\n kaiming_init(self.conv_logits)\r\n\r\n @auto_fp16()\r\n def forward(self, feats):\r\n x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])\r\n fused_size = tuple(x.shape[-2:])\r\n for i, feat in enumerate(feats):\r\n if i != self.fusion_level:\r\n feat = F.interpolate(\r\n feat, size=fused_size, mode='bilinear', align_corners=True)\r\n x += self.lateral_convs[i](feat)\r\n\r\n for i in range(self.num_convs):\r\n x = self.convs[i](x)\r\n\r\n mask_pred = self.conv_logits(x)\r\n x = self.conv_embedding(x)\r\n return mask_pred, x\r\n\r\n @force_fp32(apply_to=('mask_pred', ))\r\n def loss(self, mask_pred, labels):\r\n labels = labels.squeeze(1).long()\r\n loss_semantic_seg = self.criterion(mask_pred, labels)\r\n loss_semantic_seg *= self.loss_weight\r\n return loss_semantic_seg\r\n"
] |
[
[
"torch.randn",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.interpolate"
],
[
"torch.nn.CrossEntropyLoss",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.interpolate"
]
] |
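The test file above checks that BaseMergeCell._resize upsamples with F.interpolate and downsamples with a max-pool whose kernel equals the size ratio, while FusedSemanticHead fuses feature levels by resizing them to a common resolution before summing. A small torch-only sketch of that resize-and-sum pattern follows; resize_to and the toy feature shapes are illustrative stand-ins, not the mmdet API, and the 1x1 lateral convolutions are dropped for brevity:

import torch
import torch.nn.functional as F

def resize_to(x, size):
    # Upsample with interpolate when the target is larger (or equal);
    # max-pool with kernel = size ratio when it is smaller -- the same
    # two branches the BaseMergeCell resize test above exercises.
    if size[-1] >= x.shape[-1]:
        return F.interpolate(x, size=tuple(size), mode='nearest')
    kernel = x.shape[-1] // size[-1]
    return F.max_pool2d(x, kernel_size=kernel, stride=kernel)

# Toy multi-level features; 256 channels as in the tests above.
feats = [torch.randn(2, 256, s, s) for s in (64, 32, 16)]
fused_size = feats[1].shape[-2:]                 # fuse everything at the 32x32 level
fused = sum(resize_to(f, fused_size) for f in feats)
print(fused.shape)                               # torch.Size([2, 256, 32, 32])

Every level ends up at 32x32, so the element-wise sum is well defined, which is what SumCell and the semantic head's accumulation both rely on.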
aakanksha888sahu/tensorflow
|
[
"2f6e53147b7b27b7289a892998a891e3dead440e"
] |
[
"tensorflow/contrib/distribute/python/mirrored_strategy_test.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for class MirroredStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.distribute.python import mirrored_strategy\nfrom tensorflow.contrib.distribute.python import strategy_test_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import distribution_strategy_context as ds_context\n\n\nclass MirroredOneCPUDistributionTest(strategy_test_lib.DistributionTestBase):\n\n def _get_distribution_strategy(self):\n return mirrored_strategy.MirroredStrategy([\"/device:CPU:0\"])\n\n def testMinimizeLossEager(self):\n self._test_minimize_loss_eager(self._get_distribution_strategy())\n\n def testMinimizeLossGraph(self):\n self._test_minimize_loss_graph(self._get_distribution_strategy())\n\n def testReplicaId(self):\n self._test_replica_id(self._get_distribution_strategy())\n\n @test_util.run_in_graph_and_eager_modes\n def testCallAndMergeExceptions(self):\n self._test_call_and_merge_exceptions(self._get_distribution_strategy())\n\n @test_util.run_in_graph_and_eager_modes\n def testInputContextPropertyLocal(self):\n d = mirrored_strategy.MirroredStrategy(num_gpus_per_worker=2)\n input_fn = self._input_fn_to_test_input_context(\n expected_num_replicas_in_sync=2,\n expected_num_input_pipelines=1,\n expected_input_pipeline_id=0)\n d.make_input_fn_iterator(input_fn)\n\n def testInputContextPropertyMultiWorker(self):\n d = mirrored_strategy.MirroredStrategy(num_gpus_per_worker=2)\n cluster_spec = {\"worker\": [\"worker1\", \"worker2\", \"worker3\"]}\n d.configure(cluster_spec=cluster_spec)\n with context.graph_mode():\n # `expected_input_pipeline_id` is None because the input_fn will be called\n # multiple times, each with a different input_pipeline_id.\n input_fn = self._input_fn_to_test_input_context(\n expected_num_replicas_in_sync=6,\n expected_num_input_pipelines=3,\n expected_input_pipeline_id=None)\n d.make_input_fn_iterator(input_fn)\n\n\nclass VariableCreatorStackTest(test.TestCase):\n\n def testCreatorStacksAreThreadLocal(self):\n devices = [\"/device:CPU:0\", \"/device:GPU:0\"]\n dist = mirrored_strategy.MirroredStrategy(devices)\n\n def model_fn():\n replica_id_str = str(self.evaluate(_replica_id()))\n\n def thread_creator_fn(next_creator, *args, **kwargs):\n return next_creator(*args, **kwargs) + \":thread_\" + replica_id_str\n\n with variable_scope.variable_creator_scope(thread_creator_fn):\n # Create a variable in this scope.\n v = variable_scope.variable(1.0)\n\n # This will pause the current thread, and execute the other 
thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n def main_thread_creator(next_creator, *args, **kwargs):\n # We are not using the underlying next_creator for test purposes.\n del next_creator, args, kwargs\n return \"main_thread\"\n\n with context.graph_mode(), \\\n dist.scope(), \\\n variable_scope.variable_creator_scope(main_thread_creator):\n result = dist.call_for_each_replica(model_fn)\n result = dist.unwrap(result)\n expected = [\"main_thread:thread_0\", \"main_thread:thread_1\"]\n self.assertEqual(expected, result)\n\n\ndef _replica_id():\n replica_id = ds_context.get_replica_context().replica_id_in_sync_group\n if not isinstance(replica_id, ops.Tensor):\n replica_id = constant_op.constant(replica_id)\n return replica_id\n\n\nclass MultiWorkerMirroredStrategyTest(test.TestCase):\n\n def testDeviceScope(self):\n \"\"\"Test the device scope of multi-worker MirroredStrategy.\"\"\"\n with context.graph_mode():\n strategy = mirrored_strategy.MirroredStrategy(num_gpus=context.num_gpus())\n strategy.configure(\n cluster_spec={\"worker\": [\"/job:worker/task:0\", \"/job:worker/task:1\"]})\n with strategy.scope():\n a = constant_op.constant(1.)\n with ops.device(\"/cpu:0\"):\n b = constant_op.constant(1.)\n self.assertEqual(a.device, \"/job:worker/task:0\")\n self.assertEqual(b.device, \"/job:worker/task:0/device:CPU:0\")\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.training.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.eager.test.main",
"tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.framework.constant_op.constant"
]
] |
yngtodd/mobil
|
[
"4a6479bfe0f6a29cc3e6ff4e75a98475ec74ae67"
] |
[
"debug.py"
] |
[
"'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\n#from train_test import train, test\nfrom data_utils import trainloader, testloader\n\n#from utils import progress_bar\nfrom torch.autograd import Variable\nfrom cnn2layer import CNN\nfrom net import Net\n\nimport numpy as np\nfrom skopt.callbacks import DeadlineStopper\nfrom skopt import gp_minimize\nfrom skopt import dump\nfrom hyperspace.space_division import HyperSpace\n\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--num_epochs', default=20, type=int, help='number of epochs')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nargs = parser.parse_args()\n\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n\ndef objective(space):\n kernel_size1, stride1, dropout1, kernel_size2, stride2, dropout2, learning_rate = space\n\n # Hyper Parameters\n num_epochs = 10\n kernel_size1 = int(kernel_size1)\n stride1 = int(kernel_size1)\n dropout1 = float(dropout1)\n kernel_size2 = int(kernel_size2)\n stride2 = int(stride2)\n dropout2 = float(dropout2)\n learning_rate = float(learning_rate)\n\n cnn = CNN()\n cnn.cuda()\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)\n\n # Train the Model\n for epoch in range(num_epochs):\n for i, (images, labels) in enumerate(trainloader):\n print(images.shape)\n images = Variable(images).cuda()\n labels = Variable(labels).cuda()\n\n # Forward + Backward + Optimize\n optimizer.zero_grad()\n outputs = cnn(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n if (i+1) % 100 == 0:\n print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'\n %(epoch+1, num_epochs, i+1, 60000//128, loss.data[0]))\n\n # Test the Model\n correct = 0\n total = 0\n for images, labels in testloader:\n images = Variable(images).cuda()\n outputs = cnn(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted.cpu() == labels).sum()\n\n test_accuracy = 100 * correct / total\n return loss.data[0]\n\n\ndef main():\n hyperparameters = {'kernelSize1': np.arange(2,10),\n 'stride1': np.arange(1, 5),\n 'dropout1': np.linspace(0.0, 0.8),\n 'kernelSize2': np.arange(2,10),\n 'stride2': np.arange(1, 5),\n 'dropout2': np.linspace(0.0, 0.8),\n 'learningRate': np.linspace(0.001, 0.1)}\n\n hyperspace = HyperSpace(hyperparameters)\n all_intervals = hyperspace.fold_space()\n hyperspaces = hyperspace.hyper_permute(all_intervals)\n subspace_keys, subspace_boundaries = hyperspace.format_hyperspace(hyperspaces)\n\n space = subspace_boundaries[0]\n\n deadline = DeadlineStopper(18000)\n # Gaussian process minimization (see scikit-optimize skopt module for other optimizers)\n res_gp = gp_minimize(objective, space, n_calls=50, callback=deadline, random_state=0, verbose=True)\n # Each worker will write their results to disk\n #dump(res_gp, '/lustre/atlas/proj-shared/csc237/ygx/safari_zone/vision/pytorch/cifar2/mobilenet/hyper_results/gp_subspace_' + str(rank))\n\n\nif __name__=='__main__':\n main()\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.linspace",
"numpy.arange",
"torch.autograd.Variable"
]
] |
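The debug.py script above wraps a full CNN training run inside an objective handed to skopt's gp_minimize. A stripped-down sketch of that call pattern, with the CNN replaced by a cheap stand-in objective and bounds that loosely mirror the script's hyperparameters dict (the names and values here are illustrative only):

from skopt import gp_minimize

def objective(params):
    # Stand-in for the CNN training loop above, which would return its loss here.
    kernel_size, dropout, learning_rate = params
    return (kernel_size - 5) ** 2 + (dropout - 0.3) ** 2 + (learning_rate - 0.01) ** 2

search_space = [
    (2, 9),          # kernel size (integer range)
    (0.0, 0.8),      # dropout
    (0.001, 0.1),    # learning rate
]

result = gp_minimize(objective, search_space, n_calls=15, random_state=0)
print(result.x, result.fun)   # best parameters tried and their objective value

In the real script the objective would return the test loss of the trained network instead of this quadratic, and a DeadlineStopper callback bounds the wall-clock time of the search.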
stefanoregis/pycryptobot
|
[
"1edeaab46d2a7f9046a76f18527cc6d152b8707d"
] |
[
"models/Trading.py"
] |
[
"\"\"\"Technical analysis on a trading Pandas DataFrame\"\"\"\n\nimport json, math\nimport numpy as np\nimport pandas as pd\nimport re, sys\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom models.CoinbasePro import AuthAPI\n\nclass TechnicalAnalysis():\n def __init__(self, data=pd.DataFrame()):\n \"\"\"Technical Analysis object model\n \n Parameters\n ----------\n data : Pandas Time Series\n data[ts] = [ 'date', 'market', 'granularity', 'low', 'high', 'open', 'close', 'volume' ]\n \"\"\"\n\n if not isinstance(data, pd.DataFrame):\n raise TypeError('Data is not a Pandas dataframe.')\n\n if list(data.keys()) != [ 'date', 'market', 'granularity', 'low', 'high', 'open', 'close', 'volume' ]:\n raise ValueError('Data not not contain date, market, granularity, low, high, open, close, volume')\n\n if not 'close' in data.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not data['close'].dtype == 'float64' and not data['close'].dtype == 'int64':\n raise AttributeError(\"Pandas DataFrame 'close' column not int64 or float64.\")\n\n self.df = data\n self.levels = []\n\n def getDataFrame(self):\n \"\"\"Returns the Pandas DataFrame\"\"\"\n\n return self.df\n\n def addAll(self):\n \"\"\"Adds analysis to the DataFrame\"\"\"\n\n self.addChangePct()\n\n self.addCMA()\n self.addSMA(20)\n self.addSMA(50)\n self.addSMA(200)\n self.addEMA(12)\n self.addEMA(26)\n self.addGoldenCross()\n self.addDeathCross()\n self.addFibonacciBollingerBands()\n\n self.addRSI(14)\n self.addMACD()\n self.addOBV()\n self.addElderRayIndex()\n\n self.addEMABuySignals()\n self.addSMABuySignals()\n self.addMACDBuySignals() \n\n self.addCandleAstralBuy()\n self.addCandleAstralSell()\n self.addCandleHammer()\n self.addCandleInvertedHammer()\n self.addCandleShootingStar()\n self.addCandleHangingMan()\n self.addCandleThreeWhiteSoldiers()\n self.addCandleThreeBlackCrows()\n self.addCandleDoji()\n self.addCandleThreeLineStrike()\n self.addCandleTwoBlackGapping()\n self.addCandleMorningStar()\n self.addCandleEveningStar()\n self.addCandleAbandonedBaby()\n self.addCandleMorningDojiStar()\n self.addCandleEveningDojiStar()\n\n \"\"\"Candlestick References\n https://commodity.com/technical-analysis\n https://www.investopedia.com\n https://github.com/SpiralDevelopment/candlestick-patterns\n https://www.incrediblecharts.com/candlestick_patterns/candlestick-patterns-strongest.php\n \"\"\"\n\n def candleHammer(self):\n \"\"\"* Candlestick Detected: Hammer (\"Weak - Reversal - Bullish Signal - Up\"\"\"\n\n return ((self.df['high'] - self.df['low']) > 3 * (self.df['open'] - self.df['close'])) \\\n & (((self.df['close'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) > 0.6) \\\n & (((self.df['open'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) > 0.6)\n\n def addCandleHammer(self):\n self.df['hammer'] = self.candleHammer()\n\n def candleShootingStar(self):\n \"\"\"* Candlestick Detected: Shooting Star (\"Weak - Reversal - Bearish Pattern - Down\")\"\"\"\n\n return ((self.df['open'].shift(1) < self.df['close'].shift(1)) & (self.df['close'].shift(1) < self.df['open'])) \\\n & (self.df['high'] - np.maximum(self.df['open'], self.df['close']) >= (abs(self.df['open'] - self.df['close']) * 3)) \\\n & ((np.minimum(self.df['close'], self.df['open']) - self.df['low']) <= abs(self.df['open'] - self.df['close']))\n\n def addCandleShootingStar(self):\n self.df['shooting_star'] = self.candleShootingStar()\n\n def candleHangingMan(self):\n \"\"\"* Candlestick Detected: Hanging Man (\"Weak 
- Continuation - Bearish Pattern - Down\")\"\"\"\n\n return ((self.df['high'] - self.df['low']) > (4 * (self.df['open'] - self.df['close']))) \\\n & (((self.df['close'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) >= 0.75) \\\n & (((self.df['open'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) >= 0.75) \\\n & (self.df['high'].shift(1) < self.df['open']) \\\n & (self.df['high'].shift(2) < self.df['open'])\n\n def addCandleHangingMan(self):\n self.df['hanging_man'] = self.candleHangingMan()\n\n def candleInvertedHammer(self):\n \"\"\"* Candlestick Detected: Inverted Hammer (\"Weak - Continuation - Bullish Pattern - Up\")\"\"\"\n\n return (((self.df['high'] - self.df['low']) > 3 * (self.df['open'] - self.df['close'])) \\\n & ((self.df['high'] - self.df['close']) / (.001 + self.df['high'] - self.df['low']) > 0.6) \\\n & ((self.df['high'] - self.df['open']) / (.001 + self.df['high'] - self.df['low']) > 0.6))\n\n def addCandleInvertedHammer(self):\n self.df['inverted_hammer'] = self.candleInvertedHammer()\n\n def candleThreeWhiteSoldiers(self):\n \"\"\"*** Candlestick Detected: Three White Soldiers (\"Strong - Reversal - Bullish Pattern - Up\")\"\"\"\n\n return ((self.df['open'] > self.df['open'].shift(1)) & (self.df['open'] < self.df['close'].shift(1))) \\\n & (self.df['close'] > self.df['high'].shift(1)) \\\n & (self.df['high'] - np.maximum(self.df['open'], self.df['close']) < (abs(self.df['open'] - self.df['close']))) \\\n & ((self.df['open'].shift(1) > self.df['open'].shift(2)) & (self.df['open'].shift(1) < self.df['close'].shift(2))) \\\n & (self.df['close'].shift(1) > self.df['high'].shift(2)) \\\n & (self.df['high'].shift(1) - np.maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < (abs(self.df['open'].shift(1) - self.df['close'].shift(1))))\n\n def addCandleThreeWhiteSoldiers(self):\n self.df['three_white_soldiers'] = self.candleThreeWhiteSoldiers()\n\n def candleThreeBlackCrows(self):\n \"\"\"* Candlestick Detected: Three Black Crows (\"Strong - Reversal - Bearish Pattern - Down\")\"\"\"\n\n return ((self.df['open'] < self.df['open'].shift(1)) & (self.df['open'] > self.df['close'].shift(1))) \\\n & (self.df['close'] < self.df['low'].shift(1)) \\\n & (self.df['low'] - np.maximum(self.df['open'], self.df['close']) < (abs(self.df['open'] - self.df['close']))) \\\n & ((self.df['open'].shift(1) < self.df['open'].shift(2)) & (self.df['open'].shift(1) > self.df['close'].shift(2))) \\\n & (self.df['close'].shift(1) < self.df['low'].shift(2)) \\\n & (self.df['low'].shift(1) - np.maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < (abs(self.df['open'].shift(1) - self.df['close'].shift(1))))\n\n def addCandleThreeBlackCrows(self):\n self.df['three_black_crows'] = self.candleThreeBlackCrows()\n\n def candleDoji(self):\n \"\"\"! 
Candlestick Detected: Doji (\"Indecision\")\"\"\"\n\n return ((abs(self.df['close'] - self.df['open']) / (self.df['high'] - self.df['low'])) < 0.1) \\\n & ((self.df['high'] - np.maximum(self.df['close'], self.df['open'])) > (3 * abs(self.df['close'] - self.df['open']))) \\\n & ((np.minimum(self.df['close'], self.df['open']) - self.df['low']) > (3 * abs(self.df['close'] - self.df['open'])))\n\n def addCandleDoji(self):\n self.df['doji'] = self.candleDoji()\n\n def candleThreeLineStrike(self):\n \"\"\"** Candlestick Detected: Three Line Strike (\"Reliable - Reversal - Bullish Pattern - Up\")\"\"\"\n\n return ((self.df['open'].shift(1) < self.df['open'].shift(2)) & (self.df['open'].shift(1) > self.df['close'].shift(2))) \\\n & (self.df['close'].shift(1) < self.df['low'].shift(2)) \\\n & (self.df['low'].shift(1) - np.maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < (abs(self.df['open'].shift(1) - self.df['close'].shift(1)))) \\\n & ((self.df['open'].shift(2) < self.df['open'].shift(3)) & (self.df['open'].shift(2) > self.df['close'].shift(3))) \\\n & (self.df['close'].shift(2) < self.df['low'].shift(3)) \\\n & (self.df['low'].shift(2) - np.maximum(self.df['open'].shift(2), self.df['close'].shift(2)) < (abs(self.df['open'].shift(2) - self.df['close'].shift(2)))) \\\n & ((self.df['open'] < self.df['low'].shift(1)) & (self.df['close'] > self.df['high'].shift(3)))\n\n def addCandleThreeLineStrike(self):\n self.df['three_line_strike'] = self.candleThreeLineStrike()\n\n def candleTwoBlackGapping(self):\n \"\"\"*** Candlestick Detected: Two Black Gapping (\"Reliable - Reversal - Bearish Pattern - Down\")\"\"\"\n\n return ((self.df['open'] < self.df['open'].shift(1)) & (self.df['open'] > self.df['close'].shift(1))) \\\n & (self.df['close'] < self.df['low'].shift(1)) \\\n & (self.df['low'] - np.maximum(self.df['open'], self.df['close']) < (abs(self.df['open'] - self.df['close']))) \\\n & (self.df['high'].shift(1) < self.df['low'].shift(2))\n\n def addCandleTwoBlackGapping(self):\n self.df['two_black_gapping'] = self.candleTwoBlackGapping()\n\n def candleMorningStar(self):\n \"\"\"*** Candlestick Detected: Morning Star (\"Strong - Reversal - Bullish Pattern - Up\")\"\"\"\n\n return ((np.maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < self.df['close'].shift(2)) & (self.df['close'].shift(2) < self.df['open'].shift(2))) \\\n & ((self.df['close'] > self.df['open']) & (self.df['open'] > np.maximum(self.df['open'].shift(1), self.df['close'].shift(1))))\n\n def addCandleMorningStar(self):\n self.df['morning_star'] = self.candleMorningStar()\n\n def candleEveningStar(self):\n \"\"\"*** Candlestick Detected: Evening Star (\"Strong - Reversal - Bearish Pattern - Down\")\"\"\"\n\n return ((np.minimum(self.df['open'].shift(1), self.df['close'].shift(1)) > self.df['close'].shift(2)) & (self.df['close'].shift(2) > self.df['open'].shift(2))) \\\n & ((self.df['close'] < self.df['open']) & (self.df['open'] < np.minimum(self.df['open'].shift(1), self.df['close'].shift(1))))\n\n def addCandleEveningStar(self):\n self.df['evening_star'] = self.candleEveningStar()\n\n def candleAbandonedBaby(self):\n \"\"\"** Candlestick Detected: Abandoned Baby (\"Reliable - Reversal - Bullish Pattern - Up\")\"\"\"\n\n return (self.df['open'] < self.df['close']) \\\n & (self.df['high'].shift(1) < self.df['low']) \\\n & (self.df['open'].shift(2) > self.df['close'].shift(2)) \\\n & (self.df['high'].shift(1) < self.df['low'].shift(2))\n\n def addCandleAbandonedBaby(self):\n self.df['abandoned_baby'] = 
self.candleAbandonedBaby()\n\n def candleMorningDojiStar(self):\n \"\"\"** Candlestick Detected: Morning Doji Star (\"Reliable - Reversal - Bullish Pattern - Up\")\"\"\"\n\n return (self.df['close'].shift(2) < self.df['open'].shift(2)) \\\n & (abs(self.df['close'].shift(2) - self.df['open'].shift(2)) / (self.df['high'].shift(2) - self.df['low'].shift(2)) >= 0.7) \\\n & (abs(self.df['close'].shift(1) - self.df['open'].shift(1)) / (self.df['high'].shift(1) - self.df['low'].shift(1)) < 0.1) \\\n & (self.df['close'] > self.df['open']) \\\n & (abs(self.df['close'] - self.df['open']) / (self.df['high'] - self.df['low']) >= 0.7) \\\n & (self.df['close'].shift(2) > self.df['close'].shift(1)) \\\n & (self.df['close'].shift(2) > self.df['open'].shift(1)) \\\n & (self.df['close'].shift(1) < self.df['open']) \\\n & (self.df['open'].shift(1) < self.df['open']) \\\n & (self.df['close'] > self.df['close'].shift(2)) \\\n & ((self.df['high'].shift(1) - np.maximum(self.df['close'].shift(1), self.df['open'].shift(1))) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))) \\\n & (np.minimum(self.df['close'].shift(1), self.df['open'].shift(1)) - self.df['low'].shift(1)) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))\n\n def addCandleMorningDojiStar(self):\n self.df['morning_doji_star'] = self.candleMorningDojiStar()\n\n def candleEveningDojiStar(self):\n \"\"\"** Candlestick Detected: Evening Doji Star (\"Reliable - Reversal - Bearish Pattern - Down\")\"\"\"\n\n return (self.df['close'].shift(2) > self.df['open'].shift(2)) \\\n & (abs(self.df['close'].shift(2) - self.df['open'].shift(2)) / (self.df['high'].shift(2) - self.df['low'].shift(2)) >= 0.7) \\\n & (abs(self.df['close'].shift(1) - self.df['open'].shift(1)) / (self.df['high'].shift(1) - self.df['low'].shift(1)) < 0.1) \\\n & (self.df['close'] < self.df['open']) \\\n & (abs(self.df['close'] - self.df['open']) / (self.df['high'] - self.df['low']) >= 0.7) \\\n & (self.df['close'].shift(2) < self.df['close'].shift(1)) \\\n & (self.df['close'].shift(2) < self.df['open'].shift(1)) \\\n & (self.df['close'].shift(1) > self.df['open']) \\\n & (self.df['open'].shift(1) > self.df['open']) \\\n & (self.df['close'] < self.df['close'].shift(2)) \\\n & ((self.df['high'].shift(1) - np.maximum(self.df['close'].shift(1), self.df['open'].shift(1))) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))) \\\n & (np.minimum(self.df['close'].shift(1), self.df['open'].shift(1)) - self.df['low'].shift(1)) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))\n\n def addCandleEveningDojiStar(self):\n self.df['evening_doji_star'] = self.candleEveningDojiStar()\n\n def candleAstralBuy(self):\n \"\"\"*** Candlestick Detected: Astral Buy (Fibonacci 3, 5, 8)\"\"\"\n\n return (self.df['close'] < self.df['close'].shift(3)) & (self.df['low'] < self.df['low'].shift(5)) \\\n & (self.df['close'].shift(1) < self.df['close'].shift(4)) & (self.df['low'].shift(1) < self.df['low'].shift(6)) \\\n & (self.df['close'].shift(2) < self.df['close'].shift(5)) & (self.df['low'].shift(2) < self.df['low'].shift(7)) \\\n & (self.df['close'].shift(3) < self.df['close'].shift(6)) & (self.df['low'].shift(3) < self.df['low'].shift(8)) \\\n & (self.df['close'].shift(4) < self.df['close'].shift(7)) & (self.df['low'].shift(4) < self.df['low'].shift(9)) \\\n & (self.df['close'].shift(5) < self.df['close'].shift(8)) & (self.df['low'].shift(5) < self.df['low'].shift(10)) \\\n & (self.df['close'].shift(6) < self.df['close'].shift(9)) & (self.df['low'].shift(6) 
< self.df['low'].shift(11)) \\\n & (self.df['close'].shift(7) < self.df['close'].shift(10)) & (self.df['low'].shift(7) < self.df['low'].shift(12))\n \n def addCandleAstralBuy(self):\n self.df['astral_buy'] = self.candleAstralBuy()\n\n def candleAstralSell(self):\n \"\"\"*** Candlestick Detected: Astral Sell (Fibonacci 3, 5, 8)\"\"\"\n\n return (self.df['close'] > self.df['close'].shift(3)) & (self.df['high'] > self.df['high'].shift(5)) \\\n & (self.df['close'].shift(1) > self.df['close'].shift(4)) & (self.df['high'].shift(1) > self.df['high'].shift(6)) \\\n & (self.df['close'].shift(2) > self.df['close'].shift(5)) & (self.df['high'].shift(2) > self.df['high'].shift(7)) \\\n & (self.df['close'].shift(3) > self.df['close'].shift(6)) & (self.df['high'].shift(3) > self.df['high'].shift(8)) \\\n & (self.df['close'].shift(4) > self.df['close'].shift(7)) & (self.df['high'].shift(4) > self.df['high'].shift(9)) \\\n & (self.df['close'].shift(5) > self.df['close'].shift(8)) & (self.df['high'].shift(5) > self.df['high'].shift(10)) \\\n & (self.df['close'].shift(6) > self.df['close'].shift(9)) & (self.df['high'].shift(6) > self.df['high'].shift(11)) \\\n & (self.df['close'].shift(7) > self.df['close'].shift(10)) & (self.df['high'].shift(7) > self.df['high'].shift(12))\n \n def addCandleAstralSell(self):\n self.df['astral_sell'] = self.candleAstralSell()\n\n def changePct(self):\n \"\"\"Close change percentage\"\"\"\n\n close_pc = self.df['close'] / self.df['close'].shift(1) - 1\n close_pc = close_pc.fillna(0)\n return close_pc\n \n def addChangePct(self):\n \"\"\"Adds the close percentage to the DataFrame\"\"\"\n\n self.df['close_pc'] = self.changePct()\n\n # cumulative returns\n self.df['close_cpc'] = (1 + self.df['close_pc']).cumprod()\n\n def cumulativeMovingAverage(self):\n \"\"\"Calculates the Cumulative Moving Average (CMA)\"\"\"\n\n return self.df.close.expanding().mean()\n\n def addCMA(self):\n \"\"\"Adds the Cumulative Moving Average (CMA) to the DataFrame\"\"\"\n\n self.df['cma'] = self.cumulativeMovingAverage()\n\n def exponentialMovingAverage(self, period):\n \"\"\"Calculates the Exponential Moving Average (EMA)\"\"\"\n\n if not isinstance(period, int):\n raise TypeError('Period parameter is not perioderic.')\n\n if period < 5 or period > 200:\n raise ValueError('Period is out of range')\n\n if len(self.df) < period:\n raise Exception('Data range too small.')\n\n return self.df.close.ewm(span=period, adjust=False).mean()\n\n def addEMA(self, period):\n \"\"\"Adds the Exponential Moving Average (EMA) the DateFrame\"\"\"\n\n if not isinstance(period, int):\n raise TypeError('Period parameter is not perioderic.')\n\n if period < 5 or period > 200:\n raise ValueError('Period is out of range')\n\n if len(self.df) < period:\n raise Exception('Data range too small.')\n\n self.df['ema' + str(period)] = self.exponentialMovingAverage(period)\n\n def calculateRelativeStrengthIndex(self, series, interval=14):\n \"\"\"Calculates the RSI on a Pandas series of closing prices.\"\"\"\n\n if not isinstance(series, pd.Series):\n raise TypeError('Pandas Series required.')\n\n if not isinstance(interval, int):\n raise TypeError('Interval integer required.')\n\n if(len(series) < interval):\n raise IndexError('Pandas Series smaller than interval.')\n\n diff = series.diff(1).dropna()\n\n sum_gains = 0 * diff\n sum_gains[diff > 0] = diff[diff > 0]\n avg_gains = sum_gains.ewm(com=interval-1, min_periods=interval).mean()\n\n sum_losses = 0 * diff\n sum_losses[diff < 0] = diff[diff < 0]\n avg_losses = 
sum_losses.ewm(com=interval-1, min_periods=interval).mean()\n\n rs = abs(avg_gains / avg_losses)\n rsi = 100 - 100 / (1 + rs)\n\n return rsi\n\n def addFibonacciBollingerBands(self, interval=20, multiplier=3):\n \"\"\"Adds Fibonacci Bollinger Bands.\"\"\"\n\n if not isinstance(interval, int):\n raise TypeError('Interval integer required.')\n\n if not isinstance(multiplier, int):\n raise TypeError('Multiplier integer required.')\n\n tp = (self.df['high'] + self.df['low'] + self.df['close']) / 3\n sma = tp.rolling(interval).mean()\n sd = multiplier * tp.rolling(interval).std()\n\n sma = sma.fillna(0)\n sd = sd.fillna(0)\n\n self.df['fbb_mid'] = sma\n self.df['fbb_upper0_236'] = sma + (0.236 * sd)\n self.df['fbb_upper0_382'] = sma + (0.382 * sd)\n self.df['fbb_upper0_5'] = sma + (0.5 * sd)\n self.df['fbb_upper0_618'] = sma + (0.618 * sd)\n self.df['fbb_upper0_764'] = sma + (0.764 * sd)\n self.df['fbb_upper1'] = sma + (1 * sd)\n self.df['fbb_lower0_236'] = sma - (0.236 * sd)\n self.df['fbb_lower0_382'] = sma - (0.382 * sd)\n self.df['fbb_lower0_5'] = sma - (0.5 * sd)\n self.df['fbb_lower0_618'] = sma - (0.618 * sd)\n self.df['fbb_lower0_764'] = sma - (0.764 * sd)\n self.df['fbb_lower1'] = sma - (1 * sd)\n\n def movingAverageConvergenceDivergence(self):\n \"\"\"Calculates the Moving Average Convergence Divergence (MACD)\"\"\"\n\n if len(self.df) < 26:\n raise Exception('Data range too small.')\n\n if not self.df['ema12'].dtype == 'float64' and not self.df['ema12'].dtype == 'int64':\n raise AttributeError(\"Pandas DataFrame 'ema12' column not int64 or float64.\")\n\n if not self.df['ema26'].dtype == 'float64' and not self.df['ema26'].dtype == 'int64':\n raise AttributeError(\"Pandas DataFrame 'ema26' column not int64 or float64.\")\n\n df = pd.DataFrame()\n df['macd'] = self.df['ema12'] - self.df['ema26']\n df['signal'] = df['macd'].ewm(span=9, adjust=False).mean() \n return df\n\n def addMACD(self):\n \"\"\"Adds the Moving Average Convergence Divergence (MACD) to the DataFrame\"\"\"\n\n df = self.movingAverageConvergenceDivergence()\n self.df['macd'] = df['macd']\n self.df['signal'] = df['signal']\n\n def onBalanceVolume(self):\n \"\"\"Calculate On-Balance Volume (OBV)\"\"\"\n\n return np.where(self.df['close'] == self.df['close'].shift(1), 0, np.where(self.df['close'] > self.df['close'].shift(1), self.df['volume'], \n np.where(self.df['close'] < self.df['close'].shift(1), -self.df['volume'], self.df.iloc[0]['volume']))).cumsum()\n\n def addOBV(self):\n \"\"\"Add the On-Balance Volume (OBV) to the DataFrame\"\"\"\n\n self.df['obv'] = self.onBalanceVolume()\n self.df['obv_pc'] = self.df['obv'].pct_change() * 100\n self.df['obv_pc'] = np.round(self.df['obv_pc'].fillna(0), 2) \n\n def relativeStrengthIndex(self, period):\n \"\"\"Calculate the Relative Strength Index (RSI)\"\"\"\n\n if not isinstance(period, int):\n raise TypeError('Period parameter is not perioderic.')\n\n if period < 7 or period > 21:\n raise ValueError('Period is out of range')\n\n # calculate relative strength index\n rsi = self.calculateRelativeStrengthIndex(self.df['close'], period)\n # default to midway-50 for first entries\n rsi = rsi.fillna(50)\n return rsi\n\n def addRSI(self, period):\n \"\"\"Adds the Relative Strength Index (RSI) to the DataFrame\"\"\"\n\n if not isinstance(period, int):\n raise TypeError('Period parameter is not perioderic.')\n\n if period < 7 or period > 21:\n raise ValueError('Period is out of range')\n\n self.df['rsi' + str(period)] = self.relativeStrengthIndex(period) \n self.df['rsi' + 
str(period)] = self.df['rsi' + str(period)].replace(np.nan, 50)\n\n def seasonalARIMAModel(self):\n \"\"\"Returns the Seasonal ARIMA Model for price predictions\"\"\"\n\n # parameters for SARIMAX\n model = SARIMAX(self.df['close'], trend='n', order=(0,1,0), seasonal_order=(1,1,1,12))\n return model.fit(disp=-1)\n\n def seasonalARIMAModelFittedValues(self):\n \"\"\"Returns the Seasonal ARIMA Model for price predictions\"\"\"\n\n return self.seasonalARIMAModel().fittedvalues\n\n def simpleMovingAverage(self, period):\n \"\"\"Calculates the Simple Moving Average (SMA)\"\"\"\n\n if not isinstance(period, int):\n raise TypeError('Period parameter is not perioderic.')\n\n if period < 5 or period > 200:\n raise ValueError('Period is out of range')\n\n if len(self.df) < period:\n raise Exception('Data range too small.')\n\n return self.df.close.rolling(period, min_periods=1).mean()\n\n def addSMA(self, period):\n \"\"\"Add the Simple Moving Average (SMA) to the DataFrame\"\"\"\n\n if not isinstance(period, int):\n raise TypeError('Period parameter is not perioderic.')\n\n if period < 5 or period > 200:\n raise ValueError('Period is out of range')\n\n if len(self.df) < period:\n raise Exception('Data range too small.')\n\n self.df['sma' + str(period)] = self.simpleMovingAverage(period)\n\n def addGoldenCross(self):\n \"\"\"Add Golden Cross SMA50 over SMA200\"\"\"\n\n if 'sma50' not in self.df:\n self.addSMA(50)\n\n if 'sma200' not in self.df:\n self.addSMA(200)\n\n self.df['goldencross'] = self.df['sma50'] > self.df['sma200']\n\n def addDeathCross(self):\n \"\"\"Add Death Cross SMA50 over SMA200\"\"\"\n\n if 'sma50' not in self.df:\n self.addSMA(50)\n\n if 'sma200' not in self.df:\n self.addSMA(200)\n\n self.df['deathcross'] = self.df['sma50'] < self.df['sma200']\n\n def addElderRayIndex(self):\n \"\"\"Add Elder Ray Index\"\"\"\n\n if 'ema13' not in self.df:\n self.addEMA(13)\n\n self.df['elder_ray_bull'] = self.df['high'] - self.df['ema13']\n self.df['elder_ray_bear'] = self.df['low'] - self.df['ema13']\n\n # bear power’s value is negative but increasing (i.e. becoming less bearish)\n # bull power’s value is increasing (i.e. becoming more bullish)\n self.df['eri_buy'] = ((self.df['elder_ray_bear'] < 0) & (self.df['elder_ray_bear'] > self.df['elder_ray_bear'].shift(1))) | ((self.df['elder_ray_bull'] > self.df['elder_ray_bull'].shift(1))) \n \n # bull power’s value is positive but decreasing (i.e. 
becoming less bullish)\n # bear power’s value is decreasing (i.e., becoming more bearish)\n self.df['eri_sell'] = ((self.df['elder_ray_bull'] > 0) & (self.df['elder_ray_bull'] < self.df['elder_ray_bull'].shift(1))) | ((self.df['elder_ray_bear'] < self.df['elder_ray_bear'].shift(1)))\n\n def getSupportResistanceLevels(self):\n \"\"\"Calculate the Support and Resistance Levels\"\"\"\n\n self.levels = [] \n self.__calculateSupportResistenceLevels()\n levels_ts = {}\n for level in self.levels:\n levels_ts[self.df.index[level[0]]] = level[1]\n # add the support levels to the DataFrame\n return pd.Series(levels_ts)\n\n def printSupportResistanceLevel(self, price=0):\n if isinstance(price, int) or isinstance(price, float):\n df = self.getSupportResistanceLevels()\n\n if len(df) > 0:\n df_last = df.tail(1)\n if float(df_last[0]) < price:\n print (' Support level of ' + str(df_last[0]) + ' formed at ' + str(df_last.index[0]), \"\\n\")\n elif float(df_last[0]) > price:\n print (' Resistance level of ' + str(df_last[0]) + ' formed at ' + str(df_last.index[0]), \"\\n\")\n else:\n print (' Support/Resistance level of ' + str(df_last[0]) + ' formed at ' + str(df_last.index[0]), \"\\n\")\n\n def getResistance(self, price=0):\n if isinstance(price, int) or isinstance(price, float):\n if price > 0:\n sr = self.getSupportResistanceLevels()\n for r in sr.sort_values(): \n if r > price:\n return r\n\n return price\n\n def getFibonacciUpper(self, price=0):\n if isinstance(price, int) or isinstance(price, float):\n if price > 0:\n fb = self.getFibonacciRetracementLevels()\n for f in fb.values():\n if f > price:\n return f\n \n return price\n\n def getTradeExit(self, price=0):\n if isinstance(price, int) or isinstance(price, float):\n if price > 0:\n r = self.getResistance(price)\n f = self.getFibonacciUpper(price)\n if price < r and price < f:\n r_margin = ((r - price) / price) * 100\n f_margin = ((f - price) / price) * 100\n\n if r_margin > 1 and f_margin > 1 and r <= f:\n return r\n elif r_margin > 1 and f_margin > 1 and f <= r:\n return f\n elif r_margin > 1 and f_margin < 1:\n return r\n elif f_margin > 1 and r_margin < 1:\n return f\n \n return price\n\n def printSupportResistanceFibonacciLevels(self, price=0):\n if isinstance(price, int) or isinstance(price, float):\n if price > 0:\n sr = self.getSupportResistanceLevels()\n\n s = price\n for r in sr.sort_values(): \n if r > price:\n fb = self.getFibonacciRetracementLevels()\n\n l = price\n for b in fb.values():\n if b > price:\n return 'support: ' + str(s) + ', resistance: ' + str(r) + ', fibonacci (l): ' + str(l) + ', fibonacci (u): ' + str(b)\n else:\n l = b\n\n break\n else:\n s = r\n\n if len(sr) > 1 and sr.iloc[-1] < price:\n fb = self.getFibonacciRetracementLevels()\n\n l = price\n for b in fb.values():\n if b > price:\n return 'support: ' + str(sr.iloc[-1]) + ', fibonacci (l): ' + str(l) + ', fibonacci (u): ' + str(b)\n else:\n l = b\n\n return ''\n\n def addEMABuySignals(self):\n \"\"\"Adds the EMA12/EMA26 buy and sell signals to the DataFrame\"\"\"\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n if not 'ema12' or not 'ema26' in self.df.columns:\n self.addEMA(12)\n self.addEMA(26)\n\n # true if EMA12 is above the 
EMA26\n self.df['ema12gtema26'] = self.df.ema12 > self.df.ema26\n # true if the current frame is where EMA12 crosses over above\n self.df['ema12gtema26co'] = self.df.ema12gtema26.ne(self.df.ema12gtema26.shift())\n self.df.loc[self.df['ema12gtema26'] == False, 'ema12gtema26co'] = False\n\n # true if the EMA12 is below the EMA26\n self.df['ema12ltema26'] = self.df.ema12 < self.df.ema26\n # true if the current frame is where EMA12 crosses over below\n self.df['ema12ltema26co'] = self.df.ema12ltema26.ne(self.df.ema12ltema26.shift())\n self.df.loc[self.df['ema12ltema26'] == False, 'ema12ltema26co'] = False\n\n def addSMABuySignals(self):\n \"\"\"Adds the SMA50/SMA200 buy and sell signals to the DataFrame\"\"\"\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n if not 'sma50' or not 'sma200' in self.df.columns:\n self.addSMA(50)\n self.addSMA(200)\n\n # true if SMA50 is above the SMA200\n self.df['sma50gtsma200'] = self.df.sma50 > self.df.sma200\n # true if the current frame is where SMA50 crosses over above\n self.df['sma50gtsma200co'] = self.df.sma50gtsma200.ne(self.df.sma50gtsma200.shift())\n self.df.loc[self.df['sma50gtsma200'] == False, 'sma50gtsma200co'] = False\n\n # true if the SMA50 is below the SMA200\n self.df['sma50ltsma200'] = self.df.sma50 < self.df.sma200\n # true if the current frame is where SMA50 crosses over below\n self.df['sma50ltsma200co'] = self.df.sma50ltsma200.ne(self.df.sma50ltsma200.shift())\n self.df.loc[self.df['sma50ltsma200'] == False, 'sma50ltsma200co'] = False\n\n def addMACDBuySignals(self):\n \"\"\"Adds the MACD/Signal buy and sell signals to the DataFrame\"\"\"\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\"Pandas DataFrame 'close' column not int64 or float64.\")\n\n if not 'macd' or not 'signal' in self.df.columns:\n self.addMACD()\n self.addOBV()\n\n # true if MACD is above the Signal\n self.df['macdgtsignal'] = self.df.macd > self.df.signal\n # true if the current frame is where MACD crosses over above\n self.df['macdgtsignalco'] = self.df.macdgtsignal.ne(self.df.macdgtsignal.shift())\n self.df.loc[self.df['macdgtsignal'] == False, 'macdgtsignalco'] = False\n\n # true if the MACD is below the Signal\n self.df['macdltsignal'] = self.df.macd < self.df.signal\n # true if the current frame is where MACD crosses over below\n self.df['macdltsignalco'] = self.df.macdltsignal.ne(self.df.macdltsignal.shift())\n self.df.loc[self.df['macdltsignal'] == False, 'macdltsignalco'] = False\n\n def getFibonacciRetracementLevels(self, price=0):\n # validates price is numeric\n if not isinstance(price, int) and not isinstance(price, float):\n raise TypeError('Optional price is not numeric.')\n\n price_min = self.df.close.min()\n price_max = self.df.close.max()\n \n diff = price_max - price_min\n \n data = {}\n\n if price != 0 and (price <= price_min):\n data['ratio1'] = float(self.__truncate(price_min, 2))\n elif price == 0:\n data['ratio1'] = float(self.__truncate(price_min, 2))\n\n if 
price != 0 and (price > price_min) and (price <= (price_max - 0.768 * diff)):\n data['ratio1'] = float(self.__truncate(price_min, 2))\n data['ratio0_768'] = float(self.__truncate(price_max - 0.768 * diff, 2))\n elif price == 0:\n data['ratio0_768'] = float(self.__truncate(price_max - 0.768 * diff, 2)) \n\n if price != 0 and (price > (price_max - 0.768 * diff)) and (price <= (price_max - 0.618 * diff)):\n data['ratio0_768'] = float(self.__truncate(price_max - 0.768 * diff, 2))\n data['ratio0_618'] = float(self.__truncate(price_max - 0.618 * diff, 2))\n elif price == 0:\n data['ratio0_618'] = float(self.__truncate(price_max - 0.618 * diff, 2)) \n\n if price != 0 and (price > (price_max - 0.618 * diff)) and (price <= (price_max - 0.5 * diff)):\n data['ratio0_618'] = float(self.__truncate(price_max - 0.618 * diff, 2))\n data['ratio0_5'] = float(self.__truncate(price_max - 0.5 * diff, 2))\n elif price == 0:\n data['ratio0_5'] = float(self.__truncate(price_max - 0.5 * diff, 2))\n\n if price != 0 and (price > (price_max - 0.5 * diff)) and (price <= (price_max - 0.382 * diff)):\n data['ratio0_5'] = float(self.__truncate(price_max - 0.5 * diff, 2))\n data['ratio0_382'] = float(self.__truncate(price_max - 0.382 * diff, 2))\n elif price == 0:\n data['ratio0_382'] = float(self.__truncate(price_max - 0.382 * diff, 2))\n\n if price != 0 and (price > (price_max - 0.382 * diff)) and (price <= (price_max - 0.286 * diff)):\n data['ratio0_382'] = float(self.__truncate(price_max - 0.382 * diff, 2))\n data['ratio0_286'] = float(self.__truncate(price_max - 0.286 * diff, 2))\n elif price == 0:\n data['ratio0_286'] = float(self.__truncate(price_max - 0.286 * diff, 2))\n\n if price != 0 and (price > (price_max - 0.286 * diff)) and (price <= price_max):\n data['ratio0_286'] = float(self.__truncate(price_max - 0.286 * diff, 2)) \n data['ratio0'] = float(self.__truncate(price_max, 2))\n elif price == 0:\n data['ratio0'] = float(self.__truncate(price_max, 2))\n\n if price != 0 and (price < (price_max + 0.272 * diff)) and (price >= price_max):\n data['ratio0'] = float(self.__truncate(price_max, 2))\n data['ratio1_272'] = float(self.__truncate(price_max + 0.272 * diff, 2))\n elif price == 0:\n data['ratio1_272'] = float(self.__truncate(price_max + 0.272 * diff, 2))\n\n if price != 0 and (price < (price_max + 0.414 * diff)) and (price >= (price_max + 0.272 * diff)):\n data['ratio1_272'] = float(self.__truncate(price_max, 2))\n data['ratio1_414'] = float(self.__truncate(price_max + 0.414 * diff, 2))\n elif price == 0:\n data['ratio1_414'] = float(self.__truncate(price_max + 0.414 * diff, 2))\n\n if price != 0 and (price < (price_max + 0.618 * diff)) and (price >= (price_max + 0.414 * diff)):\n data['ratio1_618'] = float(self.__truncate(price_max + 0.618 * diff, 2))\n elif price == 0:\n data['ratio1_618'] = float(self.__truncate(price_max + 0.618 * diff, 2))\n\n return data\n\n def saveCSV(self, filename='tradingdata.csv'):\n \"\"\"Saves the DataFrame to an uncompressed CSV.\"\"\"\n\n p = re.compile(r\"^[\\w\\-. ]+$\")\n if not p.match(filename):\n raise TypeError('Filename required.')\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n try:\n self.df.to_csv(filename)\n except OSError:\n print('Unable to save: ', filename)\n\n def __calculateSupportResistenceLevels(self):\n \"\"\"Support and Resistance levels. 
(private function)\"\"\"\n\n for i in range(2, self.df.shape[0] - 2):\n if self.__isSupport(self.df, i):\n l = self.df['low'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n elif self.__isResistance(self.df, i):\n l = self.df['high'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n return self.levels\n\n def __isSupport(self, df, i):\n \"\"\"Is support level? (private function)\"\"\"\n\n c1 = df['low'][i] < df['low'][i - 1]\n c2 = df['low'][i] < df['low'][i + 1]\n c3 = df['low'][i + 1] < df['low'][i + 2]\n c4 = df['low'][i - 1] < df['low'][i - 2]\n support = c1 and c2 and c3 and c4\n return support\n\n def __isResistance(self, df, i):\n \"\"\"Is resistance level? (private function)\"\"\"\n\n c1 = df['high'][i] > df['high'][i - 1]\n c2 = df['high'][i] > df['high'][i + 1]\n c3 = df['high'][i + 1] > df['high'][i + 2]\n c4 = df['high'][i - 1] > df['high'][i - 2]\n resistance = c1 and c2 and c3 and c4\n return resistance\n\n def __isFarFromLevel(self, l):\n \"\"\"Is far from support level? (private function)\"\"\"\n\n s = np.mean(self.df['high'] - self.df['low'])\n return np.sum([abs(l-x) < s for x in self.levels]) == 0\n\n def __truncate(self, f, n):\n return math.floor(f * 10 ** n) / 10 ** n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"pandas.Series",
"pandas.DataFrame",
"numpy.mean"
]
] |
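Trading.py above builds its indicators column by column on a pandas DataFrame: EMAs via ewm(span=...), MACD as ema12 - ema26 with a 9-period signal line, and crossover flags via the ne(shift()) trick. A compact sketch of that pipeline on a synthetic close series (the random-walk prices are made up for illustration):

import numpy as np
import pandas as pd

# Toy closing-price series standing in for the DataFrame 'close' column used above.
close = pd.Series(100 + np.cumsum(np.random.default_rng(0).normal(0, 1, 300)))

ema12 = close.ewm(span=12, adjust=False).mean()
ema26 = close.ewm(span=26, adjust=False).mean()
macd = ema12 - ema26
signal = macd.ewm(span=9, adjust=False).mean()

# Crossover flag via the same ne(shift()) trick as addEMABuySignals():
# True only on the bar where EMA12 first moves above EMA26.
ema12gtema26 = ema12 > ema26
crossover = ema12gtema26.ne(ema12gtema26.shift()) & ema12gtema26

print(int(crossover.sum()), "bullish EMA12/EMA26 crossovers in", len(close), "bars")

The crossover series plays the role of the ema12gtema26co column: the plain comparison marks every bar where EMA12 sits above EMA26, while masking with the comparison itself keeps only the bar on which the cross actually happens.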
janblechschmidt/DeepBSDE
|
[
"fd138083a78b15fd75bee4a5e65761ac41eb7d29"
] |
[
"equation.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom scipy.stats import multivariate_normal as normal\n\n\nclass Equation(object):\n \"\"\"Base class for defining PDE related function.\"\"\"\n\n def __init__(self, eqn_config):\n # Global parameters from config\n self.dim = eqn_config.dim\n self.total_time = eqn_config.total_time\n self.num_time_interval = eqn_config.num_time_interval\n self.delta_t = self.total_time / self.num_time_interval\n self.sqrt_delta_t = np.sqrt(self.delta_t)\n # Placeholder for exact solution\n self.y_init = None\n\n def sample(self, num_sample):\n \"\"\"Sample forward SDE.\"\"\"\n # Method to draw samples, i.e. solves forward problem\n raise NotImplementedError\n\n def f_tf(self, t, x, y, z):\n \"\"\"Generator function in the PDE.\"\"\"\n # Source term of pde\n raise NotImplementedError\n\n def g_tf(self, t, x):\n \"\"\"Terminal condition of the PDE.\"\"\"\n # Terminal condition\n raise NotImplementedError\n\n\nclass HJBLQ(Equation):\n \"\"\"HJB equation in PNAS paper doi.org/10.1073/pnas.1718942115\"\"\"\n def __init__(self, eqn_config):\n # This calls the __init__ from base class\n super(HJBLQ, self).__init__(eqn_config)\n # This is \\xi in the paper\n self.x_init = np.zeros(self.dim)\n # Constant diffusion\n self.sigma = np.sqrt(2.0)\n # Parameter of equation, factor in front of nonlinear term\n self.lambd = 1.0\n\n def sample(self, num_sample):\n # Sample all rvs at once\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n # Prepare numpy array for paths\n x_sample = np.zeros([num_sample,\n self.dim,\n self.num_time_interval + 1])\n # Initialize first temporal layer with x_init\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n # This is Euler Maruyama without any drift-term\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]\n # Return all scaled random numbers as well as simulated paths\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n # Corresponds to - lambda * norm(\\grad u)^2\n return -self.lambd * tf.reduce_sum(tf.square(z), 1, keepdims=True)\n\n def g_tf(self, t, x):\n return tf.math.log((1 + tf.reduce_sum(tf.square(x), 1, keepdims=True)) / 2)\n\n\nclass AllenCahn(Equation):\n \"\"\"Allen-Cahn equation in PNAS paper doi.org/10.1073/pnas.1718942115\"\"\"\n def __init__(self, eqn_config):\n super(AllenCahn, self).__init__(eqn_config)\n self.x_init = np.zeros(self.dim)\n self.sigma = np.sqrt(2.0)\n\n def sample(self, num_sample):\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n return y - tf.pow(y, 3)\n\n def g_tf(self, t, x):\n return 0.5 / (1 + 0.2 * tf.reduce_sum(tf.square(x), 1, keepdims=True))\n\n\nclass PricingDefaultRisk(Equation):\n \"\"\"\n Nonlinear Black-Scholes equation with default risk in PNAS paper\n doi.org/10.1073/pnas.1718942115\n \"\"\"\n def __init__(self, eqn_config):\n super(PricingDefaultRisk, self).__init__(eqn_config)\n self.x_init = np.ones(self.dim) * 100.0\n self.sigma = 0.2\n self.rate = 0.02 # interest rate R\n self.delta = 2.0 / 3\n self.gammah = 0.2\n self.gammal = 0.02\n self.mu_bar = 0.02\n self.vh = 
50.0\n self.vl = 70.0\n self.slope = (self.gammah - self.gammal) / (self.vh - self.vl)\n\n def sample(self, num_sample):\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = (1 + self.mu_bar * self.delta_t) * x_sample[:, :, i] + (\n self.sigma * x_sample[:, :, i] * dw_sample[:, :, i])\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n piecewise_linear = tf.nn.relu(\n tf.nn.relu(y - self.vh) * self.slope + self.gammah - self.gammal) + self.gammal\n return (-(1 - self.delta) * piecewise_linear - self.rate) * y\n\n def g_tf(self, t, x):\n return tf.reduce_min(x, 1, keepdims=True)\n\n\nclass PricingDiffRate(Equation):\n \"\"\"\n Nonlinear Black-Scholes equation with different interest rates for borrowing and lending\n in Section 4.4 of Comm. Math. Stat. paper doi.org/10.1007/s40304-017-0117-6\n \"\"\"\n def __init__(self, eqn_config):\n super(PricingDiffRate, self).__init__(eqn_config)\n self.x_init = np.ones(self.dim) * 100\n self.sigma = 0.2\n self.mu_bar = 0.06\n self.rl = 0.04\n self.rb = 0.06\n self.alpha = 1.0 / self.dim\n\n def sample(self, num_sample):\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n factor = np.exp((self.mu_bar-(self.sigma**2)/2)*self.delta_t)\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = (factor * np.exp(self.sigma * dw_sample[:, :, i])) * x_sample[:, :, i]\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n temp = tf.reduce_sum(z, 1, keepdims=True) / self.sigma\n return -self.rl * y - (self.mu_bar - self.rl) * temp + (\n (self.rb - self.rl) * tf.maximum(temp - y, 0))\n\n def g_tf(self, t, x):\n temp = tf.reduce_max(x, 1, keepdims=True)\n return tf.maximum(temp - 120, 0) - 2 * tf.maximum(temp - 150, 0)\n\n\nclass BurgersType(Equation):\n \"\"\"\n Multidimensional Burgers-type PDE in Section 4.5 of Comm. Math. Stat. paper\n doi.org/10.1007/s40304-017-0117-6\n \"\"\"\n def __init__(self, eqn_config):\n super(BurgersType, self).__init__(eqn_config)\n self.x_init = np.zeros(self.dim)\n self.y_init = 1 - 1.0 / (1 + np.exp(0 + np.sum(self.x_init) / self.dim))\n self.sigma = self.dim + 0.0\n\n def sample(self, num_sample):\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n return (y - (2 + self.dim) / 2.0 / self.dim) * tf.reduce_sum(z, 1, keepdims=True)\n\n def g_tf(self, t, x):\n return 1 - 1.0 / (1 + tf.exp(t + tf.reduce_sum(x, 1, keepdims=True) / self.dim))\n\n\nclass QuadraticGradient(Equation):\n \"\"\"\n An example PDE with quadratically growing derivatives in Section 4.6 of Comm. Math. Stat. 
paper\n doi.org/10.1007/s40304-017-0117-6\n \"\"\"\n def __init__(self, eqn_config):\n super(QuadraticGradient, self).__init__(eqn_config)\n self.alpha = 0.4\n self.x_init = np.zeros(self.dim)\n base = self.total_time + np.sum(np.square(self.x_init) / self.dim)\n self.y_init = np.sin(np.power(base, self.alpha))\n\n def sample(self, num_sample):\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + dw_sample[:, :, i]\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n x_square = tf.reduce_sum(tf.square(x), 1, keepdims=True)\n base = self.total_time - t + x_square / self.dim\n base_alpha = tf.pow(base, self.alpha)\n derivative = self.alpha * tf.pow(base, self.alpha - 1) * tf.cos(base_alpha)\n term1 = tf.reduce_sum(tf.square(z), 1, keepdims=True)\n term2 = -4.0 * (derivative ** 2) * x_square / (self.dim ** 2)\n term3 = derivative\n term4 = -0.5 * (\n 2.0 * derivative + 4.0 / (self.dim ** 2) * x_square * self.alpha * (\n (self.alpha - 1) * tf.pow(base, self.alpha - 2) * tf.cos(base_alpha) - (\n self.alpha * tf.pow(base, 2 * self.alpha - 2) * tf.sin(base_alpha)\n )\n )\n )\n return term1 + term2 + term3 + term4\n\n def g_tf(self, t, x):\n return tf.sin(\n tf.pow(tf.reduce_sum(tf.square(x), 1, keepdims=True) / self.dim, self.alpha))\n\n\nclass ReactionDiffusion(Equation):\n \"\"\"\n Time-dependent reaction-diffusion-type example PDE in Section 4.7 of Comm. Math. Stat. paper\n doi.org/10.1007/s40304-017-0117-6\n \"\"\"\n def __init__(self, eqn_config):\n super(ReactionDiffusion, self).__init__(eqn_config)\n self._kappa = 0.6\n self.lambd = 1 / np.sqrt(self.dim)\n self.x_init = np.zeros(self.dim)\n self.y_init = 1 + self._kappa + np.sin(self.lambd * np.sum(self.x_init)) * np.exp(\n -self.lambd * self.lambd * self.dim * self.total_time / 2)\n\n def sample(self, num_sample):\n dw_sample = normal.rvs(size=[num_sample,\n self.dim,\n self.num_time_interval]) * self.sqrt_delta_t\n x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])\n x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init\n for i in range(self.num_time_interval):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + dw_sample[:, :, i]\n return dw_sample, x_sample\n\n def f_tf(self, t, x, y, z):\n exp_term = tf.exp((self.lambd ** 2) * self.dim * (t - self.total_time) / 2)\n sin_term = tf.sin(self.lambd * tf.reduce_sum(x, 1, keepdims=True))\n temp = y - self._kappa - 1 - sin_term * exp_term\n return tf.minimum(tf.constant(1.0, dtype=tf.float64), tf.square(temp))\n\n def g_tf(self, t, x):\n return 1 + self._kappa + tf.sin(self.lambd * tf.reduce_sum(x, 1, keepdims=True))\n"
] |
[
[
"numpy.sqrt",
"tensorflow.reduce_sum",
"numpy.exp",
"numpy.square",
"tensorflow.square",
"numpy.zeros",
"tensorflow.pow",
"numpy.power",
"scipy.stats.multivariate_normal.rvs",
"tensorflow.exp",
"numpy.sum",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.sin",
"tensorflow.cos",
"tensorflow.constant",
"tensorflow.maximum",
"numpy.ones",
"tensorflow.reduce_min"
]
] |
git163/Cornell-MOE
|
[
"df299d1be882d2af9796d7a68b3f9505cac7a53e"
] |
[
"pes/PES/target_function.py"
] |
[
"import numpy as np\r\nimport numpy.random as npr\r\n\r\n\r\n\r\n#This file is used to store the function that the user would like to oprimize.\r\n#Users can define their own functions in this file. Here we define two synthetic\r\n#funtions. One is Hartmann6 and the other one is Branin Hoo. Users can define \r\n#their own functions in the similar way. \r\n\r\n\r\n\r\n\r\n#The Hartmann6 function. The only global minimum is at (0.20169, 0.150011, 0.476874, \r\n# 0.275332, 0.311652, 0.6573). The minimum value is -3.32237 without noise. Here we \r\n# add a 10^(-3) noise to the function. The input bounds are 0 <= xi <= 1, i = 1..6. \r\ndef Hartmann6(x):\r\n \r\n alpha = [1.00, 1.20, 3.00, 3.20]\r\n A = np.array([[10.00, 3.00, 17.00, 3.50, 1.70, 8.00],\r\n [0.05, 10.00, 17.00, 0.10, 8.00, 14.00],\r\n [3.00, 3.50, 1.70, 10.00, 17.00, 8.00],\r\n [17.00, 8.00, 0.05, 10.00, 0.10, 14.00]])\r\n P = 0.0001 * np.array([[1312, 1696, 5569, 124, 8283, 5886],\r\n [2329, 4135, 8307, 3736, 1004, 9991],\r\n [2348, 1451, 3522, 2883, 3047, 6650],\r\n [4047, 8828, 8732, 5743, 1091, 381]])\r\n\r\n\r\n\r\n external_sum = 0\r\n for i in range(4):\r\n internal_sum = 0\r\n for j in range(6):\r\n internal_sum = internal_sum + A[i, j] * (x[j] - P[i, j]) ** 2\r\n external_sum = external_sum + alpha[i] * np.exp(-internal_sum)\r\n external_sum = -external_sum + 10**(-3)\r\n return external_sum\r\n\r\n\r\n\r\n\r\n\r\n\r\n#The Hartmann6 function. The global minimums are at (0.12389382, 0.81833333) and \r\n#(0.961652, 0.165). The minimum value is -1.047394 without noise. Here we add\r\n#a standard gaussian noise with 10^(-3) scale to the function. The input bounds\r\n#are 0 <= xi <= 1, i = 1,2. \r\ndef Branin_Hoo(X):\r\n x1 = X[0]\r\n x2 = X[1]\r\n \r\n x1bar = 15*x1 - 5\r\n x2bar = 15 * x2 \r\n \r\n term1 = x2bar - 5.1*x1bar**2/(4*np.pi**2) + 5*x1bar/np.pi - 6\r\n term2 = (10 - 10/(8*np.pi)) * np.cos(x1bar)\r\n \r\n ret = (term1**2 + term2 - 44.81) / 51.95 + 10**(-3) * npr.normal(0,1)\r\n return ret"
] |
[
[
"numpy.exp",
"numpy.random.normal",
"numpy.array",
"numpy.cos"
]
] |
ebentley17/Deniz_lab_code
|
[
"3cf13c769bed0ddf0abf0dc74213a9dec96bfabb"
] |
[
"wrangling/tutorials/compile_fluorimeter_data_simple.py"
] |
[
"\"\"\"This is a simple script to compile data from the Deniz lab fluorimeter.\n\nYou can make a shortcut of this file. Double-click the file to run it. \nYou will be prompted to enter the path for a folder of .ifx data \nfiles. A .csv with the compiled data will be saved in the same folder. \n\"\"\"\n\nimport sys\nimport glob\nimport pandas\nimport numpy as np\n\nfrom wrangling import fluorimeter, utilities\nfrom wrangling.tutorials import handle_input\n\n\n# prevent closing on exception\ndef show_exception_and_exit(exc_type, exc_value, tb):\n import traceback\n\n traceback.print_exception(exc_type, exc_value, tb)\n input(\"Press key to exit.\")\n sys.exit(-1)\n\n\nsys.excepthook = show_exception_and_exit\n\n\n# set up input_manipulator function to pass to handle_input.interpret()\ndef validate_folder(string):\n if handle_input.file_or_folder(string) == \"folder\":\n return string\n else:\n raise RuntimeError(\"You have not selected a valid folder.\\n\")\n\n\n# repeat the query if you don't select any files\nvalid_folder_selected = False\nwhile not valid_folder_selected:\n filepath = handle_input.interpret(\n \"Enter the path of the folder to be analyzed: \", \n validate_folder\n )\n\n try:\n df = fluorimeter.assemble_ifx_files(\n glob.glob(filepath + \"/*ifx\"), \n title_as_column=True\n )\n valid_folder_selected = True\n # if something goes wrong, show the error and repeat the loop\n except ValueError as e:\n print(str(e) + \"\\n\")\n\n# correct Intensity column if present\ntry:\n df = fluorimeter.correct_df_intensity(df, detect_slit=True)\n# will fail if comment column is not an allowed slit value\nexcept (KeyError, RuntimeError):\n pass\n\n# df has been modified in place for the last time\n# this is the version of df that gets imported by other scripts\n\n\ndef automatically_standardize_concentrations(df):\n # automatically detect columns with concentration entries\n concentration_columns = [x for x in df.columns if \"[\" in x]\n\n df = df.copy()\n\n # standardize concentration to the first unit found in each column\n for conc_col in concentration_columns:\n # iterate through rows looking for the first non-NaN value\n i = 0\n while i < len(df):\n column_content = df.loc[i, conc_col]\n\n if type(column_content) is float:\n if np.isnan(column_content):\n i += 1\n else:\n break\n\n # this will cause an error if there is an entire column of NaN values\n # and i gets to the size of the dataframe\n # but an entire column of NaN shouldn't be constructed\n unit = str(column_content)[-2:]\n if unit not in [\"mM\", \"uM\", \"nM\", \"pM\"]:\n continue\n\n df = utilities.standardize_concentration(df, columns=conc_col, unit=unit)\n\n return df\n\n\nautomatically_standardize_concentrations(df).to_csv(filepath + \"/compiled.csv\")\n"
] |
[
[
"numpy.isnan"
]
] |
zhangxiao339/document-ocr
|
[
"2c87c67691f76e947804a28df18957422c1f2c2d"
] |
[
"single_word_ocr/load_saved_model.py"
] |
[
"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n#author: wu.zheng midday.me\n\nimport tensorflow as tf\nimport cv2\nimport numpy as np\nimport json\nimport time\nimport os\n\nEXPORT_PATH = \"single_word_model/densenet_exported/1565353481\"\nVOCAB_PATH = './gbk.json'\n\n\ndef load_charset_map():\n char_set = json.load(open(VOCAB_PATH))\n char_set = {v:k for k, v in char_set.items()}\n return char_set\n\ndef pre_process_image(im):\n image_size = 64\n h, w = im.shape[:2]\n if h > image_size or w > image_size:\n im = cv2.resize(im, (image_size, image_size))\n else:\n pad_height = int((image_size - h) / 2)\n pad_width = int((image_size - w) / 2)\n im = np.pad(im, ((pad_height, image_size-h - pad_height), (pad_width, image_size - w - pad_width)),\n mode='constant', constant_values=((255, 255),(255, 255)))\n print(im.shape)\n im = im / 255.0 \n im = im.reshape([1, image_size, image_size, 1])\n return im\n\nclass Model():\n def __init__(self):\n self.sess = tf.Session()\n tf.saved_model.loader.load(self.sess, ['serve'], EXPORT_PATH)\n graph = tf.get_default_graph()\n self.input_image = graph.get_tensor_by_name('image:0')\n self.predict_prob = graph.get_tensor_by_name('prob:0')\n self.predict_ids = graph.get_tensor_by_name('prediction:0')\n self.char_dict = load_charset_map()\n\n def predict(self, im):\n im = pre_process_image(im)\n feed_dict = {self.input_image: im }\n predict_prob, predict_ids = self.sess.run([self.predict_prob, self.predict_ids], feed_dict=feed_dict)\n out_index = int(predict_ids)\n print(self.predict_prob, self.char_dict[out_index])\n return self.char_dict[int(out_index)]\n\n\n\ndef predict(image_path, true_label=\"\", counter=0):\n im = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE )\n print(im.shape)\n word_im_list = [im]\n for i, word_im in enumerate(word_im_list):\n cv2.imwrite(os.path.join('word_image', str(counter)+\".png\"), word_im)\n start = time.time()\n word = model.predict(word_im)\n cost = time.time() - start\n print('image_path: %s true_label: %s predict_label: %s cost : %f'%(image_path, true_label, word, cost))\n return true_label == word\n\ndef load_image_path(file_path):\n image_path_list = []\n label_path_list = []\n with open(file_path) as f:\n for _, line in enumerate(f):\n line = line.strip(\"\\n\")\n line = line.split()\n if len(line) < 2:\n continue\n image_path = line[0]\n label = line[1]\n label_path_list.append(label)\n image_path_list.append(image_path)\n return image_path_list, label_path_list\n\n\nmodel = Model()\n\nif __name__ == \"__main__\":\n file_path = '/home/zhengwu/data/pdfs/books_line/d83a286f-08c9-5cb4-8dc0-65162a99781e/labels.txt'\n image_path_list, label_path_list = load_image_path(file_path)\n counter = 0\n right_counter = 0\n for image_path, label in zip(image_path_list, label_path_list):\n print(image_path)\n print(label)\n res = predict(image_path, label, counter)\n if res:\n right_counter +=1\n counter += 1\n if counter > 1000:\n break\n print(right_counter*1.0 / counter)\n\n\n\n"
] |
[
[
"tensorflow.saved_model.loader.load",
"tensorflow.get_default_graph",
"numpy.pad",
"tensorflow.Session"
]
] |
ronaldogomes96/Acelera-Dev-DataScience
|
[
"3bdd7ae86a907db3339fdd7abc615ecaf5a683a6"
] |
[
"Modulo 3/desafioModulo3.py"
] |
[
"\nimport pandas as pd\nimport json\n\ndf = pd.read_csv('baseDesafio.csv')\n\n#Selecionando as colunas para o uso\ndadosImportantes = { 'pontuacao_credito' : df['pontuacao_credito'] ,\n 'estado_residencia' : df['estado_residencia']}\n\n#Criando um novo datframe com essa colunas\ndata = pd.DataFrame(dadosImportantes)\n\n#Lendo o arquivo json\nwith open ('submission.json','r') as arquivo:\n texto = arquivo.read()\n submissionJSON = json.loads(texto)\nsubmissionJSON['SC']['moda'] = 10\n\n#Recebendo os valores necessarios\nmoda = df.groupby('estado_residencia')['pontuacao_credito'].apply(lambda x: x.mode())\nmedia = df.groupby('estado_residencia')['pontuacao_credito'].mean().reset_index()\nmediana = df.groupby('estado_residencia')['pontuacao_credito'].median().reset_index()\ndesvioPadrao = df.groupby('estado_residencia')['pontuacao_credito'].std().reset_index()\n\n#Analisando os resultados\nprint(moda)\nprint(mediana)\nprint(media)\nprint(desvioPadrao)\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
paragagrawal11/transformers
|
[
"18c32eeb21afa1d883094af54ba6321aa2d4c8db"
] |
[
"src/transformers/models/electra/modeling_electra.py"
] |
[
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch ELECTRA model. \"\"\"\n\nimport math\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN, get_activation\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n SequenceSummary,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_electra import ElectraConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"ElectraConfig\"\n_TOKENIZER_FOR_DOC = \"ElectraTokenizer\"\n\nELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/electra-small-generator\",\n \"google/electra-base-generator\",\n \"google/electra-large-generator\",\n \"google/electra-small-discriminator\",\n \"google/electra-base-discriminator\",\n \"google/electra-large-discriminator\",\n # See all ELECTRA models at https://huggingface.co/models?filter=electra\n]\n\n\ndef load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator=\"discriminator\"):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n for name, array in zip(names, arrays):\n original_name: str = name\n\n try:\n if isinstance(model, ElectraForMaskedLM):\n name = name.replace(\"electra/embeddings/\", \"generator/embeddings/\")\n\n if discriminator_or_generator == \"generator\":\n name = name.replace(\"electra/\", \"discriminator/\")\n name = name.replace(\"generator/\", \"electra/\")\n\n name = name.replace(\"dense_1\", \"dense_prediction\")\n name = name.replace(\"generator_predictions/output_bias\", \"generator_lm_head/bias\")\n\n name = name.split(\"/\")\n # print(original_name, name)\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"global_step\", \"temperature\"] for n in name):\n logger.info(\"Skipping {}\".format(original_name))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name.endswith(\"_embeddings\"):\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name), original_name)\n pointer.data = torch.from_numpy(array)\n except AttributeError as e:\n print(\"Skipping {}\".format(original_name), name, e)\n continue\n return model\n\n\nclass ElectraEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", 
torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra\nclass ElectraSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n if encoder_hidden_states is not None:\n mixed_key_layer = self.key(encoder_hidden_states)\n mixed_value_layer = self.value(encoder_hidden_states)\n attention_mask = encoder_attention_mask\n else:\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between 
\"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in ElectraModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass ElectraSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra\nclass ElectraAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = ElectraSelfAttention(config)\n self.output = ElectraSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n 
self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass ElectraIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass ElectraOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra\nclass ElectraLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = ElectraAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = ElectraAttention(config)\n self.intermediate = ElectraIntermediate(config)\n self.output = ElectraOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n cross_attention_outputs = self.crossattention(\n attention_output,\n 
attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra\nclass ElectraEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False):\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]\n if v is not None\n )\n return BaseModelOutputWithCrossAttentions(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\nclass ElectraDiscriminatorPredictions(nn.Module):\n \"\"\"Prediction module for the discriminator, made up of two dense layers.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dense_prediction = nn.Linear(config.hidden_size, 1)\n self.config = config\n\n def forward(self, discriminator_hidden_states):\n hidden_states = self.dense(discriminator_hidden_states)\n hidden_states = get_activation(self.config.hidden_act)(hidden_states)\n logits = self.dense_prediction(hidden_states).squeeze(-1)\n\n return logits\n\n\nclass ElectraGeneratorPredictions(nn.Module):\n \"\"\"Prediction module for the generator, made up of two dense 
layers.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n self.LayerNorm = nn.LayerNorm(config.embedding_size)\n self.dense = nn.Linear(config.hidden_size, config.embedding_size)\n\n def forward(self, generator_hidden_states):\n hidden_states = self.dense(generator_hidden_states)\n hidden_states = get_activation(\"gelu\")(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n\n return hidden_states\n\n\nclass ElectraPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = ElectraConfig\n load_tf_weights = load_tf_weights_in_electra\n base_model_prefix = \"electra\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n _keys_to_ignore_on_load_unexpected = [r\"electra\\.embeddings_project\\.weight\", r\"electra\\.embeddings_project\\.bias\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n@dataclass\nclass ElectraForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.ElectraForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss of the ELECTRA objective.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):\n Prediction scores of the head (scores for each token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nELECTRA_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nELECTRA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.ElectraTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to \"\n \"the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the \"\n \"hidden size and embedding size are different.\"\n \"\"\n \"Both the generator and discriminator checkpoints may be loaded into this model.\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraModel(ElectraPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.embeddings = ElectraEmbeddings(config)\n\n if config.embedding_size != config.hidden_size:\n self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)\n\n self.encoder = ElectraEncoder(config)\n self.config = config\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/electra-small-discriminator\",\n output_type=BaseModelOutputWithCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n hidden_states = self.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n\n if hasattr(self, \"embeddings_project\"):\n hidden_states = self.embeddings_project(hidden_states)\n\n hidden_states = self.encoder(\n hidden_states,\n attention_mask=extended_attention_mask,\n 
head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n return hidden_states\n\n\nclass ElectraClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = get_activation(\"gelu\")(x) # although BERT uses tanh here, it seems Electra authors used gelu here\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraForSequenceClassification(ElectraPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.electra = ElectraModel(config)\n self.classifier = ElectraClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/electra-small-discriminator\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n discriminator_hidden_states = self.electra(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict,\n )\n\n sequence_output = discriminator_hidden_states[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + discriminator_hidden_states[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=discriminator_hidden_states.hidden_states,\n attentions=discriminator_hidden_states.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.\n\n It is recommended to load the discriminator checkpoint into that model.\n \"\"\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraForPreTraining(ElectraPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.electra = ElectraModel(config)\n self.discriminator_predictions = ElectraDiscriminatorPredictions(config)\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the ELECTRA loss. 
Input should be a sequence of tokens (see :obj:`input_ids`\n docstring) Indices should be in ``[0, 1]``:\n\n - 0 indicates the token is an original token,\n - 1 indicates the token was replaced.\n\n Returns:\n\n Examples::\n\n >>> from transformers import ElectraTokenizer, ElectraForPreTraining\n >>> import torch\n\n >>> tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')\n >>> model = ElectraForPreTraining.from_pretrained('google/electra-small-discriminator')\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> logits = model(input_ids).logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n discriminator_hidden_states = self.electra(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict,\n )\n discriminator_sequence_output = discriminator_hidden_states[0]\n\n logits = self.discriminator_predictions(discriminator_sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = nn.BCEWithLogitsLoss()\n if attention_mask is not None:\n active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1\n active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]\n active_labels = labels[active_loss]\n loss = loss_fct(active_logits, active_labels.float())\n else:\n loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())\n\n if not return_dict:\n output = (logits,) + discriminator_hidden_states[1:]\n return ((loss,) + output) if loss is not None else output\n\n return ElectraForPreTrainingOutput(\n loss=loss,\n logits=logits,\n hidden_states=discriminator_hidden_states.hidden_states,\n attentions=discriminator_hidden_states.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Electra model with a language modeling head on top.\n\n Even though both the discriminator and generator may be loaded into this model, the generator is the only model of\n the two to have been trained for the masked language modeling task.\n \"\"\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraForMaskedLM(ElectraPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.electra = ElectraModel(config)\n self.generator_predictions = ElectraGeneratorPredictions(config)\n\n self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.generator_lm_head\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/electra-small-discriminator\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n generator_hidden_states = self.electra(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict,\n )\n generator_sequence_output = generator_hidden_states[0]\n\n prediction_scores = self.generator_predictions(generator_sequence_output)\n prediction_scores = self.generator_lm_head(prediction_scores)\n\n loss = None\n # Masked language modeling softmax layer\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss() # -100 index = padding token\n loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + generator_hidden_states[1:]\n return ((loss,) + output) if loss is not None else output\n\n return MaskedLMOutput(\n loss=loss,\n logits=prediction_scores,\n hidden_states=generator_hidden_states.hidden_states,\n attentions=generator_hidden_states.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Electra model with a token classification head on top.\n\n Both the discriminator and generator may be loaded into this model.\n \"\"\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraForTokenClassification(ElectraPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.electra = ElectraModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/electra-small-discriminator\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n discriminator_hidden_states = self.electra(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict,\n )\n discriminator_sequence_output = discriminator_hidden_states[0]\n\n discriminator_sequence_output = self.dropout(discriminator_sequence_output)\n logits = self.classifier(discriminator_sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.config.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + discriminator_hidden_states[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=discriminator_hidden_states.hidden_states,\n attentions=discriminator_hidden_states.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraForQuestionAnswering(ElectraPreTrainedModel):\n config_class = ElectraConfig\n base_model_prefix = \"electra\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.electra = ElectraModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/electra-small-discriminator\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n discriminator_hidden_states = self.electra(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = discriminator_hidden_states[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (\n start_logits,\n end_logits,\n ) + discriminator_hidden_states[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=discriminator_hidden_states.hidden_states,\n attentions=discriminator_hidden_states.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ELECTRA_START_DOCSTRING,\n)\nclass ElectraForMultipleChoice(ElectraPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.electra = ElectraModel(config)\n self.sequence_summary = SequenceSummary(config)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/electra-small-discriminator\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n discriminator_hidden_states = self.electra(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = discriminator_hidden_states[0]\n\n pooled_output = self.sequence_summary(sequence_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + discriminator_hidden_states[1:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=discriminator_hidden_states.hidden_states,\n attentions=discriminator_hidden_states.attentions,\n )\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.einsum",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.BCEWithLogitsLoss",
"numpy.transpose",
"torch.arange",
"tensorflow.train.list_variables",
"torch.nn.MSELoss"
]
] |
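The apis column above lists `torch.nn.CrossEntropyLoss` among the calls used by the ELECTRA heads in the code cell; the token-classification forward there masks out padded positions before computing the loss. A minimal, self-contained sketch of that active-loss pattern (standalone tensors with illustrative shapes, not the Hugging Face model itself):

```python
import torch
import torch.nn as nn

batch, seq_len, num_labels = 2, 6, 5
logits = torch.randn(batch, seq_len, num_labels)          # stand-in for classifier output
labels = torch.randint(0, num_labels, (batch, seq_len))   # stand-in for gold labels
attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0],
                               [1, 1, 1, 0, 0, 0]])       # 1 = real token, 0 = padding

loss_fct = nn.CrossEntropyLoss()
active = attention_mask.view(-1) == 1                     # keep only real tokens
active_logits = logits.view(-1, num_labels)[active]
active_labels = labels.view(-1)[active]
loss = loss_fct(active_logits, active_labels)
print(loss.item())
```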
malamleh93/quickNAT_pytorch
|
[
"e6076854ed4d8d0bdd36c8c1ff9a5a5cd27906a6"
] |
[
"utils/preprocessor.py"
] |
[
"import numpy as np\n\nORIENTATION = {\n 'coronal': \"COR\",\n 'axial': \"AXI\",\n 'sagital': \"SAG\"\n}\n\n\ndef rotate_orientation(volume_data, volume_label, orientation=ORIENTATION['coronal']):\n if orientation == ORIENTATION['coronal']:\n return volume_data.transpose((2, 0, 1)), volume_label.transpose((2, 0, 1))\n elif orientation == ORIENTATION['axial']:\n return volume_data.transpose((1, 2, 0)), volume_label.transpose((1, 2, 0))\n elif orientation == ORIENTATION['sagital']:\n return volume_data, volume_label\n else:\n raise ValueError(\"Invalid value for orientation. Pleas see help\")\n\n\ndef estimate_weights_mfb(labels):\n class_weights = np.zeros_like(labels)\n unique, counts = np.unique(labels, return_counts=True)\n median_freq = np.median(counts)\n weights = np.zeros(len(unique))\n for i, label in enumerate(unique):\n class_weights += (median_freq // counts[i]) * np.array(labels == label)\n weights[int(label)] = median_freq // counts[i]\n\n grads = np.gradient(labels)\n edge_weights = (grads[0] ** 2 + grads[1] ** 2) > 0\n class_weights += 2 * edge_weights\n return class_weights, weights\n\n\ndef remap_labels(labels, remap_config):\n \"\"\"\n Function to remap the label values into the desired range of algorithm\n \"\"\"\n if remap_config == 'FS':\n label_list = [2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 26, 28, 41, 42, 43, 44, 46, 47, 49, 50,\n 51, 52, 53, 54, 58, 60]\n elif remap_config == 'Neo':\n labels[(labels >= 100) & (labels % 2 == 0)] = 210\n labels[(labels >= 100) & (labels % 2 == 1)] = 211\n label_list = [45, 211, 52, 50, 41, 39, 60, 37, 58, 56, 4, 11, 35, 48, 32, 46, 30, 62, 44, 210, 51, 49, 40, 38,\n 59, 36, 57, 55, 47, 31, 23, 61]\n else:\n raise ValueError(\"Invalid argument value for remap config, only valid options are FS and Neo\")\n\n new_labels = np.zeros_like(labels)\n\n for i, label in enumerate(label_list):\n label_present = np.zeros_like(labels)\n label_present[labels == label] = 1\n new_labels = new_labels + (i + 1) * label_present\n\n return new_labels\n\n\ndef reduce_slices(data, labels, skip_Frame=40):\n \"\"\"\n This function removes the useless black slices from the start and end. And then selects every even numbered frame.\n \"\"\"\n no_slices, H, W = data.shape\n mask_vector = np.zeros(no_slices, dtype=int)\n mask_vector[::2], mask_vector[1::2] = 1, 0\n mask_vector[:skip_Frame], mask_vector[-skip_Frame:-1] = 0, 0\n\n data_reduced = np.compress(mask_vector, data, axis=0).reshape(-1, H, W)\n labels_reduced = np.compress(mask_vector, labels, axis=0).reshape(-1, H, W)\n\n return data_reduced, labels_reduced\n\n\ndef remove_black(data, labels):\n clean_data, clean_labels = [], []\n for i, frame in enumerate(labels):\n unique, counts = np.unique(frame, return_counts=True)\n if counts[0] / sum(counts) < .99:\n clean_labels.append(frame)\n clean_data.append(data[i])\n return np.array(clean_data), np.array(clean_labels)\n"
] |
[
[
"numpy.gradient",
"numpy.unique",
"numpy.median",
"numpy.compress",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros"
]
] |
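The preprocessor file above leans on `np.unique(..., return_counts=True)` for median-frequency class weights and on `np.compress` for slice selection; a small sketch of those two calls in isolation, using toy arrays rather than real volumes:

```python
import numpy as np

labels = np.array([[0, 0, 1],
                   [0, 2, 2],
                   [1, 1, 0]])

# Median-frequency style per-class weights, as in estimate_weights_mfb
unique, counts = np.unique(labels, return_counts=True)
median_freq = np.median(counts)
weights = median_freq // counts
print(dict(zip(unique.tolist(), weights.tolist())))

# Keep every other slice of a volume, as in reduce_slices
volume = np.random.rand(8, 4, 4)
mask = np.zeros(8, dtype=int)
mask[::2] = 1
reduced = np.compress(mask, volume, axis=0)   # shape (4, 4, 4)
print(reduced.shape)
```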
RamitPahwa/Knowledge-Distillation
|
[
"fc5ed25affc00ac1aa8be300748a4902aa85fb8e"
] |
[
"data_loader.py"
] |
[
"from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nimport sys\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\n\nimport torch.utils.data as data\n\nclass CIFARSel(data.Dataset):\n base_folder = 'cifar-10-batches-py'\n url = \"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n filename = \"cifar-10-python.tar.gz\"\n tgz_md5 = 'c58f30108f718f92721af3b95e74349a'\n train_list = [\n ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],\n ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],\n ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],\n ['data_batch_4', '634d18415352ddfa80567beed471001a'],\n ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],\n ]\n\n test_list = [\n ['test_batch', '40351d587109b95175f43aff81a1287e'],\n ]\n @property\n def targets(self):\n if self.train:\n return self.train_labels\n else:\n return self.test_labels\n\n def __init__(self, root,names,name_class,train = True,transform=None, target_transform=None):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform # training set or test set\n self.train = train\n if self.train:\n self.train_data = []\n self.train_labels = []\n self.train_data_selected = []\n self.train_labels_selected =[]\n # self.superclass_train_labels = []\n # self.superclass_test_labels_selected =[] \n for fentry in self.train_list:\n f = fentry[0]\n file = os.path.join(self.root, self.base_folder, f)\n fo = open(file, 'rb')\n if sys.version_info[0] == 2:\n entry = pickle.load(fo)\n else:\n entry = pickle.load(fo, encoding='latin1')\n self.train_data.append(entry['data'])\n if 'labels' in entry:\n self.train_labels += entry['labels']\n else:\n self.train_labels += entry['fine_labels']\n # self.superclass_train_labels += entry['coarse_labels']\n fo.close()\n\n self.train_data = np.concatenate(self.train_data)\n self.train_data = self.train_data.reshape((50000, 3, 32, 32))\n self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC\n # print(len(self.train_data))\n for i in range(len(self.train_data)):\n for name in names:\n if self.train_labels[i] == name_class[name]:\n self.train_data_selected.append(self.train_data[i])\n self.train_labels_selected.append(self.train_labels[i])\n # self.superclass_train_labels_selected.append(self.superclass_train_labels[i])\n # print(len(self.train_data_selected))\n\n else:\n self.test_data_selected =[]\n self.test_labels_selected =[]\n # self.superclass_test_labels_selected =[] \n f = self.test_list[0][0]\n file = os.path.join(self.root, self.base_folder, f)\n fo = open(file, 'rb')\n if sys.version_info[0] == 2:\n entry = pickle.load(fo)\n else:\n entry = pickle.load(fo, encoding='latin1')\n self.test_data = entry['data']\n if 'labels' in entry:\n self.test_labels = entry['labels']\n else:\n self.test_labels = entry['fine_labels']\n # self.superclass_test_labels = entry['coarse_labels']\n fo.close()\n self.test_data = self.test_data.reshape((10000, 3, 32, 32))\n self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC\n\n for i in range(len(self.test_data)):\n for name in names:\n if self.test_labels[i] == name_class[name]:\n self.test_data_selected.append(self.test_data[i])\n self.test_labels_selected.append(self.test_labels[i])\n # self.superclass_test_labels_selected.append(self.superclass_test_labels[i])\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where 
target is index of the target class.\n \"\"\"\n if self.train:\n img, target = self.train_data_selected[index], self.train_labels_selected[index]\n else:\n img, target = self.test_data_selected[index], self.test_labels_selected[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n if self.train:\n return len(self.train_data_selected)\n else:\n return len(self.test_data_selected)\n\n# dataset = CIFARSel('/home/ubuntu/inappmlagents/N2N',['dog'],name_class=name_class)\n# print(len(dataset))\n\nclass CIFAR100Sel(CIFARSel):\n 'Inherits CIFARSel Class'\n\n base_folder = 'cifar-100-python'\n url = \"https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\"\n filename = \"cifar-100-python.tar.gz\"\n tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'\n train_list = [\n ['train', '16019d7e3df5f24257cddd939b257f8d'],\n ]\n\n test_list = [\n ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],\n ]\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='latin1')\n return dict\n\n# dataset = CIFAR100Sel('/code/N2N',['beetle'],name_class=name_class)\n# print(len(dataset))"
] |
[
[
"numpy.concatenate"
]
] |
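The CIFAR loader above concatenates the per-batch pickle arrays and reshapes them to HWC before handing them to PIL; a tiny sketch of that `numpy.concatenate` / reshape / transpose step with fake batch data in place of the real pickle files:

```python
import numpy as np

# Two fake CIFAR-style batches of 3 flattened 32x32 RGB images each
batches = [np.random.randint(0, 256, (3, 3 * 32 * 32), dtype=np.uint8) for _ in range(2)]

data = np.concatenate(batches)            # (6, 3072)
data = data.reshape((6, 3, 32, 32))       # NCHW
data = data.transpose((0, 2, 3, 1))       # NHWC, as Image.fromarray expects
print(data.shape)                         # (6, 32, 32, 3)
```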
WenheLI/npyjs
|
[
"7becdf6d53ae22eb2748cc207988ff8f2cce47d5"
] |
[
"test/generate-test-data.py"
] |
[
"import numpy as np\nimport json\n\nrecords = {}\n\nfor dimensions in [(10,), (65, 65), (100, 100, 100), (4, 4, 4, 4, 4)]:\n for dtype in [\"int8\", \"int16\", \"int64\", \"float32\", \"float64\"]:\n name = f\"./data/{'x'.join(str(i) for i in dimensions)}-{dtype}\"\n data = np.random.randint(0, 255, dimensions).astype(dtype)\n records[name] = data.ravel()[-5:].tolist()\n np.save(name, data)\njson.dump(\n records, open(\"records.json\", \"w\"),\n)\n"
] |
[
[
"numpy.save",
"numpy.random.randint"
]
] |
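The generator above writes arrays with `np.save` and records their last five values for the JavaScript tests; a quick round-trip check of that pattern (the file name here is a hypothetical example, not one of the generated fixtures):

```python
import numpy as np

data = np.random.randint(0, 255, (4, 4)).astype("int16")
np.save("example-4x4-int16", data)            # writes example-4x4-int16.npy

loaded = np.load("example-4x4-int16.npy")
assert loaded.dtype == np.int16
assert (loaded.ravel()[-5:] == data.ravel()[-5:]).all()
print(loaded.ravel()[-5:].tolist())
```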
JimothyJohn/PerceiverToolkit
|
[
"7f1e4b93a619f9b93000dc52ffbe4eeaf07d612b"
] |
[
"OpticalFlow.py"
] |
[
"#!/usr/bin/env python\n\n# Copyright 2021 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom PIL import Image\nimport streamlit as st\nimport os\n\nimport functools\nimport itertools\nimport pickle\n\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport cv2\nimport imageio\n\nfrom perceiver import perceiver, io_processors\n\nFLOW_SCALE_FACTOR = 20\n# The network assumes images are of the following size\nTRAIN_SIZE = (368, 496)\nTRAIN_RESIZE = (496, 368)\n\n\ndef optical_flow(images):\n \"\"\"Perceiver IO model for optical flow.\n\n Args:\n images: Array of two stacked images, of shape [B, 2, H, W, C]\n Returns:\n Optical flow field, of shape [B, H, W, 2].\n \"\"\"\n input_preprocessor = io_processors.ImagePreprocessor(\n position_encoding_type='fourier',\n fourier_position_encoding_kwargs=dict(\n num_bands=64,\n max_resolution=TRAIN_SIZE,\n sine_only=False,\n concat_pos=True,\n ),\n n_extra_pos_mlp=0,\n prep_type='patches',\n spatial_downsample=1,\n conv_after_patching=True,\n temporal_downsample=2)\n\n encoder = encoder = perceiver.PerceiverEncoder(\n num_self_attends_per_block=24,\n # Weights won't be shared if num_blocks is set to 1.\n num_blocks=1,\n z_index_dim=2048,\n num_cross_attend_heads=1,\n num_z_channels=512,\n num_self_attend_heads=16,\n cross_attend_widening_factor=1,\n self_attend_widening_factor=1,\n dropout_prob=0.0,\n z_pos_enc_init_scale=0.02,\n cross_attention_shape_for_attn='kv',\n name='perceiver_encoder')\n\n decoder = perceiver.FlowDecoder(\n TRAIN_SIZE,\n rescale_factor=100.0,\n use_query_residual=False,\n output_num_channels=2,\n output_w_init=jnp.zeros,\n # We query the decoder using the first frame features\n # rather than a standard decoder position encoding.\n position_encoding_type='fourier',\n fourier_position_encoding_kwargs=dict(\n concat_pos=True,\n max_resolution=TRAIN_SIZE,\n num_bands=64,\n sine_only=False\n )\n )\n\n model = perceiver.Perceiver(\n input_preprocessor=input_preprocessor,\n encoder=encoder,\n decoder=decoder,\n output_postprocessor=None)\n\n return model(io_processors.patches_for_flow(images),\n is_training=False) * FLOW_SCALE_FACTOR\n\n\noptical_flow = hk.transform(optical_flow)\n_apply_optical_flow_model = jax.jit(optical_flow.apply)\n\n\ndef compute_grid_indices(image_shape, patch_size=TRAIN_SIZE, min_overlap=20):\n if min_overlap >= TRAIN_SIZE[0] or min_overlap >= TRAIN_SIZE[1]:\n raise ValueError(\n f\"Overlap should be less than size of patch (got {min_overlap}\"\n f\"for patch size {patch_size}).\")\n ys = list(range(0, image_shape[0], TRAIN_SIZE[0] - min_overlap))\n xs = list(range(0, image_shape[1], TRAIN_SIZE[1] - min_overlap))\n # Make sure the final patch is flush with the image boundary\n ys[-1] = image_shape[0] - patch_size[0]\n xs[-1] = image_shape[1] - patch_size[1]\n return itertools.product(ys, xs)\n\n\ndef compute_optical_flow(params, rng, img1, img2, grid_indices,\n patch_size=TRAIN_SIZE):\n \"\"\"Function to compute 
optical flow between two images.\n\n To compute the flow between images of arbitrary sizes, we divide the image\n into patches, compute the flow for each patch, and stitch the flows together.\n\n Args:\n params: model parameters\n rng: jax.random.PRNGKey, not used in this model\n img1: first image\n img2: second image\n grid_indices: indices of the upper left corner for each patch.\n patch_size: size of patch, should be TRAIN_SIZE.\n \"\"\"\n imgs = jnp.stack([img1, img2], axis=0)[None]\n height = imgs.shape[-3]\n width = imgs.shape[-2]\n\n if height < patch_size[0]:\n raise ValueError(\n f\"Height of image (shape: {imgs.shape}) must be at least {patch_size[0]}.\"\n \"Please pad or resize your image to the minimum dimension.\"\n )\n if width < patch_size[1]:\n raise ValueError(\n f\"Width of image (shape: {imgs.shape}) must be at least {patch_size[1]}.\"\n \"Please pad or resize your image to the minimum dimension.\"\n )\n\n flows = 0\n flow_count = 0\n\n grid_indices = compute_grid_indices(img1.shape)\n for y, x in grid_indices:\n inp_piece = imgs[..., y: y + patch_size[0],\n x: x + patch_size[1], :]\n flow_piece = _apply_optical_flow_model(params, rng, inp_piece)\n weights_x, weights_y = jnp.meshgrid(\n jnp.arange(patch_size[1]), jnp.arange(patch_size[0]))\n\n weights_x = jnp.minimum(weights_x + 1, patch_size[1] - weights_x)\n weights_y = jnp.minimum(weights_y + 1, patch_size[0] - weights_y)\n weights = jnp.minimum(weights_x, weights_y)[jnp.newaxis, :, :,\n jnp.newaxis]\n padding = [(0, 0), (y, height - y - patch_size[0]),\n (x, width - x - patch_size[1]), (0, 0)]\n flows += jnp.pad(flow_piece * weights, padding)\n flow_count += jnp.pad(weights, padding)\n\n flows /= flow_count\n return flows\n\n\nrng = jax.random.PRNGKey(42)\nwith open(\"models/optical_flow_checkpoint.pystate\", \"rb\") as f:\n params = pickle.loads(f.read())\n f.close()\n\nstate = {}\n\n\n# Convert array to proper datatype, resize to match trained size, and\n# Normalize from [0-255] integers to [-1. to 1.] floats\ndef normalize(im):\n im = np.asarray(im)\n im = cv2.resize(im, TRAIN_RESIZE)\n return im / 255.0 * 2 - 1\n\n\ndef visualize_flow(flow):\n flow = np.array(flow)\n # Use Hue, Saturation, Value colour model\n hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)\n hsv[..., 2] = 255\n\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang / np.pi / 2 * 180\n hsv[..., 1] = np.clip(mag * 255 / 24, 0, 255)\n bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n return bgr\n\n\n# Divide images into patches, compute flow between corresponding patches\n# of both images, and stitch the flows together\ndef ExtractFlow(firstFrame, secondFrame):\n\n grid_indices = compute_grid_indices(firstFrame.shape)\n return compute_optical_flow(\n params,\n rng,\n normalize(firstFrame),\n normalize(secondFrame),\n grid_indices\n )\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.clip"
]
] |
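The optical-flow script above normalizes 8-bit frames into the [-1, 1] range before patching and also uses `np.clip` when visualizing flow magnitude; a numpy-only sketch of just the normalization convention, leaving out the OpenCV resize the original applies:

```python
import numpy as np

frame = np.random.randint(0, 256, (368, 496, 3), dtype=np.uint8)

# Scale 8-bit pixels into the [-1, 1] range the flow model expects
normalized = frame.astype(np.float32) / 255.0 * 2.0 - 1.0
print(normalized.min(), normalized.max())   # roughly -1.0 and 1.0
```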
liwen-deepmotion/map_based_lidar_camera_calibration_tool
|
[
"d260380729b05b153c2efd1e76d4ae077c48c4b1"
] |
[
"calibration_tool/vector/polygon_3d.py"
] |
[
"\n\nimport numpy as np\n\nfrom vector.vector import Vector\n\n\nclass Polygon3D(Vector):\n\n def __init__(self, vertices=np.zeros((0, 3))):\n super().__init__(vertices)\n"
] |
[
[
"numpy.zeros"
]
] |
Albertios/PythonInGIS_EagleOwl
|
[
"35cba102b2c93bb1a7b415e9460aa955d4816b32"
] |
[
"Scripts/stackedBarChart.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\ncnames = [\n '#F0F8FF',\n '#FAEBD7',\n '#00FFFF',\n '#7FFFD4',\n '#F0FFFF',\n '#F5F5DC',\n '#FFE4C4',\n '#000000',\n '#FFEBCD',\n '#0000FF',\n '#8A2BE2',\n '#A52A2A',\n '#DEB887',\n '#5F9EA0',\n '#7FFF00',\n '#D2691E',\n '#FF7F50',\n '#6495ED',\n '#FFF8DC',\n '#DC143C',\n '#00FFFF',\n '#00008B',\n '#008B8B',\n '#B8860B',\n '#A9A9A9',\n '#006400',\n '#BDB76B',\n '#8B008B',\n '#556B2F',\n '#FF8C00',\n '#9932CC',\n '#8B0000',\n '#E9967A',\n '#8FBC8F',\n '#483D8B',\n '#2F4F4F',\n '#00CED1',\n '#9400D3',\n '#FF1493',\n '#00BFFF',\n '#696969',\n '#1E90FF',\n '#B22222',\n '#FFFAF0',\n '#228B22',\n '#FF00FF',\n '#DCDCDC',\n '#F8F8FF',\n '#FFD700',\n '#DAA520',\n '#808080',\n '#008000',\n '#ADFF2F',\n '#F0FFF0',\n '#FF69B4',\n '#CD5C5C',\n '#4B0082',\n '#FFFFF0',\n '#F0E68C',\n '#E6E6FA',\n '#FFF0F5',\n '#7CFC00',\n '#FFFACD',\n '#ADD8E6',\n '#F08080',\n '#E0FFFF',\n '#FAFAD2',\n '#90EE90',\n '#D3D3D3',\n '#FFB6C1',\n '#FFA07A',\n '#20B2AA',\n '#87CEFA',\n '#778899',\n '#B0C4DE',\n '#FFFFE0',\n '#00FF00',\n '#32CD32',\n '#FAF0E6',\n '#FF00FF',\n '#800000',\n '#66CDAA',\n '#0000CD',\n '#BA55D3',\n '#9370DB',\n '#3CB371',\n '#7B68EE',\n '#00FA9A',\n '#48D1CC',\n '#C71585',\n '#191970',\n '#F5FFFA',\n '#FFE4E1',\n '#FFE4B5',\n '#FFDEAD',\n '#000080',\n '#FDF5E6',\n '#808000',\n '#6B8E23',\n '#FFA500',\n '#FF4500',\n '#DA70D6',\n '#EEE8AA',\n '#98FB98',\n '#AFEEEE',\n '#DB7093',\n '#FFEFD5',\n '#FFDAB9',\n '#CD853F',\n '#FFC0CB',\n '#DDA0DD',\n '#B0E0E6',\n '#800080',\n '#FF0000',\n '#BC8F8F',\n '#4169E1',\n '#8B4513',\n '#FA8072',\n '#FAA460',\n '#2E8B57',\n '#FFF5EE',\n '#A0522D',\n '#C0C0C0',\n '#87CEEB',\n '#6A5ACD',\n '#708090',\n '#FFFAFA',\n '#00FF7F',\n '#4682B4',\n '#D2B48C',\n '#008080',\n '#D8BFD8',\n '#FF6347',\n '#40E0D0',\n '#EE82EE',\n '#F5DEB3',\n '#FFFFFF',\n '#F5F5F5',\n '#FFFF00',\n '#9ACD32']\nmonths = {'Jan': [], \n 'Feb': [], \n 'Mar': [],\n 'Apr': [],\n 'May': [],\n 'Jun': [],\n 'Jul': [],\n 'Aug': [],\n 'Sep': [],\n 'Oct': [],\n 'Nov': [],\n 'Dec': []\n }\n\n\ndef getOwl(monthTable, ID):\n result = []\n \n for f in monthTable:\n if f[0] == ID:\n result.append(f)\n return result\n \ndef fillNull(months):\n months[\"Jan\"].append(0)\n months[\"Feb\"].append(0)\n months[\"Mar\"].append(0)\n months[\"Apr\"].append(0)\n months[\"May\"].append(0)\n months[\"Jun\"].append(0)\n months[\"Jul\"].append(0)\n months[\"Aug\"].append(0)\n months[\"Sep\"].append(0)\n months[\"Oct\"].append(0)\n months[\"Nov\"].append(0)\n months[\"Dec\"].append(0)\n return months\n\ndef fillMonths(monthTable, months):\n \n curOwl = monthTable[0][0]\n \n for feature in monthTable:\n tempOwl = feature[0]\n \n month = feature[2]\n dist = feature[3]\n owl = getOwl(monthTable, \"1751\")\n \n # get all Data for one owl\n # fill all month with distance\n # missing data = 0 distance\n months = fillNull(months)\n \n if month == \"01\":\n months[\"Jan\"][len(months[\"Jan\"])-1] = dist\n if month == \"02\":\n months[\"Feb\"][len(months[\"Feb\"])-1] = dist\n if month == \"03\":\n months[\"Mar\"][len(months[\"Mar\"])-1] = dist\n if month == \"04\":\n months[\"Apr\"][len(months[\"Apr\"])-1] = dist\n if month == \"05\":\n months[\"May\"][len(months[\"May\"])-1] = dist\n if month == \"06\":\n months[\"Jun\"][len(months[\"Jun\"])-1] = dist\n if month == \"07\":\n months[\"Jul\"][len(months[\"Jul\"])-1] = dist\n if month == \"08\":\n months[\"Aug\"][len(months[\"Aug\"])-1] = dist\n if month == \"09\":\n months[\"Sep\"][len(months[\"Sep\"])-1] = dist\n if month == \"10\":\n 
months[\"Oct\"][len(months[\"Oct\"])-1] = dist\n if month == \"11\":\n months[\"Nov\"][len(months[\"Nov\"])-1] = dist\n if month == \"12\":\n months[\"Dec\"][len(months[\"Dec\"])-1] = dist\n \n return months\n \n\nmonths = fillMonths(monthTable, months)\n\n\nX = np.arange(12)\n\ncurOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,]\ncounter = 0\n\ntempOwl = \"0\"\n\nlastOwl=\"none\"\n\nfor feature in monthTable:\n \n owl = feature[0]\n \n if owl != tempOwl:\n \n tempOwl = owl\n t = getOwl(monthTable, feature[0])\n \n for i in t:\n \n month = i[2]\n \n if month == \"01\":\n curOwl[0] = i[3]\n if month == \"02\":\n curOwl[1] = i[3]\n if month == \"03\":\n curOwl[2] = i[3]\n if month == \"04\":\n curOwl[3] = i[3]\n if month == \"05\":\n curOwl[4] = i[3]\n if month == \"06\":\n curOwl[5] = i[3]\n if month == \"07\":\n curOwl[6] = i[3]\n if month == \"08\":\n curOwl[7] = i[3]\n if month == \"09\":\n curOwl[8] = i[3]\n if month == \"10\":\n curOwl[9] = i[3]\n if month == \"11\":\n curOwl[10] = i[3]\n if month == \"12\":\n curOwl[11] = i[3]\n \n \n col = cnames[counter]\n if lastOwl == \"none\":\n plt.bar(X, curOwl, color = col)\n else:\n plt.bar(X, curOwl, color = col, bottom = lastOwl)\n \n lastOwl = curOwl\n\n counter = counter + 5\n\n\n\n\nplt.show()\n\n\n\n\n\n"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar"
]
] |
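The plotting script above stacks one owl's monthly distances on top of another by passing the previous series as `bottom=`; a minimal self-contained matplotlib sketch of that stacked-bar pattern with made-up monthly values:

```python
import numpy as np
import matplotlib.pyplot as plt

X = np.arange(12)                       # one bar per month
owl_a = np.random.rand(12) * 10         # fake monthly distances
owl_b = np.random.rand(12) * 10

plt.bar(X, owl_a, color="#4682B4")
plt.bar(X, owl_b, color="#FF7F50", bottom=owl_a)   # stack on top of owl_a
plt.xlabel("Month")
plt.ylabel("Distance")
plt.show()
```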
caicre/PrivMRF
|
[
"9ff82346ec6cf29889a4fe0d7cd9fdd480e7d5ab"
] |
[
"domain.py"
] |
[
"from functools import reduce\nimport numpy as np\n\nclass Domain:\n # attr_list specifies the order of axis\n def __init__(self, domain_dict, attr_list):\n self.dict = domain_dict\n self.attr_list = attr_list\n self.shape = [domain_dict[i]['domain'] for i in attr_list]\n\n\n def project(self, attr_set):\n new_dict = {key: self.dict[key] for key in attr_set}\n new_attr_list = [attr for attr in self.attr_list if attr in attr_set]\n return Domain(new_dict, new_attr_list)\n \n def moveaxis(self, attr_list):\n attr_set = set(self.attr_list)\n new_attr_list = [attr for attr in attr_list if attr in attr_set]\n return Domain(self.dict, new_attr_list)\n\n def attr_domain(self, attr):\n if attr in self.dict:\n return self.dict[attr]['domain']\n else:\n return None\n\n def size(self):\n return reduce(lambda x,y: x*y, self.shape, 1)\n \n # edge for np.histogramdd\n def edge(self):\n return [list(range(i+1)) for i in self.shape]\n\n def index_list(self, domain):\n if not isinstance(domain, Domain):\n attr_list = domain\n else:\n attr_list = domain.attr_list\n index_list = []\n for attr in attr_list:\n index_list.append(self.attr_list.index(attr))\n return index_list\n\n def invert(self, domain):\n new_dict = {}\n new_attr_list = []\n for i in self.attr_list:\n if i not in domain.dict:\n new_attr_list.append(i)\n new_dict[i] = self.dict[i]\n return Domain(new_dict, new_attr_list)\n\n def equal(self, domain):\n if len(self.attr_list) != len(domain.attr_list):\n return False\n for i in range(len(self.attr_list)):\n if self.attr_list[i] != domain.attr_list[i]:\n return False\n return True\n\n def __sub__(self, parameter):\n domain = [attr for attr in self.dict if attr not in parameter.dict]\n return self.project(domain)\n\n def __add__(self, parameter):\n domain_dict = self.dict.copy()\n for attr in parameter.dict:\n domain_dict[attr] = parameter.dict[attr]\n attr_list = self.attr_list.copy()\n for attr in parameter.attr_list:\n if attr in set(parameter.attr_list) - set(self.attr_list):\n attr_list.append(attr)\n return Domain(domain_dict, attr_list)\n\n def __len__(self):\n return len(self.dict)\n\nclass Smoother:\n def __init__(self, histogram, domain, value_range, value_threshold):\n self.domain = domain\n histogram = histogram.flatten()\n values, indices, counts = np.unique(histogram, return_index=True, return_counts=True)\n value_counts = list(zip(values, indices, counts))\n value_counts.sort(key=lambda x: x[0])\n\n min_range = value_counts[0][2]\n current_range = min_range + value_range\n \n self.index_map = {}\n self.new_index_id = 0\n for i in range(len(value_counts)):\n if value_counts[i][0] > value_threshold:\n break\n if value_counts[i][0] < current_range:\n self.index_map[value_counts[i][1]] = self.new_index_id\n else:\n while value_counts[i][0] >= current_range:\n current_range += value_range\n self.new_index_id += 1\n self.index_map[value_counts[i][1]] = self.new_index_id\n print('compress marginal domain', domain.attr_list, self.new_index_id, domain.size())\n\n def smoothed_noisy_histogram(self, histogram, domain, noise):\n if not self.domain.equal(domain):\n print('error: wrong smoother')\n exit(-1)\n histogram = histogram.flatten()\n values, indices, counts = np.unique(histogram, return_index=True, return_counts=True)\n \n new_index_to_value = {index: 0 for index in range(self.new_index_id+1)}\n new_index_to_index = {index: [] for index in range(self.new_index_id+1)}\n for index in self.index_map:\n new_index = self.index_map[index]\n new_index_to_value[new_index] += histogram[index]\n 
new_index_to_index[new_index].append(index)\n \n histogram += np.random.normal(scale=noise, size=domain.size())\n\n for new_index in new_index_to_index:\n value = new_index_to_value[new_index] + np.random.normal(scale=noise)\n indices_num = len(new_index_to_index[new_index])\n for index in new_index_to_index[new_index]:\n histogram[index] = int(value/indices_num)\n\n histogram = np.reshape(histogram, domain.shape)\n\n return histogram"
] |
[
[
"numpy.reshape",
"numpy.random.normal",
"numpy.unique"
]
] |
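`smoothed_noisy_histogram` above perturbs a flattened histogram with `np.random.normal` and reshapes it back to the domain shape; a small sketch of that noise-injection step on a toy 2-D histogram (the noise scale here is an arbitrary example value):

```python
import numpy as np

shape = (4, 5)
histogram = np.random.randint(0, 50, size=shape).astype(float)

flat = histogram.flatten()
flat += np.random.normal(scale=2.0, size=flat.size)   # Gaussian noise per cell
noisy = np.reshape(flat, shape)

print(noisy.shape, float(np.abs(noisy - histogram).mean()))
```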
AustinTSchaffer/seedpod_ground_risk
|
[
"694846fb1b19ebc3deb44a310c0e509e25c5f002"
] |
[
"seedpod_ground_risk/pathfinding/theta_star.py"
] |
[
"from heapq import heappop, heappush\nfrom typing import Union, List\n\nimport numpy as np\nfrom skimage.draw import line\n\nfrom seedpod_ground_risk.pathfinding.a_star import _reconstruct_path\nfrom seedpod_ground_risk.pathfinding.algorithm import Algorithm\nfrom seedpod_ground_risk.pathfinding.environment import Node, GridEnvironment\nfrom seedpod_ground_risk.pathfinding.heuristic import Heuristic, ManhattanHeuristic\n\n\nclass RiskThetaStar(Algorithm):\n\n def __init__(self, heuristic: Heuristic = ManhattanHeuristic()):\n self.heuristic = heuristic.h\n\n def find_path(self, environment: GridEnvironment, start: Node, end: Node, smooth=False, k=1, thres=3e-8,\n method=np.mean, **kwargs) -> Union[List[Node], None]:\n grid = environment.grid\n self.risk_threshold = thres\n self.cost_method = method\n self.max_cost = grid.max()\n self.max_dist = np.sqrt((grid.shape[0] ** 2) + (grid.shape[1] ** 2))\n\n # Use heapq;the thread safety provided by PriorityQueue is not needed, as we only exec on a single thread\n open = [start]\n start.f = start.g = start.h = 0\n start.parent = start\n open_cost = {start: self._euc_dist(start, end)}\n closed = set()\n\n while open:\n node = heappop(open)\n if node in open_cost:\n open_cost.pop(node)\n else:\n continue\n closed.add(node)\n if node == end:\n return _reconstruct_path(node, grid, smooth=smooth)\n\n for neighbour in environment.get_neighbours(node):\n if neighbour in closed:\n continue\n cost, parent = self._calc_cost(neighbour, node, grid)\n if neighbour in open_cost and cost < neighbour.g:\n del open_cost[neighbour]\n neighbour.g = cost\n neighbour.parent = parent\n neighbour.f = cost + (k * self._euc_dist(neighbour, node))\n heappush(open, neighbour)\n open_cost[neighbour] = neighbour.f\n\n return None\n\n def _calc_cost(self, child: Node, best: Node, grid):\n g1 = best.g + self._edge_cost(best, child, grid)\n g2 = best.parent.g + self._edge_cost(best.parent, child, grid)\n if g2 <= g1:\n return g2, best.parent\n else:\n return g1, best\n\n def _edge_cost(self, best, child, grid):\n dist = self._euc_dist(best, child)\n if dist < 1.5: # if adjacent don't use bresenham\n node_costs = grid[(best.position[0], child.position[0]), (best.position[1], child.position[1])]\n else:\n l = line(best.position[0], best.position[1], child.position[0], child.position[1])\n node_costs = grid[l[0], l[1]]\n cost = self.cost_method(node_costs)\n if cost < self.risk_threshold:\n return 0\n return dist * cost\n\n def _euc_dist(self, n1, n2):\n return ((n1.position[0] - n2.position[0]) ** 2 + (n1.position[1] - n2.position[1]) ** 2) ** 0.5\n"
] |
[
[
"numpy.sqrt"
]
] |
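`_edge_cost` above samples grid cells along a rasterised segment with `skimage.draw.line` and scales their aggregate by the Euclidean distance; a compact sketch of that sampling step on a random cost grid:

```python
import numpy as np
from skimage.draw import line

grid = np.random.rand(100, 100)               # per-cell risk values

# Cells crossed by the segment from (5, 5) to (40, 70)
rr, cc = line(5, 5, 40, 70)
segment_costs = grid[rr, cc]

euclidean = np.sqrt((40 - 5) ** 2 + (70 - 5) ** 2)
edge_cost = euclidean * segment_costs.mean()
print(edge_cost)
```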
knarfamlap/tensor2tensor
|
[
"92ebc7152e0f4f42871251f17dbe6db8409d4fae"
] |
[
"tensor2tensor/layers/common_image_attention_test.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for common image attention utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nfrom tensor2tensor.layers import common_image_attention\n\nimport tensorflow as tf\n\n\nclass CommonImageAttentionTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n (common_image_attention.DistributionType.DMOL, 5, 50),\n (common_image_attention.DistributionType.CAT, None, 256),\n )\n def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth):\n batch = 1\n rows = 8\n cols = 24\n hparams = tf.contrib.training.HParams(\n hidden_size=2,\n likelihood=likelihood,\n mode=tf.estimator.ModeKeys.TRAIN,\n num_mixtures=num_mixtures,\n )\n inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],\n minval=-1., maxval=1.)\n outputs = common_image_attention.postprocess_image(\n inputs, rows, cols, hparams)\n self.assertEqual(outputs.shape, (batch, rows, cols, depth))\n\n @parameterized.parameters(\n (common_image_attention.DistributionType.DMOL, 5, 50),\n (common_image_attention.DistributionType.CAT, None, 256),\n )\n def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth):\n batch = 1\n rows = 8\n cols = 24\n block_length = 4\n block_width = 2\n hparams = tf.contrib.training.HParams(\n block_raster_scan=True,\n hidden_size=2,\n likelihood=likelihood,\n mode=tf.estimator.ModeKeys.PREDICT,\n num_mixtures=num_mixtures,\n query_shape=[block_length, block_width],\n )\n inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],\n minval=-1., maxval=1.)\n outputs = common_image_attention.postprocess_image(\n inputs, rows, cols, hparams)\n num_blocks_rows = rows // block_length\n num_blocks_cols = cols // block_width\n self.assertEqual(outputs.shape,\n (batch, num_blocks_rows, num_blocks_cols,\n block_length, block_width, depth))\n\n @parameterized.parameters(\n (common_image_attention.DistributionType.DMOL, 5, 50),\n (common_image_attention.DistributionType.CAT, None, 256),\n )\n def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth):\n batch = 1\n height = 8\n width = 8\n channels = 3\n rows = height\n if likelihood == common_image_attention.DistributionType.CAT:\n cols = channels * width\n else:\n cols = width\n hparams = tf.contrib.training.HParams(\n hidden_size=2,\n likelihood=likelihood,\n mode=tf.estimator.ModeKeys.TRAIN,\n num_mixtures=num_mixtures,\n )\n decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])\n targets = tf.random_uniform([batch, height, width, channels],\n minval=-1., maxval=1.)\n output = common_image_attention.create_output(\n decoder_output, rows, cols, targets, hparams)\n if hparams.likelihood == common_image_attention.DistributionType.CAT:\n self.assertEqual(output.shape, (batch, height, width, channels, depth))\n else:\n 
self.assertEqual(output.shape, (batch, height, width, depth))\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.random_uniform",
"tensorflow.contrib.training.HParams",
"tensorflow.test.main",
"tensorflow.random_normal"
]
] |
arthurflor/signatures
|
[
"e6ae5e5996df438e533420d80eda288d849db28c"
] |
[
"src/classifier/tree.py"
] |
[
"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport util.data as data\nimport util.path as path\nimport numpy as np\nimport os\n\ndef random_split(test_size, features, labels):\n features_tr, features_te, labels_tr, labels_te = train_test_split(features, labels, test_size=test_size)\n return features_tr, labels_tr, features_te, labels_te\n\nclass RandomForest():\n\n def __init__(self, len_features):\n self.name = \"random_forest_(\" + str(len_features) + \"f)\"\n self.clf = RandomForestClassifier(criterion='entropy', n_estimators=1000, random_state=0)\n\n def save(self, report, version, graph):\n f_name = self.name + \"_\" + str(version) \n destination = os.path.join(path.out(), data.DATASET, self.name)\n data.saveVariable(destination, f_name, report.log)\n if graph: data.saveGraph(destination, f_name, self.clf.estimators_[0], self.clf.classes_)\n\n def training(self, features_tr, labels_tr):\n print(\"Training...\")\n self.clf.fit(features_tr, labels_tr)\n\n def predict(self, features):\n print(\"Classifying...\")\n return self.clf.predict(features)\n\nclass CART():\n\n def __init__(self, len_features):\n self.name = \"cart_(\" + str(len_features) + \"f)\"\n self.clf = DecisionTreeClassifier(criterion=\"entropy\", random_state=0)\n\n def save(self, report, version, graph):\n f_name = self.name + \"_\" + str(version) \n destination = os.path.join(path.out(), data.DATASET, self.name)\n data.saveVariable(destination, f_name, report.log)\n if graph: data.saveGraph(destination, f_name, self.clf, self.clf.classes_)\n\n def training(self, features_tr, labels_tr):\n print(\"Training...\")\n self.clf.fit(features_tr, labels_tr)\n\n def predict(self, features):\n print(\"Classifying...\")\n return self.clf.predict(features)\n\n\nclass C45():\n\n def __init__(self, len_features):\n self.name = \"c45_(\" + str(len_features) + \"f)\"\n self.criterion = self.entropy\n self.min_gain = 0.5\n self.clf = None\n\n def save(self, report, version, graph):\n f_name = self.name + \"_\" + str(version) \n destination = os.path.join(path.out(), data.DATASET, self.name)\n data.saveVariable(destination, f_name, report.log)\n if graph: data.saveNodeTree(destination, f_name, self.clf)\n\n def training(self, features_tr, labels_tr):\n features_tr = np.array(features_tr).tolist()\n\n for i in range(len(features_tr)):\n features_tr[i].append(labels_tr[i])\n\n print(\"Training...\")\n self.clf = self.__training__(list(features_tr), self.criterion)\n self.prune(self.clf, self.criterion, self.min_gain)\n\n def predict(self, features):\n features = np.array(features).tolist()\n json_arr = [self.__predict__(self.clf, item) for item in features]\n print(\"Classifying...\")\n return self.predictToStandart(json_arr)\n\n def predictToStandart(self, json_arr):\n arr = []\n for item in json_arr:\n keys = list(item.keys())\n quant = [item[x] for x in keys]\n i_max = np.argmax(quant)\n arr.append(keys[i_max])\n return arr\n\n def __training__(self, rows, criterion):\n if len(rows) == 0: return Node()\n current_score = criterion(rows)\n best_gain = 0.0\n best_attr = None\n best_sets = None\n column_count = len(rows[0]) - 1\n\n for col in range(0, column_count):\n columnValues = [row[col] for row in rows]\n\n for value in columnValues:\n (set1, set2) = self.divideSet(rows, col, value)\n p = float(len(set1)) / len(rows)\n gain = current_score - (p*criterion(set1)) - ((1-p)*criterion(set2))\n \n if (gain > best_gain and 
len(set1) > 0 and len(set2) > 0):\n best_gain = gain\n best_attr = (col, value)\n best_sets = (set1, set2)\n\n if (best_gain > 0):\n true_branch = self.__training__(best_sets[0], criterion)\n false_branch = self.__training__(best_sets[1], criterion)\n return Node(col=best_attr[0], value=best_attr[1], true_branch=true_branch, false_branch=false_branch)\n else:\n return Node(results=self.uniqueCounts(rows))\n\n def divideSet(self, rows, column, value):\n splittingFunction = None\n if (isinstance(value, int) or isinstance(value, float)):\n splittingFunction = lambda row: row[column] >= value\n else: \n splittingFunction = lambda row: row[column] == value\n list1 = [row for row in rows if splittingFunction(row)]\n list2 = [row for row in rows if not splittingFunction(row)]\n return (list1, list2)\n\n def uniqueCounts(self, rows):\n results = {}\n for row in rows:\n r = row[-1]\n if (r not in results): results[r] = 0\n results[r] += 1\n return results\n \n def entropy(self, rows):\n log2 = lambda x: np.log(x)/np.log(2)\n results = self.uniqueCounts(rows)\n entr = 0.0\n for r in results:\n p = float(results[r])/len(rows)\n entr -= p*log2(p)\n return entr\n \n def gini(self, rows):\n total = len(rows)\n counts = self.uniqueCounts(rows)\n imp = 0.0\n for k1 in counts:\n p1 = float(counts[k1])/total \n for k2 in counts:\n if k1 == k2: continue\n p2 = float(counts[k2])/total\n imp += p1*p2\n return imp\n\n def prune(self, tree, criterion, min_gain):\n if (tree.true_branch.results == None): \n self.prune(tree.true_branch, criterion, min_gain)\n if (tree.false_branch.results == None): \n self.prune(tree.false_branch, criterion, min_gain)\n\n if (tree.true_branch.results != None and tree.false_branch.results != None):\n tb, fb = [], []\n for v, c in tree.true_branch.results.items(): tb += [[v]] * c\n for v, c in tree.false_branch.results.items(): fb += [[v]] * c\n\n p = float(len(tb)) / len(tb + fb)\n delta = criterion(tb+fb) - p*criterion(tb) - (1-p)*criterion(fb)\n\n if (delta < min_gain):\n print('A branch was pruned: gain ~ %f' % delta)\n tree.true_branch, tree.false_branch = None, None\n tree.results = self.uniqueCounts(tb + fb)\n\n def __predict__(self, tree, features): \n if (tree.results != None):\n return tree.results\n else:\n v = features[tree.col]\n branch = None\n if (isinstance(v, int) or isinstance(v, float)):\n if (v >= tree.value): branch = tree.true_branch\n else: branch = tree.false_branch\n else:\n if (v == tree.value): branch = tree.true_branch\n else: branch = tree.false_branch\n return self.__predict__(branch, features)\n\nclass Node():\n\n def __init__(self, col=-1, value=None, true_branch=None, false_branch=None, results=None):\n self.col = col\n self.value = value\n self.true_branch = true_branch\n self.false_branch = false_branch\n self.results = results"
] |
[
[
"numpy.log",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"numpy.argmax",
"numpy.array"
]
] |
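The RandomForest and CART wrappers above delegate training and prediction to scikit-learn estimators; a minimal end-to-end sketch of the same fit/predict flow on a synthetic dataset (class count, sizes, and estimator count are arbitrary example values):

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

features = np.random.rand(200, 8)
labels = np.random.randint(0, 3, 200)

X_tr, X_te, y_tr, y_te = train_test_split(features, labels, test_size=0.25)

clf = RandomForestClassifier(criterion="entropy", n_estimators=100, random_state=0)
clf.fit(X_tr, y_tr)
predictions = clf.predict(X_te)
print((predictions == y_te).mean())   # rough accuracy on the synthetic split
```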
mares29/rio-tiler
|
[
"72ddbaa7ff1a972774cdb94fea664a9d017409bf"
] |
[
"tests/conftest.py"
] |
[
"\"\"\"``pytest`` configuration.\"\"\"\n\nimport os\n\nimport pytest\n\nimport numpy\n\nimport rasterio\nfrom rasterio.io import MemoryFile\nfrom rasterio.transform import from_bounds\nfrom rasterio.enums import ColorInterp\n\nfrom rio_cogeo.cogeo import cog_translate\nfrom rio_cogeo.profiles import cog_profiles\n\n\nwith rasterio.Env() as env:\n drivers = env.drivers()\n\n\nrequires_webp = pytest.mark.skipif(\n \"WEBP\" not in drivers.keys(), reason=\"Only relevant if WEBP drivers is supported\"\n)\n\n\n@pytest.fixture\ndef cloudoptimized_geotiff():\n \"\"\"Create CloudOptimized GeoTIFF fixture.\"\"\"\n\n def _cloudoptimized_geotiff(\n output_dir,\n name,\n crs,\n bounds,\n dtype,\n nodata_type,\n tilesize=256,\n nband=1,\n x_size=2000,\n y_size=2000,\n ):\n fout = \"{}/{}-{}-{}-{}b.tif\".format(output_dir, name, dtype, nodata_type, nband)\n if os.path.exists(fout):\n return fout\n\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n profile_options = {\"blockxsize\": tilesize, \"blockysize\": tilesize}\n output_profile = cog_profiles.get(\"deflate\")\n output_profile.update(profile_options)\n\n arr = numpy.random.randint(1, 255, size=(nband, y_size, x_size)).astype(\n numpy.uint8\n )\n arr[:, 0:500, 0:500] = 0\n\n mask = numpy.zeros((1, y_size, x_size), dtype=numpy.uint8) + 255\n mask[:, 0:500, 0:500] = 0\n\n w, s, e, n = bounds\n src_profile = dict(\n driver=\"GTiff\",\n count=nband,\n dtype=\"uint8\",\n height=y_size,\n width=x_size,\n crs=crs,\n transform=from_bounds(w, s, e, n, x_size, y_size),\n )\n if nodata_type in [\"nodata\", \"mask\"]:\n src_profile[\"nodata\"] = 0\n elif nodata_type == \"alpha\":\n src_profile[\"count\"] = nband + 1\n\n gdal_config = dict(\n GDAL_NUM_THREADS=\"ALL_CPUS\",\n GDAL_TIFF_INTERNAL_MASK=True,\n GDAL_TIFF_OVR_BLOCKSIZE=\"128\",\n )\n\n with MemoryFile() as memfile:\n with memfile.open(**src_profile) as mem:\n ci = [ColorInterp.gray]\n if nband > 1:\n ci += [ColorInterp.undefined] * (nband - 1)\n\n if nodata_type == \"alpha\":\n data = numpy.concatenate([arr, mask])\n ci += [ColorInterp.alpha]\n else:\n data = arr\n\n mem.colorinterp = ci\n mem.write(data)\n\n cog_translate(\n mem,\n fout,\n output_profile,\n config=gdal_config,\n in_memory=True,\n dtype=dtype,\n quiet=True,\n add_mask=True if nodata_type == \"mask\" else False,\n )\n\n return fout\n\n return _cloudoptimized_geotiff\n"
] |
[
[
"numpy.concatenate",
"numpy.zeros",
"numpy.random.randint"
]
] |
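The pytest fixture above synthesises band data plus an alpha mask and stacks them with `numpy.concatenate` before writing the Cloud Optimized GeoTIFF; a small numpy-only sketch of that array construction, with sizes reduced for brevity and the rasterio/COG writing left out:

```python
import numpy as np

nband, height, width = 3, 64, 64

arr = np.random.randint(1, 255, size=(nband, height, width)).astype(np.uint8)
arr[:, 0:16, 0:16] = 0                      # carve out a nodata corner

mask = np.zeros((1, height, width), dtype=np.uint8) + 255
mask[:, 0:16, 0:16] = 0                     # alpha band: 0 = transparent

data = np.concatenate([arr, mask])          # (nband + 1, height, width)
print(data.shape)
```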