Dataset schema (one record per repository snapshot):
repo_name: string (length 6 to 130)
hexsha: list
file_path: list
code: list
apis: list
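Each record below pairs a repository name with per-file commit hashes (hexsha), file paths, full file contents (code), and the library APIs extracted from each file; the four list fields are parallel, one entry per file. A minimal sketch of iterating such records, assuming they are stored as JSON Lines in a hypothetical records.jsonl file (the file name and storage format are assumptions, not stated in this dump):

```python
import json

# Hypothetical path; this dump does not specify how the records are persisted.
with open("records.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # The list fields are parallel: one entry per file in the snapshot.
        for sha, path, code, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(record["repo_name"], path, sha[:8], len(apis), "extracted APIs")
```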
yyx1994/pytorch.repmet
[ "847a2b71fa751e6d381c233df0107a53592d8ce5", "847a2b71fa751e6d381c233df0107a53592d8ce5" ]
[ "losses/magnet_loss.py", "model_definitions/cnns/basics/protonet.py" ]
[ "\"\"\"\nTaken from vithursant's repo:\nhttps://github.com/vithursant/MagnetLoss-PyTorch/blob/master/magnet_loss/magnet_loss.py\n\"\"\"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass MagnetLoss(nn.Module):\n \"\"\"\n Magnet loss technique presented in the paper:\n ''Metric Learning with Adaptive Density Discrimination'' by Oren Rippel, Manohar Paluri, Piotr Dollar, Lubomir Bourdev in\n https://research.fb.com/wp-content/uploads/2016/05/metric-learning-with-adaptive-density-discrimination.pdf?\n\n Args:\n r: A batch of features.\n classes: Class labels for each example.\n clusters: Cluster labels for each example.\n cluster_classes: Class label for each cluster.\n n_clusters: Total number of clusters.\n alpha: The cluster separation gap hyperparameter.\n\n Returns:\n total_loss: The total magnet loss for the batch.\n losses: The loss for each example in the batch.\n \"\"\"\n def __init__(self, m, d, alpha=1.0, L=128, style='closest'):\n super(MagnetLoss, self).__init__()\n self.r = None\n self.classes = None\n self.clusters = None\n self.cluster_classes = None\n self.n_clusters = None\n self.alpha = alpha\n self.L = L\n self.style = style\n self.n_clusters = m\n self.examples_per_cluster = d\n self.variances = torch.tensor([0.0])\n\n def forward(self, input, target): # reps and classes, x and y\n\n GPU_INT_DTYPE = torch.cuda.IntTensor\n GPU_LONG_DTYPE = torch.cuda.LongTensor\n GPU_FLOAT_DTYPE = torch.cuda.FloatTensor\n\n self.r = input\n classes = target.cpu().numpy()\n self.classes = torch.from_numpy(classes).type(GPU_LONG_DTYPE)\n self.clusters, _ = torch.sort(torch.arange(0, float(self.n_clusters)).repeat(self.examples_per_cluster))\n self.clusters = self.clusters.type(GPU_INT_DTYPE)\n self.cluster_classes = self.classes[0:self.n_clusters*self.examples_per_cluster:self.examples_per_cluster]\n\n # Take cluster means within the batch\n cluster_examples = dynamic_partition(self.r, self.clusters, self.n_clusters)\n\n cluster_means = torch.stack([torch.mean(x, dim=0) for x in cluster_examples])\n\n sample_costs = compute_euclidean_distance(cluster_means, expand_dims(self.r, 1))\n\n self.sample_costs = sample_costs\n\n clusters_tensor = self.clusters.type(GPU_FLOAT_DTYPE)\n n_clusters_tensor = torch.arange(0, self.n_clusters).type(GPU_FLOAT_DTYPE)\n\n intra_cluster_mask = Variable(comparison_mask(clusters_tensor, n_clusters_tensor).type(GPU_FLOAT_DTYPE))\n\n intra_cluster_costs = torch.sum(intra_cluster_mask * sample_costs, dim=1)\n\n N = self.r.size()[0] # N = M*D (Batch size)\n\n variance = torch.sum(intra_cluster_costs) / float(N - 1)\n\n # self.variances = np.hstack((self.variances, variance.data.cpu().numpy()))\n self.variances = torch.cat((self.variances, variance.unsqueeze(0).cpu()), 0)\n\n var_normalizer = -1 / (2 * variance**2)\n\n # Compute numerator\n numerator = torch.exp(var_normalizer * intra_cluster_costs - self.alpha)\n\n classes_tensor = self.classes.type(GPU_FLOAT_DTYPE)\n cluster_classes_tensor = self.cluster_classes.type(GPU_FLOAT_DTYPE)\n\n # Compute denominator\n diff_class_mask = Variable(comparison_mask(classes_tensor, cluster_classes_tensor).type(GPU_FLOAT_DTYPE))\n\n diff_class_mask = 1 - diff_class_mask # Logical not on ByteTensor\n\n denom_sample_costs = torch.exp(var_normalizer * sample_costs)\n\n denominator = torch.sum(diff_class_mask * denom_sample_costs, dim=1)\n\n epsilon = 1e-8\n\n losses = F.relu(-torch.log(numerator / (denominator + epsilon) + epsilon))\n\n 
total_loss = torch.mean(losses)\n\n\n if self.style == 'closest': # acts on the clusters in this batch/episode rather than those calculate over the entire set!!\n _, pred = sample_costs.min(1)\n acc = pred.eq(clusters_tensor.type(GPU_LONG_DTYPE)).float().mean()\n else:\n raise NotImplementedError\n # TODO implement the version that takes into account variance\n # TODO note it will still just be acc on batch rather than set... (unlike val)\n # num_classes = len(np.unique(self.cluster_classes.cpu())) # m # the number of classes in this batch\n #\n # num_clusters = cluster_means.size()[0] # m*k\n #\n # # Sort the clusters by closest distance to sample\n # sorted_sample_costs, indices = torch.sort(sample_costs)\n # sorted_sample_costs = sorted_sample_costs.squeeze()\n # indices = indices.type(GPU_LONG_DTYPE).squeeze()\n # sorted_cluster_classes = self.cluster_classes[indices]\n #\n # # If L < num_clusters then lets only take the top L\n # if self.L < num_clusters:\n # sorted_sample_costs = sorted_sample_costs[:self.L]\n # sorted_cluster_classes = sorted_cluster_classes[:self.L]\n # num_clusters = self.L\n #\n # normalised_costs = torch.exp(var_normalizer * sorted_sample_costs).type(GPU_FLOAT_DTYPE)\n #\n # per_class_costs = torch.zeros(num_classes, num_clusters).type(GPU_FLOAT_DTYPE) # todo, address this issue of num_classes not matching batch_size and that being a problem...\n # per_class_costs = per_class_costs.scatter_(0, sorted_cluster_classes.unsqueeze(0), normalised_costs.unsqueeze(0))\n # numerator = per_class_costs.sum(1)\n #\n # denominator = torch.sum(normalised_costs)\n #\n # epsilon = 1e-8\n #\n # probs = numerator / (denominator + epsilon)\n #\n # _, pred = probs.max(0)\n # acc = pred.eq(target).float()\n return total_loss, losses, pred, acc\n\n\nclass MagnetLossEval(nn.Module):\n\n def __init__(self, L=128, style='magnet'):\n super(MagnetLossEval, self).__init__()\n self.cluster_means = None\n self.cluster_classes = None\n self.variance = None\n self.L = L\n self.style = style\n\n def forward(self, input, target): # reps and classes, x and y # expects batch size of 1\n\n # make sure these have been set with the callbacks!!\n assert self.cluster_means is not None\n assert self.cluster_classes is not None\n assert self.variance is not None\n\n GPU_INT_DTYPE = torch.cuda.IntTensor\n GPU_LONG_DTYPE = torch.cuda.LongTensor\n GPU_FLOAT_DTYPE = torch.cuda.FloatTensor\n\n num_classes = np.max(self.cluster_classes) + 1 # the number of classes of the dataset\n cluster_means = torch.from_numpy(self.cluster_means).type(GPU_FLOAT_DTYPE)\n cluster_classes = torch.from_numpy(self.cluster_classes).type(GPU_LONG_DTYPE)\n sample_costs = compute_euclidean_distance(cluster_means, expand_dims(input, 1))\n\n if self.style == 'closest':\n _, pred = sample_costs.min(1)\n pred = cluster_classes[pred]\n acc = pred.eq(target).float()\n return torch.zeros(1), torch.zeros(1), pred, acc\n else:\n num_clusters = cluster_means.size()[0]\n\n # Sort the clusters by closest distance to sample\n sorted_sample_costs, indices = torch.sort(sample_costs)\n sorted_sample_costs = sorted_sample_costs.squeeze()\n indices = indices.type(GPU_LONG_DTYPE).squeeze()\n sorted_cluster_classes = cluster_classes[indices]\n\n # If L < num_clusters then lets only take the top L\n if self.L < num_clusters:\n sorted_sample_costs = sorted_sample_costs[:self.L]\n sorted_cluster_classes = sorted_cluster_classes[:self.L]\n num_clusters = self.L\n\n var_normalizer = -1 / (2 * self.variance ** 2)\n\n normalised_costs = 
torch.exp(var_normalizer * sorted_sample_costs).type(GPU_FLOAT_DTYPE)\n\n per_class_costs = torch.zeros(num_classes, num_clusters).type(GPU_FLOAT_DTYPE)\n per_class_costs = per_class_costs.scatter_(0, sorted_cluster_classes.unsqueeze(0), normalised_costs.unsqueeze(0))\n numerator = per_class_costs.sum(1)\n\n denominator = torch.sum(normalised_costs)\n\n epsilon = 1e-8\n\n probs = numerator / (denominator + epsilon)\n\n _, pred = probs.max(0)\n acc = pred.eq(target).float()\n\n return torch.zeros(1), torch.zeros(1), pred, acc\n\n\ndef expand_dims(var, dim=0):\n \"\"\" Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html).\n var = torch.range(0, 9).view(-1, 2)\n torch.expand_dims(var, 0).size()\n # (1, 5, 2)\n \"\"\"\n sizes = list(var.size())\n sizes.insert(dim, 1)\n return var.view(*sizes)\n\n\ndef comparison_mask(a_labels, b_labels):\n \"\"\"Computes boolean mask for distance comparisons\"\"\"\n return torch.eq(expand_dims(a_labels, 1),\n expand_dims(b_labels, 0))\n\n\ndef dynamic_partition(X, partitions, n_clusters):\n \"\"\"Partitions the data into the number of cluster bins\"\"\"\n cluster_bin = torch.chunk(X, n_clusters)\n return cluster_bin\n\n\ndef compute_euclidean_distance(x, y):\n return torch.sum((x - y)**2, dim=2)\n", "\"\"\"\nTaken from orobix\nhttps://github.com/orobix/Prototypical-Networks-for-Few-shot-Learning-PyTorch/blob/master/src/protonet.py\n\"\"\"\n\nimport torch.nn as nn\n\n\ndef conv_block(in_channels, out_channels):\n '''\n returns a block conv-bn-relu-pool\n '''\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.MaxPool2d(2)\n )\n\n\nclass ProtoNet(nn.Module):\n '''\n Model as described in the reference paper,\n source: https://github.com/jakesnell/prototypical-networks/blob/f0c48808e496989d01db59f86d4449d7aee9ab0c/protonets/models/few_shot.py#L62-L84\n '''\n def __init__(self, x_dim=1, hid_dim=64, z_dim=64):\n super(ProtoNet, self).__init__()\n self.encoder = nn.Sequential(\n conv_block(x_dim, hid_dim),\n conv_block(hid_dim, hid_dim),\n conv_block(hid_dim, hid_dim),\n conv_block(hid_dim, z_dim),\n )\n\n def forward(self, x):\n x = self.encoder(x)\n return x.view(x.size(0), -1) # Flatten\n" ]
[ [ "numpy.max", "torch.zeros", "torch.arange", "torch.from_numpy", "torch.sort", "torch.tensor", "torch.log", "torch.exp", "torch.mean", "torch.chunk", "torch.sum" ], [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d" ] ]
JaGeo/emmet
[ "db01498d1136fc499961277f0b0edce3b9ddf386" ]
[ "emmet-core/emmet/core/vasp/validation.py" ]
[ "from datetime import datetime\nfrom typing import Dict, List, Union\n\nimport numpy as np\nfrom pydantic import Field, PyObject\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.io.vasp.sets import VaspInputSet\n\nfrom emmet.core.settings import EmmetSettings\nfrom emmet.core.base import EmmetBaseModel\nfrom emmet.core.mpid import MPID\nfrom emmet.core.utils import DocEnum\nfrom emmet.core.vasp.task import TaskDocument\n\nSETTINGS = EmmetSettings()\n\n\nclass DeprecationMessage(DocEnum):\n MANUAL = \"M\", \"manual deprecation\"\n KPTS = \"C001\", \"Too few KPoints\"\n KSPACING = \"C002\", \"KSpacing not high enough\"\n ENCUT = \"C002\", \"ENCUT too low\"\n FORCES = \"C003\", \"Forces too large\"\n CONVERGENCE = \"E001\", \"Calculation did not converge\"\n MAX_SCF = \"E002\", \"Max SCF gradient too large\"\n LDAU = \"I001\", \"LDAU Parameters don't match the inputset\"\n\n\nclass ValidationDoc(EmmetBaseModel):\n \"\"\"\n Validation document for a VASP calculation\n \"\"\"\n\n task_id: MPID = Field(..., description=\"The task_id for this validation document\")\n valid: bool = Field(False, description=\"Whether this task is valid or not\")\n last_updated: datetime = Field(\n description=\"Last updated date for this document\",\n default_factory=datetime.utcnow,\n )\n reasons: List[Union[DeprecationMessage, str]] = Field(\n None, description=\"List of deprecation tags detailing why this task isn't valid\"\n )\n warnings: List[str] = Field(\n [], description=\"List of potential warnings about this calculation\"\n )\n data: Dict = Field(\n description=\"Dictioary of data used to perform validation.\"\n \" Useful for post-mortem analysis\"\n )\n\n class Config:\n extra = \"allow\"\n\n @classmethod\n def from_task_doc(\n cls,\n task_doc: TaskDocument,\n kpts_tolerance: float = SETTINGS.VASP_KPTS_TOLERANCE,\n kspacing_tolerance: float = SETTINGS.VASP_KSPACING_TOLERANCE,\n input_sets: Dict[str, PyObject] = SETTINGS.VASP_DEFAULT_INPUT_SETS,\n LDAU_fields: List[str] = SETTINGS.VASP_CHECKED_LDAU_FIELDS,\n max_allowed_scf_gradient: float = SETTINGS.VASP_MAX_SCF_GRADIENT,\n ) -> \"ValidationDoc\":\n \"\"\"\n Determines if a calculation is valid based on expected input parameters from a pymatgen inputset\n\n Args:\n task_doc: the task document to process\n kpts_tolerance: the tolerance to allow kpts to lag behind the input set settings\n kspacing_tolerance: the tolerance to allow kspacing to lag behind the input set settings\n input_sets: a dictionary of task_types -> pymatgen input set for validation\n LDAU_fields: LDAU fields to check for consistency\n max_allowed_scf_gradient: maximum uphill gradient allowed for SCF steps after the\n initial equillibriation period\n \"\"\"\n\n structure = task_doc.output.structure\n calc_type = task_doc.calc_type\n inputs = task_doc.orig_inputs\n bandgap = task_doc.output.bandgap\n chemsys = task_doc.chemsys\n\n reasons = []\n data = {}\n warnings = []\n\n if str(calc_type) in input_sets:\n\n # Ensure inputsets that need the bandgap get it\n try:\n valid_input_set: VaspInputSet = input_sets[str(calc_type)](\n structure, bandgap=bandgap\n )\n except TypeError:\n valid_input_set = input_sets[str(calc_type)](structure)\n\n # Checking K-Points\n # Calculations that use KSPACING will not have a .kpoints attr\n if valid_input_set.kpoints is not None:\n valid_num_kpts = valid_input_set.kpoints.num_kpts or np.prod(\n valid_input_set.kpoints.kpts[0]\n )\n num_kpts = inputs.get(\"kpoints\", {}).get(\"nkpoints\", 0) or np.prod(\n inputs.get(\"kpoints\", 
{}).get(\"kpoints\", [1, 1, 1])\n )\n data[\"kpts_ratio\"] = num_kpts / valid_num_kpts\n if data[\"kpts_ratio\"] < kpts_tolerance:\n reasons.append(DeprecationMessage.KPTS)\n\n else:\n valid_kspacing = valid_input_set.incar.get(\"KSPACING\", 0)\n if inputs.get(\"incar\", {}).get(\"KSPACING\"):\n data[\"kspacing_delta\"] = (\n inputs[\"incar\"].get(\"KSPACING\") - valid_kspacing\n )\n # larger KSPACING means fewer k-points\n if data[\"kspacing_delta\"] > kspacing_tolerance:\n warnings.append(\n f\"KSPACING is greater than input set: {data['kspacing_delta']}\"\n f\" lower than {kspacing_tolerance} \",\n )\n elif data[\"kspacing_delta\"] < kspacing_tolerance:\n warnings.append(\n f\"KSPACING is lower than input set: {data['kspacing_delta']}\"\n f\" lower than {kspacing_tolerance} \",\n )\n\n # warn, but don't invalidate if wrong ISMEAR\n valid_ismear = valid_input_set.incar.get(\"ISMEAR\", 1)\n curr_ismear = inputs.get(\"incar\", {}).get(\"ISMEAR\", 1)\n if curr_ismear != valid_ismear:\n warnings.append(\n f\"Inappropriate smearing settings. Set to {curr_ismear},\"\n f\" but should be {valid_ismear}\"\n )\n\n # Checking ENCUT\n encut = inputs.get(\"incar\", {}).get(\"ENCUT\")\n valid_encut = valid_input_set.incar[\"ENCUT\"]\n data[\"encut_ratio\"] = float(encut) / valid_encut # type: ignore\n if data[\"encut_ratio\"] < 1:\n reasons.append(DeprecationMessage.ENCUT)\n\n # U-value checks\n # NOTE: Reverting to old method of just using input.hubbards which is wrong in many instances\n input_hubbards = task_doc.input.hubbards\n\n if valid_input_set.incar.get(\"LDAU\", False) or len(input_hubbards) > 0:\n # Assemble required input_set LDAU params into dictionary\n input_set_hubbards = dict(\n zip(\n valid_input_set.poscar.site_symbols,\n valid_input_set.incar.get(\"LDAUU\", []),\n )\n )\n\n all_elements = list(\n set(input_set_hubbards.keys()) | set(input_hubbards.keys())\n )\n diff_ldau_params = {\n el: (input_set_hubbards.get(el, 0), input_hubbards.get(el, 0))\n for el in all_elements\n if not np.allclose(\n input_set_hubbards.get(el, 0), input_hubbards.get(el, 0)\n )\n }\n\n if len(diff_ldau_params) > 0:\n reasons.append(DeprecationMessage.LDAU)\n warnings.extend(\n [\n f\"U-value for {el} should be {good} but was {bad}\"\n for el, (good, bad) in diff_ldau_params.items()\n ]\n )\n\n # Check the max upwards SCF step\n skip = abs(inputs.get(\"incar\", {}).get(\"NLEMDL\", -5)) - 1\n energies = [\n d[\"e_fr_energy\"]\n for d in task_doc.calcs_reversed[0][\"output\"][\"ionic_steps\"][-1][\n \"electronic_steps\"\n ]\n ]\n if len(energies) > skip:\n max_gradient = np.max(np.gradient(energies)[skip:])\n data[\"max_gradient\"] = max_gradient\n if max_gradient > max_allowed_scf_gradient:\n reasons.append(DeprecationMessage.MAX_SCF)\n else:\n warnings.append(\n \"Not enough electronic steps to compute valid gradient\"\n \" and compare with max SCF gradient tolerance\"\n )\n\n # Check for Am and Po elements. 
These currently do not have proper elemental entries\n # and will not get treated properly by the thermo builder.\n if (\"Am\" in chemsys) or (\"Po\" in chemsys):\n reasons.append(DeprecationMessage.MANUAL)\n\n doc = ValidationDoc(\n task_id=task_doc.task_id,\n calc_type=calc_type,\n run_type=task_doc.run_type,\n valid=len(reasons) == 0,\n reasons=reasons,\n data=data,\n warnings=warnings,\n )\n return doc\n\n\ndef _get_unsorted_symbol_set(structure: Structure):\n \"\"\"\n Have to build structure_symbol set manually to ensure we get the right order since pymatgen sorts its symbol_set list\n \"\"\"\n return list(\n {\n str(sp): 1 for site in structure for sp, v in site.species.items() if v != 0\n }.keys()\n )\n" ]
[ [ "numpy.prod", "numpy.gradient" ] ]
kornosk/transformers
[ "4ce6bcc31095ddb8d4cdd79831217f200c53e801" ]
[ "examples/pytorch/text-classification/run_glue.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE.\"\"\"\n# You can also adapt this script on your own text classification task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport random\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport numpy as np\nfrom datasets import load_dataset, load_metric\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n DataCollatorWithPadding,\n EvalPrediction,\n HfArgumentParser,\n PretrainedConfig,\n Trainer,\n TrainingArguments,\n default_data_collator,\n set_seed,\n)\nfrom transformers.trainer_utils import get_last_checkpoint, is_main_process\nfrom transformers.utils import check_min_version\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.6.0.dev0\")\n\ntask_to_keys = {\n \"cola\": (\"sentence\", None),\n \"mnli\": (\"premise\", \"hypothesis\"),\n \"mrpc\": (\"sentence1\", \"sentence2\"),\n \"qnli\": (\"question\", \"sentence\"),\n \"qqp\": (\"question1\", \"question2\"),\n \"rte\": (\"sentence1\", \"sentence2\"),\n \"sst2\": (\"sentence\", None),\n \"stsb\": (\"sentence1\", \"sentence2\"),\n \"wnli\": (\"sentence1\", \"sentence2\"),\n}\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n\n Using `HfArgumentParser` we can turn this class\n into argparse arguments to be able to specify them on\n the command line.\n \"\"\"\n\n task_name: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The name of the task to train on: \" + \", \".join(task_to_keys.keys())},\n )\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached preprocessed datasets or not.\"}\n )\n pad_to_max_length: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to pad all samples to `max_seq_length`. 
\"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n },\n )\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"A csv or a json file containing the training data.\"}\n )\n validation_file: Optional[str] = field(\n default=None, metadata={\"help\": \"A csv or a json file containing the validation data.\"}\n )\n test_file: Optional[str] = field(default=None, metadata={\"help\": \"A csv or a json file containing the test data.\"})\n\n def __post_init__(self):\n if self.task_name is not None:\n self.task_name = self.task_name.lower()\n if self.task_name not in task_to_keys.keys():\n raise ValueError(\"Unknown task, you should pick one in \" + \",\".join(task_to_keys.keys()))\n elif self.train_file is None or self.validation_file is None:\n raise ValueError(\"Need either a GLUE task or a training/validation file.\")\n else:\n train_extension = self.train_file.split(\".\")[-1]\n assert train_extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n validation_extension = self.validation_file.split(\".\")[-1]\n assert (\n validation_extension == train_extension\n ), \"`validation_file` should have the same extension (csv or json) as `train_file`.\"\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n 
# If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)\n # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the\n # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named\n # label if at least two columns are provided.\n #\n # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this\n # single column. 
You can easily tweak this behavior (see below)\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.task_name is not None:\n # Downloading and loading a dataset from the hub.\n datasets = load_dataset(\"glue\", data_args.task_name, cache_dir=model_args.cache_dir)\n else:\n # Loading a dataset from your local files.\n # CSV/JSON training and evaluation files are needed.\n data_files = {\"train\": data_args.train_file, \"validation\": data_args.validation_file}\n\n # Get the test dataset: you can provide your own CSV/JSON test file (see below)\n # when you use `do_predict` without specifying a GLUE benchmark task.\n if training_args.do_predict:\n if data_args.test_file is not None:\n train_extension = data_args.train_file.split(\".\")[-1]\n test_extension = data_args.test_file.split(\".\")[-1]\n assert (\n test_extension == train_extension\n ), \"`test_file` should have the same extension (csv or json) as `train_file`.\"\n data_files[\"test\"] = data_args.test_file\n else:\n raise ValueError(\"Need either a GLUE task or a test file for `do_predict`.\")\n\n for key in data_files.keys():\n logger.info(f\"load a local file for {key}: {data_files[key]}\")\n\n if data_args.train_file.endswith(\".csv\"):\n # Loading a dataset from local csv files\n datasets = load_dataset(\"csv\", data_files=data_files, cache_dir=model_args.cache_dir)\n else:\n # Loading a dataset from local json files\n datasets = load_dataset(\"json\", data_files=data_files, cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Labels\n if data_args.task_name is not None:\n is_regression = data_args.task_name == \"stsb\"\n if not is_regression:\n label_list = datasets[\"train\"].features[\"label\"].names\n num_labels = len(label_list)\n else:\n num_labels = 1\n else:\n # Trying to have good defaults here, don't hesitate to tweak to your needs.\n is_regression = datasets[\"train\"].features[\"label\"].dtype in [\"float32\", \"float64\"]\n if is_regression:\n num_labels = 1\n else:\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n label_list = datasets[\"train\"].unique(\"label\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = AutoModelForSequenceClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n 
use_auth_token=True if model_args.use_auth_token else None,\n )\n\n # Preprocessing the datasets\n if data_args.task_name is not None:\n sentence1_key, sentence2_key = task_to_keys[data_args.task_name]\n else:\n # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.\n non_label_column_names = [name for name in datasets[\"train\"].column_names if name != \"label\"]\n if \"sentence1\" in non_label_column_names and \"sentence2\" in non_label_column_names:\n sentence1_key, sentence2_key = \"sentence1\", \"sentence2\"\n else:\n if len(non_label_column_names) >= 2:\n sentence1_key, sentence2_key = non_label_column_names[:2]\n else:\n sentence1_key, sentence2_key = non_label_column_names[0], None\n\n # Padding strategy\n if data_args.pad_to_max_length:\n padding = \"max_length\"\n else:\n # We will pad later, dynamically at batch creation, to the max sequence length in each batch\n padding = False\n\n # Some models have set the order of the labels to use, so let's make sure we do use it.\n label_to_id = None\n if (\n model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id\n and data_args.task_name is not None\n and not is_regression\n ):\n # Some have all caps in their config, some don't.\n label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}\n if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):\n label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}\n else:\n logger.warning(\n \"Your model seems to have been trained with labels, but they don't match the dataset: \",\n f\"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.\"\n \"\\nIgnoring the model labels as a result.\",\n )\n elif data_args.task_name is None and not is_regression:\n label_to_id = {v: i for i, v in enumerate(label_list)}\n\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n def preprocess_function(examples):\n # Tokenize the texts\n args = (\n (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])\n )\n result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)\n\n # Map labels to IDs (not necessary for GLUE tasks)\n if label_to_id is not None and \"label\" in examples:\n result[\"label\"] = [(label_to_id[l] if l != -1 else -1) for l in examples[\"label\"]]\n return result\n\n datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)\n if training_args.do_train:\n if \"train\" not in datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = datasets[\"train\"]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n\n if training_args.do_eval:\n if \"validation\" not in datasets and \"validation_matched\" not in datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = datasets[\"validation_matched\" if data_args.task_name == \"mnli\" else \"validation\"]\n if data_args.max_eval_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))\n\n if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:\n if \"test\" not in datasets and \"test_matched\" not in datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n predict_dataset = datasets[\"test_matched\" if data_args.task_name == \"mnli\" else \"test\"]\n if data_args.max_predict_samples is not None:\n predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))\n\n # Log a few random samples from the training set:\n if training_args.do_train:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # Get the metric function\n if data_args.task_name is not None:\n metric = load_metric(\"glue\", data_args.task_name)\n # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from\n # compute_metrics\n\n # You can define your custom compute_metrics function. 
It takes an `EvalPrediction` object (a namedtuple with a\n # predictions and label_ids field) and has to return a dictionary string to float.\n def compute_metrics(p: EvalPrediction):\n preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions\n preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)\n if data_args.task_name is not None:\n result = metric.compute(predictions=preds, references=p.label_ids)\n if len(result) > 1:\n result[\"combined_score\"] = np.mean(list(result.values())).item()\n return result\n elif is_regression:\n return {\"mse\": ((preds - p.label_ids) ** 2).mean().item()}\n else:\n return {\"accuracy\": (preds == p.label_ids).astype(np.float32).mean().item()}\n\n # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.\n if data_args.pad_to_max_length:\n data_collator = default_data_collator\n elif training_args.fp16:\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)\n else:\n data_collator = None\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n compute_metrics=compute_metrics,\n tokenizer=tokenizer,\n data_collator=data_collator,\n )\n\n # Training\n if training_args.do_train:\n checkpoint = None\n if training_args.resume_from_checkpoint is not None:\n checkpoint = training_args.resume_from_checkpoint\n elif last_checkpoint is not None:\n checkpoint = last_checkpoint\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n metrics = train_result.metrics\n max_train_samples = (\n data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n tasks = [data_args.task_name]\n eval_datasets = [eval_dataset]\n if data_args.task_name == \"mnli\":\n tasks.append(\"mnli-mm\")\n eval_datasets.append(datasets[\"validation_mismatched\"])\n\n for eval_dataset, task in zip(eval_datasets, tasks):\n metrics = trainer.evaluate(eval_dataset=eval_dataset)\n\n max_eval_samples = (\n data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)\n )\n metrics[\"eval_samples\"] = min(max_eval_samples, len(eval_dataset))\n\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n\n if training_args.do_predict:\n logger.info(\"*** Predict ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n tasks = [data_args.task_name]\n predict_datasets = [predict_dataset]\n if data_args.task_name == \"mnli\":\n tasks.append(\"mnli-mm\")\n predict_datasets.append(datasets[\"test_mismatched\"])\n\n for predict_dataset, task in zip(predict_datasets, tasks):\n # Removing the `label` columns because it contains -1 and Trainer won't like that.\n predict_dataset.remove_columns_(\"label\")\n predictions = trainer.predict(predict_dataset, metric_key_prefix=\"predict\").predictions\n predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)\n\n output_predict_file = 
os.path.join(training_args.output_dir, f\"predict_results_{task}.txt\")\n if trainer.is_world_process_zero():\n with open(output_predict_file, \"w\") as writer:\n logger.info(f\"***** Predict results {task} *****\")\n writer.write(\"index\\tprediction\\n\")\n for index, item in enumerate(predictions):\n if is_regression:\n writer.write(f\"{index}\\t{item:3.3f}\\n\")\n else:\n item = label_list[item]\n writer.write(f\"{index}\\t{item}\\n\")\n\n if training_args.push_to_hub:\n kwargs = {\"finetuned_from\": model_args.model_name_or_path, \"tags\": \"text-classification\"}\n if data_args.task_name is not None:\n kwargs[\"language\"] = \"en\"\n kwargs[\"dataset_tags\"] = \"glue\"\n kwargs[\"dataset_args\"] = data_args.task_name\n kwargs[\"dataset\"] = f\"GLUE {data_args.task_name.upper()}\"\n\n trainer.push_to_hub(**kwargs)\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.squeeze", "numpy.argmax" ] ]
brightgeng/reportgen
[ "ef299686e974c4064b5552a3beeb4e5dc0c1b48a" ]
[ "reportgen/questionnaire/questionnaire.py" ]
[ "# -*- coding: utf-8 -*\n'''问卷数据分析工具包\nCreated on Tue Nov 8 20:05:36 2016\n@author: JSong\n\n1、针对问卷星数据,编写并封装了很多常用算法\n2、利用report工具包,能将数据直接导出为PPTX\n\n该工具包支持一下功能:\n1、编码问卷星、问卷网等数据\n2、封装描述统计和交叉分析函数\n3、支持生成一份整体的报告和相关数据\n'''\n\n\n\n\nimport os\nimport re\nimport sys\nimport math\nimport time\n\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .. import report as rpt\nfrom .. import associate\n\n__all__=['read_code',\n 'save_code',\n 'spec_rcode',\n 'dataText_to_code',\n 'dataCode_to_text',\n 'var_combine',\n 'wenjuanwang',\n 'wenjuanxing',\n 'load_data',\n 'read_data',\n 'save_data',\n 'data_merge',\n 'clean_ftime',\n 'data_auto_code',\n 'qdata_flatten',\n 'sample_size_cal',\n 'confidence_interval',\n 'gof_test',\n 'chi2_test',\n 'fisher_exact',\n 'anova',\n 'mca',\n 'cluster',\n 'scatter',\n 'sankey',\n 'qtable',\n 'association_rules',\n 'contingency',\n 'cross_chart',\n 'summary_chart',\n 'onekey_gen',\n 'scorpion']\n\n\n\n#=================================================================\n#\n#\n# 【问卷数据处理】\n#\n#\n#==================================================================\n\n\n\ndef read_code(filename):\n '''读取code编码文件并输出为字典格式\n 1、支持json格式\n 2、支持本包规定的xlsx格式\n see alse to_code\n '''\n file_type=os.path.splitext(filename)[1][1:]\n if file_type == 'json':\n import json\n code=json.load(filename)\n return code\n d=pd.read_excel(filename,header=None)\n d=d[d.any(axis=1)]#去除空行\n d.fillna('NULL',inplace=True)\n d=d.as_matrix()\n code={}\n for i in range(len(d)):\n tmp=d[i,0].strip()\n if tmp == 'key':\n # 识别题号\n code[d[i,1]]={}\n key=d[i,1]\n elif tmp in ['qlist','code_order']:\n # 识别字典值为列表的字段\n ind=np.argwhere(d[i+1:,0]!='NULL')\n if len(ind)>0:\n j=i+1+ind[0][0]\n else:\n j=len(d)\n tmp2=list(d[i:j,1])\n # 列表中字符串的格式化,去除前后空格\n for i in range(len(tmp2)):\n if isinstance(tmp2[i],str):\n tmp2[i]=tmp2[i].strip()\n code[key][tmp]=tmp2\n elif tmp in ['code','code_r']:\n # 识别字典值为字典的字段\n ind=np.argwhere(d[i+1:,0]!='NULL')\n if len(ind)>0:\n j=i+1+ind[0][0]\n else:\n j=len(d)\n tmp1=list(d[i:j,1])\n tmp2=list(d[i:j,2])\n for i in range(len(tmp2)):\n if isinstance(tmp2[i],str):\n tmp2[i]=tmp2[i].strip()\n #tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]\n code[key][tmp]=dict(zip(tmp1,tmp2))\n # 识别其他的列表字段\n elif (tmp!='NULL') and (d[i,2]=='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):\n ind=np.argwhere(d[i+1:,0]!='NULL')\n if len(ind)>0:\n j=i+1+ind[0][0]\n else:\n j=len(d)\n if i==len(d)-1:\n code[key][tmp]=d[i,1]\n else:\n tmp2=list(d[i:j,1])\n for i in range(len(tmp2)):\n if isinstance(tmp2[i],str):\n tmp2[i]=tmp2[i].strip()\n code[key][tmp]=tmp2\n # 识别其他的字典字段\n elif (tmp!='NULL') and (d[i,2]!='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):\n ind=np.argwhere(d[i+1:,0]!='NULL')\n if len(ind)>0:\n j=i+1+ind[0][0]\n else:\n j=len(d)\n tmp1=list(d[i:j,1])\n tmp2=list(d[i:j,2])\n for i in range(len(tmp2)):\n if isinstance(tmp2[i],str):\n tmp2[i]=tmp2[i].strip()\n #tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]\n code[key][tmp]=dict(zip(tmp1,tmp2))\n elif tmp == 'NULL':\n continue\n else:\n code[key][tmp]=d[i,1]\n return code\n\ndef save_code(code,filename='code.xlsx'):\n '''code本地输出\n 1、输出为json格式,根据文件名自动识别\n 2、输出为Excel格式\n see also read_code\n '''\n save_type=os.path.splitext(filename)[1][1:]\n if save_type == 'json':\n code=pd.DataFrame(code)\n code.to_json(filename,force_ascii=False)\n return\n tmp=pd.DataFrame(columns=['name','value1','value2'])\n i=0\n if all(['Q' in c[0] for c in code.keys()]):\n key_qlist=sorted(code,key=lambda 
c:int(re.findall('\\d+',c)[0]))\n else:\n key_qlist=code.keys()\n for key in key_qlist:\n code0=code[key]\n tmp.loc[i]=['key',key,'']\n i+=1\n #print(key)\n for key0 in code0:\n tmp2=code0[key0]\n if (type(tmp2) == list) and tmp2:\n tmp.loc[i]=[key0,tmp2[0],'']\n i+=1\n for ll in tmp2[1:]:\n tmp.loc[i]=['',ll,'']\n i+=1\n elif (type(tmp2) == dict) and tmp2:\n try:\n tmp2_key=sorted(tmp2,key=lambda c:float(re.findall('[\\d\\.]+','%s'%c)[-1]))\n except:\n tmp2_key=list(tmp2.keys())\n j=0\n for key1 in tmp2_key:\n if j==0:\n tmp.loc[i]=[key0,key1,tmp2[key1]]\n else:\n tmp.loc[i]=['',key1,tmp2[key1]]\n i+=1\n j+=1\n else:\n if tmp2:\n tmp.loc[i]=[key0,tmp2,'']\n i+=1\n if sys.version>'3':\n tmp.to_excel(filename,index=False,header=False)\n else:\n tmp.to_csv(filename,index=False,header=False,encoding='utf-8')\n\n\n'''问卷数据导入和编码\n对每一个题目的情形进行编码:题目默认按照Q1、Q2等给出\nQn.content: 题目内容\nQn.qtype: 题目类型,包含:单选题、多选题、填空题、排序题、矩阵单选题等\nQn.qlist: 题目列表,例如多选题对应着很多小题目\nQn.code: dict,题目选项编码\nQn.code_r: 题目对应的编码(矩阵题目专有)\nQn.code_order: 题目类别的顺序,用于PPT报告的生成[一般后期添加]\nQn.name: 特殊类型,包含:城市题、NPS题等\nQn.weight:dict,每个选项的权重\n'''\n\n\ndef dataText_to_code(df,sep,qqlist=None):\n '''编码文本数据\n\n '''\n\n if sep in [';','┋']:\n qtype='多选题'\n elif sep in ['-->','→']:\n qtype='排序题'\n if not qqlist:\n qqlist=df.columns\n # 处理多选题\n code={}\n for qq in qqlist:\n tmp=df[qq].map(lambda x : x.split(sep) if isinstance(x,str) else [])\n item_list=sorted(set(tmp.sum()))\n if qtype == '多选题':\n tmp=tmp.map(lambda x: [int(t in x) for t in item_list])\n code_tmp={'code':{},'qtype':u'多选题','qlist':[],'content':qq}\n elif qtype == '排序题':\n tmp=tmp.map(lambda x:[x.index(t)+1 if t in x else np.nan for t in item_list])\n code_tmp={'code':{},'qtype':u'排序题','qlist':[],'content':qq}\n for i,t in enumerate(item_list):\n column_name='{}_A{:.0f}'.format(qq,i+1)\n df[column_name]=tmp.map(lambda x:x[i])\n code_tmp['code'][column_name]=item_list[i]\n code_tmp['qlist']=code_tmp['qlist']+[column_name]\n code[qq]=code_tmp\n df.drop(qq,axis=1,inplace=True)\n return df,code\n\ndef dataCode_to_text(df,code=None):\n '''将按序号数据转换成文本\n\n '''\n if df.max().max()>1:\n sep='→'\n else:\n sep='┋'\n if code:\n df=df.rename(code)\n qlist=list(df.columns)\n df['text']=np.nan\n if sep in ['┋']:\n for i in df.index:\n w=df.loc[i,:]==1\n df.loc[i,'text']=sep.join(list(w.index[w]))\n elif sep in ['→']:\n for i in df.index:\n w=df.loc[i,:]\n w=w[w>=1].sort_values()\n df.loc[i,'text']=sep.join(list(w.index))\n df.drop(qlist,axis=1,inplace=True)\n return df\n\ndef var_combine(data,code,qq1,qq2,sep=',',qnum_new=None,qname_new=None):\n '''将两个变量组合成一个变量\n 例如:\n Q1:'性别',Q2: 年龄\n 组合后生成:\n 1、男_16~19岁\n 2、男_20岁~40岁\n 3、女_16~19岁\n 4、女_20~40岁\n '''\n if qnum_new is None:\n if 'Q'==qq2[0]:\n qnum_new=qq1+'_'+qq2[1:]\n else:\n qnum_new=qq1+'_'+qq2\n if qname_new is None:\n qname_new=code[qq1]['content']+'_'+code[qq2]['content']\n\n if code[qq1]['qtype']!='单选题' or code[qq2]['qtype']!='单选题':\n print('只支持组合两个单选题,请检查.')\n raise\n d1=data[code[qq1]['qlist'][0]]\n d2=data[code[qq2]['qlist'][0]]\n sm=max(code[qq1]['code'].keys())# 进位制\n sn=max(code[qq2]['code'].keys())# 进位制\n if isinstance(sm,str) or isinstance(sn,str):\n print('所选择的两个变量不符合函数要求.')\n raise\n data[qnum_new]=(d1-1)*sn+d2\n code[qnum_new]={'qtype':'单选题','qlist':[qnum_new],'content':qname_new}\n\n code_tmp={}\n for c1 in code[qq1]['code']:\n for c2 in code[qq2]['code']:\n cc=(c1-1)*sn+c2\n value='{}{}{}'.format(code[qq1]['code'][c1],sep,code[qq2]['code'][c2])\n code_tmp[cc]=value\n code[qnum_new]['code']=code_tmp\n 
print('变量已合并,新变量题号为:{}'.format(qnum_new))\n return data,code\n\n\n\ndef wenjuanwang(filepath='.\\\\data',encoding='gbk'):\n '''问卷网数据导入和编码\n 输入:\n filepath:\n 列表,[0]为按文本数据路径,[1]为按序号文本,[2]为编码文件\n 文件夹路径,函数会自动在文件夹下搜寻相关数据\n 输出:\n (data,code):\n data为按序号的数据,题目都替换成了Q_n\n code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据\n '''\n if isinstance(filepath,list):\n filename1=filepath[0]\n filename2=filepath[1]\n filename3=filepath[2]\n elif os.path.isdir(filepath):\n filename1=os.path.join(filepath,'All_Data_Readable.csv')\n filename2=os.path.join(filepath,'All_Data_Original.csv')\n filename3=os.path.join(filepath,'code.csv')\n else:\n print('can not dection the filepath!')\n\n d1=pd.read_csv(filename1,encoding=encoding)\n d1.drop([u'答题时长'],axis=1,inplace=True)\n d2=pd.read_csv(filename2,encoding=encoding)\n d3=pd.read_csv(filename3,encoding=encoding,header=None,na_filter=False)\n d3=d3.as_matrix()\n # 遍历code.csv,获取粗略的编码,暂缺qlist,矩阵单选题的code_r\n code={}\n for i in range(len(d3)):\n if d3[i,0]:\n key=d3[i,0]\n code[key]={}\n code[key]['content']=d3[i,1]\n code[key]['qtype']=d3[i,2]\n code[key]['code']={}\n code[key]['qlist']=[]\n elif d3[i,2]:\n tmp=d3[i,1]\n if code[key]['qtype'] in [u'多选题',u'排序题']:\n tmp=key+'_A'+'%s'%(tmp)\n code[key]['code'][tmp]='%s'%(d3[i,2])\n code[key]['qlist'].append(tmp)\n elif code[key]['qtype'] in [u'单选题']:\n try:\n tmp=int(tmp)\n except:\n tmp='%s'%(tmp)\n code[key]['code'][tmp]='%s'%(d3[i,2])\n code[key]['qlist']=[key]\n elif code[key]['qtype'] in [u'填空题']:\n code[key]['qlist']=[key]\n else:\n try:\n tmp=int(tmp)\n except:\n tmp='%s'%(tmp)\n code[key]['code'][tmp]='%s'%(d3[i,2])\n\n # 更新矩阵单选的code_r和qlist\n qnames_Readable=list(d1.columns)\n qnames=list(d2.columns)\n for key in code.keys():\n qlist=[]\n for name in qnames:\n if re.match(key+'_',name) or key==name:\n qlist.append(name)\n if ('qlist' not in code[key]) or (not code[key]['qlist']):\n code[key]['qlist']=qlist\n if code[key]['qtype'] in [u'矩阵单选题']:\n tmp=[qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]\n code_r=[re.findall('_([^_]*?)$',t)[0] for t in tmp]\n code[key]['code_r']=dict(zip(code[key]['qlist'],code_r))\n # 处理时间格式\n d2['start']=pd.to_datetime(d2['start'])\n d2['finish']=pd.to_datetime(d2['finish'])\n tmp=d2['finish']-d2['start']\n tmp=tmp.astype(str).map(lambda x:60*int(re.findall(':(\\d+):',x)[0])+int(re.findall(':(\\d+)\\.',x)[0]))\n ind=np.where(d2.columns=='finish')[0][0]\n d2.insert(int(ind)+1,u'答题时长(秒)',tmp)\n return (d2,code)\n\n\ndef wenjuanxing(filepath='.\\\\data',headlen=6):\n '''问卷星数据导入和编码\n 输入:\n filepath:\n 列表, filepath[0]: (23_22_0.xls)为按文本数据路径,filepath[1]: (23_22_2.xls)为按序号文本\n 文件夹路径,函数会自动在文件夹下搜寻相关数据,优先为\\d+_\\d+_0.xls和\\d+_\\d+_2.xls\n headlen: 问卷星数据基础信息的列数\n 输出:\n (data,code):\n data为按序号的数据,题目都替换成了Q_n\n code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据\n '''\n #filepath='.\\\\data'\n #headlen=6# 问卷从开始到第一道正式题的数目(一般包含序号,提交答卷时间的等等)\n if isinstance(filepath,list):\n filename1=filepath[0]\n filename2=filepath[1]\n elif os.path.isdir(filepath):\n filelist=os.listdir(filepath)\n n1=n2=0\n for f in filelist:\n s1=re.findall('\\d+_\\d+_0.xls',f)\n s2=re.findall('\\d+_\\d+_2.xls',f)\n if s1:\n filename1=s1[0]\n n1+=1\n if s2:\n filename2=s2[0]\n n2+=1\n if n1+n2==0:\n print(u'在文件夹下没有找到问卷星按序号和按文本数据,请检查目录或者工作目录.')\n return\n elif n1+n2>2:\n print(u'存在多组问卷星数据,请检查.')\n return\n filename1=os.path.join(filepath,filename1)\n filename2=os.path.join(filepath,filename2)\n else:\n print('can not dection the filepath!')\n\n d1=pd.read_excel(filename1)\n d2=pd.read_excel(filename2)\n 
d2.replace({-2:np.nan,-3:np.nan},inplace=True)\n #d1.replace({u'(跳过)':np.nan},inplace=True)\n\n code={}\n '''\n 遍历一遍按文本数据,获取题号和每个题目的类型\n '''\n for name in d1.columns[headlen:]:\n tmp=re.findall(u'^(\\d{1,3})[、::]',name)\n # 识别多选题、排序题\n if tmp:\n new_name='Q'+tmp[0]\n current_name='Q'+tmp[0]\n code[new_name]={}\n content=re.findall(u'\\d{1,3}[、::](.*)',name)\n code[new_name]['content']=content[0]\n d1.rename(columns={name:new_name},inplace=True)\n code[new_name]['qlist']=[]\n code[new_name]['code']={}\n code[new_name]['qtype']=''\n code[new_name]['name']=''\n qcontent=str(list(d1[new_name]))\n # 单选题和多选题每个选项都可能有开放题,得识别出来\n if ('〖' in qcontent) and ('〗' in qcontent):\n code[new_name]['qlist_open']=[]\n if '┋' in qcontent:\n code[new_name]['qtype']=u'多选题'\n elif '→' in qcontent:\n code[new_name]['qtype']=u'排序题'\n # 识别矩阵单选题\n else:\n tmp2=re.findall(u'^第(\\d{1,3})题\\(.*?\\)',name)\n if tmp2:\n new_name='Q'+tmp2[0]\n else:\n pass\n if new_name not in code.keys():\n j=1\n current_name=new_name\n new_name=new_name+'_R%s'%j\n code[current_name]={}\n code[current_name]['content']=current_name+'(问卷星数据中未找到题目具体内容)'\n code[current_name]['qlist']=[]\n code[current_name]['code']={}\n code[current_name]['code_r']={}\n code[current_name]['qtype']=u'矩阵单选题'\n code[current_name]['name']=''\n #code[current_name]['sample_len']=0\n d1.rename(columns={name:new_name},inplace=True)\n else:\n j+=1\n new_name=new_name+'_R%s'%j\n d1.rename(columns={name:new_name},inplace=True)\n #raise Exception(u\"can not dection the NO. of question.\")\n #print('can not dection the NO. of question')\n #print(name)\n #pass\n # 遍历按序号数据,完整编码\n d2qlist=d2.columns[6:].tolist()\n for name in d2qlist:\n tmp1=re.findall(u'^(\\d{1,3})[、::]',name)# 单选题和填空题\n tmp2=re.findall(u'^第(.*?)题',name)# 多选题、排序题和矩阵单选题\n if tmp1:\n current_name='Q'+tmp1[0]# 当前题目的题号\n d2.rename(columns={name:current_name},inplace=True)\n code[current_name]['qlist'].append(current_name)\n #code[current_name]['sample_len']=d2[current_name].count()\n ind=d2[current_name].copy()\n ind=ind.notnull()\n c1=d1.loc[ind,current_name].unique()\n c2=d2.loc[ind,current_name].unique()\n #print('========= %s========'%current_name)\n if (c2.dtype == object) or ((list(c1)==list(c2)) and len(c2)>=min(15,len(d2[ind]))) or (len(c2)>50):\n code[current_name]['qtype']=u'填空题'\n else:\n code[current_name]['qtype']=u'单选题'\n #code[current_name]['code']=dict(zip(c2,c1))\n if 'qlist_open' in code[current_name].keys():\n tmp=d1[current_name].map(lambda x: re.findall('〖(.*?)〗',x)[0] if re.findall('〖(.*?)〗',x) else '')\n ind_open=np.argwhere(d2.columns.values==current_name).tolist()[0][0]\n d2.insert(ind_open+1,current_name+'_open',tmp)\n d1[current_name]=d1[current_name].map(lambda x: re.sub('〖.*?〗','',x))\n #c1=d1.loc[ind,current_name].map(lambda x: re.sub('〖.*?〗','',x)).unique()\n code[current_name]['qlist_open']=[current_name+'_open']\n #c2_tmp=d2.loc[ind,current_name].map(lambda x: int(x) if (('%s'%x!='nan') and not(isinstance(x,str)) and (int(x)==x)) else x)\n code[current_name]['code']=dict(zip(d2.loc[ind,current_name],d1.loc[ind,current_name]))\n #code[current_name]['code']=dict(zip(c2,c1))\n\n elif tmp2:\n name0='Q'+tmp2[0]\n # 新题第一个选项\n if name0 != current_name:\n j=1#记录多选题的小题号\n current_name=name0\n c2=list(d2[name].unique())\n\n if code[current_name]['qtype'] == u'矩阵单选题':\n name1='Q'+tmp2[0]+'_R%s'%j\n c1=list(d1[name1].unique())\n code[current_name]['code']=dict(zip(c2,c1))\n #print(dict(zip(c2,c1)))\n else:\n name1='Q'+tmp2[0]+'_A%s'%j\n 
#code[current_name]['sample_len']=d2[name].notnull().sum()\n else:\n j+=1#记录多选题的小题号\n c2=list(d2[name].unique())\n if code[current_name]['qtype'] == u'矩阵单选题':\n name1='Q'+tmp2[0]+'_R%s'%j\n c1=list(d1[name1].unique())\n old_dict=code[current_name]['code'].copy()\n new_dict=dict(zip(c2,c1))\n old_dict.update(new_dict)\n code[current_name]['code']=old_dict.copy()\n else:\n name1='Q'+tmp2[0]+'_A%s'%j\n code[current_name]['qlist'].append(name1)\n d2.rename(columns={name:name1},inplace=True)\n tmp3=re.findall(u'第.*?题\\((.*)\\)',name)[0]\n if code[current_name]['qtype'] == u'矩阵单选题':\n code[current_name]['code_r'][name1]=tmp3\n else:\n code[current_name]['code'][name1]=tmp3\n # 识别开放题\n if (code[current_name]['qtype'] == u'多选题'):\n openq=tmp3+'〖.*?〗'\n openq=re.sub('\\)','\\)',openq)\n openq=re.sub('\\(','\\(',openq)\n openq=re.compile(openq)\n qcontent=str(list(d1[current_name]))\n if re.findall(openq,qcontent):\n tmp=d1[current_name].map(lambda x: re.findall(openq,x)[0] if re.findall(openq,x) else '')\n ind=np.argwhere(d2.columns.values==name1).tolist()[0][0]\n d2.insert(ind+1,name1+'_open',tmp)\n code[current_name]['qlist_open'].append(name1+'_open')\n # 删除字典中的nan\n keys=list(code[current_name]['code'].keys())\n for key in keys:\n if '%s'%key == 'nan':\n del code[current_name]['code'][key]\n\n # 处理一些特殊题目,给它们的选项固定顺序,例如年龄、收入等\n for k in code.keys():\n content=code[k]['content']\n qtype=code[k]['qtype']\n if ('code' in code[k]) and (code[k]['code']!={}):\n tmp1=code[k]['code'].keys()\n tmp2=code[k]['code'].values()\n # 识别选项是否是有序变量\n tmp3=[len(re.findall('\\d+','%s'%v))>0 for v in tmp2]#是否有数字\n tmp4=[len(re.findall('-|~','%s'%v))>0 for v in tmp2]#是否有\"-\"或者\"~\"\n if (np.array(tmp3).sum()>=len(tmp2)-2) or (np.array(tmp4).sum()>=len(tmp2)*0.8-(1e-17)):\n try:\n tmp_key=sorted(code[k]['code'],key=lambda c:float(re.findall('[\\d\\.]+','%s'%c)[-1]))\n except:\n tmp_key=list(tmp1)\n code_order=[code[k]['code'][v] for v in tmp_key]\n code[k]['code_order']=code_order\n # 识别矩阵量表题\n if qtype=='矩阵单选题':\n tmp3=[int(re.findall('\\d+','%s'%v)[0]) for v in tmp2 if re.findall('\\d+','%s'%v)]\n if (set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10])) and (len(tmp3)==len(tmp2)):\n code[k]['weight']=dict(zip(tmp1,tmp3))\n continue\n # 识别特殊题型\n if ('性别' in content) and ('男' in tmp2) and ('女' in tmp2):\n code[k]['name']='性别'\n if ('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2):\n code[k]['name']='性别'\n if (('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum()>=len(tmp2)-1):\n code[k]['name']='年龄'\n if ('满意度' in content) and ('整体' in content):\n tmp3=[int(re.findall('\\d+','%s'%v)[0]) for v in tmp2 if re.findall('\\d+','%s'%v)]\n if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):\n code[k]['name']='满意度'\n if len(tmp3)==len(tmp2):\n code[k]['weight']=dict(zip(tmp1,tmp3))\n if ('意愿' in content) and ('推荐' in content):\n tmp3=[int(re.findall('\\d+','%s'%v)[0]) for v in tmp2 if re.findall('\\d+','%s'%v)]\n if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):\n code[k]['name']='NPS'\n if len(tmp3)==len(tmp2):\n weight=pd.Series(dict(zip(tmp1,tmp3)))\n weight=weight.replace(dict(zip([0,1,2,3,4,5,6,7,8,9,10],[-100,-100,-100,-100,-100,-100,-100,0,0,100,100])))\n code[k]['weight']=weight.to_dict()\n\n try:\n d2[u'所用时间']=d2[u'所用时间'].map(lambda s: int(s[:-1]))\n except:\n pass\n\n return (d2,code)\n\n\ndef load_data(method='filedialog',**kwargs):\n '''导入问卷数据\n # 暂时只支持已编码的和问卷星数据\n 1、支持路径搜寻\n 2、支持自由选择文件\n method:\n -filedialog: 打开文件窗口选择\n -pathsearch:自带搜索路径,需提供filepath\n '''\n if method=='filedialog':\n import 
tkinter as tk\n from tkinter.filedialog import askopenfilenames\n tk.Tk().withdraw();\n #print(u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)')\n if 'initialdir' in kwargs:\n initialdir=kwargs['initialdir']\n elif os.path.isdir('.\\\\data'):\n initialdir = \".\\\\data\"\n else:\n initialdir = \".\"\n title =u\"请选择编码所需要的数据文件(支持问卷星和已编码好的数据)\"\n filetypes = ((\"Excel files\",\"*.xls;*.xlsx\"),(\"CSV files\",\"*.csv\"),(\"all files\",\"*.*\"))\n filenames=[]\n while len(filenames)<1:\n filenames=askopenfilenames(initialdir=initialdir,title=title,filetypes=filetypes)\n if len(filenames)<1:\n print('请至少选择一个文件.')\n filenames=list(filenames)\n elif method == 'pathsearch':\n if 'filepath' in kwargs:\n filepath=kwargs['filepath']\n else :\n filepath='.\\\\data\\\\'\n if os.path.isdir(filepath):\n filenames=os.listdir(filepath)\n filenames=[os.path.join(filepath,s) for s in filenames]\n else:\n print('搜索路径错误')\n raise\n info=[]\n for filename in filenames:\n filename_nopath=os.path.split(filename)[1]\n data=read_data(filename)\n # 第一列包含的字段\n field_c1=set(data.iloc[:,0].dropna().unique())\n field_r1=set(data.columns)\n # 列名是否包含Q\n hqlen=[len(re.findall('^[qQ]\\d+',c))>0 for c in field_r1]\n hqrate=hqlen.count(True)/len(field_r1) if len(field_r1)>0 else 0\n rowlens,collens=data.shape\n # 数据中整数/浮点数的占比\n rate_real=data.applymap(lambda x:isinstance(x,(int,float))).sum().sum()/rowlens/collens\n\n tmp={'filename':filename_nopath,'filenametype':'','rowlens':rowlens,'collens':collens,\\\n 'field_c1':field_c1,'field_r1':field_r1,'type':'','rate_real':rate_real}\n\n if len(re.findall('^data.*\\.xls',filename_nopath))>0:\n tmp['filenametype']='data'\n elif len(re.findall('^code.*\\.xls',filename_nopath))>0:\n tmp['filenametype']='code'\n elif len(re.findall('\\d+_\\d+_\\d.xls',filename_nopath))>0:\n tmp['filenametype']='wenjuanxing'\n\n if tmp['filenametype']=='code' or set(['key','code','qlist','qtype']) < field_c1:\n tmp['type']='code'\n if tmp['filenametype']=='wenjuanxing' or len(set(['序号','提交答卷时间','所用时间','来自IP','来源','来源详情','总分'])&field_r1)>=5:\n tmp['type']='wenjuanxing'\n if tmp['filenametype']=='data' or hqrate>=0.5:\n tmp['type']='data'\n info.append(tmp)\n questype=[k['type'] for k in info]\n # 这里有一个优先级存在,优先使用已编码好的数据,其次是问卷星数据\n if questype.count('data')*questype.count('code')==1:\n data=read_data(filenames[questype.index('data')])\n code=read_code(filenames[questype.index('code')])\n elif questype.count('wenjuanxing')>=2:\n filenames=[(f,info[i]['rate_real']) for i,f in enumerate(filenames) if questype[i]=='wenjuanxing']\n tmp=[]\n for f,rate_real in filenames:\n t2=0 if rate_real<0.5 else 2\n d=pd.read_excel(f)\n d=d.iloc[:,0]\n tmp.append((t2,d))\n #print('添加{}'.format(t2))\n tmp_equal=0\n for t,d0 in tmp[:-1]:\n if len(d)==len(d0) and all(d==d0):\n tmp_equal+=1\n tmp[-1]=(t2+int(t/10)*10,tmp[-1][1])\n max_quesnum=max([int(t/10) for t,d in tmp])\n if tmp_equal==0:\n tmp[-1]=(tmp[-1][0]+max_quesnum*10+10,tmp[-1][1])\n #print('修改为{}'.format(tmp[-1][0]))\n # 重新整理所有的问卷数据\n questype=[t for t,d in tmp]\n filenames=[f for f,r in filenames]\n quesnums=max([int(t/10) for t in questype])#可能存在的数据组数\n filename_wjx=[]\n for i in range(1,quesnums+1):\n if questype.count(i*10)==1 and questype.count(i*10+2)==1:\n filename_wjx.append([filenames[questype.index(i*10)],filenames[questype.index(i*10+2)]])\n if len(filename_wjx)==1:\n data,code=wenjuanxing(filename_wjx[0])\n elif len(filename_wjx)>1:\n print('脚本识别出多组问卷星数据,请选择需要编码的数据:')\n for i,f in enumerate(filename_wjx):\n print('{}: 
{}'.format(i+1,'/'.join([os.path.split(f[0])[1],os.path.split(f[1])[1]])))\n ii=input('您选择的数据是(数据前的编码,如:1):')\n ii=re.sub('\\s','',ii)\n if ii.isnumeric():\n data,code=wenjuanxing(filename_wjx[int(ii)-1])\n else:\n print('您输入正确的编码.')\n else:\n print('没有找到任何问卷数据..')\n raise\n else:\n print('没有找到任何数据')\n raise\n return data,code\n\n\n\n\ndef spec_rcode(data,code):\n city={'北京':0,'上海':0,'广州':0,'深圳':0,'成都':1,'杭州':1,'武汉':1,'天津':1,'南京':1,'重庆':1,'西安':1,'长沙':1,'青岛':1,'沈阳':1,'大连':1,'厦门':1,'苏州':1,'宁波':1,'无锡':1,\\\n '福州':2,'合肥':2,'郑州':2,'哈尔滨':2,'佛山':2,'济南':2,'东莞':2,'昆明':2,'太原':2,'南昌':2,'南宁':2,'温州':2,'石家庄':2,'长春':2,'泉州':2,'贵阳':2,'常州':2,'珠海':2,'金华':2,\\\n '烟台':2,'海口':2,'惠州':2,'乌鲁木齐':2,'徐州':2,'嘉兴':2,'潍坊':2,'洛阳':2,'南通':2,'扬州':2,'汕头':2,'兰州':3,'桂林':3,'三亚':3,'呼和浩特':3,'绍兴':3,'泰州':3,'银川':3,'中山':3,\\\n '保定':3,'西宁':3,'芜湖':3,'赣州':3,'绵阳':3,'漳州':3,'莆田':3,'威海':3,'邯郸':3,'临沂':3,'唐山':3,'台州':3,'宜昌':3,'湖州':3,'包头':3,'济宁':3,'盐城':3,'鞍山':3,'廊坊':3,'衡阳':3,\\\n '秦皇岛':3,'吉林':3,'大庆':3,'淮安':3,'丽江':3,'揭阳':3,'荆州':3,'连云港':3,'张家口':3,'遵义':3,'上饶':3,'龙岩':3,'衢州':3,'赤峰':3,'湛江':3,'运城':3,'鄂尔多斯':3,'岳阳':3,'安阳':3,\\\n '株洲':3,'镇江':3,'淄博':3,'郴州':3,'南平':3,'齐齐哈尔':3,'常德':3,'柳州':3,'咸阳':3,'南充':3,'泸州':3,'蚌埠':3,'邢台':3,'舟山':3,'宝鸡':3,'德阳':3,'抚顺':3,'宜宾':3,'宜春':3,'怀化':3,\\\n '榆林':3,'梅州':3,'呼伦贝尔':3,'临汾':4,'南阳':4,'新乡':4,'肇庆':4,'丹东':4,'德州':4,'菏泽':4,'九江':4,'江门市':4,'黄山':4,'渭南':4,'营口':4,'娄底':4,'永州市':4,'邵阳':4,'清远':4,\\\n '大同':4,'枣庄':4,'北海':4,'丽水':4,'孝感':4,'沧州':4,'马鞍山':4,'聊城':4,'三明':4,'开封':4,'锦州':4,'汉中':4,'商丘':4,'泰安':4,'通辽':4,'牡丹江':4,'曲靖':4,'东营':4,'韶关':4,'拉萨':4,\\\n '襄阳':4,'湘潭':4,'盘锦':4,'驻马店':4,'酒泉':4,'安庆':4,'宁德':4,'四平':4,'晋中':4,'滁州':4,'衡水':4,'佳木斯':4,'茂名':4,'十堰':4,'宿迁':4,'潮州':4,'承德':4,'葫芦岛':4,'黄冈':4,'本溪':4,\\\n '绥化':4,'萍乡':4,'许昌':4,'日照':4,'铁岭':4,'大理州':4,'淮南':4,'延边州':4,'咸宁':4,'信阳':4,'吕梁':4,'辽阳':4,'朝阳':4,'恩施州':4,'达州市':4,'益阳市':4,'平顶山':4,'六安':4,'延安':4,\\\n '梧州':4,'白山':4,'阜阳':4,'铜陵市':4,'河源':4,'玉溪市':4,'黄石':4,'通化':4,'百色':4,'乐山市':4,'抚州市':4,'钦州':4,'阳江':4,'池州市':4,'广元':4,'滨州':5,'阳泉':5,'周口市':5,'遂宁':5,\\\n '吉安':5,'长治':5,'铜仁':5,'鹤岗':5,'攀枝花':5,'昭通':5,'云浮':5,'伊犁州':5,'焦作':5,'凉山州':5,'黔西南州':5,'广安':5,'新余':5,'锡林郭勒':5,'宣城':5,'兴安盟':5,'红河州':5,'眉山':5,\\\n '巴彦淖尔':5,'双鸭山市':5,'景德镇市':5,'鸡西':5,'三门峡':5,'宿州':5,'汕尾':5,'阜新':5,'张掖':5,'玉林':5,'乌兰察布':5,'鹰潭':5,'黑河':5,'伊春':5,'贵港市':5,'漯河':5,'晋城':5,'克拉玛依':5,\\\n '随州':5,'保山':5,'濮阳':5,'文山州':5,'嘉峪关':5,'六盘水':5,'乌海':5,'自贡':5,'松原':5,'内江':5,'黔东南州':5,'鹤壁':5,'德宏州':5,'安顺':5,'资阳':5,'鄂州':5,'忻州':5,'荆门':5,'淮北':5,\\\n '毕节':5,'巴音郭楞':5,'防城港':5,'天水':5,'黔南州':5,'阿坝州':5,'石嘴山':5,'安康':5,'亳州市':5,'昌吉州':5,'普洱':5,'楚雄州':5,'白城':5,'贺州':5,'哈密':5,'来宾':5,'庆阳':5,'河池':5,\\\n '张家界 雅安':5,'辽源':5,'湘西州':5,'朔州':5,'临沧':5,'白银':5,'塔城地区':5,'莱芜':5,'迪庆州':5,'喀什地区':5,'甘孜州':5,'阿克苏':5,'武威':5,'巴中':5,'平凉':5,'商洛':5,'七台河':5,'金昌':5,\\\n '中卫':5,'阿勒泰':5,'铜川':5,'海西州':5,'吴忠':5,'固原':5,'吐鲁番':5,'阿拉善盟':5,'博尔塔拉州':5,'定西':5,'西双版纳':5,'陇南':5,'大兴安岭':5,'崇左':5,'日喀则':5,'临夏州':5,'林芝':5,\\\n '海东':5,'怒江州':5,'和田地区':5,'昌都':5,'儋州':5,'甘南州':5,'山南':5,'海南州':5,'海北州':5,'玉树州':5,'阿里地区':5,'那曲地区':5,'黄南州':5,'克孜勒苏州':5,'果洛州':5,'三沙':5}\n code_keys=list(code.keys())\n for qq in code_keys:\n qlist=code[qq]['qlist']\n #qtype=code[qq]['qtype']\n content=code[qq]['content']\n ind=list(data.columns).index(qlist[-1])\n data1=data[qlist]\n '''\n 识别问卷星中的城市题\n '''\n tf1=u'城市' in content\n tf2=data1[data1.notnull()].applymap(lambda x:'-' in '%s'%x).all().all()\n tf3=(qq+'a' not in data.columns) and (qq+'b' not in data.columns)\n if tf1 and tf2 and tf3:\n # 省份和城市\n tmp1=data[qq].map(lambda x:x.split('-')[0])\n tmp2=data[qq].map(lambda x:x.split('-')[1])\n tmp2[tmp1==u'上海']=u'上海'\n tmp2[tmp1==u'北京']=u'北京'\n tmp2[tmp1==u'天津']=u'天津'\n tmp2[tmp1==u'重庆']=u'重庆'\n 
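            # The exported answer for a city question is expected to look like
            # "省份-城市" (e.g. "广东-深圳"), which is what the split('-') above relies on.
            # The four municipalities (and the two SARs handled next) carry no separate
            # city name, so their city column simply reuses the province-level name
            # before the tier lookup, e.g. "广东-深圳" -> ("广东", "深圳") -> tier 0.
            # Any city missing from the `city` dict keeps its string value and is
            # mapped to 6 (五线以下) a few lines further down.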
tmp2[tmp1==u'香港']=u'香港'\n tmp2[tmp1==u'澳门']=u'澳门'\n data.insert(ind+1,qq+'a',tmp1)\n data.insert(ind+2,qq+'b',tmp2)\n code[qq+'a']={'content':'省份','qtype':'填空题','qlist':[qq+'a']}\n code[qq+'b']={'content':'城市','qtype':'填空题','qlist':[qq+'b']}\n tmp3=data[qq+'b'].map(lambda x: city[x] if x in city.keys() else x)\n tmp3=tmp3.map(lambda x: 6 if isinstance(x,str) else x)\n data.insert(ind+3,qq+'c',tmp3)\n code[qq+'c']={'content':'城市分级','qtype':'单选题','qlist':[qq+'c'],\\\n 'code':{0:'北上广深',1:'新一线',2:'二线',3:'三线',4:'四线',5:'五线',6:'五线以下'}}\n\n return data,code\n\n\ndef levenshtein(s, t):\n ''''' From Wikipedia article; Iterative with two matrix rows. '''\n if s == t: return 0\n elif len(s) == 0: return len(t)\n elif len(t) == 0: return len(s)\n v0 = [None] * (len(t) + 1)\n v1 = [None] * (len(t) + 1)\n for i in range(len(v0)):\n v0[i] = i\n for i in range(len(s)):\n v1[0] = i + 1\n for j in range(len(t)):\n cost = 0 if s[i] == t[j] else 1\n v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)\n for j in range(len(v0)):\n v0[j] = v1[j]\n\n return v1[len(t)]\n\ndef code_similar(code1,code2):\n '''\n 题目内容相似度用最小编辑距离来度量\n 选项相似度分为几种\n 1、完全相同:1\n 2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2\n 2、多选题/排序题:不考虑序号,共同变量超过一半即可:3\n 3、矩阵单选题:code_r 暂时只考虑完全匹配\n 4、其他情况为0\n\n '''\n code_distance_min=pd.DataFrame(index=code1.keys(),columns=['qnum','similar_content','similar_code'])\n for c1 in code1:\n # 计算题目内容的相似度\n disstance_str=pd.Series(index=code2.keys())\n for c2 in code2:\n if code1[c1]['qtype']==code2[c2]['qtype']:\n disstance_str[c2]=levenshtein(code1[c1]['content'], code2[c2]['content'])\n c2=disstance_str.idxmin()\n if '%s'%c2 == 'nan':\n continue\n min_len=(len(code1[c1]['content'])+len(code2[c2]['content']))/2\n similar_content=100-100*disstance_str[c2]/min_len if min_len>0 else 0\n # 计算选项的相似度\n qtype=code2[c2]['qtype']\n if qtype == '单选题':\n t1=code1[c1]['code']\n t2=code2[c2]['code']\n inner_key=list(set(t1.keys())&set(t2.keys()))\n tmp=all([t1[c]==t2[c] for c in inner_key])\n if t1==t2:\n similar_code=1\n elif len(inner_key)>=0.5*len(set(t1.keys())|set(t2.keys())) and tmp:\n similar_code=2\n else:\n similar_code=0\n elif qtype in ['多选题','排序题']:\n t1=code1[c1]['code']\n t2=code2[c2]['code']\n t1=[t1[c] for c in code1[c1]['qlist']]\n t2=[t2[c] for c in code2[c2]['qlist']]\n inner_key=set(t1)&set(t2)\n if t1==t2:\n similar_code=1\n elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):\n similar_code=3\n else:\n similar_code=0\n elif qtype in ['矩阵多选题']:\n t1=code1[c1]['code_r']\n t2=code2[c2]['code_r']\n t1=[t1[c] for c in code1[c1]['qlist']]\n t2=[t2[c] for c in code2[c2]['qlist']]\n inner_key=set(t1)&set(t2)\n if t1==t2:\n similar_code=1\n elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):\n similar_code=3\n else:\n similar_code=0\n elif qtype in ['填空题']:\n similar_code=1\n else:\n similar_code=0\n\n code_distance_min.loc[c1,'qnum']=c2\n code_distance_min.loc[c1,'similar_content']=similar_content\n code_distance_min.loc[c1,'similar_code']=similar_code\n\n\n # 剔除qnum中重复的值\n code_distance_min=code_distance_min.sort_values(['qnum','similar_content','similar_code'],ascending=[False,False,True])\n code_distance_min.loc[code_distance_min.duplicated(['qnum']),:]=np.nan\n code_distance_min=pd.DataFrame(code_distance_min,index=code1.keys())\n return code_distance_min\n\n\n\n\ndef data_merge(ques1,ques2,qlist1=None,qlist2=None,name1='ques1',name2='ques2',\\\n mergeqnum='Q0',similar_threshold=70):\n '''合并两份数据\n ques1: 列表,[data1,code1]\n ques2: 列表,[data2,code2]\n '''\n data1,code1=ques1\n data2,code2=ques2\n\n if (qlist1 is None) 
or (qlist2 is None):\n qlist1=[]\n qlist2=[]\n qqlist1=[]\n qqlist2=[]\n code_distance_min=code_similar(code1,code2)\n code1_key=sorted(code1,key=lambda x:int(re.findall('\\d+',x)[0]))\n for c1 in code1_key:\n qtype1=code1[c1]['qtype']\n #print('{}:{}'.format(c1,code1[c1]['content']))\n rs_qq=code_distance_min.loc[c1,'qnum']\n similar_content=code_distance_min.loc[c1,'similar_content']\n similar_code=code_distance_min.loc[c1,'similar_code']\n if (similar_content>=similar_threshold) and (similar_code in [1,2]):\n #print('推荐合并第二份数据中的{}({}), 两个题目相似度为为{:.0f}%'.format(rs_qq,code2[rs_qq]['content'],similar))\n print('将自动合并: {} 和 {}'.format(c1,rs_qq))\n user_qq=rs_qq\n qqlist1+=code1[c1]['qlist']\n qqlist2+=code2[user_qq]['qlist']\n qlist1.append(c1)\n qlist2.append(rs_qq)\n elif (similar_content>=similar_threshold) and (similar_code==3):\n # 针对非单选题,此时要调整选项顺序\n t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']\n t1_qlist=code1[c1]['qlist']\n t1_value=[t1[k] for k in t1_qlist]\n t2=code2[rs_qq]['code_r'] if qtype1 =='矩阵单选题' else code2[rs_qq]['code']\n t2_qlist=code2[rs_qq]['qlist']\n t2_value=[t2[k] for k in t2_qlist]\n # 保留相同的选项\n t1_qlist_new=[q for q in t1_qlist if t1[q] in list(set(t1_value)&set(t2_value))]\n t2_r=dict(zip([s[1] for s in t2.items()],[s[0] for s in t2.items()]))\n t2_qlist_new=[t2_r[s] for s in [t1[q] for q in t1_qlist_new]]\n code1[c1]['qlist']=t1_qlist_new\n code1[c1]['code']={k:t1[k] for k in t1_qlist_new}\n qqlist1+=t1_qlist_new\n qqlist2+=t2_qlist_new\n qlist1.append(c1)\n qlist2.append(rs_qq)\n print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1,rs_qq))\n\n elif similar_code in [1,2]:\n print('-'*40)\n print('为【 {}:{} 】自动匹配到: '.format(c1,code1[c1]['content']))\n print(' 【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq,code2[rs_qq]['content'],similar_content))\n tmp=input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')\n tmp=re.sub('\\s','',tmp)\n tmp=tmp.lower()\n if tmp in ['yes','y']:\n user_qq=rs_qq\n elif tmp in ['no','n']:\n user_qq=None\n else:\n tmp=re.sub('^q','Q',tmp)\n if tmp not in code2:\n user_qq=None\n elif (tmp in code2) and (tmp!=rs_qq):\n print('您输入的是{}:{}'.format(tmp,code2[tmp]['content']))\n user_qq=tmp\n if user_qq==rs_qq:\n qqlist1+=code1[c1]['qlist']\n qqlist2+=code2[user_qq]['qlist']\n qlist1.append(c1)\n qlist2.append(user_qq)\n print('将自动合并: {} 和 {}'.format(c1,rs_qq))\n elif user_qq is not None:\n # 比对两道题目的code\n if 'code' in code1[c1] and len(code1[c1]['code'])>0:\n t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']\n t2=code2[user_qq]['code_r'] if code2[user_qq]['qtype'] =='矩阵单选题' else code2[user_qq]['code']\n if set(t1.values())==set(t2.values()):\n qqlist1+=code1[c1]['qlist']\n qqlist2+=code2[user_qq]['qlist']\n qlist1.append(c1)\n qlist2.append(user_qq)\n print('将自动合并: {} 和 {}'.format(c1,user_qq))\n else:\n print('两个题目的选项不匹配,将自动跳过.')\n else:\n qqlist1+=[code1[c1]['qlist'][0]]\n qqlist2+=[code2[user_qq]['qlist'][0]]\n qlist1.append(c1)\n qlist2.append(user_qq)\n print('将自动合并: {} 和 {}'.format(c1,user_qq))\n else:\n print('将自动跳过: {}'.format(c1))\n print('-'*40)\n else:\n print('将自动跳过: {}'.format(c1))\n tmp=input('请问您需要的题目是否都已经合并? 
请输入(yes / no): ')\n tmp=re.sub('\\s','',tmp)\n tmp=tmp.lower()\n if tmp in ['no','n']:\n print('请确保接下来您要合并的题目类型和选项完全一样.')\n while 1:\n tmp=input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')\n tmp=re.sub('\\s','',tmp)# 去掉空格\n tmp=re.sub(',',',',tmp)# 修正可能错误的逗号\n tmp=tmp.split(',')\n tmp=[re.sub('^q','Q',qq) for qq in tmp]\n if len(tmp)<2:\n break\n if tmp[0] in qlist1 or tmp[1] in qlist2:\n print('该题已经被合并,请重新输入')\n continue\n if tmp[0] not in code1 or tmp[1] not in code2:\n print('输入错误, 请重新输入')\n continue\n c1=tmp[0]\n c2=tmp[1]\n print('您输入的是:')\n print('第一份数据中的【 {}:{} 】'.format(c1,code1[c1]['content']))\n print('第二份数据中的【 {}:{} 】'.format(c2,code2[c2]['content']))\n w=code_similar({c1:code1[c1]},{c2:code2[c2]})\n similar_code=w.loc[c1,'similar_code']\n if similar_code in [1,2] and len(code1[c1]['qlist'])==len(code2[c2]['qlist']):\n qqlist1+=code1[c1]['qlist']\n qqlist2+=code2[c2]['qlist']\n qlist1.append(c1)\n qlist2.append(c2)\n print('将自动合并: {} 和 {}'.format(c1,c2))\n else:\n print('选项不匹配,请重新输入')\n else:\n qqlist1=[]\n for qq in qlist1:\n qqlist1=qqlist1+code1[qq]['qlist']\n qqlist2=[]\n for qq in qlist2:\n qqlist2=qqlist2+code2[qq]['qlist']\n\n # 将题号列表转化成data中的列名\n if mergeqnum in qqlist1:\n mergeqnum=mergeqnum+'merge'\n data1=data1.loc[:,qqlist1]\n data1.loc[:,mergeqnum]=1\n data2=data2.loc[:,qqlist2]\n data2.loc[:,mergeqnum]=2\n\n if len(qqlist1)!=len(qqlist2):\n print('两份数据选项不完全匹配,请检查....')\n raise\n data2=data2.rename(columns=dict(zip(qqlist2,qqlist1)))\n data12=data1.append(data2,ignore_index=True)\n code12={}\n for i,cc in enumerate(qlist1):\n code12[cc]=code1[cc]\n if 'code' in code1[cc] and 'code' in code2[qlist2[i]]:\n code12[cc]['code'].update(code2[qlist2[i]]['code'])\n code12[mergeqnum]={'content':u'来源','code':{1:name1,2:name2},'qtype':u'单选题','qlist':[mergeqnum]}\n return data12,code12\n\n\n\n\n\n\n\n\n\n## ===========================================================\n#\n#\n# 数据清洗 #\n#\n#\n## ==========================================================\n\n\n\n\ndef clean_ftime(ftime,cut_percent=0.25):\n '''\n ftime 是完成问卷的秒数\n 思路:\n 1、只考虑截断问卷完成时间较小的样本\n 2、找到完成时间变化的拐点,即需要截断的时间点\n 返回:r\n 建议截断<r的样本\n '''\n t_min=int(ftime.min())\n t_cut=int(ftime.quantile(cut_percent))\n x=np.array(range(t_min,t_cut))\n y=np.array([len(ftime[ftime<=i]) for i in range(t_min,t_cut)])\n z1 = np.polyfit(x, y, 4) # 拟合得到的函数\n z2=np.polyder(z1,2) #求二阶导数\n r=np.roots(np.polyder(z2,1))\n r=int(r[0])\n return r\n\n\n\n## ===========================================================\n#\n#\n# 数据分析和输出 #\n#\n#\n## ==========================================================\n\n\ndef data_auto_code(data):\n '''智能判断问卷数据\n 输入\n data: 数据框,列名需要满足Qi或者Qi_\n 输出:\n code: 自动编码\n '''\n data=pd.DataFrame(data)\n columns=data.columns\n columns=[c for c in columns if re.match('Q\\d+',c)]\n code={}\n for cc in columns:\n # 识别题目号\n if '_' not in cc:\n key=cc\n else:\n key=cc.split('_')[0]\n # 新的题目则产生新的code\n if key not in code:\n code[key]={}\n code[key]['qlist']=[]\n code[key]['code']={}\n code[key]['content']=key\n code[key]['qtype']=''\n # 处理各题目列表\n if key == cc:\n code[key]['qlist']=[key]\n elif re.findall('^'+key+'_[a-zA-Z]{0,}\\d+$',cc):\n code[key]['qlist'].append(cc)\n else:\n if 'qlist_open' in code[key]:\n code[key]['qlist_open'].append(cc)\n else:\n code[key]['qlist_open']=[cc]\n\n for kk in code.keys():\n dd=data[code[kk]['qlist']]\n # 单选题和填空题\n if len(dd.columns)==1:\n tmp=dd[dd.notnull()].iloc[:,0].unique()\n if dd.iloc[:,0].value_counts().mean() >=2:\n code[kk]['qtype']=u'单选题'\n code[kk]['code']=dict(zip(tmp,tmp))\n else:\n 
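                # Falling through to here means the answers look like free text:
                # the heuristic above treats a column as 单选题 only when each distinct
                # answer occurs at least twice on average; otherwise it becomes 填空题
                # and the code mapping is dropped.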
code[kk]['qtype']=u'填空题'\n del code[kk]['code']\n else:\n tmp=set(dd[dd.notnull()].as_matrix().flatten())\n if set(tmp)==set([0,1]):\n code[kk]['qtype']=u'多选题'\n code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))\n elif 'R' in code[kk]['qlist'][0]:\n code[kk]['qtype']=u'矩阵单选题'\n code[kk]['code_r']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))\n code[kk]['code']=dict(zip(list(tmp),list(tmp)))\n else:\n code[kk]['qtype']=u'排序题'\n code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))\n return code\n\n\n\n\ndef save_data(data,filename=u'data.xlsx',code=None):\n '''保存问卷数据到本地\n 根据filename后缀选择相应的格式保存\n 如果有code,则保存按文本数据\n '''\n savetype=os.path.splitext(filename)[1][1:]\n data1=data.copy()\n if code:\n for qq in code.keys():\n qtype=code[qq]['qtype']\n qlist=code[qq]['qlist']\n if qtype == u'单选题':\n # 将序号换成文本,题号加上具体内容\n data1[qlist[0]].replace(code[qq]['code'],inplace=True)\n data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)\n elif qtype == u'矩阵单选题':\n # 同单选题\n data1[code[qq]['qlist']].replace(code[qq]['code'],inplace=True)\n tmp1=code[qq]['qlist']\n tmp2=['{}({})'.format(q,code[qq]['code_r'][q]) for q in tmp1]\n data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)\n elif qtype in [u'排序题']:\n # 先变成一道题,插入表中,然后再把序号变成文本\n tmp=data[qlist]\n tmp=tmp.rename(columns=code[qq]['code'])\n tmp=dataCode_to_text(tmp)\n ind=list(data1.columns).index(qlist[0])\n qqname='{}({})'.format(qq,code[qq]['content'])\n data1.insert(ind,qqname,tmp)\n\n tmp1=code[qq]['qlist']\n tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in tmp1]\n data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)\n elif qtype in [u'多选题']:\n # 先变成一道题,插入表中,然后再把序号变成文本\n tmp=data[qlist]\n tmp=tmp.rename(columns=code[qq]['code'])\n tmp=dataCode_to_text(tmp)\n ind=list(data1.columns).index(qlist[0])\n qqname='{}({})'.format(qq,code[qq]['content'])\n data1.insert(ind,qqname,tmp)\n\n for q in qlist:\n data1[q].replace({0:'',1:code[qq]['code'][q]},inplace=True)\n tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in qlist]\n data1.rename(columns=dict(zip(qlist,tmp2)),inplace=True)\n\n else:\n data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)\n if (savetype == u'xlsx') or (savetype == u'xls'):\n data1.to_excel(filename,index=False)\n elif savetype == u'csv':\n data1.to_csv(filename,index=False)\n\n\ndef read_data(filename):\n savetype=os.path.splitext(filename)[1][1:]\n if (savetype==u'xlsx') or (savetype==u'xls'):\n data=pd.read_excel(filename)\n elif savetype==u'csv':\n data=pd.read_csv(filename)\n else:\n print('con not read file!')\n return data\n\n\n\ndef sa_to_ma(data):\n '''单选题数据转换成多选题数据\n data是单选题数据, 要求非有效列别为nan\n 可以使用内置函数pd.get_dummies()代替\n '''\n if isinstance(data,pd.core.frame.DataFrame):\n data=data[data.columns[0]]\n #categorys=sorted(data[data.notnull()].unique())\n categorys=data[data.notnull()].unique()\n try:\n categorys=sorted(categorys)\n except:\n pass\n #print('sa_to_ma function::cannot sorted')\n data_ma=pd.DataFrame(index=data.index,columns=categorys)\n for c in categorys:\n data_ma[c]=data.map(lambda x : int(x==c))\n data_ma.loc[data.isnull(),:]=np.nan\n return data_ma\n\ndef to_dummpy(data,code,qqlist=None,qtype_new='多选题',ignore_open=True):\n '''转化成哑变量\n 将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题\n 返回一个很大的只有0和1的数据\n '''\n if qqlist is None:\n qqlist=sorted(code,key=lambda x:int(re.findall('\\d+',x)[0]))\n bdata=pd.DataFrame()\n bcode={}\n for qq in qqlist:\n qtype=code[qq]['qtype']\n data0=data[code[qq]['qlist']]\n if qtype=='单选题':\n 
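            # Single-choice answers are expanded into 0/1 dummy columns named
            # Qx_A1 ... Qx_An, one per category that appears in the data and is listed
            # in the question's code dict, so they end up in the same storage format
            # as 多选题 data. pd.get_dummies(data0) would give a similar 0/1 frame, but
            # building the columns explicitly keeps the category order and the Qx_Ai
            # naming scheme.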
data0=data0.iloc[:,0]\n categorys=data0[data0.notnull()].unique()\n try:\n categorys=sorted(categorys)\n except :\n pass\n categorys=[t for t in categorys if t in code[qq]['code']]\n cname=[code[qq]['code'][k] for k in categorys]\n columns_name=['{}_A{}'.format(qq,i+1) for i in range(len(categorys))]\n tmp=pd.DataFrame(index=data0.index,columns=columns_name)\n for i,c in enumerate(categorys):\n tmp[columns_name[i]]=data0.map(lambda x : int(x==c))\n #tmp.loc[data0.isnull(),:]=0\n code_tmp={'content':code[qq]['content'],'qtype':qtype_new}\n code_tmp['code']=dict(zip(columns_name,cname))\n code_tmp['qlist']=columns_name\n bcode.update({qq:code_tmp})\n bdata=pd.concat([bdata,tmp],axis=1)\n elif qtype in ['多选题','排序题','矩阵单选题']:\n bdata=pd.concat([bdata,data0],axis=1)\n bcode.update({qq:code[qq]})\n bdata=bdata.fillna(0)\n try:\n bdata=bdata.astype(np.int64,raise_on_error=False)\n except :\n pass\n return bdata,bcode\n\n\ndef qdata_flatten(data,code,quesid=None,userid_begin=None):\n '''将问卷数据展平,字段如下\n userid: 用户ID\n quesid: 问卷ID\n qnum: 题号\n qname: 题目内容\n qtype: 题目类型\n samplelen:题目的样本数\n itemnum: 选项序号\n itemname: 选项内容\n code: 用户的选择\n codename: 用户选择的具体值\n count: 计数\n percent(%): 计数占比(百分比)\n '''\n\n if not userid_begin:\n userid_begin=1000000\n data.index=[userid_begin+i+1 for i in range(len(data))]\n if '提交答卷时间' in data.columns:\n begin_date=pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')\n end_date=pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')\n else:\n begin_date=''\n end_date=''\n data,code=to_dummpy(data,code,qtype_new='单选题')\n code_item={}\n for qq in code:\n if code[qq]['qtype']=='矩阵单选题':\n code_item.update(code[qq]['code_r'])\n else :\n code_item.update(code[qq]['code'])\n\n qdata=data.stack().reset_index()\n qdata.columns=['userid','qn_an','code']\n qdata['qnum']=qdata['qn_an'].map(lambda x:x.split('_')[0])\n qdata['itemnum']=qdata['qn_an'].map(lambda x:'_'.join(x.split('_')[1:]))\n\n if quesid:\n qdata['quesid']=quesid\n qdata=qdata[['userid','quesid','qnum','itemnum','code']]\n else:\n qdata=qdata[['userid','qnum','itemnum','code']]\n # 获取描述统计信息:\n samplelen=qdata.groupby(['userid','qnum'])['code'].sum().map(lambda x:int(x>0)).unstack().sum()\n quesinfo=qdata.groupby(['qnum','itemnum','code'])['code'].count()\n quesinfo.name='count'\n quesinfo=quesinfo.reset_index()\n quesinfo=quesinfo[quesinfo['code']!=0]\n #quesinfo=qdata.groupby(['quesid','qnum','itemnum'])['code'].sum()\n quesinfo['samplelen']=quesinfo['qnum'].replace(samplelen.to_dict())\n quesinfo['percent(%)']=0\n quesinfo.loc[quesinfo['samplelen']>0,'percent(%)']=100*quesinfo.loc[quesinfo['samplelen']>0,'count']/quesinfo.loc[quesinfo['samplelen']>0,'samplelen']\n\n quesinfo['qname']=quesinfo['qnum'].map(lambda x: code[x]['content'])\n quesinfo['qtype']=quesinfo['qnum'].map(lambda x: code[x]['qtype'])\n quesinfo['itemname']=quesinfo['qnum']+quesinfo['itemnum'].map(lambda x:'_%s'%x)\n quesinfo['itemname']=quesinfo['itemname'].replace(code_item)\n #quesinfo['itemname']=quesinfo['qn_an'].map(lambda x: code[x.split('_')[0]]['code_r'][x] if \\\n #code[x.split('_')[0]]['qtype']=='矩阵单选题' else code[x.split('_')[0]]['code'][x])\n # 各个选项的含义\n quesinfo['codename']=''\n quesinfo.loc[quesinfo['code']==0,'codename']='否'\n quesinfo.loc[quesinfo['code']==1,'codename']='是'\n quesinfo['tmp']=quesinfo['qnum']+quesinfo['code'].map(lambda x:'_%s'%int(x))\n quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='矩阵单选题'),'tmp']\\\n .map(lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])]))\n 
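    # codename resolution: the dummy-coded 0/1 values were mapped to 否/是 above;
    # for 矩阵单选题 the stored score is looked up in the question's `code` dict,
    # and for 排序题 the rank k is rendered as "Top k" in the next statement.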
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='排序题'),'tmp'].map(lambda x: 'Top{}'.format(x.split('_')[1])))\n quesinfo['begin_date']=begin_date\n quesinfo['end_date']=end_date\n if quesid:\n quesinfo['quesid']=quesid\n quesinfo=quesinfo[['quesid','begin_date','end_date','qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]\n else:\n quesinfo=quesinfo[['qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]\n\n # 排序\n quesinfo['qnum']=quesinfo['qnum'].astype('category')\n quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()),key=lambda x:int(re.findall('\\d+',x)[0])), inplace=True)\n quesinfo['itemnum']=quesinfo['itemnum'].astype('category')\n quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()),key=lambda x:int(re.findall('\\d+',x)[0])), inplace=True)\n quesinfo=quesinfo.sort_values(['qnum','itemnum','code'])\n return qdata,quesinfo\n\n\n\n\ndef confidence_interval(p,n,alpha=0.05):\n import scipy.stats as stats\n t=stats.norm.ppf(1-alpha/2)\n ci=t*math.sqrt(p*(1-p)/n)\n #a=p-stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)\n #b=p+stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)\n return ci\n\ndef sample_size_cal(interval,N,alpha=0.05):\n '''调研样本量的计算\n 参考:https://www.surveysystem.com/sscalc.htm\n sample_size_cal(interval,N,alpha=0.05)\n 输入:\n interval: 误差范围,例如0.03\n N: 总体的大小,一般1万以上就没啥差别啦\n alpha:置信水平,默认95%\n '''\n import scipy.stats as stats\n p=stats.norm.ppf(1-alpha/2)\n if interval>1:\n interval=interval/100\n samplesize=p**2/4/interval**2\n if N:\n samplesize=samplesize*N/(samplesize+N)\n samplesize=int(round(samplesize))\n return samplesize\n\n\ndef gof_test(fo,fe=None,alpha=0.05):\n '''拟合优度检验\n 输入:\n fo:观察频数\n fe:期望频数,缺省为平均数\n 返回:\n 1: 样本与总体有差异\n 0:样本与总体无差异\n 例子:\n gof_test(np.array([0.3,0.4,0.3])*222)\n '''\n import scipy.stats as stats\n fo=np.array(fo).flatten()\n C=len(fo)\n if not fe:\n N=fo.sum()\n fe=np.array([N/C]*C)\n else:\n fe=np.array(fe).flatten()\n chi_value=(fo-fe)**2/fe\n chi_value=chi_value.sum()\n chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)\n #CV=np.sqrt((fo-fe)**2/fe**2/(C-1))*100\n if chi_value>chi_value_fit:\n result=1\n else:\n result=0\n return result\n\n\ndef chi2_test(fo,alpha=0.05):\n import scipy.stats as stats\n fo=pd.DataFrame(fo)\n chiStats = stats.chi2_contingency(observed=fo)\n #critical_value = stats.chi2.ppf(q=1-alpha,df=chiStats[2])\n #observed_chi_val = chiStats[0]\n # p<alpha 等价于 observed_chi_val>critical_value\n chi2_data=(chiStats[1] <= alpha,chiStats[1])\n return chi2_data\n\ndef fisher_exact(fo,alpha=0.05):\n '''fisher_exact 显著性检验函数\n 此处采用的是调用R的解决方案,需要安装包 pyper\n python解决方案参见\n https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/\n 但还有些问题,所以没用.\n '''\n import pyper as pr\n r=pr.R(use_pandas=True,use_numpy=True)\n r.assign('fo',fo)\n r(\"b<-fisher.test(fo)\")\n pdata=r['b']\n p_value=pdata['p.value']\n if p_value<alpha:\n result=1\n else:\n result=0\n return (result,p_value)\n\ndef anova(data,formula):\n '''方差分析\n 输入\n --data: DataFrame格式,包含数值型变量和分类型变量\n --formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]\n\n 返回[方差分析表]\n [总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]\n --df:自由度\n --sum_sq:误差平方和\n --mean_sq:误差平方和/对应的自由度\n --F:mean_sq之比\n --PR(>F):p值,比如<0.05则代表有显著性差异\n '''\n import statsmodels.api as sm\n from statsmodels.formula.api import ols\n cw_lm=ols(formula, data=data).fit() #Specify C for Categorical\n r=sm.stats.anova_lm(cw_lm)\n return r\n\n\ndef 
mca(X,N=2):\n '''对应分析函数,暂时支持双因素\n X:观察频数表\n N:返回的维数,默认2维\n 可以通过scatter函数绘制:\n fig=scatter([pr,pc])\n fig.savefig('mca.png')\n '''\n from scipy.linalg import diagsvd\n S = X.sum().sum()\n Z = X / S # correspondence matrix\n r = Z.sum(axis=1)\n c = Z.sum()\n D_r = np.diag(1/np.sqrt(r))\n Z_c = Z - np.outer(r, c) # standardized residuals matrix\n D_c = np.diag(1/np.sqrt(c))\n\n # another option, not pursued here, is sklearn.decomposition.TruncatedSVD\n P,s,Q = np.linalg.svd(np.dot(np.dot(D_r, Z_c),D_c))\n #S=diagsvd(s[:2],P.shape[0],2)\n pr=np.dot(np.dot(D_r,P),diagsvd(s[:N],P.shape[0],N))\n pc=np.dot(np.dot(D_c,Q.T),diagsvd(s[:N],Q.shape[0],N))\n inertia=np.cumsum(s**2)/np.sum(s**2)\n inertia=inertia.tolist()\n if isinstance(X,pd.DataFrame):\n pr=pd.DataFrame(pr,index=X.index,columns=list('XYZUVW')[:N])\n pc=pd.DataFrame(pc,index=X.columns,columns=list('XYZUVW')[:N])\n return pr,pc,inertia\n '''\n w=pd.ExcelWriter(u'mca_.xlsx')\n pr.to_excel(w,startrow=0,index_label=True)\n pc.to_excel(w,startrow=len(pr)+2,index_label=True)\n w.save()\n '''\n\ndef cluster(data,code,cluster_qq,n_clusters='auto',max_clusters=7):\n '''对态度题进行聚类\n '''\n\n from sklearn.cluster import KMeans\n #from sklearn.decomposition import PCA\n from sklearn import metrics\n #import prince\n qq_max=sorted(code,key=lambda x:int(re.findall('\\d+',x)[0]))[-1]\n new_cluster='Q{}'.format(int(re.findall('\\d+',qq_max)[0])+1)\n #new_cluster='Q32'\n\n qlist=code[cluster_qq]['qlist']\n X=data[qlist]\n # 去除所有态度题选择的分数都一样的用户(含仅有两个不同)\n std_t=min(1.41/np.sqrt(len(qlist)),0.40) if len(qlist)>=8 else 0.10\n X=X[X.T.std()>std_t]\n index_bk=X.index#备份,方便还原\n X.fillna(0,inplace=True)\n X1=X.T\n X1=(X1-X1.mean())/X1.std()\n X1=X1.T.as_matrix()\n\n\n if n_clusters == 'auto':\n #聚类个数的选取和评估\n silhouette_score=[]# 轮廊系数\n SSE_score=[]\n klist=np.arange(2,15)\n for k in klist:\n est = KMeans(k) # 4 clusters\n est.fit(X1)\n tmp=np.sum((X1-est.cluster_centers_[est.labels_])**2)\n SSE_score.append(tmp)\n tmp=metrics.silhouette_score(X1, est.labels_)\n silhouette_score.append(tmp)\n '''\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n fig = plt.figure(2)\n ax.plot(klist,np.array(silhouette_score))\n ax = fig.add_subplot(111)\n ax.plot(klist,np.array(SSE_score))\n '''\n # 找轮廊系数的拐点\n ss=np.array(silhouette_score)\n t1=[False]+list(ss[1:]>ss[:-1])\n t2=list(ss[:-1]>ss[1:])+[False]\n k_log=[t1[i]&t2[i] for i in range(len(t1))]\n if True in k_log:\n k=k_log.index(True)\n else:\n k=1\n k=k if k<=max_clusters-2 else max_clusters-2 # 限制最多分7类\n k_best=klist[k]\n else:\n k_best=n_clusters\n\n est = KMeans(k_best) # 4 clusters\n est.fit(X1)\n\n # 系数计算\n SSE=np.sqrt(np.sum((X1-est.cluster_centers_[est.labels_])**2)/len(X1))\n silhouette_score=metrics.silhouette_score(X1, est.labels_)\n\n print('有效样本数:{},特征数:{},最佳分类个数:{} 类'.format(len(X1),len(qlist),k_best))\n print('SSE(样本到所在类的质心的距离)为:{:.2f},轮廊系数为: {:.2f}'.format(SSE,silhouette_score))\n\n # 绘制降维图\n '''\n X_PCA = PCA(2).fit_transform(X1)\n kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),\n edgecolor='none', alpha=0.6)\n labels=pd.Series(est.labels_)\n plt.figure()\n plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)\n '''\n\n '''\n # 三维立体图\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)\n '''\n\n # 导出到原数据\n parameters={'methods':'kmeans','inertia':est.inertia_,'SSE':SSE,'silhouette':silhouette_score,\\\n 'n_clusters':k_best,'n_features':len(qlist),'n_samples':len(X1),'qnum':new_cluster,\\\n 
'data':X1,'labels':est.labels_}\n data[new_cluster]=pd.Series(est.labels_,index=index_bk)\n code[new_cluster]={'content':'态度题聚类结果','qtype':'单选题','qlist':[new_cluster],\n 'code':dict(zip(range(k_best),['cluster{}'.format(i+1) for i in range(k_best)]))}\n print('结果已经存进数据, 题号为:{}'.format(new_cluster))\n return data,code,parameters\n '''\n # 对应分析\n t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()\n t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]\n t=t.rename(index=code[new_cluster]['code'])\n ca=prince.CA(t)\n ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)\n '''\n\n\n\ndef scatter(data,legend=False,title=None,font_ch=None,find_path=None):\n '''\n 绘制带数据标签的散点图\n '''\n import matplotlib.font_manager as fm\n if font_ch is None:\n fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','msyh.ttc']\n myfont=''\n if not find_path:\n find_paths=['C:\\\\Windows\\\\Fonts','']\n # fontlist 越靠后越优先,findpath越靠后越优先\n for find_path in find_paths:\n for f in fontlist:\n if os.path.exists(os.path.join(find_path,f)):\n myfont=os.path.join(find_path,f)\n if len(myfont)==0:\n print('没有找到合适的中文字体绘图,请检查.')\n myfont=None\n else:\n myfont = fm.FontProperties(fname=myfont)\n else:\n myfont=fm.FontProperties(fname=font_ch)\n fig, ax = plt.subplots()\n #ax.grid('on')\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('none')\n ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)\n ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)\n color=['blue','red','green','dark']\n if not isinstance(data,list):\n data=[data]\n for i,dd in enumerate(data):\n ax.scatter(dd.iloc[:,0], dd.iloc[:,1], c=color[i], s=50,\n label=dd.columns[1])\n for _, row in dd.iterrows():\n ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i],fontproperties=myfont,fontsize=10)\n ax.axis('equal')\n if legend:\n ax.legend(loc='best')\n if title:\n ax.set_title(title,fontproperties=myfont)\n return fig\n\n\n\ndef sankey(df,filename=None):\n '''SanKey图绘制\n df的列是左节点,行是右节点\n 注:暂时没找到好的Python方法,所以只生成R语言所需数据\n 返回links 和 nodes\n # R code 参考\n library(networkD3)\n dd=read.csv('price_links.csv')\n links<-data.frame(source=dd$from,target=dd$to,value=dd$value)\n nodes=read.csv('price_nodes.csv',encoding = 'UTF-8')\n nodes<-nodes['name']\n Energy=c(links=links,nodes=nodes)\n sankeyNetwork(Links = links, Nodes = nodes, Source = \"source\",\n Target = \"target\", Value = \"value\", NodeID = \"name\",\n units = \"TWh\",fontSize = 20,fontFamily='微软雅黑',nodeWidth=20)\n '''\n nodes=['Total']\n nodes=nodes+list(df.columns)+list(df.index)\n nodes=pd.DataFrame(nodes)\n nodes['id']=range(len(nodes))\n nodes.columns=['name','id']\n R,C=df.shape\n t1=pd.DataFrame(df.as_matrix(),columns=range(1,C+1),index=range(C+1,R+C+1))\n t1.index.name='to'\n t1.columns.name='from'\n links=t1.unstack().reset_index(name='value')\n links0=pd.DataFrame({'from':[0]*C,'to':range(1,C+1),'value':list(df.sum())})\n links=links0.append(links)\n if filename:\n links.to_csv(filename+'_links.csv',index=False,encoding='utf-8')\n nodes.to_csv(filename+'_nodes.csv',index=False,encoding='utf-8')\n return (links,nodes)\n\n\ndef table(data,code,total=True):\n '''\n 单个题目描述统计\n code是data的编码,列数大于1\n 返回字典格式数据:\n 'fop':百分比, 对于单选题和为1,多选题分母为样本数\n 'fo': 观察频数表,其中添加了合计项\n 'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有\n '''\n # 单选题\n qtype=code['qtype']\n index=code['qlist']\n data=pd.DataFrame(data)\n sample_len=data[code['qlist']].notnull().T.any().sum()\n result={}\n if qtype == 
u'单选题':\n fo=data.iloc[:,0].value_counts()\n if 'weight' in code:\n w=pd.Series(code['weight'])\n fo1=fo[w.index][fo[w.index].notnull()]\n fw=(fo1*w).sum()/fo1.sum()\n result['fw']=fw\n fo.sort_values(ascending=False,inplace=True)\n fop=fo.copy()\n fop=fop/fop.sum()*1.0\n fop[u'合计']=fop.sum()\n fo[u'合计']=fo.sum()\n if 'code' in code:\n fop.rename(index=code['code'],inplace=True)\n fo.rename(index=code['code'],inplace=True)\n fop.name=u'占比'\n fo.name=u'频数'\n fop=pd.DataFrame(fop)\n fo=pd.DataFrame(fo)\n result['fo']=fo\n result['fop']=fop\n elif qtype == u'多选题':\n fo=data.sum()\n fo.sort_values(ascending=False,inplace=True)\n fo[u'合计']=fo.sum()\n if 'code' in code:\n fo.rename(index=code['code'],inplace=True)\n fop=fo.copy()\n fop=fop/sample_len\n fop.name=u'占比'\n fo.name=u'频数'\n fop=pd.DataFrame(fop)\n fo=pd.DataFrame(fo)\n result['fop']=fop\n result['fo']=fo\n elif qtype == u'矩阵单选题':\n fo=pd.DataFrame(columns=code['qlist'],index=sorted(code['code']))\n for i in fo.columns:\n fo.loc[:,i]=data[i].value_counts()\n if 'weight' not in code:\n code['weight']=dict(zip(code['code'].keys(),code['code'].keys()))\n fw=pd.DataFrame(columns=[u'加权'],index=code['qlist'])\n w=pd.Series(code['weight'])\n for c in fo.columns:\n t=fo[c]\n t=t[w.index][t[w.index].notnull()]\n if t.sum()>1e-17:\n fw.loc[c,u'加权']=(t*w).sum()/t.sum()\n else:\n fw.loc[c,u'加权']=0\n fw.rename(index=code['code_r'],inplace=True)\n result['fw']=fw\n result['weight']=','.join(['{}:{}'.format(code['code'][c],code['weight'][c]) for c in code['code']])\n fo.rename(columns=code['code_r'],index=code['code'],inplace=True)\n fop=fo.copy()\n fop=fop/sample_len\n result['fop']=fop\n result['fo']=fo\n elif qtype == u'排序题':\n #提供综合统计和TOP1值统计\n # 其中综合的算法是当成单选题,给每个TOP分配和为1的权重\n #topn=max([len(data[q][data[q].notnull()].unique()) for q in index])\n #topn=len(index)\n topn=data[index].fillna(0).max().max()\n topn=int(topn)\n qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))\n top1=data.applymap(lambda x:int(x==1))\n data_weight=data.replace(qsort)\n t1=pd.DataFrame()\n t1['TOP1']=top1.sum()\n t1[u'综合']=data_weight.sum()\n t1.sort_values(by=u'综合',ascending=False,inplace=True)\n t1.rename(index=code['code'],inplace=True)\n t=t1.copy()\n t=t/sample_len\n result['fop']=t\n result['fo']=t1\n # 新增topn矩阵\n t_topn=pd.DataFrame()\n for i in range(topn):\n t_topn['TOP%d'%(i+1)]=data.applymap(lambda x:int(x==i+1)).sum()\n t_topn.sort_values(by=u'TOP1',ascending=False,inplace=True)\n if 'code' in code:\n t_topn.rename(index=code['code'],inplace=True)\n result['TOPN_fo']=t_topn#频数\n result['TOPN']=t_topn/sample_len\n result['weight']='+'.join(['TOP{}*{:.2f}'.format(i+1,(topn-i)*2.0/(topn+1)/topn) for i in range(topn)])\n else:\n result['fop']=None\n result['fo']=None\n if (not total) and not(result['fo'] is None) and (u'合计' in result['fo'].index):\n result['fo'].drop([u'合计'],axis=0,inplace=True)\n result['fop'].drop([u'合计'],axis=0,inplace=True)\n if not(result['fo'] is None) and ('code_order' in code):\n code_order=[q for q in code['code_order'] if q in result['fo'].index]\n if u'合计' in result['fo'].index:\n code_order=code_order+[u'合计']\n result['fo']=pd.DataFrame(result['fo'],index=code_order)\n result['fop']=pd.DataFrame(result['fop'],index=code_order)\n return result\n\ndef crosstab(data_index,data_column,code_index=None,code_column=None,qtype=None,total=True):\n '''适用于问卷数据的交叉统计\n 输入参数:\n data_index: 因变量,放在行中\n data_column:自变量,放在列中\n code_index: dict格式,指定data_index的编码等信息\n code_column: dict格式,指定data_column的编码等信息\n 
qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个的\n 返回字典格式数据\n 'fop':默认的百分比表,行是data_index,列是data_column\n 'fo':原始频数表,且添加了总体项\n 'fw': 加权平均值\n\n 简要说明:\n 因为要处理各类题型,这里将单选题处理为多选题\n\n fo:观察频数表\n nij是同时选择了Ri和Cj的频数\n 总体的频数是选择了Ri的频数,与所在行的总和无关\n 行变量\\列变量 C1 |C2 | C3| C4|总体\n R1| n11|n12|n13|n14|n1:\n R2| n21|n22|n23|n23|n2:\n R3| n31|n32|n33|n34|n3:\n fop: 观察百分比表(列变量)\n 这里比较难处理,data_column各个类别的样本量和总体的样本量不一样,各类别的样本量为同时\n 选择了行变量和列类别的频数。而总体的样本量为选择了行变量的频数\n fw: 加权平均值\n 如果data_index的编码code含有weight字段,则我们会输出分组的加权平均值\n\n\n '''\n\n # 将Series转为DataFrame格式\n data_index=pd.DataFrame(data_index)\n data_column=pd.DataFrame(data_column)\n\n # 获取行/列变量的题目类型\n # 默认值\n if data_index.shape[1]==1:\n qtype1=u'单选题'\n else:\n qtype1=u'多选题'\n if data_column.shape[1]==1:\n qtype2=u'单选题'\n else:\n qtype2=u'多选题'\n # 根据参数修正\n if code_index:\n qtype1=code_index['qtype']\n if qtype1 == u'单选题':\n data_index.replace(code_index['code'],inplace=True)\n elif qtype1 in [u'多选题',u'排序题']:\n data_index.rename(columns=code_index['code'],inplace=True)\n elif qtype1 == u'矩阵单选题':\n data_index.rename(columns=code_index['code_r'],inplace=True)\n if code_column:\n qtype2=code_column['qtype']\n if qtype2 == u'单选题':\n data_column.replace(code_column['code'],inplace=True)\n elif qtype2 in [u'多选题',u'排序题']:\n data_column.rename(columns=code_column['code'],inplace=True)\n elif qtype2 == u'矩阵单选题':\n data_column.rename(columns=code_column['code_r'],inplace=True)\n if qtype:\n #qtype=list(qtype)\n if isinstance(qtype,list) and len(qtype)==2:\n qtype1=qtype[0]\n qtype2=qtype[1]\n elif isinstance(qtype,str):\n qtype1=qtype\n if qtype1 == u'单选题':\n data_index=sa_to_ma(data_index)\n qtype1=u'多选题'\n # 将单选题变为多选题\n if qtype2 == u'单选题':\n #data_column=pd.get_dummies(data_column.iloc[:,0])\n data_column=sa_to_ma(data_column)\n qtype2=u'多选题'\n\n # 准备工作\n index_list=list(data_index.columns)\n columns_list=list(data_column.columns)\n # 频数表/data_column各个类别的样本量\n column_freq=data_column.iloc[list(data_index.notnull().T.any()),:].sum()\n #column_freq[u'总体']=column_freq.sum()\n column_freq[u'总体']=data_index.notnull().T.any().sum()\n R=len(index_list)\n C=len(columns_list)\n result={}\n result['sample_size']=column_freq\n if (qtype1 == u'多选题') and (qtype2 == u'多选题'):\n data_index.fillna(0,inplace=True)\n t=pd.DataFrame(np.dot(data_index.fillna(0).T,data_column.fillna(0)))\n t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)\n if code_index and ('weight' in code_index):\n w=pd.Series(code_index['weight'])\n w.rename(index=code_index['code'],inplace=True)\n fw=pd.DataFrame(columns=[u'加权'],index=t.columns)\n for c in t.columns:\n tmp=t[c]\n tmp=tmp[w.index][tmp[w.index].notnull()]\n if abs(tmp.sum())>0:\n fw.loc[c,u'加权']=(tmp*w).sum()/tmp.sum()\n else:\n fw.loc[c,u'加权']=0\n fo1=data_index.sum()[w.index][data_index.sum()[w.index].notnull()]\n if abs(fo1.sum())>0:\n fw.loc[u'总体',u'加权']=(fo1*w).sum()/fo1.sum()\n else:\n fw.loc[u'总体',u'加权']=0\n result['fw']=fw\n t[u'总体']=data_index.sum()\n t.sort_values([u'总体'],ascending=False,inplace=True)\n t1=t.copy()\n for i in t.columns:\n if column_freq[i]!=0:\n t.loc[:,i]=t.loc[:,i]/column_freq[i]\n result['fop']=t\n result['fo']=t1\n elif (qtype1 == u'矩阵单选题') and (qtype2 == u'多选题'):\n if code_index and ('weight' in code_index):\n data_index.replace(code_index['weight'],inplace=True)\n t=pd.DataFrame(np.dot(data_index.fillna(0).T,data_column.fillna(0)))\n t=pd.DataFrame(np.dot(t,np.diag(1/data_column.sum())))\n 
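        # With a weight dict the replace() above turns each category into its score,
        # so dot(scores.T, dummies) sums the scores of the respondents in every
        # column group, and multiplying by diag(1 / group size) converts the sums
        # into group-wise mean scores:  t[i, j] = sum_k score_k[i] * member_k[j] / n_j,
        # where n_j is the number of respondents who picked column j.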
t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)\n t[u'总体']=data_index.mean()\n t.sort_values([u'总体'],ascending=False,inplace=True)\n t1=t.copy()\n result['fop']=t\n result['fo']=t1\n elif (qtype1 == u'排序题') and (qtype2 == u'多选题'):\n topn=int(data_index.max().max())\n #topn=max([len(data_index[q][data_index[q].notnull()].unique()) for q in index_list])\n qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))\n data_index_zh=data_index.replace(qsort)\n t=pd.DataFrame(np.dot(data_index_zh.fillna(0).T,data_column.fillna(0)))\n t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)\n t[u'总体']=data_index_zh.sum()\n t.sort_values([u'总体'],ascending=False,inplace=True)\n t1=t.copy()\n for i in t.columns:\n if column_freq[i]!=0:\n t.loc[:,i]=t.loc[:,i]/column_freq[i]\n result['fop']=t\n result['fo']=t1\n # 新增TOP1 数据\n data_index_top1=data_index.applymap(lambda x:int(x==1))\n top1=pd.DataFrame(np.dot(data_index_top1.fillna(0).T,data_column.fillna(0)))\n top1.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)\n top1[u'总体']=data_index_top1.fillna(0).sum()\n top1.sort_values([u'总体'],ascending=False,inplace=True)\n for i in top1.columns:\n if column_freq[i]!=0:\n top1.loc[:,i]=top1.loc[:,i]/column_freq[i]\n result['TOP1']=top1\n else:\n result['fop']=None\n result['fo']=None\n # 去除总体\n if (not total) and not(result['fo'] is None) and ('总体' in result['fo'].columns):\n result['fo'].drop(['总体'],axis=1,inplace=True)\n result['fop'].drop(['总体'],axis=1,inplace=True)\n # 顺序重排\n if not(result['fo'] is None) and code_index and ('code_order' in code_index) and qtype1!='矩阵单选题':\n code_order=code_index['code_order']\n code_order=[q for q in code_order if q in result['fo'].index]\n if u'总体' in result['fo'].index:\n code_order=code_order+[u'总体']\n result['fo']=pd.DataFrame(result['fo'],index=code_order)\n result['fop']=pd.DataFrame(result['fop'],index=code_order)\n if not(result['fo'] is None) and code_column and ('code_order' in code_column) and qtype2!='矩阵单选题':\n code_order=code_column['code_order']\n code_order=[q for q in code_order if q in result['fo'].columns]\n if u'总体' in result['fo'].columns:\n code_order=code_order+[u'总体']\n result['fo']=pd.DataFrame(result['fo'],columns=code_order)\n result['fop']=pd.DataFrame(result['fop'],columns=code_order)\n return result\n\n\ndef qtable(data,*args,**kwargs):\n '''简易频数统计函数\n 输入\n data:数据框,可以是所有的数据\n code:数据编码\n q1: 题目序号\n q2: 题目序号\n # 单个变量的频数统计\n qtable(data,code,'Q1')\n # 两个变量的交叉统计\n qtable(data,code,'Q1','Q2')\n\n '''\n code=None\n q1=None\n q2=None\n for a in args:\n if (isinstance(a,str)) and (not q1):\n q1=a\n elif (isinstance(a,str)) and (q1):\n q2=a\n elif isinstance(a,dict):\n code=a\n if not code:\n code=data_auto_code(data)\n if not q1:\n print('please input the q1,such as Q1.')\n return\n total=False\n for key in kwargs:\n if key == 'total':\n total=kwargs['total']\n if q2 is None:\n result=table(data[code[q1]['qlist']],code[q1],total=total)\n else:\n result=crosstab(data[code[q1]['qlist']],data[code[q2]['qlist']],code[q1],code[q2],total=total)\n return result\n\ndef association_rules(df,minSup=0.08,minConf=0.4,Y=None):\n '''关联规则分析\n df: DataFrame,bool 类型。是一个类似购物篮数据 \n\n '''\n try:\n df=df.astype(bool)\n except:\n print('df 必须为 bool 类型')\n return (None,None,None)\n columns = np.array(df.columns)\n gen=associate.frequent_itemsets(np.array(df), minSup)\n itemsets=dict(gen)\n 
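    # `associate` is assumed to be an FP-growth style library (in the spirit of
    # orangecontrib.associate.fpgrowth): frequent_itemsets() yields
    # (itemset as column indices, absolute support) pairs, and association_rules()
    # yields (antecedent, consequent, support, confidence) tuples -- which is why
    # sup is divided by len(df) below and the index sets are mapped back to column names.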
rules=associate.association_rules(itemsets,minConf)\n rules=pd.DataFrame(list(rules))\n if len(rules) == 0:\n return (None,None,None)\n # 依次是LHS、RHS、支持度、置信度\n rules.columns=['antecedent','consequent','sup','conf']\n rules['sup']=rules['sup']/len(df)\n rules['antecedent']=rules['antecedent'].map(lambda x:[columns[i] for i in list(x)])\n rules['consequent']=rules['consequent'].map(lambda x:[columns[i] for i in list(x)])\n rules['rule']=rules['antecedent'].map(lambda x:','.join(['%s'%i for i in x]))\\\n +'-->'\\\n +rules['consequent'].map(lambda x:','.join(['%s'%i for i in x]))\n result=';\\n'.join(['{}: 支持度={:.1f}%, 置信度={:.1f}%'.format(\\\n rules.loc[ii,'rule'],100*rules.loc[ii,'sup'],100*rules.loc[ii,'conf']) for ii in rules.index[:4]])\n return (result,rules,itemsets)\n\n\n\ndef contingency(fo,alpha=0.05):\n ''' 列联表分析:(观察频数表分析)\n # 预增加一个各类别之间的距离\n 1、生成TGI指数、TWI指数、CHI指数\n 2、独立性检验\n 3、当两个变量不显著时,考虑单个之间的显著性\n 返回字典格式\n chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件\n coef: 包含chi2、p值、V相关系数\n log: 记录一些异常情况\n FO: 观察频数\n FE: 期望频数\n TGI:fo/fe\n TWI:fo-fe\n CHI:sqrt((fo-fe)(fo/fe-1))*sign(fo-fe)\n significant:{\n .'result': 显著性结果[1(显著),0(不显著),-1(fe小于5的过多)]\n .'pvalue':\n .'method': chi_test or fisher_test\n .'vcoef':\n .'threshold':\n }\n summary:{\n .'summary': 结论提取\n .'fit_test': 拟合优度检验\n .'chi_std':\n .'chi_mean':\n '''\n import scipy.stats as stats\n cdata={}\n if isinstance(fo,pd.core.series.Series):\n fo=pd.DataFrame(fo)\n if not isinstance(fo,pd.core.frame.DataFrame):\n return cdata\n R,C=fo.shape\n # 去除所有的总体、合计、其他、其它\n if u'总体' in fo.columns:\n fo.drop([u'总体'],axis=1,inplace=True)\n if any([(u'其他' in '%s'%s) or (u'其它' in '%s'%s) for s in fo.columns]):\n tmp=[s for s in fo.columns if (u'其他' in s) or (u'其它' in s)]\n for t in tmp:\n fo.drop([t],axis=1,inplace=True)\n if u'合计' in fo.index:\n fo.drop([u'合计'],axis=0,inplace=True)\n if any([(u'其他' in '%s'%s) or (u'其它' in '%s'%s) for s in fo.index]):\n tmp=[s for s in fo.index if (u'其他' in s) or (u'其它' in s)]\n for t in tmp:\n fo.drop([t],axis=0,inplace=True)\n fe=fo.copy()\n N=fo.sum().sum()\n if N==0:\n #print('rpt.contingency:: fo的样本数为0,请检查数据')\n return cdata\n for i in fe.index:\n for j in fe.columns:\n fe.loc[i,j]=fe.loc[i,:].sum()*fe.loc[:,j].sum()/float(N)\n TGI=fo/fe\n TWI=fo-fe\n CHI=np.sqrt((fo-fe)**2/fe)*(TWI.applymap(lambda x: int(x>0))*2-1)\n PCHI=1/(1+np.exp(-1*CHI))\n cdata['FO']=fo\n cdata['FE']=fe\n cdata['TGI']=TGI*100\n cdata['TWI']=TWI\n cdata['CHI']=CHI\n cdata['PCHI']=PCHI\n\n # 显著性检验(独立性检验)\n significant={}\n significant['threshold']=stats.chi2.ppf(q=1-alpha,df=C-1)\n #threshold=math.ceil(R*C*0.2)# 期望频数和实际频数不得小于5\n\n # 去除行、列变量中样本数和过低的变量\n threshold=max(3,min(30,N*0.05))\n ind1=fo.sum(axis=1)>=threshold\n ind2=fo.sum()>=threshold\n fo=fo.loc[ind1,ind2]\n\n if (fo.shape[0]<=1) or (np.any(fo.sum()==0)) or (np.any(fo.sum(axis=1)==0)):\n significant['result']=-2\n significant['pvalue']=-2\n significant['method']='fo not frequency'\n #elif ((fo<=5).sum().sum()>=threshold):\n #significant['result']=-1\n #significant['method']='need fisher_exact'\n '''fisher_exact运行所需时间极其的长,此处还是不作检验\n fisher_r,fisher_p=fisher_exact(fo)\n significant['pvalue']=fisher_p\n significant['method']='fisher_exact'\n significant['result']=fisher_r\n '''\n else:\n try:\n chiStats = stats.chi2_contingency(observed=fo)\n except:\n chiStats=(1,np.nan)\n significant['pvalue']=chiStats[1]\n significant['method']='chi-test'\n #significant['vcoef']=math.sqrt(chiStats[0]/N/min(R-1,C-1))\n if chiStats[1] <= alpha:\n significant['result']=1\n elif np.isnan(chiStats[1]):\n 
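            # A NaN p-value means chi2_contingency raised (or the table became
            # degenerate after dropping low-frequency rows/columns); flag it with
            # result = -1 instead of reporting a spurious "not significant".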
significant['pvalue']=-2\n significant['result']=-1\n else:\n significant['result']=0\n cdata['significant']=significant\n\n # 列联表分析summary\n chi_sum=(CHI**2).sum(axis=1)\n chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)#拟合优度检验\n fit_test=chi_sum.map(lambda x : int(x>chi_value_fit))\n summary={}\n summary['fit_test']=fit_test\n summary['chi_std']=CHI.unstack().std()\n summary['chi_mean']=CHI.unstack().mean()\n #print('the std of CHI is %.2f'%summary['chi_std'])\n conclusion=''\n fo_rank=fo.sum().rank(ascending=False)# 给列选项排名,只分析排名在前4选项的差异\n for c in fo_rank[fo_rank<5].index:#CHI.columns:\n #针对每一列,选出大于一倍方差的行选项,如果过多,则只保留前三个\n tmp=list(CHI.loc[CHI[c]-summary['chi_mean']>summary['chi_std'],c].sort_values(ascending=False)[:3].index)\n tmp=['%s'%s for s in tmp]# 把全部内容转化成字符串\n if tmp:\n tmp1=u'{col}:{s}'.format(col=c,s=' || '.join(tmp))\n conclusion=conclusion+tmp1+'; \\n'\n if significant['result']==1:\n if conclusion:\n tmp='在95%置信水平下显著性检验(卡方检验)结果为*显著*, 且CHI指标在一个标准差外的(即相对有差异的)有:\\n'\n else:\n tmp='在95%置信水平下显著性检验(卡方检验)结果为*显著*,但没有找到相对有差异的配对'\n elif significant['result']==0:\n if conclusion:\n tmp='在95%置信水平下显著性检验(卡方检验)结果为*不显著*, 但CHI指标在一个标准差外的(即相对有差异的)有:\\n'\n else:\n tmp='在95%置信水平下显著性检验(卡方检验)结果为*不显著*,且没有找到相对有差异的配对'\n else:\n if conclusion:\n tmp='不满足显著性检验(卡方检验)条件, 但CHI指标在一个标准差外的(即相对有差异的)有:\\n'\n else:\n tmp='不满足显著性检验(卡方检验)条件,且没有找到相对有差异的配对'\n conclusion=tmp+conclusion\n\n summary['summary']=conclusion\n cdata['summary']=summary\n return cdata\n\ndef pre_cross_qlist(data,code):\n '''自适应给出可以进行交叉分析的变量和相应选项\n 满足以下条件的将一键交叉分析:\n 1、单选题\n 2、如果选项是文本,则平均长度应小于10\n ...\n 返回:\n cross_qlist: [[题目序号,变量选项],]\n '''\n cross_qlist=[]\n for qq in code:\n qtype=code[qq]['qtype']\n qlist=code[qq]['qlist']\n content=code[qq]['content']\n sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()\n if qtype not in ['单选题']:\n continue\n if not(set(qlist) <= set(data.columns)):\n continue\n t=qtable(data,code,qq)['fo']\n if 'code_order' in code[qq]:\n code_order=code[qq]['code_order']\n code_order=[q for q in code_order if q in t.index]\n t=pd.DataFrame(t,index=code_order)\n items=list(t.index)\n code_values=list(code[qq]['code'].values())\n if len(items)<=1:\n continue\n if all([isinstance(t,str) for t in code_values]):\n if sum([len(t) for t in code_values])/len(code_values)>15:\n continue\n if ('code_order' in code[qq]) and (len(items)<10):\n code_order=[q for q in code[qq]['code_order'] if q in t.index]\n t=pd.DataFrame(t,index=code_order)\n ind=np.where(t['频数']>=10)[0]\n if len(ind)>0:\n cross_order=list(t.index[range(ind[0],ind[-1]+1)])\n cross_qlist.append([qq,cross_order])\n continue\n if re.findall('性别|年龄|gender|age',content.lower()):\n cross_qlist.append([qq,items])\n continue\n if (len(items)<=sample_len_qq/30) and (len(items)<10):\n cross_order=list(t.index[t['频数']>=10])\n if cross_order:\n cross_qlist.append([qq,cross_order])\n continue\n return cross_qlist\n\n'''\nimport report as rpt\nppt=rpt.Report(template)\nppt.add_cover(filename)\nppt.add_slide(data=,title)\nppt.save()\nppt.plo\n\n'''\n\n\n\ndef cross_chart(data,code,cross_class,filename=u'交叉分析报告', cross_qlist=None,\\\ndelclass=None,plt_dstyle=None,cross_order=None,reverse_display=False,\\\ntotal_display=True,max_column_chart=20,save_dstyle=None,template=None):\n\n '''使用帮助\n data: 问卷数据,包含交叉变量和所有的因变量\n code: 数据编码\n cross_class: 交叉变量,单选题或者多选题,例如:Q1\n filename:文件名,用于PPT和保存相关数据\n cross_list: 需要进行交叉分析的变量,缺省为code中的所有变量\n delclass: 交叉变量中需要删除的单个变量,缺省空\n plt_dstyle: 绘制图表需要用的数据类型,默认为百分比表,可以选择['TGI'、'CHI'、'TWI']等\n save_dstyle: 需要保存的数据类型,格式为列表。\n cross_order: 
交叉变量中各个类别的顺序,可以缺少\n total_display: PPT绘制图表中是否显示总体情况\n max_column_chart: 列联表的列数,小于则用柱状图,大于则用条形图\n template: PPT模板信息,{'path': 'layouts':}缺省用自带的。\n '''\n # ===================参数预处理=======================\n if plt_dstyle:\n plt_dstyle=plt_dstyle.upper()\n\n if not cross_qlist:\n try:\n cross_qlist=list(sorted(code,key=lambda c: int(re.findall('\\d+',c)[0])))\n except:\n cross_qlist=list(code.keys())\n if cross_class in cross_qlist:\n cross_qlist.remove(cross_class)\n\n # =================基本数据获取==========================\n #交叉分析的样本数统一为交叉变量的样本数\n sample_len=data[code[cross_class]['qlist']].notnull().T.any().sum()\n\n\n # 交叉变量中每个类别的频数分布.\n if code[cross_class]['qtype'] == u'单选题':\n #data[cross_class].replace(code[cross_class]['code'],inplace=True)\n cross_class_freq=data[code[cross_class]['qlist'][0]].value_counts()\n cross_class_freq[u'合计']=cross_class_freq.sum()\n cross_class_freq.rename(index=code[cross_class]['code'],inplace=True)\n #cross_columns_qlist=code[cross_class]['qlist']\n elif code[cross_class]['qtype'] == u'多选题':\n cross_class_freq=data[code[cross_class]['qlist']].sum()\n cross_class_freq[u'合计']=cross_class_freq.sum()\n cross_class_freq.rename(index=code[cross_class]['code'],inplace=True)\n #data.rename(columns=code[cross_class]['code'],inplace=True)\n #cross_columns_qlist=[code[cross_class]['code'][k] for k in code[cross_class]['qlist']]\n elif code[cross_class]['qtype'] == u'排序题':\n tmp=qtable(data,code,cross_class)\n #tmp,tmp1=table(data[code[cross_class]['qlist']],code[cross_class])\n cross_class_freq=tmp['fo'][u'综合']\n cross_class_freq[u'合计']=cross_class_freq.sum()\n\n # ================I/O接口=============================\n # pptx 接口\n prs=rpt.Report(template) if template else rpt.Report()\n\n if not os.path.exists('.\\\\out'):\n os.mkdir('.\\\\out')\n # 生成数据接口(因为exec&eval)\n Writer=pd.ExcelWriter('.\\\\out\\\\'+filename+u'.xlsx')\n Writer_save={}\n if save_dstyle:\n for dstyle in save_dstyle:\n Writer_save[u'Writer_'+dstyle]=pd.ExcelWriter('.\\\\out\\\\'+filename+u'_'+dstyle+'.xlsx')\n\n result={}#记录每道题的的统计数据,用户函数的返回数据\n\n # 记录没到题目的样本数和显著性差异检验结果,用于最后的数据输出\n cross_columns=list(cross_class_freq.index)\n cross_columns=[r for r in cross_columns if r!=u'合计']\n cross_columns=['内容','题型']+cross_columns+[u'总体',u'显著性检验']\n conclusion=pd.DataFrame(index=cross_qlist,columns=cross_columns)\n conclusion.to_excel(Writer,u'索引')\n\n # ================封面页=============================\n prs.add_cover(title=filename)\n\n # ================背景页=============================\n title=u'说明'\n summary=u'交叉题目为'+cross_class+u': '+code[cross_class]['content']\n summary=summary+'\\n'+u'各类别样本量如下:'\n prs.add_slide(data={'data':cross_class_freq,'slide_type':'table'},title=title,\\\n summary=summary)\n\n data_column=data[code[cross_class]['qlist']]\n for qq in cross_qlist:\n # 遍历所有题目\n #print(qq)\n qtitle=code[qq]['content']\n qlist=code[qq]['qlist']\n qtype=code[qq]['qtype']\n if not(set(qlist) <= set(data.columns)):\n continue\n data_index=data[qlist]\n\n sample_len=data_column.iloc[list(data_index.notnull().T.any()),:].notnull().T.any().sum()\n summary=None\n if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:\n continue\n # 交叉统计\n try:\n if reverse_display:\n result_t=crosstab(data_column,data_index,code_index=code[cross_class],code_column=code[qq])\n else:\n result_t=crosstab(data_index,data_column,code_index=code[qq],code_column=code[cross_class])\n except :\n print('脚本在处理{}时出了一天小问题.....')\n continue\n if ('fo' in result_t) and ('fop' in result_t):\n t=result_t['fop']\n t1=result_t['fo']\n 
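            # t  : column-percentage table (fop) used for the PPT charts
            # t1 : raw frequency table (fo) written to Excel and fed to contingency()
            #      for the TGI/TWI/CHI tables and the significance test
            # qsample : per-column sample sizes, reported in the 索引 sheet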
qsample=result_t['sample_size']\n else:\n continue\n\n if t is None:\n continue\n\n # =======数据修正==============\n if cross_order and (not reverse_display):\n if u'总体' not in cross_order:\n cross_order=cross_order+[u'总体']\n cross_order=[q for q in cross_order if q in t.columns]\n t=pd.DataFrame(t,columns=cross_order)\n t1=pd.DataFrame(t1,columns=cross_order)\n if cross_order and reverse_display:\n cross_order=[q for q in cross_order if q in t.index]\n t=pd.DataFrame(t,index=cross_order)\n t1=pd.DataFrame(t1,index=cross_order)\n '''在crosstab中已经重排了\n if 'code_order' in code[qq] and qtype!='矩阵单选题':\n code_order=code[qq]['code_order']\n if reverse_display:\n #code_order=[q for q in code_order if q in t.columns]\n if u'总体' in t1.columns:\n code_order=code_order+[u'总体']\n t=pd.DataFrame(t,columns=code_order)\n t1=pd.DataFrame(t1,columns=code_order)\n else:\n #code_order=[q for q in code_order if q in t.index]\n t=pd.DataFrame(t,index=code_order)\n t1=pd.DataFrame(t1,index=code_order)\n '''\n t.fillna(0,inplace=True)\n t1.fillna(0,inplace=True)\n\n # =======保存到Excel中========\n t2=pd.concat([t,t1],axis=1)\n t2.to_excel(Writer,qq,index_label=qq,float_format='%.3f')\n Writer_rows=len(t2)# 记录当前Excel文件写入的行数\n pd.DataFrame(qsample,columns=['样本数']).to_excel(Writer,qq,startrow=Writer_rows+2)\n Writer_rows+=len(qsample)+2\n\n #列联表分析\n cdata=contingency(t1,alpha=0.05)# 修改容错率\n result[qq]=cdata\n if cdata:\n summary=cdata['summary']['summary']\n # 保存各个指标的数据\n if save_dstyle:\n for dstyle in save_dstyle:\n cdata[dstyle].to_excel(Writer_save[u'Writer_'+dstyle],qq,index_label=qq,float_format='%.2f')\n\n if qtype in [u'单选题',u'多选题',u'排序题']:\n plt_data=t*100\n else:\n plt_data=t.copy()\n if (abs(1-plt_data.sum())<=0.01+1e-17).all():\n plt_data=plt_data*100\n\n\n # ========================【特殊题型处理区】================================\n if 'fw' in result_t:\n plt_data=result_t['fw']\n if cross_order and (not reverse_display):\n if u'总体' not in cross_order:\n cross_order=cross_order+[u'总体']\n cross_order=[q for q in cross_order if q in plt_data.index]\n plt_data=pd.DataFrame(plt_data,index=cross_order)\n plt_data.to_excel(Writer,qq,startrow=Writer_rows+2)\n Writer_rows+=len(plt_data)\n\n if plt_dstyle and isinstance(cdata,dict) and (plt_dstyle in cdata):\n plt_data=cdata[plt_dstyle]\n\n # 绘制PPT\n title=qq+'['+qtype+']: '+qtitle\n if not summary:\n summary=u'这里是结论区域.'\n if 'significant' in cdata:\n sing_result=cdata['significant']['result']\n sing_pvalue=cdata['significant']['pvalue']\n else:\n sing_result=-2\n sing_pvalue=-2\n footnote=u'显著性检验的p值为{:.3f},数据来源于{},样本N={}'.format(sing_pvalue,qq,sample_len)\n\n # 保存相关数据\n conclusion.loc[qq,:]=qsample\n conclusion.loc[qq,[u'内容',u'题型']]=pd.Series({u'内容':code[qq]['content'],u'题型':code[qq]['qtype']})\n conclusion.loc[qq,u'显著性检验']=sing_result\n\n if (not total_display) and (u'总体' in plt_data.columns):\n plt_data.drop([u'总体'],axis=1,inplace=True)\n\n if len(plt_data)>max_column_chart:\n prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\\\n title=title,summary=summary,footnote=footnote)\n else:\n prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\\\n title=title,summary=summary,footnote=footnote)\n # 排序题特殊处理\n if (qtype == u'排序题') and ('TOP1' in result_t):\n plt_data=result_t['TOP1']*100\n # =======数据修正==============\n if cross_order and (not reverse_display):\n if u'总体' not in cross_order:\n cross_order=cross_order+[u'总体']\n cross_order=[q for q in cross_order if q in plt_data.columns]\n 
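                # Keep the TOP1 table in the same column order as the main 排序题
                # chart so the extra slide lines up with it.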
plt_data=pd.DataFrame(plt_data,columns=cross_order)\n if cross_order and reverse_display:\n cross_order=[q for q in cross_order if q in plt_data.index]\n plt_data=pd.DataFrame(plt_data,index=cross_order)\n if 'code_order' in code[qq]:\n code_order=code[qq]['code_order']\n if reverse_display:\n #code_order=[q for q in code_order if q in t.columns]\n if u'总体' in t1.columns:\n code_order=code_order+[u'总体']\n plt_data=pd.DataFrame(plt_data,columns=code_order)\n else:\n #code_order=[q for q in code_order if q in t.index]\n plt_data=pd.DataFrame(plt_data,index=code_order)\n plt_data.fillna(0,inplace=True)\n title='[TOP1]' + title\n if len(plt_data)>max_column_chart:\n prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\\\n title=title,summary=summary,footnote=footnote)\n else:\n prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\\\n title=title,summary=summary,footnote=footnote)\n\n\n\n\n '''\n # ==============小结页=====================\n difference=pd.Series(difference,index=total_qlist_0)\n '''\n\n # ========================文件生成和导出======================\n #difference.to_csv('.\\\\out\\\\'+filename+u'_显著性检验.csv',encoding='gbk')\n if plt_dstyle:\n filename=filename+'_'+plt_dstyle\n try:\n prs.save('.\\\\out\\\\'+filename+u'.pptx')\n except:\n prs.save('.\\\\out\\\\'+filename+u'_副本.pptx')\n conclusion.to_excel(Writer,'索引')\n Writer.save()\n if save_dstyle:\n for dstyle in save_dstyle:\n Writer_save[u'Writer_'+dstyle].save()\n\n return result\n\n\n\ndef summary_chart(data,code,filename=u'整体统计报告', summary_qlist=None,\\\nmax_column_chart=20,template=None):\n\n # ===================参数预处理=======================\n if not summary_qlist:\n try:\n summary_qlist=list(sorted(code,key=lambda c: int(re.findall('\\d+',c)[0])))\n except:\n summary_qlist=list(code.keys())\n\n # =================基本数据获取==========================\n #统一的有效样本,各个题目可能有不能的样本数\n sample_len=len(data)\n\n # ================I/O接口=============================\n # pptx 接口\n prs=rpt.Report(template) if template else rpt.Report()\n\n if not os.path.exists('.\\\\out'):\n os.mkdir('.\\\\out')\n Writer=pd.ExcelWriter('.\\\\out\\\\'+filename+'.xlsx')\n\n result={}#记录每道题的过程数据\n # 记录样本数等信息,用于输出\n conclusion=pd.DataFrame(index=summary_qlist,columns=[u'内容',u'题型',u'样本数'])\n conclusion.to_excel(Writer,u'索引')\n # ================封面页=============================\n prs.add_cover(title=filename)\n # ================背景页=============================\n title=u'说明'\n qtype_count=[code[k]['qtype'] for k in code]\n qtype_count=[[qtype,qtype_count.count(qtype)] for qtype in set(qtype_count)]\n qtype_count=sorted(qtype_count,key=lambda x:x[1],reverse=True)\n summary='该数据一共有{}个题目,其中有'.format(len(code))\n summary+=','.join(['{} {} 道'.format(t[0],t[1]) for t in qtype_count])\n summary+='.\\n 经统计, 该数据有效样本数为 {} 份。下表是在该样本数下,各比例对应的置信区间(置信水平95%).'.format(sample_len)\n w=pd.DataFrame(index=[(i+1)*0.05 for i in range(10)],columns=['比例','置信区间'])\n w['比例']=w.index\n w['置信区间']=w['比例'].map(lambda x:confidence_interval(x,sample_len))\n w['置信区间']=w['置信区间'].map(lambda x:'±{:.1f}%'.format(x*100))\n w['比例']=w['比例'].map(lambda x:'{:.0f}% / {:.0f}%'.format(x*100,100-100*x))\n w=w.set_index('比例')\n prs.add_slide(data={'data':w,'slide_type':'table'},title=title,summary=summary)\n\n\n for qq in summary_qlist:\n '''\n 特殊题型处理\n 整体满意度题:后期归为数值类题型\n '''\n #print(qq)\n qtitle=code[qq]['content']\n qlist=code[qq]['qlist']\n qtype=code[qq]['qtype']\n if not(set(qlist) <= set(data.columns)):\n continue\n 
sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()\n\n conclusion.loc[qq,u'内容']=qtitle\n conclusion.loc[qq,u'题型']=qtype\n conclusion.loc[qq,u'样本数']=sample_len_qq\n # 填空题只统计数据,不绘图\n if qtype == '填空题':\n startcols=0\n for qqlist in qlist:\n tmp=pd.DataFrame(data[qqlist].value_counts()).reset_index()\n tmp.to_excel(Writer,qq,startcol=startcols,index=False)\n startcols+=3\n continue\n if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:\n continue\n try:\n result_t=table(data[qlist],code=code[qq])\n except:\n print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))\n continue\n t=result_t['fop']\n t1=result_t['fo']\n\n # =======数据修正==============\n if 'code_order' in code[qq]:\n code_order=code[qq]['code_order']\n code_order=[q for q in code_order if q in t.index]\n if u'合计' in t.index:\n code_order=code_order+[u'合计']\n t=pd.DataFrame(t,index=code_order)\n t1=pd.DataFrame(t1,index=code_order)\n t.fillna(0,inplace=True)\n t1.fillna(0,inplace=True)\n\n # =======保存到Excel中========\n Writer_rows=0\n t2=pd.concat([t,t1],axis=1)\n t2.to_excel(Writer,qq,startrow=Writer_rows,index_label=qq,float_format='%.3f')\n Writer_rows+=len(t2)+2\n\n # ==========根据个题型提取结论==================\n summary=''\n if qtype in ['单选题','多选题']:\n try:\n gof_result=gof_test(t1)\n except :\n gof_result=-2\n if gof_result==1:\n summary+='拟合优度检验*显著*'\n elif gof_result==0:\n summary+='拟合优度检验*不显著*'\n else:\n summary+='不满足拟合优度检验条件'\n\n if qtype == '多选题':\n tmp=data[qlist].rename(columns=code[qq]['code'])\n tmp_t=len(tmp)*tmp.shape[1]*np.log(tmp.shape[1])\n if tmp_t<20000:\n minSup=0.08\n minConf=0.40\n elif tmp_t<50000:\n minSup=0.15\n minConf=0.60\n else:\n minSup=0.20\n minConf=0.60\n aso_result,rules,freq=association_rules(tmp,minSup=minSup,minConf=minConf)\n numItem_mean=t1.sum().sum()/sample_len_qq\n if u'合计' in t1.index:\n numItem_mean=numItem_mean/2\n if aso_result:\n summary+=' || 平均每个样本选了{:.1f}个选项 || 找到的关联规则如下(只显示TOP4):\\n{}'.format(numItem_mean,aso_result)\n rules.to_excel(Writer,qq,startrow=Writer_rows,index=False,float_format='%.3f')\n Writer_rows+=len(rules)+2\n else:\n summary+=' || 平均每个样本选了{:.1f}个选项 || 没有找到关联性较大的规则'.format(numItem_mean)\n\n # 各种题型的结论和相关注释。\n if (qtype in [u'单选题']) and 'fw' in result_t:\n tmp=u'加权平均值'\n if ('name' in code[qq]) and code[qq]['name']==u'满意度':\n tmp=u'满意度平均值'\n elif ('name' in code[qq]) and code[qq]['name']=='NPS':\n tmp=u'NPS值'\n summary+=' || {}为:{:.3f}'.format(tmp,result_t['fw'])\n elif qtype =='排序题':\n summary+=' 此处“综合”指标的计算方法为 :={}/总频数.'.format(result_t['weight'])\n if len(summary)==0:\n summary+=u'这里是结论区域'\n\n # ===============数据再加工==========================\n if qtype in [u'单选题',u'多选题',u'排序题']:\n plt_data=t*100\n else:\n plt_data=t.copy()\n if u'合计' in plt_data.index:\n plt_data.drop([u'合计'],axis=0,inplace=True)\n result[qq]=plt_data\n title=qq+'['+qtype+']: '+qtitle\n\n\n footnote=u'数据来源于%s,样本N=%d'%(qq,sample_len_qq)\n # 绘制图表plt_data一般是Series,对于矩阵单选题,其是DataFrame\n\n if len(t)>max_column_chart:\n prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\\\n title=title,summary=summary,footnote=footnote)\n elif (len(t)>3) or (len(plt_data.shape)>1 and plt_data.shape[1]>1):\n prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\\\n title=title,summary=summary,footnote=footnote)\n else:\n prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'PIE'},\\\n title=title,summary=summary,footnote=footnote)\n\n\n\n\n\n\n #==============特殊题型处理===============\n # 矩阵单选题特殊处理\n if (qtype == u'矩阵单选题') and ('fw' in result_t):\n 
plt_data=result_t['fw']\n plt_data.rename(columns={u'加权':u'平均值'},inplace=True)\n plt_data.to_excel(Writer,qq,startrow=Writer_rows,float_format='%.3f')\n Writer_rows=len(plt_data)+2\n plt_data.fillna(0,inplace=True)\n title='[平均值]'+title\n summary=summary+' || 该平均分采用的权值是:\\n'+result_t['weight']\n if len(plt_data)>max_column_chart:\n prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_STACKED'},\\\n title=title,summary=summary,footnote=footnote)\n else:\n prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_STACKED'},\\\n title=title,summary=summary,footnote=footnote)\n\n\n\n # 排序题特殊处理\n if (qtype == u'排序题') and ('TOPN' in result_t):\n plt_data=result_t['TOPN']\n # 将频数和频数百分表保存至本地\n tmp=pd.concat([result_t['TOPN'],result_t['TOPN_fo']],axis=1)\n tmp.to_excel(Writer,qq,startrow=Writer_rows,float_format='%.3f')\n\n Writer_rows=len(plt_data)+2\n plt_data=plt_data*100\n # =======数据修正==============\n if 'code_order' in code[qq]:\n code_order=code[qq]['code_order']\n #code_order=[q for q in code_order if q in t.index]\n if u'合计' in plt_data.index:\n code_order=code_order+[u'合计']\n plt_data=pd.DataFrame(plt_data,index=code_order)\n plt_data.fillna(0,inplace=True)\n\n title='[TOPN]'+title\n if len(plt_data)>max_column_chart:\n prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_STACKED'},\\\n title=title,summary=summary,footnote=footnote)\n else:\n prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_STACKED'},\\\n title=title,summary=summary,footnote=footnote)\n\n\n\n # ========================文件生成和导出======================\n try:\n prs.save('.\\\\out\\\\'+filename+u'.pptx')\n except:\n prs.save('.\\\\out\\\\'+filename+u'_副本.pptx')\n conclusion.to_excel(Writer,'索引')\n Writer.save()\n return result\n\ndef onekey_gen(data,code,filename=u'reprotgen 报告自动生成',template=None):\n '''一键生成所有可能需要的报告\n 包括\n 描述统计报告\n 单选题的交叉分析报告\n '''\n try:\n summary_chart(data,code,filename=filename,template=template);\n except:\n print('整体报告生成过程中出现错误,将跳过..')\n pass\n print('已生成 '+filename)\n cross_qlist=pre_cross_qlist(data,code)\n if len(cross_qlist)==0:\n return None\n for cross_qq in cross_qlist:\n qq=cross_qq[0]\n cross_order=cross_qq[1]\n if ('name' in code[qq]) and (code[qq]['name']!=''):\n filename='{}_差异分析'.format(code[qq]['name'])\n else:\n filename='{}_差异分析'.format(qq)\n save_dstyle=None #['TGI','CHI']\n try:\n cross_chart(data,code,qq,filename=filename,cross_order=cross_order,\\\n save_dstyle=save_dstyle,template=template);\n print('已生成 '+filename)\n except:\n print(filename+'生成过程中出现错误,将跳过...')\n pass\n return None\n\n\ndef scorpion(data,code,filename='scorpion'):\n '''天蝎X计划\n 返回一个excel文件\n 1、索引\n 2、各个题目的频数表\n 3、所有可能的交叉分析\n '''\n\n if not os.path.exists('.\\\\out'):\n os.mkdir('.\\\\out')\n Writer=pd.ExcelWriter('.\\\\out\\\\'+filename+'.xlsx')\n try:\n qqlist=list(sorted(code,key=lambda c: int(re.findall('\\d+',c)[0])))\n except:\n qqlist=list(code.keys())\n qIndex=pd.DataFrame(index=qqlist,columns=[u'content',u'qtype',u'SampleSize'])\n qIndex.to_excel(Writer,u'索引')\n\n # 生成索引表和频数表\n Writer_rows=0\n for qq in qqlist:\n qtitle=code[qq]['content']\n qlist=code[qq]['qlist']\n qtype=code[qq]['qtype']\n if not(set(qlist) <= set(data.columns)):\n continue\n sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()\n qIndex.loc[qq,u'content']=qtitle\n qIndex.loc[qq,u'qtype']=qtype\n qIndex.loc[qq,u'SampleSize']=sample_len_qq\n if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:\n continue\n try:\n result_t=table(data[qlist],code=code[qq])\n 
except:\n print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))\n continue\n fop=result_t['fop']\n fo=result_t['fo']\n if (qtype == u'排序题') and ('TOPN' in result_t):\n tmp=result_t['TOPN']\n tmp[u'综合']=fo[u'综合']\n fo=tmp.copy()\n tmp=result_t['TOPN_fo']\n tmp[u'综合']=fop[u'综合']\n fop=tmp.copy()\n # =======保存到Excel中========\n fo_fop=pd.concat([fo,fop],axis=1)\n fo_fop.to_excel(Writer,u'频数表',startrow=Writer_rows,startcol=1,index_label=code[qq]['content'],float_format='%.3f')\n tmp=pd.DataFrame({'name':[qq]})\n tmp.to_excel(Writer,u'频数表',index=False,header=False,startrow=Writer_rows)\n Writer_rows+=len(fo_fop)+3\n qIndex.to_excel(Writer,'索引')\n\n crossAna=pd.DataFrame(columns=['RowVar','ColVar','SampleSize','pvalue','significant','summary'])\n N=0\n qqlist=[qq for qq in qqlist if code[qq]['qtype'] in ['单选题','多选题','矩阵单选题','排序题']]\n start_time=time.clock()\n N_cal=len(qqlist)*(len(qqlist)-1)*0.1# 用于计算脚本剩余时间\n for qq1 in qqlist:\n for qq2 in qqlist:\n #qtype1=code[qq1]['qtype']\n if (N>=N_cal) and (N<N_cal+1.0):\n tmp=(time.clock()-start_time)*9\n if tmp>60:\n print('请耐心等待, 预计还需要{:.1f}秒'.format(tmp))\n qtype2=code[qq2]['qtype']\n if (qq1==qq2) or (qtype2 not in [u'单选题',u'多选题']):\n continue\n data_index=data[code[qq1]['qlist']]\n data_column=data[code[qq2]['qlist']]\n samplesize=data_column.iloc[list(data_index.notnull().T.any()),:].notnull().T.any().sum()\n try:\n fo=qtable(data,code,qq1,qq2)['fo']\n except :\n crossAna.loc[N,:]=[qq1,qq2,samplesize,'','','']\n N+=1\n continue\n try:\n cdata=contingency(fo,alpha=0.05)\n except :\n crossAna.loc[N,:]=[qq1,qq2,samplesize,'','','']\n N+=1\n continue\n if cdata:\n result=cdata['significant']['result']\n pvalue=cdata['significant']['pvalue']\n summary=cdata['summary']['summary']\n else:\n result=-2\n pvalue=-2\n summary='没有找到结论'\n summary='\\n'.join(summary.splitlines()[1:])#去掉第一行\n if len(summary)==0:\n summary='没有找到结论'\n crossAna.loc[N,:]=[qq1,qq2,samplesize,pvalue,result,summary]\n N+=1\n crossAna.to_excel(Writer,'交叉分析表',index=False)\n\n Writer.save()\n" ]
[ [ "numpy.dot", "pandas.read_excel", "numpy.exp", "numpy.where", "scipy.linalg.diagsvd", "numpy.cumsum", "numpy.outer", "pandas.concat", "pandas.read_csv", "matplotlib.font_manager.FontProperties", "numpy.log", "pandas.DataFrame", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.polyfit", "numpy.sqrt", "pandas.ExcelWriter", "scipy.stats.chi2.ppf", "pandas.to_datetime", "numpy.array", "scipy.stats.chi2_contingency", "numpy.argwhere", "numpy.polyder", "scipy.stats.norm.ppf", "numpy.isnan", "numpy.sum", "sklearn.cluster.KMeans", "sklearn.metrics.silhouette_score", "pandas.Series" ] ]
lucijabrezocnik/NiaPy
[ "1582d1af835c022c77224ea0234178a399efc106", "1582d1af835c022c77224ea0234178a399efc106" ]
[ "NiaPy/benchmarks/cosinemixture.py", "NiaPy/algorithms/basic/cso.py" ]
[ "# encoding=utf8\n\n\"\"\"Implementations of Cosine mixture functions.\"\"\"\n\nfrom numpy import cos, pi\nfrom NiaPy.benchmarks.benchmark import Benchmark\n\n__all__ = ['CosineMixture']\n\nclass CosineMixture(Benchmark):\n\tr\"\"\"Implementations of Cosine mixture function.\n\n\tDate: 2018\n\n\tAuthor: Klemen Berkovič\n\n\tLicense: MIT\n\n\tFunction:\n\t**Cosine Mixture Function**\n\n\t\t:math:`f(\\textbf{x}) = - 0.1 \\sum_{i = 1}^D \\cos (5 \\pi x_i) - \\sum_{i = 1}^D x_i^2`\n\n\t\t**Input domain:**\n\t\tThe function can be defined on any input domain but it is usually\n\t\tevaluated on the hypercube :math:`x_i ∈ [-1, 1]`, for all :math:`i = 1, 2,..., D`.\n\n\t\t**Global maximu:**\n\t\t:math:`f(x^*) = -0.1 D`, at :math:`x^* = (0.0,...,0.0)`\n\n\tLaTeX formats:\n\t\tInline:\n\t\t\t\t$f(\\textbf{x}) = - 0.1 \\sum_{i = 1}^D \\cos (5 \\pi x_i) - \\sum_{i = 1}^D x_i^2$\n\n\t\tEquation:\n\t\t\t\t\\begin{equation} f(\\textbf{x}) = - 0.1 \\sum_{i = 1}^D \\cos (5 \\pi x_i) - \\sum_{i = 1}^D x_i^2 \\end{equation}\n\n\t\tDomain:\n\t\t\t\t$-1 \\leq x_i \\leq 1$\n\n\tReference:\n\thttp://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CosineMixture\n\t\"\"\"\n\tName = ['CosineMixture']\n\n\tdef __init__(self, Lower=-1.0, Upper=1.0):\n\t\tr\"\"\"Initialize of Cosine mixture benchmark.\n\n\t\tArgs:\n\t\t\tLower (Optional[float]): Lower bound of problem.\n\t\t\tUpper (Optional[float]): Upper bound of problem.\n\n\t\tSee Also:\n\t\t\t:func:`NiaPy.benchmarks.Benchmark.__init__`\n\t\t\"\"\"\n\t\tBenchmark.__init__(self, Lower, Upper)\n\n\t@staticmethod\n\tdef latex_code():\n\t\tr\"\"\"Return the latex code of the problem.\n\n\t\tReturns:\n\t\t\tstr: Latex code\n\t\t\"\"\"\n\t\treturn r'''$f(\\textbf{x}) = - 0.1 \\sum_{i = 1}^D \\cos (5 \\pi x_i) - \\sum_{i = 1}^D x_i^2$'''\n\n\tdef function(self):\n\t\tr\"\"\"Return benchmark evaluation function.\n\n\t\tReturns:\n\t\t\tCallable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function\n\t\t\"\"\"\n\t\tdef f(D, X):\n\t\t\tr\"\"\"Fitness function.\n\n\t\t\tArgs:\n\t\t\t\tD (int): Dimensionality of the problem\n\t\t\t\tsol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n\t\t\tReturns:\n\t\t\t\tfloat: Fitness value for the solution.\n\t\t\t\"\"\"\n\t\t\tv1, v2 = 0.0, 0.0\n\t\t\tfor i in range(D): v1, v2 = v1 + cos(5 * pi * X[i]), v2 + X[i] ** 2\n\t\t\treturn -0.1 * v1 - v2\n\t\treturn f\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n", "# encoding=utf8\nimport logging\nimport math\n\nimport numpy as np\nfrom NiaPy.algorithms.algorithm import Algorithm\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy.algorithms.basic')\nlogger.setLevel('INFO')\n\n__all__ = ['CatSwarmOptimization']\n\nclass CatSwarmOptimization(Algorithm):\n r\"\"\"Implementation of Cat swarm optimiization algorithm.\n\n **Algorithm:** Cat swarm optimization\n\n **Date:** 2019\n\n **Author:** Mihael Baketarić\n\n **License:** MIT\n\n **Reference paper:** Chu, Shu-Chuan & Tsai, Pei-Wei & Pan, Jeng-Shyang. (2006). Cat Swarm Optimization. 854-858. 10.1007/11801603_94.\n \"\"\"\n Name = ['CatSwarmOptimization', 'CSO']\n\n @staticmethod\n def algorithmInfo():\n \tr\"\"\"Get algorithm information.\n\n \tReturns:\n \t\tstr: Algorithm information.\n\n \tSee Also:\n \t\t* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`\n \t\"\"\"\n \treturn r\"\"\"Chu, Shu-Chuan & Tsai, Pei-Wei & Pan, Jeng-Shyang. (2006). Cat Swarm Optimization. 854-858. 
10.1007/11801603_94.\"\"\"\n\n @staticmethod\n def typeParameters(): return {\n 'NP': lambda x: isinstance(x, int) and x > 0,\n 'MR': lambda x: isinstance(x, (int, float)) and 0 <= x <= 1,\n 'C1': lambda x: isinstance(x, (int, float)) and x >= 0,\n 'SMP': lambda x: isinstance(x, int) and x > 0,\n 'SPC': lambda x: isinstance(x, bool),\n 'CDC': lambda x: isinstance(x, (int, float)) and 0 <= x <= 1,\n 'SRD': lambda x: isinstance(x, (int, float)) and 0 <= x <= 1,\n 'vMax': lambda x: isinstance(x, (int, float)) and x > 0\n }\n\n def setParameters(self, NP=30, MR=0.1, C1=2.05, SMP=3, SPC=True, CDC=0.85, SRD=0.2, vMax=1.9, **ukwargs):\n r\"\"\"Set the algorithm parameters.\n\n Arguments:\n NP (int): Number of individuals in population.\n MR (float): Mixture ratio.\n C1 (float): Constant in tracing mode.\n SMP (int): Seeking memory pool.\n SPC (bool): Self-position considering.\n CDC (float): Decides how many dimensions will be varied.\n SRD (float): Seeking range of the selected dimension.\n vMax (float): Maximal velocity.\n\n See Also:\n * :func:`NiaPy.algorithms.Algorithm.setParameters`\n \"\"\"\n Algorithm.setParameters(self, NP=NP, **ukwargs)\n self.MR, self.C1, self.SMP, self.SPC, self.CDC, self.SRD, self.vMax = MR, C1, SMP, SPC, CDC, SRD, vMax\n\n def initPopulation(self, task):\n r\"\"\"Initialize population.\n\n Args:\n task (Task): Optimization task.\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. Initialized population.\n 2. Initialized populations fitness/function values.\n 3. Additional arguments:\n * Dictionary of modes (seek or trace) and velocities for each cat\n See Also:\n * :func:`NiaPy.algorithms.Algorithm.initPopulation`\n \"\"\"\n pop, fpop, d = Algorithm.initPopulation(self, task)\n d['modes'] = self.randomSeekTrace()\n d['velocities'] = self.uniform(-self.vMax, self.vMax, [len(pop), task.D])\n return pop, fpop, d\n\n def repair(self, x, l, u):\n r\"\"\"Repair array to range.\n\n Args:\n x (numpy.ndarray): Array to repair.\n l (numpy.ndarray): Lower limit of allowed range.\n u (numpy.ndarray): Upper limit of allowed range.\n\n Returns:\n numpy.ndarray: Repaired array.\n \"\"\"\n ir = np.where(x < l)\n x[ir] = l[ir]\n ir = np.where(x > u)\n x[ir] = u[ir]\n return x\n\n def randomSeekTrace(self):\n r\"\"\"Set cats into seeking/tracing mode.\n\n Returns:\n numpy.ndarray: One or zero. One means tracing mode. Zero means seeking mode. Length of list is equal to NP.\n \"\"\"\n lista = np.zeros((self.NP,), dtype=int)\n indexes = np.arange(self.NP)\n self.Rand.shuffle(indexes)\n lista[indexes[:int(self.NP * self.MR)]] = 1\n return lista\n\n def weightedSelection(self, weights):\n r\"\"\"Random selection considering the weights.\n\n Args:\n weights (numpy.ndarray): weight for each potential position.\n\n Returns:\n int: index of selected next position.\n \"\"\"\n cumulative_sum = np.cumsum(weights)\n return np.argmax(cumulative_sum >= (self.rand() * cumulative_sum[-1]))\n\n def seekingMode(self, task, cat, fcat, pop, fpop, fxb):\n r\"\"\"Seeking mode.\n\n Args:\n task (Task): Optimization task.\n cat (numpy.ndarray): Individual from population.\n fcat (float): Current individual's fitness/function value.\n pop (numpy.ndarray): Current population.\n fpop (numpy.ndarray): Current population fitness/function values.\n fxb (float): Current best cat fitness/function value.\n\n Returns:\n Tuple[numpy.ndarray, float, numpy.ndarray, float]:\n 1. Updated individual's position\n 2. Updated individual's fitness/function value\n 3. Updated global best position\n 4. 
Updated global best fitness/function value\n \"\"\"\n cat_copies = []\n cat_copies_fs = []\n for j in range(self.SMP - 1 if self.SPC else self.SMP):\n cat_copies.append(cat.copy())\n indexes = np.arange(task.D)\n self.Rand.shuffle(indexes)\n to_vary_indexes = indexes[:int(task.D * self.CDC)]\n if self.randint(2) == 1:\n cat_copies[j][to_vary_indexes] += cat_copies[j][to_vary_indexes] * self.SRD\n else:\n cat_copies[j][to_vary_indexes] -= cat_copies[j][to_vary_indexes] * self.SRD\n cat_copies[j] = task.repair(cat_copies[j])\n cat_copies_fs.append(task.eval(cat_copies[j]))\n if self.SPC:\n cat_copies.append(cat.copy())\n cat_copies_fs.append(fcat)\n\n cat_copies_select_probs = np.ones(len(cat_copies))\n fmax = np.max(cat_copies_fs)\n fmin = np.min(cat_copies_fs)\n if any(x != cat_copies_fs[0] for x in cat_copies_fs):\n fb = fmax\n if math.isinf(fb):\n cat_copies_select_probs = np.full(len(cat_copies), fb)\n else:\n cat_copies_select_probs = np.abs(cat_copies_fs - fb) / (fmax - fmin)\n if fmin < fxb:\n fxb = fmin\n ind = self.randint(self.NP, 1, 0)\n pop[ind] = cat_copies[np.where(cat_copies_fs == fmin)[0][0]]\n fpop[ind] = fmin\n sel_index = self.weightedSelection(cat_copies_select_probs)\n return cat_copies[sel_index], cat_copies_fs[sel_index], pop, fpop\n\n def tracingMode(self, task, cat, velocity, xb):\n r\"\"\"Tracing mode.\n\n Args:\n task (Task): Optimization task.\n cat (numpy.ndarray): Individual from population.\n velocity (numpy.ndarray): Velocity of individual.\n xb (numpy.ndarray): Current best individual.\n Returns:\n Tuple[numpy.ndarray, float, numpy.ndarray]:\n 1. Updated individual's position\n 2. Updated individual's fitness/function value\n 3. Updated individual's velocity vector\n \"\"\"\n Vnew = self.repair(velocity + (self.uniform(0, 1, len(velocity)) * self.C1 * (xb - cat)), np.full(task.D, -self.vMax), np.full(task.D, self.vMax))\n cat_new = task.repair(cat + Vnew)\n return cat_new, task.eval(cat_new), Vnew\n\n def runIteration(self, task, pop, fpop, xb, fxb, velocities, modes, **dparams):\n r\"\"\"Core function of Cat Swarm Optimization algorithm.\n\n Args:\n task (Task): Optimization task.\n pop (numpy.ndarray): Current population.\n fpop (numpy.ndarray): Current population fitness/function values.\n xb (numpy.ndarray): Current best individual.\n fxb (float): Current best cat fitness/function value.\n velocities (numpy.ndarray): Velocities of individuals.\n modes (numpy.ndarray): Flag of each individual.\n **dparams (Dict[str, Any]): Additional function arguments.\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:\n 1. New population.\n 2. New population fitness/function values.\n 3. New global best solution.\n 4. New global best solutions fitness/objective value.\n 5. Additional arguments:\n * Dictionary of modes (seek or trace) and velocities for each cat.\n \"\"\"\n pop_copies = pop.copy()\n for k in range(len(pop_copies)):\n if modes[k] == 0:\n pop_copies[k], fpop[k], pop_copies[:], fpop[:] = self.seekingMode(task, pop_copies[k], fpop[k], pop_copies, fpop, fxb)\n else: # if cat in tracing mode\n pop_copies[k], fpop[k], velocities[k] = self.tracingMode(task, pop_copies[k], velocities[k], xb)\n ib = np.argmin(fpop)\n if fpop[ib] < fxb: xb, fxb = pop_copies[ib].copy(), fpop[ib]\n return pop_copies, fpop, xb, fxb, {'velocities': velocities, 'modes': self.randomSeekTrace()}\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n" ]
[ [ "numpy.cos" ], [ "numpy.max", "numpy.full", "numpy.zeros", "numpy.argmin", "numpy.min", "numpy.where", "numpy.arange", "numpy.abs", "numpy.cumsum" ] ]
altonelli/tensorflow-fast-style-transfer
[ "5046164c7702982d2d6b308d75555888549c0d0a", "5046164c7702982d2d6b308d75555888549c0d0a" ]
[ "style_transfer_tester.py", "style_transfer_trainer.py" ]
[ "from transform import Transform\n\nimport tensorflow as tf\n\n\nclass StyleTransferTester:\n\n def __init__(self, session, content_image, model_path):\n # session\n self.sess = session\n\n # input images\n self.x0 = content_image\n\n # input model\n self.model_path = model_path\n\n # image transform network\n self.transform = Transform()\n\n # build graph for style transfer\n self._build_graph()\n\n def _build_graph(self):\n\n # graph input\n self.x = tf.placeholder(tf.float32, shape=self.x0.shape, name='input')\n self.xi = tf.expand_dims(self.x, 0) # add one dim for batch\n\n # result image from transform-net\n self.y_hat = self.transform.net(self.xi/255.0)\n self.y_hat = tf.squeeze(self.y_hat) # remove one dim for batch\n self.y_hat = tf.clip_by_value(self.y_hat, 0., 255.)\n\n def test(self):\n\n # initialize parameters\n self.sess.run(tf.global_variables_initializer())\n\n # load pre-trained model\n saver = tf.train.Saver()\n saver.restore(self.sess, self.model_path)\n\n # get transformed image\n output = self.sess.run(self.y_hat, feed_dict={self.x: self.x0})\n\n return output\n\n\n\n\n\n", "from utils import Utils\nfrom google_storage_utils import GCS\nfrom transform import Transform\nfrom private_constants import PrivateConstants\nfrom constants import Constants\n# break for Colab\nimport tensorflow as tf\nimport numpy as np\nimport collections\nimport os\n\n\nclass StyleTransferTrainer:\n def __init__(self, content_layer_ids, style_layer_ids, content_images, style_image, session, net, num_epochs,\n batch_size, content_weight, style_weight, tv_weight, learn_rate, save_path, check_period, test_image,\n max_size, style_name=\"unknown\"):\n\n self.net = net\n self.sess = session\n self.style_name = style_name\n\n # sort layers info\n self.CONTENT_LAYERS = collections.OrderedDict(sorted(content_layer_ids.items()))\n self.STYLE_LAYERS = collections.OrderedDict(sorted(style_layer_ids.items()))\n\n # input images\n self.x_list = content_images\n mod = len(content_images) % batch_size\n self.style_name = style_name\n self.x_list = self.x_list[:-mod]\n self.y_s0 = style_image\n self.content_size = len(self.x_list)\n\n # parameters for optimization\n self.num_epochs = num_epochs\n self.content_weight = content_weight\n self.style_weight = style_weight\n self.tv_weight = tv_weight\n self.learn_rate = learn_rate\n self.batch_size = batch_size\n self.check_period = check_period\n\n # path for model to be saved\n self.save_path = save_path\n\n # image transform network\n self.transform = Transform()\n self.tester = Transform('test')\n\n # build graph for style transfer\n self._build_graph()\n\n # test during training\n if test_image is not None:\n self.TEST = True\n\n # load content image\n self.test_image = Utils.load_image(test_image, max_size=max_size)\n\n # build graph\n self.x_test = tf.placeholder(tf.float32, shape=self.test_image.shape, name='test_input')\n self.xi_test = tf.expand_dims(self.x_test, 0) # add one dim for batch\n\n # result image from transform-net\n self.y_hat_test = self.tester.net(\n self.xi_test / 255.0) # please build graph for train first. 
tester.net reuses variables.\n\n else:\n self.TEST = False\n\n def _build_graph(self):\n\n \"\"\" prepare data \"\"\"\n\n self.batch_shape = (self.batch_size,256,256,3)\n\n # graph input\n self.y_c = tf.placeholder(tf.float32, shape=self.batch_shape, name='content')\n self.y_s = tf.placeholder(tf.float32, shape=self.y_s0.shape, name='style')\n\n # preprocess for VGG\n self.y_c_pre = self.net.preprocess(self.y_c)\n self.y_s_pre = self.net.preprocess(self.y_s)\n\n # get content-layer-feature for content loss\n content_layers = self.net.feed_forward(self.y_c_pre, scope='content')\n self.Ps = {}\n for id in self.CONTENT_LAYERS:\n self.Ps[id] = content_layers[id]\n\n # get style-layer-feature for style loss\n style_layers = self.net.feed_forward(self.y_s_pre, scope='style')\n self.As = {}\n for id in self.STYLE_LAYERS:\n self.As[id] = self._gram_matrix(style_layers[id])\n\n # result of image transform net\n self.x = self.y_c/255.0\n self.y_hat = self.transform.net(self.x)\n \n # get layer-values for x\n self.y_hat_pre = self.net.preprocess(self.y_hat)\n self.Fs = self.net.feed_forward(self.y_hat_pre, scope='mixed')\n\n \"\"\" compute loss \"\"\"\n\n # style & content losses\n L_content = 0\n L_style = 0\n for id in self.Fs:\n if id in self.CONTENT_LAYERS:\n ## content loss ##\n\n F = self.Fs[id] # content feature of x\n P = self.Ps[id] # content feature of p\n\n b, h, w, d = F.get_shape() # first return value is batch size (must be one)\n b = b.value # batch size\n N = h.value*w.value # product of width and height\n M = d.value # number of filters\n\n w = self.CONTENT_LAYERS[id] # weight for this layer\n\n L_content += w * 2 * tf.nn.l2_loss(F-P) / (b*N*M)\n\n elif id in self.STYLE_LAYERS:\n ## style loss ##\n\n F = self.Fs[id]\n\n b, h, w, d = F.get_shape() # first return value is batch size (must be one)\n b = b.value # batch size\n N = h.value * w.value # product of width and height\n M = d.value # number of filters\n\n w = self.STYLE_LAYERS[id] # weight for this layer\n\n G = self._gram_matrix(F, (b,N,M)) # style feature of x\n A = self.As[id] # style feature of a\n\n L_style += w * 2 * tf.nn.l2_loss(G - A) / (b * (M ** 2))\n\n # total variation loss\n L_tv = self._get_total_variation_loss(self.y_hat)\n\n \"\"\" compute total loss \"\"\"\n\n # Loss of total variation regularization\n alpha = self.content_weight\n beta = self.style_weight\n gamma = self.tv_weight\n\n self.L_content = alpha*L_content\n self.L_style = beta*L_style\n self.L_tv = gamma*L_tv\n self.L_total = self.L_content + self.L_style + self.L_tv\n\n # add summary for each loss\n tf.summary.scalar('L_content', self.L_content)\n tf.summary.scalar('L_style', self.L_style)\n tf.summary.scalar('L_tv', self.L_tv)\n tf.summary.scalar('L_total', self.L_total)\n\n # borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/optimize.py\n def _get_total_variation_loss(self, img):\n b, h, w, d = img.get_shape()\n b = b.value\n h = h.value\n w = w.value\n d = d.value\n tv_y_size = (h-1) * w * d\n tv_x_size = h * (w-1) * d\n y_tv = tf.nn.l2_loss(img[:, 1:, :, :] - img[:, :self.batch_shape[1] - 1, :, :])\n x_tv = tf.nn.l2_loss(img[:, :, 1:, :] - img[:, :, :self.batch_shape[2] - 1, :])\n loss = 2. 
* (x_tv / tv_x_size + y_tv / tv_y_size) / b\n\n loss = tf.cast(loss, tf.float32)\n return loss\n\n def train(self):\n \"\"\" define optimizer Adam \"\"\"\n global_step = tf.contrib.framework.get_or_create_global_step()\n\n trainable_variables = tf.trainable_variables()\n grads = tf.gradients(self.L_total, trainable_variables)\n\n optimizer = tf.train.AdamOptimizer(self.learn_rate)\n train_op = optimizer.apply_gradients(zip(grads, trainable_variables), global_step=global_step,\n name='train_step')\n\n \"\"\" tensor board \"\"\"\n # merge all summaries into a single op\n merged_summary_op = tf.summary.merge_all()\n\n # op to write logs to Tensorboard\n summary_writer = tf.summary.FileWriter(self.save_path, graph=tf.get_default_graph())\n\n \"\"\" session run \"\"\"\n self.sess.run(tf.global_variables_initializer())\n\n # saver to save model\n saver = tf.train.Saver()\n\n # restore check-point if it exits\n checkpoint_exists = True\n try:\n ckpt_state = tf.train.get_checkpoint_state(self.save_path)\n except tf.errors.OutOfRangeError as e:\n print('Cannot restore checkpoint: %s' % e)\n checkpoint_exists = False\n if not (ckpt_state and ckpt_state.model_checkpoint_path):\n print('No model to restore at %s' % self.save_path)\n checkpoint_exists = False\n\n if checkpoint_exists:\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(self.sess, ckpt_state.model_checkpoint_path)\n\n \"\"\" loop for train \"\"\"\n num_examples = len(self.x_list)\n\n # get iteration info\n if checkpoint_exists:\n iterations = self.sess.run(global_step)\n epoch = (iterations * self.batch_size) // num_examples\n iterations = iterations - epoch*(num_examples // self.batch_size)\n else:\n epoch = 0\n iterations = 0\n\n print(\"About to train {}/{} epocs\".format(epoch, self.num_epochs))\n print(\"{} iter, {} batchsize, {} num examples\".format(iterations, self.batch_size, num_examples))\n\n while epoch < self.num_epochs:\n while iterations * self.batch_size < num_examples:\n\n curr = iterations * self.batch_size\n step = curr + self.batch_size\n x_batch = np.zeros(self.batch_shape, dtype=np.float32)\n for j, img_p in enumerate(self.x_list[curr:step]):\n x_batch[j] = Utils.get_img(img_p, (256, 256, 3)).astype(np.float32)\n\n iterations += 1\n\n assert x_batch.shape[0] == self.batch_size\n\n _, summary, L_total, L_content, L_style, L_tv, step = self.sess.run(\n [train_op, merged_summary_op, self.L_total, self.L_content, self.L_style, self.L_tv, global_step],\n feed_dict={self.y_c: x_batch, self.y_s: self.y_s0})\n\n print('epoch : %d, iter : %4d, ' % (epoch, step),\n 'L_total : %g, L_content : %g, L_style : %g, L_tv : %g' % (L_total, L_content, L_style, L_tv))\n\n # write logs at every iteration\n summary_writer.add_summary(summary, iterations)\n\n if step % self.check_period == 0:\n res = saver.save(self.sess, self.save_path + '/final.ckpt', step)\n for suffix in Constants.CKPT_SUFFIXES:\n local_path = self.save_path + '/final.ckpt-' + str(int(step)) + suffix\n gcs_path = PrivateConstants.MODEL_PATH + '/training/{}/{}'.format(self.style_name, Utils.get_formatted_date()) +'/final.ckpt-' + str(int(step)) + suffix\n GCS.upload_to_gcs(PrivateConstants.BUCKET_NAME, local_path, gcs_path)\n\n if self.TEST:\n output_image = self.sess.run([self.y_hat_test], feed_dict={self.x_test: self.test_image})\n output_image = np.squeeze(output_image[0]) # remove one dim for batch\n output_image = np.clip(output_image, 0., 255.)\n\n Utils.save_image(output_image, self.save_path + '/result_' + \"%05d\" % 
step + '.jpg')\n epoch += 1\n iterations = 0\n res = saver.save(self.sess,self.save_path+'/final.ckpt')\n\n def _gram_matrix(self, tensor, shape=None):\n\n if shape is not None:\n B = shape[0] # batch size\n HW = shape[1] # height x width\n C = shape[2] # channels\n CHW = C*HW\n else:\n B, H, W, C = map(lambda i: i.value, tensor.get_shape())\n HW = H*W\n CHW = W*H*C\n\n # reshape the tensor so it is a (B, 2-dim) matrix\n # so that 'B'th gram matrix can be computed\n feats = tf.reshape(tensor, (B, HW, C))\n\n # leave dimension of batch as it is\n feats_T = tf.transpose(feats, perm=[0, 2, 1])\n\n # paper suggests to normalize gram matrix by its number of elements\n gram = tf.matmul(feats_T, feats) / CHW\n\n return gram\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.expand_dims", "tensorflow.train.Saver", "tensorflow.squeeze", "tensorflow.placeholder", "tensorflow.clip_by_value", "tensorflow.global_variables_initializer" ], [ "tensorflow.matmul", "tensorflow.train.get_checkpoint_state", "tensorflow.gradients", "tensorflow.reshape", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.trainable_variables", "tensorflow.get_default_graph", "tensorflow.train.Saver", "tensorflow.logging.info", "tensorflow.transpose", "tensorflow.train.AdamOptimizer", "numpy.zeros", "tensorflow.summary.scalar", "tensorflow.expand_dims", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.nn.l2_loss", "tensorflow.placeholder", "numpy.clip", "tensorflow.summary.merge_all", "numpy.squeeze" ] ]
zlheui/rafiki
[ "f9a3d01ad1620bd91bd5d4d758fedac54e09a803" ]
[ "rafiki/predictor/ensemble.py" ]
[ "import numpy as np\n\nfrom rafiki.constants import TaskType\n\ndef ensemble_predictions(predictions_list, predict_label_mappings, task):\n # TODO: Better ensembling of predictions based on `predict_label_mapping` & `task` of models\n\n if len(predictions_list) == 0 or len(predictions_list[0]) == 0:\n return []\n\n # By default, just return some trial's predictions\n index = 0\n predictions = predictions_list[index]\n predict_label_mapping = predict_label_mappings[index]\n\n if task == TaskType.IMAGE_CLASSIFICATION:\n # Map probabilities to most probable label\n pred_indices = np.argmax(predictions, axis=1)\n predictions = [predict_label_mapping[str(i)] for i in pred_indices]\n \n return predictions\n" ]
[ [ "numpy.argmax" ] ]
michellefli/EverybodyDanceNow_reproduce_pytorch
[ "50bae6478cd23661397cc38c164e366fd475967e" ]
[ "train_pose2vid.py" ]
[ "import os\nimport numpy as np\nimport torch\nimport time\nimport sys\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nfrom pathlib import Path\nimport warnings\n\nwarnings.filterwarnings('ignore')\nmainpath = os.getcwd()\npix2pixhd_dir = Path(mainpath+'/src/pix2pixHD/')\nsys.path.append(str(pix2pixhd_dir))\n\n\nfrom data.data_loader import CreateDataLoader\nfrom models.models import create_model\nimport util.util as util\nfrom util.visualizer import Visualizer\nimport src.config.train_opt as opt\n\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\ntorch.multiprocessing.set_sharing_strategy('file_system')\ntorch.backends.cudnn.benchmark = True\n\n\ndef main():\n iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')\n data_loader = CreateDataLoader(opt)\n dataset = data_loader.load_data()\n dataset_size = len(data_loader)\n print('#training images = %d' % dataset_size)\n\n start_epoch, epoch_iter = 1, 0\n total_steps = (start_epoch - 1) * dataset_size + epoch_iter\n display_delta = total_steps % opt.display_freq\n print_delta = total_steps % opt.print_freq\n save_delta = total_steps % opt.save_latest_freq\n\n model = create_model(opt)\n model = model.cuda()\n visualizer = Visualizer(opt)\n\n for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time()\n if epoch != start_epoch:\n epoch_iter = epoch_iter % dataset_size\n for i, data in enumerate(dataset, start=epoch_iter):\n iter_start_time = time.time()\n total_steps += opt.batchSize\n epoch_iter += opt.batchSize\n\n # whether to collect output images\n save_fake = total_steps % opt.display_freq == display_delta\n\n ############## Forward Pass ######################\n losses, generated = model(Variable(data['label']), Variable(data['inst']),\n Variable(data['image']), Variable(data['feat']), infer=save_fake)\n\n # sum per device losses\n losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]\n loss_dict = dict(zip(model.loss_names, losses))\n\n # calculate final loss scalar\n loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5\n loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict.get('G_VGG', 0)\n\n ############### Backward Pass ####################\n # update generator weights\n model.optimizer_G.zero_grad()\n loss_G.backward()\n model.optimizer_G.step()\n\n # update discriminator weights\n model.optimizer_D.zero_grad()\n loss_D.backward()\n model.optimizer_D.step()\n\n\n ############## Display results and errors ##########\n ### print out errors\n if total_steps % opt.print_freq == print_delta:\n errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()} # CHANGE: removed [0] after v.data\n t = (time.time() - iter_start_time) / opt.batchSize\n visualizer.print_current_errors(epoch, epoch_iter, errors, t)\n visualizer.plot_current_errors(errors, total_steps)\n\n ### display output images\n if save_fake:\n visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),\n ('synthesized_image', util.tensor2im(generated.data[0])),\n ('real_image', util.tensor2im(data['image'][0]))])\n visualizer.display_current_results(visuals, epoch, total_steps)\n\n ### save latest model\n if total_steps % opt.save_latest_freq == save_delta:\n print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))\n model.save('latest')\n np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')\n\n if epoch_iter >= dataset_size:\n break\n\n # end of epoch\n 
print('End of epoch %d / %d \\t Time Taken: %d sec' %\n (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n\n ### save model for this epoch\n if epoch % opt.save_epoch_freq == 0:\n print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))\n model.save('latest')\n model.save(epoch)\n np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')\n\n ### instead of only training the local enhancer, train the entire network after certain iterations\n if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):\n model.update_fixed_params()\n\n ### linearly decay learning rate after certain iterations\n if epoch > opt.niter:\n model.update_learning_rate()\n\n torch.cuda.empty_cache()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.savetxt", "torch.autograd.Variable", "torch.cuda.empty_cache", "torch.mean", "torch.multiprocessing.set_sharing_strategy" ] ]
donnel2-cooper/drone_control
[ "3bb3a1c1f768916ac41d4b78692e2edab0776c07", "3bb3a1c1f768916ac41d4b78692e2edab0776c07" ]
[ "Lectures/MAV_Dynamics/mav_dynamics.py", "Lectures/dirk_drone_code/gravity.py" ]
[ "\"\"\"\nmavDynamics \n - this file implements the dynamic equations of motion for MAV\n - use unit quaternion for the attitude state\n \npart of mavPySim \n - Beard & McLain, PUP, 2012\n - Update history: \n 12/20/2018 - RWB\n 2/24/2020\n\"\"\"\nimport sys\nsys.path.append('..')\nimport numpy as np\n\n# load message types\nfrom message_types.msg_state import msgState\n\nimport parameters.aerosonde_parameters as MAV\nfrom tools.rotations import Quaternion2Rotation, Quaternion2Euler, skew, quat_prod\nimport mavsim_python_chap5_model_coef as chap5\n\nclass mavDynamics:\n def __init__(self, Ts):\n self._ts_simulation = Ts\n # set initial states based on parameter file\n # _state is the 13x1 internal state of the aircraft that is being propagated:\n # _state = [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r]\n # We will also need a variety of other elements that are functions of the _state and the wind.\n # self.true_state is a 19x1 vector that is estimated and used by the autopilot to control the aircraft:\n # true_state = [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz]\n self._state = np.array([[MAV.pn0], # (0)\n [MAV.pe0], # (1)\n [MAV.pd0], # (2)\n [MAV.u0], # (3)\n [MAV.v0], # (4)\n [MAV.w0], # (5)\n [MAV.e0], # (6)\n [MAV.e1], # (7)\n [MAV.e2], # (8)\n [MAV.e3], # (9)\n [MAV.p0], # (10)\n [MAV.q0], # (11)\n [MAV.r0]]) # (12)\n \n # store wind data for fast recall since it is used at various points in simulation\n self._wind = np.array([[0.], [0.], [0.]]) # wind in NED frame in meters/sec\n self._update_velocity_data()\n # store forces to avoid recalculation in the sensors function\n self._forces = np.array([[], [], []])\n \n ur = self._state.item(3)\n vr = self._state.item(4)\n wr = self._state.item(5)\n \n self._Va = np.sqrt(ur**2 + vr**2 + wr**2)\n self._alpha = np.arctan2(wr,ur)\n self._beta = np.arcsin(vr/self._Va)\n \n # initialize true_state message\n self.true_state = msgState()\n\n ###################################\n # public functions\n def update(self, delta, wind):\n \"\"\"\n Integrate the differential equations defining dynamics, update sensors\n delta = (delta_a, delta_e, delta_r, delta_t) are the control inputs\n wind is the wind vector in inertial coordinates\n Ts is the time step between function calls.\n \"\"\"\n # get forces and moments acting on rigid bod\n forces_moments = self._forces_moments(delta)\n\n # Integrate ODE using Runge-Kutta RK4 algorithm\n time_step = self._ts_simulation\n k1 = self._derivatives(self._state, forces_moments)\n k2 = self._derivatives(self._state + time_step/2.*k1, forces_moments)\n k3 = self._derivatives(self._state + time_step/2.*k2, forces_moments)\n k4 = self._derivatives(self._state + time_step*k3, forces_moments)\n self._state += time_step/6 * (k1 + 2*k2 + 2*k3 + k4)\n\n # normalize the quaternion\n e0 = self._state.item(6)\n e1 = self._state.item(7)\n e2 = self._state.item(8)\n e3 = self._state.item(9)\n normE = np.sqrt(e0**2+e1**2+e2**2+e3**2)\n self._state[6][0] = self._state.item(6)/normE\n self._state[7][0] = self._state.item(7)/normE\n self._state[8][0] = self._state.item(8)/normE\n self._state[9][0] = self._state.item(9)/normE\n\n # update the airspeed, angle of attack, and side slip angles using new state\n self._update_velocity_data(wind)\n\n # update the message class for the true state\n self._update_true_state()\n\n def external_set_state(self, new_state):\n self._state = new_state\n\n ###################################\n # private functions\n def _derivatives(self, x, 
u):\n \"\"\"\n for the dynamics xdot = f(x, u), returns fdot(x, u)\n \"\"\"\n # Get force, moment (torque)\n f_b = u[:3]\n m_b = u[3:]\n # Get position, velocity, quaternion (rotation), angular velocity \n r_i = x[:3] # wrt to i-frame\n v_b = x[3:6] # wrt to i-frame\n q_ib = x[6:10] # for rotation b to i-frame\n w_b = x[10:] # wrt to b-frame\n \n # Normalize quat. -> rotation\n q_ib = q_ib/np.linalg.norm(q_ib) # normalize\n R_ib = Quaternion2Rotation(q_ib)\n \n # Compute equations of motion\n # d/dt(r_i) \n rdot_i = R_ib @ v_b\n \n # d/dt(v_b)\n vdot_b = (1/MAV.mass)*f_b-skew(w_b) @ v_b\n \n # d/dt(q_ib)\n wq_ib = np.zeros((4,1))\n wq_ib[1:] = w_b\n qdot_ib = 0.5 * quat_prod(wq_ib, q_ib)\n wt_b = skew(w_b)\n \n # d/dt(w_b)\n wdot_b = np.linalg.inv(MAV.J) @ (m_b - (wt_b @ (MAV.J @ w_b)))\n \n x_out = np.concatenate([rdot_i,vdot_b,qdot_ib,np.array(wdot_b)],axis = 0)\n return x_out\n\n def _update_velocity_data(self, wind=np.zeros((6,1))):\n steady_state = wind[0:3]\n gust = wind[3:6]\n ur = self._state.item(3) - steady_state[0] - gust[0]\n vr = self._state.item(4) - steady_state[1] - gust[1]\n wr = self._state.item(5) - steady_state[2] - gust[2]\n \n self._Va = np.sqrt(ur**2 + vr**2 + wr**2)\n self._alpha = np.arctan2(wr,ur)\n self._beta = np.arcsin(vr/self._Va)\n\n\n def thrust_from_prop(self, delta_t):\n # compute thrust and torque due to propeller (See addendum by McLain)\n # map delta_t throttle command (0 to 1) into motor input voltage\n \n V_in = MAV.V_max * delta_t\n KQ = MAV.KQ\n \n # Quadratic formula to solve for motor speed\n a = MAV.C_Q0 * MAV.rho * np.power(MAV.D_prop, 5) / ((2. * np.pi )**2 )\n b = (MAV.C_Q1 * MAV.rho * np.power(MAV.D_prop, 4) / (2.*np.pi)) * self._Va + KQ**2/MAV.R_motor\n c = MAV.C_Q2 * MAV.rho * np.power(MAV.D_prop, 3) * self._Va**2 - (KQ / MAV.R_motor ) * V_in + KQ * MAV.i0\n # Consider only positive root\n Omega_op = (-b + np.sqrt(b**2 - 4*a* c)) / (2. 
* a )\n # compute advance ratio\n J_op = 2 * np.pi * self._Va / (Omega_op * MAV.D_prop)\n # compute nondimensionalized coefficients of thrust and torque\n C_T = MAV.C_T2 * J_op **2 + MAV.C_T1 * J_op + MAV.C_T0\n C_Q = MAV.C_Q2 * J_op **2 + MAV.C_Q1 * J_op + MAV.C_Q0\n # add thrust and torque due to propeller\n n = Omega_op / (2 * np.pi )\n fx = MAV.rho * n**2 * np.power(MAV.D_prop, 4) * C_T\n Mx = -MAV.rho * n**2 * np.power(MAV.D_prop, 5) * C_Q\n return fx,Mx\n \n def sigma(self,alpha):\n # pseudo sigmoid functions with cutoff +- alpha_0, returns coef btw 0 and 1\n a1 = -MAV.M * (alpha - MAV.alpha0)\n a2 = MAV.M * (alpha + MAV.alpha0)\n sigma_alpha = (1 + np.exp(a1)+np.exp(a2)) / ((1+np.exp(a1))*(1+np.exp(a2)))\n return sigma_alpha\n \n def CL(self,alpha):\n CL0 = MAV.C_L_0\n CLA = MAV.C_L_alpha\n sigma_alpha = self.sigma(alpha)\n # returns lift coefficient using eq 4.9\n CL_alpha = (1-sigma_alpha)*(CL0 + CLA*alpha) + sigma_alpha*(2*np.sign(alpha)*np.sin(alpha)**2 * np.cos(alpha))\n return CL_alpha\n \n def CD(self,alpha):\n # returns drag coefficient using eq 4.11\n CD_alpha = MAV.C_D_p + (MAV.C_L_0 + MAV.C_L_alpha*alpha)**2/(np.pi*MAV.e*MAV.AR)\n return CD_alpha\n \n def Cx(self,alpha):\n return -self.CD(alpha)*np.cos(alpha) + self.CL(alpha)*np.sin(alpha)\n \n def Cx_q(self,alpha):\n return -MAV.C_D_q*np.cos(alpha) + MAV.C_L_q*np.sin(alpha)\n \n def Cx_deltae(self,alpha):\n return -MAV.C_D_delta_e*np.cos(alpha) + MAV.C_L_delta_e*np.sin(alpha)\n \n def Cz(self,alpha):\n return -self.CD(alpha)*np.sin(alpha)-self.CL(alpha)*np.cos(alpha)\n \n def Cz_q(self,alpha):\n return -MAV.C_D_q*np.sin(alpha)-MAV.C_L_q*np.cos(alpha)\n \n def Cz_deltae(self,alpha):\n return -MAV.C_D_delta_e*np.sin(alpha)-MAV.C_L_delta_e*np.cos(alpha)\n \n def _forces_moments(self, delta):\n \"\"\"\n return the forces on the UAV based on the state, wind, and control surfaces\n :param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)\n :return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)\n \"\"\"\n phi, theta, psi = Quaternion2Euler(self._state[6:10])\n p = self._state.item(10)\n q = self._state.item(11)\n r = self._state.item(12)\n\n delta_e = delta.item(0)\n delta_a = delta.item(1)\n delta_r = delta.item(2)\n delta_t = delta.item(3)\n \n # Gravitational Components of Force, Moments = 0\n mg = MAV.mass*MAV.gravity\n fx_grav = -mg*np.sin(theta)\n fy_grav = mg* np.cos(theta) * np.sin(phi)\n fz_grav = mg* np.cos(theta) * np.cos(phi)\n \n # Thrust Components of Force and Moments\n fx_thrust,Mx_thrust = self.thrust_from_prop(delta_t)\n fy_thrust = 0\n fz_thrust = 0\n My_thrust = 0\n Mz_thrust = 0\n \n # Aerodynamic Components of Forces and Moments\n b = MAV.b\n cyp = MAV.C_Y_p\n cyr = MAV.C_Y_r\n cydeltaa = MAV.C_Y_delta_a\n cydeltar = MAV.C_Y_delta_r\n \n aero_coef = 0.5*MAV.rho*self._Va**2*MAV.S_wing\n fx_aero = aero_coef * (self.Cx(self._alpha) + self.Cx_q(self._alpha)*MAV.c/(2*self._Va)*q + self.Cx_deltae(self._alpha)*delta_e)\n fy_aero = aero_coef * (MAV.C_Y_0 + MAV.C_Y_beta*self._beta + MAV.C_Y_p*b/(2*self._Va)*p + cyr * b/(2*self._Va)*r + cydeltaa * delta_a + cydeltar* delta_r)\n fz_aero = aero_coef * (self.Cz(self._alpha) + self.Cz_q(self._alpha)*MAV.c/(2*self._Va)*q + self.Cz_deltae(self._alpha)*delta_e)\n Mx_aero = aero_coef * MAV.b * (MAV.C_ell_0 + MAV.C_ell_beta*self._beta + MAV.C_ell_p*b/(2*self._Va)*p + MAV.C_ell_r*b/(2*self._Va)*r + MAV.C_ell_delta_a*delta_a + MAV.C_ell_delta_r*delta_r)\n My_aero = aero_coef * MAV.c * (MAV.C_m_0 + MAV.C_m_alpha*self._alpha + MAV.C_m_q*MAV.c/(2*self._Va)*q 
+ MAV.C_m_delta_e*delta_e)\n Mz_aero = aero_coef * MAV.b * (MAV.C_n_0 + MAV.C_n_beta*self._beta + MAV.C_n_p*MAV.b/(2*self._Va)*p + MAV.C_n_r*MAV.b/(2*self._Va)*r + MAV.C_n_delta_a*delta_a + MAV.C_n_delta_r*delta_r)\n \n \n \n fx = fx_grav + fx_aero + fx_thrust\n fy = fy_grav + fy_aero + fy_thrust\n fz = fz_grav + fz_aero + fz_thrust\n # print('fx = ',fx)\n # print('fy = ',fy)\n # print('fz = ',fz)\n Mx = Mx_aero + Mx_thrust\n My = My_aero + My_thrust\n Mz = Mz_aero + Mz_thrust\n # print('Mx = ',Mx)\n # print('My = ',My)\n # print('Mz = ',Mz)\n\n self._forces[0] = fx\n self._forces[1] = fy\n self._forces[2] = fz\n fm = np.reshape(np.array([fx, fy, fz, Mx, My, Mz]),[6,1])\n return fm\n\n def _update_true_state(self):\n # update the class structure for the true state:\n # [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz]\n phi, theta, psi = Quaternion2Euler(self._state[6:10])\n pdot = Quaternion2Rotation(self._state[6:10]) @ self._state[3:6]\n self.true_state.pn = self._state.item(0)\n self.true_state.pe = self._state.item(1)\n self.true_state.h = -self._state.item(2)\n self.true_state.Va = self._Va\n self.true_state.alpha = self._alpha\n self.true_state.beta = self._beta\n self.true_state.phi = phi\n self.true_state.theta = theta\n self.true_state.psi = psi\n self.true_state.Vg = np.linalg.norm(pdot)\n self.true_state.gamma = np.arcsin(pdot.item(2) / self.true_state.Vg)\n self.true_state.chi = np.arctan2(pdot.item(1), pdot.item(0))\n self.true_state.p = self._state.item(10)\n self.true_state.q = self._state.item(11)\n self.true_state.r = self._state.item(12)\n self.true_state.wn = self._wind.item(0)\n self.true_state.we = self._wind.item(1)\n ", "import numpy as np\nfrom rotations import rot2, rot3\nimport mavsim_python_parameters_aerosonde_parameters as P\n\nclass Gravity:\n def __init__(self, state):\n self.mass = P.mass\n self.gravity = P.gravity\n self.state = state\n \n \n # Aero quantities\n @property\n def force(self):\n R_ib = self.state.rot\n R_bi = R_ib.T\n W_i = np.array([0, 0, P.mass*P.gravity])\n F = R_bi @ W_i\n return F.flatten()\n " ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.sin", "numpy.zeros", "numpy.arcsin", "numpy.exp", "numpy.sign", "numpy.arctan2", "numpy.sqrt", "numpy.cos", "numpy.power", "numpy.linalg.inv" ], [ "numpy.array" ] ]
PennLINC/xcp_abcd
[ "983ca22febc6cbb59b54076b0638a246bafc59b4" ]
[ "xcp_abcd/utils/dcan2fmriprep.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\nimport os,json,glob,re\nimport numpy as np \nimport pandas as pd\nimport nibabel as nb \nfrom nilearn.input_data import NiftiMasker\n\ndef dcan2fmriprep(dcandir,outdir,sub_id=None):\n dcandir = os.path.abspath(dcandir)\n outdir = os.path.abspath(outdir)\n if sub_id is None:\n sub_idir = glob.glob(dcandir +'/sub*')\n sub_id = [ os.path.basename(j) for j in sub_idir]\n\n for j in sub_id:\n dcan2fmriprepx(dcan_dir=dcandir,out_dir=outdir,sub_id=j)\n \n return sub_id\n\n\ndef dcan2fmriprepx(dcan_dir,out_dir,sub_id):\n \"\"\"\n dcan2fmriprep(dcan_dir,out_dir)\n \"\"\"\n # get session id if available \n \n sess =glob.glob(dcan_dir+'/'+sub_id+'/s*')\n ses_id = []\n ses_id = [ j.split('ses-')[1] for j in sess]\n # anat dirx \n \n \n for ses in ses_id:\n anat_dirx = dcan_dir+'/' + sub_id + '/ses-' +ses + '/files/MNINonLinear/'\n anatdir = out_dir +'/' + sub_id + '/ses-'+ses+ '/anat/'\n os.makedirs(anatdir,exist_ok=True)\n sess='ses-'+ses\n tw1 = anat_dirx +'/T1w.nii.gz'\n brainmask = anat_dirx + '/brainmask_fs.nii.gz'\n ribbon = anat_dirx + '/ribbon.nii.gz'\n segm = anat_dirx + '/aparc+aseg.nii.gz'\n\n midR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.midthickness.32k_fs_LR.surf.gii')[0]\n midL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.midthickness.32k_fs_LR.surf.gii')[0]\n infR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.inflated.32k_fs_LR.surf.gii')[0]\n infL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.inflated.32k_fs_LR.surf.gii')[0]\n\n pialR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.pial.32k_fs_LR.surf.gii')[0]\n pialL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.pial.32k_fs_LR.surf.gii')[0]\n\n whiteR = glob.glob(anat_dirx + '/fsaverage_LR32k/*R.white.32k_fs_LR.surf.gii')[0]\n whiteL = glob.glob(anat_dirx + '/fsaverage_LR32k/*L.white.32k_fs_LR.surf.gii')[0]\n \n dcanimages = [tw1,segm,ribbon, brainmask,tw1,tw1,midL,midR,pialL,pialR,whiteL,whiteR,infL,infR]\n \n t1wim = anatdir + sub_id + '_' + sess + '_desc-preproc_T1w.nii.gz'\n t1seg = anatdir + sub_id + '_' + sess + '_dseg.nii.gz'\n t1ribbon = anatdir + sub_id + '_' + sess + '_desc-ribbon_T1w.nii.gz'\n t1brainm = anatdir + sub_id + '_' + sess + '_desc-brain_mask.nii.gz'\n regfile1 = anatdir + sub_id + '_' + sess + '_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5'\n regfile2 = anatdir + sub_id + '_' + sess + '_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5'\n\n lMid = anatdir + sub_id + '_' + sess + '_hemi-L_midthickness.surf.gii'\n rMid = anatdir + sub_id + '_' + sess + '_hemi-R_midthickness.surf.gii'\n\n lpial = anatdir + sub_id + '_' + sess + '_hemi-L_pial.surf.gii'\n rpial = anatdir + sub_id + '_' + sess + '_hemi-R_pial.surf.gii'\n\n lwhite = anatdir + sub_id + '_' + sess + '_hemi-L_smoothwm.surf.gii'\n rwhite = anatdir + sub_id + '_' + sess + '_hemi-R_smoothwm.surf.gii'\n\n linf = anatdir + sub_id + '_' + sess + '_hemi-L_inflated.surf.gii'\n rinf = anatdir + sub_id + '_' + sess + '_hemi-R_inflated.surf.gii'\n\n newanatfiles =[t1wim,t1seg,t1ribbon,t1brainm,regfile1,regfile2,lMid,rMid,lpial,rpial,\n lwhite,rwhite,linf,rinf]\n \n for i,j in zip(dcanimages,newanatfiles):\n symlinkfiles(i,j)\n \n\n # get masks and transforms \n\n wmmask =glob.glob(anat_dirx + '/wm_2mm_*_mask_eroded.nii.gz')[0]\n csfmask =glob.glob(anat_dirx + '/vent_2mm_*_mask_eroded.nii.gz')[0]\n tw1tonative = anat_dirx +'xfms/T1w_to_MNI_0GenericAffine.mat'\n\n # get task and idx run 01 \n func_dirx = dcan_dir +'/' + sub_id + '/ses-' +ses_id[0] 
+ '/files/MNINonLinear/Results/'\n taskd = glob.glob(func_dirx + 'task-*')\n taskid=[]\n for k in taskd:\n if not os.path.isfile(k):\n taskid.append(os.path.basename(k).split('-')[1])\n\n\n \n\n func_dir = out_dir +'/' + sub_id + '/ses-'+ses+ '/func/' \n os.makedirs(func_dir,exist_ok=True)\n ses_id = 'ses-'+ses\n for ttt in taskid:\n taskdir ='task-'+ttt\n \n taskname = re.split(r'(\\d+)', ttt)[0]\n run_id = '_run-'+ str(re.split(r'(\\d+)', ttt)[1])\n func_dirxx = func_dirx + taskdir \n \n\n\n sbref = func_dirxx + '/'+ taskdir +'_SBRef.nii.gz'\n volume = func_dirxx + '/'+ taskdir + '.nii.gz'\n \n brainmask = func_dirxx + '/brainmask_fs.2.0.nii.gz'\n dtsereis = func_dirxx +'/'+ taskdir + '_Atlas.dtseries.nii'\n motionp = func_dirxx + '/Movement_Regressors.txt'\n rmsdx = func_dirxx + '/Movement_AbsoluteRMS.txt'\n \n \n mvreg = pd.read_csv(motionp,header=None,delimiter=r\"\\s+\")\n mvreg = mvreg.iloc[:,0:6]\n mvreg.columns=['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z']\n # convert rot to rad\n mvreg['rot_x']=mvreg['rot_x']*np.pi/180\n mvreg['rot_y']=mvreg['rot_y']*np.pi/180\n mvreg['rot_z']=mvreg['rot_z']*np.pi/180\n\n\n csfreg = extractreg(mask=csfmask,nifti=volume)\n wmreg = extractreg(mask=wmmask,nifti=volume)\n gsreg = extractreg(mask=brainmask,nifti=volume)\n rsmd = np.loadtxt(rmsdx)\n \n brainreg = pd.DataFrame({'global_signal':gsreg,'white_matter':wmreg,'csf':csfreg,'rmsd':rsmd })\n regressors = pd.concat([mvreg, brainreg], axis=1)\n\n dcanfunfiles=[sbref,dtsereis,tw1tonative,tw1tonative]\n\n\n tr = nb.load(volume).header.get_zooms()[-1] # repetition time\n jsontis={\n \"RepetitionTime\": np.float(tr),\n \"TaskName\": taskname}\n\n json2={\n \"grayordinates\": \"91k\", \"space\": \"HCP grayordinates\",\n \"surface\": \"fsLR\",\"surface_density\": \"32k\",\n \"volume\": \"MNI152NLin6Asym\"}\n \n #boldname = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-MNI152NLin6Asym_desc-preproc_bold.nii.gz'\n boldjson = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-MNI152NLin6Asym_desc-preproc_bold.json'\n confreg = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_desc-confounds_timeseries.tsv'\n confregj = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_desc-confounds_timeseries.json'\n boldref = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+'_space-MNI152NLin6Asym_boldref.nii.gz'\n #brainmaskf = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id +'_space-MNI152NLin6Asym_desc-brain_mask.nii.gz'\n dttseriesx = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-fsLR_den-91k_bold.dtseries.nii'\n dttseriesj = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_space-fsLR_den-91k_bold.dtseries.json'\n native2t1w = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_from-scanner_to-T1w_mode-image_xfm.txt'\n t12native = func_dir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_from-T1w_to-scanner_mode-image_xfm.txt'\n\n\n # maske coreg files here \n \n\n fmfuncfiles = [boldref,dttseriesx,native2t1w,t12native]\n\n # symlink files\n for jj,kk in zip(dcanfunfiles,fmfuncfiles):\n symlinkfiles(jj,kk)\n \n figdir = out_dir +'/' + sub_id+ '/figures/'\n os.makedirs(figdir,exist_ok=True)\n bbreg = figdir + sub_id+'_'+ ses_id + '_task-'+taskname + run_id+ '_desc-bbregister_bold.svg'\n bbreg = bbregplot(fixed_image=tw1,moving_image=boldref,out_file=bbreg,contour=ribbon)\n \n # write json\n writejson(jsontis,boldjson)\n writejson(json2,dttseriesj)\n writejson(json2,confregj)\n\n 
#save confounds\n regressors.to_csv(confreg,sep='\\t',index=False)\n dcanjosn = {\n \"Name\": \"ABCDDCAN\",\n \"BIDSVersion\": \"1.4.0\",\n \"DatasetType\": \"derivative\",\n \"GeneratedBy\": [\n {\n \"Name\": \"DCAN\",\n \"Version\": \"0.0.4\",\n \"CodeURL\": \"https://github.com/DCAN-Labs/abcd-hcp-pipeline\"\n }],}\n writejson(dcanjosn,out_dir+'/dataset_description.json')\n \n return dcanjosn\n\n\n#def symlinkfiles(src, dest):\n #if os.path.islink(dest): \n #os.remove(dest)\n #os.symlink(src,dest)\n #else:\n #os.symlink(src,dest)\n \n #return dest \n\n\ndef copyfileobj_example(source, dest, buffer_size=1024*1024*1024):\n \"\"\" \n Copy a file from source to dest. source and dest\n must be file-like objects, i.e. any object with a read or\n write method, like for example StringIO.\n \"\"\"\n while True:\n copy_buffer = source.read(buffer_size)\n if not copy_buffer:\n break\n dest.write(copy_buffer)\n\ndef symlinkfiles(source, dest):\n # Beware, this example does not handle any edge cases!\n with open(source, 'rb') as src, open(dest, 'wb') as dst:\n copyfileobj_example(src, dst)\n\ndef extractreg(mask,nifti):\n masker=NiftiMasker(mask_img=mask)\n signals = masker.fit_transform(nifti)\n return np.mean(signals,axis=1)\n\ndef writejson(data,outfile):\n with open(outfile,'w') as f:\n json.dump(data,f)\n return outfile\n\n\ndef bbregplot(fixed_image,moving_image, contour, out_file='report.svg'): \n from nilearn.image import threshold_img, load_img,resample_img\n from niworkflows.viz.utils import plot_registration\n from niworkflows.viz.utils import cuts_from_bbox, compose_view\n import numpy as np\n\n fixed_image_nii = load_img(fixed_image)\n moving_image_nii = load_img(moving_image)\n moving_image_nii = resample_img(moving_image_nii, target_affine=np.eye(3), interpolation='nearest')\n contour_nii = load_img(contour) if contour is not None else None\n\n mask_nii = threshold_img(fixed_image_nii, 1e-3)\n\n n_cuts = 7\n if contour_nii:\n cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)\n else:\n cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)\n\n compose_view(\n plot_registration(\n fixed_image_nii,\n \"fixed-image\",\n estimate_brightness=True,\n cuts=cuts,\n label='fixed',\n contour=contour_nii,\n compress='auto'\n ),\n plot_registration(\n moving_image_nii,\n \"moving-image\",\n estimate_brightness=True,\n cuts=cuts,\n label='moving',\n contour=contour_nii,\n compress='auto',\n ),\n out_file=out_file,\n )\n return out_file" ]
[ [ "numpy.float", "pandas.DataFrame", "numpy.mean", "numpy.eye", "numpy.loadtxt", "pandas.concat", "pandas.read_csv" ] ]
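The confound-assembly step in the preceding record (six motion parameters converted to radians, plus mean CSF/white-matter/global-signal traces and RMSD, written to a BIDS-style *_desc-confounds_timeseries.tsv) can be exercised on its own. The sketch below is a minimal, self-contained approximation: the column names follow the record above, but all input arrays and the output file name are fabricated placeholders, not values from the original pipeline.

import numpy as np
import pandas as pd

# Fabricated stand-ins for Movement_Regressors.txt and the masked BOLD means.
n_vols = 5
mvreg = pd.DataFrame(np.random.randn(n_vols, 6),
                     columns=['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'])
# Rotations arrive in degrees and are converted to radians, as in the record above.
for col in ['rot_x', 'rot_y', 'rot_z']:
    mvreg[col] = mvreg[col] * np.pi / 180

brainreg = pd.DataFrame({'global_signal': np.random.rand(n_vols),
                         'white_matter': np.random.rand(n_vols),
                         'csf': np.random.rand(n_vols),
                         'rmsd': np.random.rand(n_vols)})

# Concatenate motion and signal regressors, then write a tab-separated confounds table.
regressors = pd.concat([mvreg, brainreg], axis=1)
regressors.to_csv('confounds_timeseries.tsv', sep='\t', index=False)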
davide-belli/deep-learning-labs
[ "1acd37a527711dccdc00c1935724cc5de7c10955" ]
[ "assignment_2/part3/model.py" ]
[ "# MIT License\n#\n# Copyright (c) 2017 Tom Runia\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to conditions.\n#\n# Author: Deep Learning Course | Fall 2018\n# Date Created: 2018-09-04\n################################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.nn as nn\nimport torch\n\n\nclass TextGenerationModel(nn.Module):\n\n def __init__(self, batch_size, seq_length, vocabulary_size,\n lstm_num_hidden=256, lstm_num_layers=2, device='cuda:0', input_size=1):\n\n super(TextGenerationModel, self).__init__()\n \n self.emb_size = 64\n \n self.device = device\n # self.emb = nn.Embedding(batch_size * seq_length, 64)\n # self.lstm = nn.LSTM(64, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0)\n self.lstm = nn.LSTM(input_size, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0)\n self.linear = nn.Linear(lstm_num_hidden, vocabulary_size)\n self.h = None\n\n def forward(self, x):\n \n # Reset hidden layer for Training\n if self.training:\n self.h = None\n \n # x = self.emb(x.squeeze(-1).type(torch.LongTensor).to(self.device))\n \n out, h = self.lstm(x.transpose(0, 1), self.h)\n out = self.linear(out)\n\n # Handle hidden layer for Inference\n if not self.training:\n self.h = h\n \n return out\n \n def reset_hidden(self):\n self.h = None\n" ]
[ [ "torch.nn.Linear", "torch.nn.LSTM" ] ]
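The TextGenerationModel in the record above keeps its LSTM hidden state in self.h during inference so characters can be fed one step at a time. The following is only a sketch of that stateful greedy-sampling pattern using a plain nn.LSTM with random weights; the vocabulary size and the raw-index input encoding are assumptions for illustration, not values taken from the repository.

import torch
import torch.nn as nn

vocab_size, hidden, layers = 87, 256, 2     # assumed values
lstm = nn.LSTM(1, hidden, num_layers=layers)
head = nn.Linear(hidden, vocab_size)

h = None                                    # hidden state carried across steps, like self.h at inference
x = torch.zeros(1, 1, 1)                    # (seq_len=1, batch=1, input_size=1)
generated = []
for _ in range(20):
    out, h = lstm(x, h)                     # reuse the previous hidden state
    logits = head(out[-1])                  # logits over the vocabulary
    next_idx = torch.argmax(logits, dim=-1) # greedy choice
    generated.append(int(next_idx))
    x = next_idx.float().view(1, 1, 1)      # feed the prediction back in as the next input
print(generated)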
MagaliDrumare/How-to-learn-Keras-Deep-Learning-with-Python-book-
[ "bbea1f3422d61baa8463c777d9a98c6eca16ffa1", "bbea1f3422d61baa8463c777d9a98c6eca16ffa1" ]
[ "08_PreTrainedConvNet.py", "10_RNN.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 23 10:11:14 2018\n\n@author: magalidrumare\n@ copyright https://github.com/fchollet/deep-learning-with-python-notebooks\n\"\"\"\n\n# Use of a pre-trained convnet : VGG16\n\n# An effective approach to deep learning on small image datasets is to leverage a pre-trained network,\n# i.e. a saved network trained on a large dataset for a classification task.\n# -> ImageNet (1.4 million labeled images and 1000 different classes), VGG, ResNet, Inception, Xception, etc.\n\n# Part 1 - Take the convolutional base of a previously trained network and run the data through it\n# Part 2 - Train a new classifier on top of the output\n\n# Why not reuse the classifier on the top?\n# -> The representation learned by the classifier is specific to the set of classes the model was trained on.\n# -> The densely connected layers no longer contain any information about where the objects are located.\n\n# Representations extracted by specific convolution layers depend on the depth of the layer in the model:\n# layers that come earlier in the model extract generic feature maps (edges, color, textures),\n# layers higher up extract abstract concepts (cat ear, dog eye).\n\n\n\n\n# Part 1 - Take the convolutional base of a previously trained network\n\nimport keras\n\n# Instantiate the VGG16 model\n# include_top=False: do not include the classifier on top of the network.\nfrom keras.applications import VGG16\nconv_base = VGG16(weights='imagenet',\n                  include_top=False,\n                  input_shape=(150, 150, 3))\n\nconv_base.summary()\n# -> the final feature map has shape (4, 4, 512)\n# -> these are the features on top of which we stick a densely connected classifier.\n\n\n\n# Part 1 ...... and run the data through it\n# Extract features from these images by calling the predict method of the conv_base model\n\n# import the dataset\nimport os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# adjust this path to your local copy of the dataset\nbase_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'\n\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\ntest_dir = os.path.join(base_dir, 'test')\n\ndatagen = ImageDataGenerator(rescale=1./255)\nbatch_size = 20\n\n# Create the feature-extraction function\ndef extract_features(directory, sample_count):\n    # 4, 4, 512 -> the final feature map of conv_base has shape (4, 4, 512)\n    features = np.zeros(shape=(sample_count, 4, 4, 512))\n    labels = np.zeros(shape=(sample_count))\n    # pre-processing of the images with datagen.flow_from_directory\n    generator = datagen.flow_from_directory(\n        directory,\n        target_size=(150, 150),\n        batch_size=batch_size,\n        class_mode='binary')\n    i = 0\n    for inputs_batch, labels_batch in generator:\n        # extract the features from the conv_base with conv_base.predict\n        features_batch = conv_base.predict(inputs_batch)\n        features[i * batch_size : (i + 1) * batch_size] = features_batch\n        labels[i * batch_size : (i + 1) * batch_size] = labels_batch\n        i += 1\n        if i * batch_size >= sample_count:\n            # Note that since generators yield data indefinitely in a loop,\n            # we must `break` after every image has been seen once.\n            break\n    return features, labels\n\n\n# Apply the extract_features function to the training, validation and test image datasets.
\ntrain_features, train_labels = extract_features(train_dir, 2000)\nvalidation_features, validation_labels = extract_features(validation_dir, 1000)\ntest_features, test_labels = extract_features(test_dir, 1000)\n# shape of the extracted features: (samples, 4, 4, 512)\n# -> must be flattened to (samples, 8192)\ntrain_features = np.reshape(train_features, (2000, 4 * 4 * 512))\nvalidation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))\ntest_features = np.reshape(test_features, (1000, 4 * 4 * 512))\n\n\n# Part 2 - Train a new classifier on top of the output\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=2e-5),\n              loss='binary_crossentropy',\n              metrics=['acc'])\n\nhistory = model.fit(train_features, train_labels,\n                    epochs=30,\n                    batch_size=20,\n                    validation_data=(validation_features, validation_labels))\n\n\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 23 16:57:52 2018\n\n@author: magalidrumare\n@ Deep Learning for Text and Sequences, after François Chollet.\n\"\"\"\n\n# Recurrent layers in Keras\n\n# Pseudo-code for an RNN (kept as comments: f and input_sequence are not defined here)\n# state_t = 0\n# for input_t in input_sequence:\n#     output_t = f(input_t, state_t)\n#     state_t = output_t\n\n# More detailed pseudo-code for the RNN\n# state_t = 0\n# for input_t in input_sequence:\n#     output_t = activation(dot(W, input_t) + dot(U, state_t) + b)\n#     state_t = output_t\n\n# Numpy implementation of a simple RNN\nimport numpy as np\ntimesteps = 100\ninput_features = 32\noutput_features = 64\n\ninputs = np.random.random((timesteps, input_features))\nstate_t = np.zeros((output_features,))\n\nW = np.random.random((output_features, input_features))\nU = np.random.random((output_features, output_features))\nb = np.random.random((output_features,))\n\nsuccessive_output = []\nfor input_t in inputs:\n    output_t = np.tanh(np.dot(W, input_t) + np.dot(U, state_t) + b)\n    successive_output.append(output_t[np.newaxis, :])\n    state_t = output_t\nfinal_output_sequence = np.concatenate(successive_output, axis=0)  # shape (timesteps, output_features)\n\n# Keras implementation\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, SimpleRNN\nmodel = Sequential()\nmodel.add(Embedding(10000, 32))  # 10000 tokens, embedding dimension = 32\nmodel.add(SimpleRNN(32))\nmodel.summary()\n\nfrom keras.datasets import imdb\nfrom keras.preprocessing import sequence\n\nmax_features = 10000  # number of words to consider as features\nmaxlen = 500  # cut texts after this number of words (among top max_features most common words)\nbatch_size = 32\n\nprint('Loading data...')\n(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(input_train), 'train sequences')\nprint(len(input_test), 'test sequences')\n\n\n# pad_sequences\n# Transforms a list of num_samples sequences (lists of scalars) into a 2D Numpy array\n# of shape (num_samples, num_timesteps).\n# Sequences that are shorter than num_timesteps are padded with a value.\n# Sequences longer than num_timesteps are truncated so that they fit the desired length.
\n\nprint('Pad sequences (samples x time)')\ninput_train = sequence.pad_sequences(input_train, maxlen=maxlen)\ninput_test = sequence.pad_sequences(input_test, maxlen=maxlen)\nprint('input_train shape:', input_train.shape)\nprint('input_test shape:', input_test.shape)\n\nfrom keras.layers import Dense\n\nmodel = Sequential()\nmodel.add(Embedding(max_features, 32))\nmodel.add(SimpleRNN(32))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\nhistory = model.fit(input_train, y_train,\n epochs=10,\n batch_size=128,\n validation_split=0.2)\n\n\n" ]
[ [ "numpy.reshape", "numpy.zeros" ], [ "numpy.concatenate", "numpy.random.random", "numpy.dot", "numpy.zeros" ] ]
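The feature-extraction workflow in the record above hinges on one piece of shape bookkeeping: conv_base emits (samples, 4, 4, 512) feature maps, which are flattened to (samples, 8192) before the dense classifier. A tiny numpy check of that reshape, with random placeholder data standing in for conv_base.predict output:

import numpy as np

features = np.random.rand(10, 4, 4, 512)        # stand-in for extracted VGG16 features
flat = np.reshape(features, (10, 4 * 4 * 512))  # flatten each (4, 4, 512) map
print(flat.shape)                               # (10, 8192) -> input_dim of the Dense(256) layer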
miroslavradojevic/python-snippets
[ "753e1c15dc077d3bcf5de4fd5d3a675daf0da27c" ]
[ "pointcloud/pcl_conv.py" ]
[ "#!/usr/bin/env python\nimport argparse\nimport open3d as o3d\nimport numpy as np\nimport os\nimport time\nfrom os.path import join, dirname, basename, splitext, exists, isdir, isfile\nfrom os import listdir\nfrom numpy import linalg as LA\nimport math\nimport cv2\nfrom pathlib import Path\n\n\ndef pcd_to_bin(pcd_path, outdir=None):\n pcd = o3d.io.read_point_cloud(pcd_path, format=\"pcd\")\n pcd_arr = np.asarray(pcd.points)\n\n if len(pcd_arr) == 0:\n return None\n\n outpath = join(Path(pcd_path).parent if outdir is None else outdir, splitext(basename(pcd_path))[0] + \".bin\")\n\n # binarize array and save to the same file path with .bin extension\n pcd_arr.tofile(outpath)\n return outpath\n\n\ndef pcd_to_sphproj(pcd_path, nr_scans, width, outdir=None):\n pcd = o3d.io.read_point_cloud(pcd_path, format=\"pcd\")\n pcd_arr = np.asarray(pcd.points)\n\n if len(pcd_arr) == 0:\n return None\n\n # https://towardsdatascience.com/spherical-projection-for-point-clouds-56a2fc258e6c\n\n # print(pcd_arr.shape)\n # print(pcd_arr[:, :3].shape)\n\n R = LA.norm(pcd_arr[:, :3], axis=1)\n print(\"R {} | {} -- {}\".format(R.shape, np.amin(R), np.amax(R)))\n\n yaw = np.arctan2(pcd_arr[:, 1], pcd_arr[:, 0])\n # print(\"yaw {} | {} -- {}\".format(yaw.shape, np.amin(yaw), np.amax(yaw)))\n # print(\"y {} | {} -- {}\".format(pcd_arr[:, 1].shape, np.amin(pcd_arr[:, 1]), np.amax(pcd_arr[:, 1])))\n\n pitch = np.arcsin(np.divide(pcd_arr[:, 2], R))\n # print(\"pitch {} | {} -- {}\".format(pitch.shape, np.amin(pitch), np.amax(pitch)))\n\n # import matplotlib.pyplot as plt\n # plt.plot(yaw, pitch, 'b.')\n # plt.xlabel('yaw [rad]')\n # plt.ylabel('pitch [rad]')\n # plt.axis('equal')\n # plt.show()\n\n FOV_Down = np.amin(pitch)\n FOV_Up = np.amax(pitch)\n FOV = FOV_Up + abs(FOV_Down)\n\n u = np.around((nr_scans-1) * (1.0-(pitch-FOV_Down)/FOV)).astype(np.int16)\n # print(\"u {} | {} -- {} | {}\".format(u.shape, np.amin(u), np.amax(u), u.dtype))\n\n v = np.around((width-1) * (0.5 * ((yaw/math.pi) + 1))).astype(np.int16)\n # print(\"v {} | {} -- {} | {}\".format(v.shape, np.amin(v), np.amax(v), v.dtype))\n\n sph_proj = np.zeros((nr_scans, width))\n\n R[R > 100.0] = 100.0 # cut off all values above 100m\n R = np.round((R / 100.0) * 255.0) # convert 0.0-100.0m into 0.0-255.0 for saving as byte8 image\n\n sph_proj[u, v] = R\n # print(\"sph_proj {} | {} -- {} | {}\".format(sph_proj.shape, np.amin(sph_proj), np.amax(sph_proj), sph_proj.dtype))\n\n outpath = join(Path(pcd_path).parent if outdir is None else outdir, splitext(basename(pcd_path))[0] + \".jpg\")\n cv2.imwrite(outpath, sph_proj)\n print(outpath)\n return np.amin(R), np.amax(R)\n\n\ndef bin_to_pcd(bin_path, outdir=None):\n print(bin_path)\n pcd_arr = np.fromfile(bin_path, dtype=np.float32)\n pcd_arr = pcd_arr.reshape((-1, 4)) # kitti has 4 values per point\n # print(type(pcd_arr), pcd_arr.shape, len(pcd_arr))\n # print(pcd_arr[:, :3].shape)\n if len(pcd_arr) == 0:\n return None\n\n outpath = join(Path(bin_path).parent if outdir is None else outdir, splitext(basename(bin_path))[0] + \".pcd\")\n print(outpath)\n # save array as .pcd\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(pcd_arr[:, :3]) # 3 dimensions\n o3d.io.write_point_cloud(outpath, pcd)\n return outpath\n\n\ndef bin_to_sphproj(bin_path, outdir=None):\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Convert between .pcd and .bin point cloud formats')\n parser.add_argument(\"-t\", type=str, required=True,\n help=\"Conversion to run (pcd2bin, 
pcd2sphproj, bin2pcd, bin2sphproj)\")\n parser.add_argument(\"-p\", type=str, required=True, help=\"Path to directory or file with point cloud\")\n parser.add_argument(\"-nr_scans\", type=int, help=\"Number of lidar scans (default 16)\", default=16)\n parser.add_argument(\"-width\", type=int, help=\"Spherical projection width (default 1024)\", default=1024)\n\n args = parser.parse_args()\n\n if not exists(args.p):\n exit(\"{} does not exist\".format(args.p))\n\n if isfile(args.p):\n # check extension\n ext = splitext(args.p)[-1].lower()\n\n if args.t == \"pcd2bin\" and ext == \".pcd\":\n pcd_to_bin(args.p)\n elif args.t == \"bin2pcd\" and ext == \".bin\":\n bin_to_pcd(args.p)\n elif args.t == \"pcd2sphproj\" and ext == \".pcd\":\n pcd_to_sphproj(args.p, args.nr_scans, args.width)\n elif args.t == \"bin2sphproj\" and ext == \".bin\":\n bin_to_sphproj(args.p)\n else:\n print(\"Wrong conversion or extension incompatible with conversion\")\n\n elif isdir(args.p):\n # go through all files and convert .pcd or .bin files encountered within the directory\n\n timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n outdir = join(Path(args.p).parent, str(args.t) + \"_\" + timestamp)\n\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n range_min = float('inf')\n range_max = float('-inf')\n\n for f in listdir(args.p):\n # check extension\n ext = splitext(f)[-1].lower()\n\n if args.t == \"pcd2bin\" and ext == \".pcd\":\n pcd_to_bin(join(args.p, f), outdir)\n elif args.t == \"bin2pcd\" and ext == \".bin\":\n bin_to_pcd(join(args.p, f), outdir)\n elif args.t == \"pcd2sphproj\" and ext == \".pcd\":\n range_min1, range_max1 = pcd_to_sphproj(join(args.p, f), args.nr_scans, args.width, outdir)\n if range_min1 < range_min:\n range_min = range_min1\n if range_max1 > range_max:\n range_max = range_max1\n elif args.t == \"bin2sphproj\" and ext == \".bin\":\n bin_to_sphproj(join(args.p, f), outdir)\n else:\n print(\"Wrong conversion or extension incompatible with conversion\")\n\n print(\"range: {} - {}\".format(range_min, range_max))" ]
[ [ "numpy.divide", "numpy.linalg.norm", "numpy.asarray", "numpy.zeros", "numpy.round", "numpy.amax", "numpy.arctan2", "numpy.fromfile", "numpy.amin", "numpy.around" ] ]
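pcd_to_sphproj in the record above maps each lidar point to an image pixel through its yaw and pitch angles. The projection itself needs nothing beyond numpy, so it can be checked on a handful of random points; the 16-scan, 1024-column geometry below matches the script's argparse defaults, while the point cloud is fabricated.

import math
import numpy as np

pts = np.random.uniform(-10, 10, (100, 3))           # fabricated lidar points
R = np.linalg.norm(pts, axis=1)                      # range of each point
yaw = np.arctan2(pts[:, 1], pts[:, 0])
pitch = np.arcsin(pts[:, 2] / R)

nr_scans, width = 16, 1024
fov_down, fov_up = pitch.min(), pitch.max()
fov = fov_up - fov_down
u = np.around((nr_scans - 1) * (1.0 - (pitch - fov_down) / fov)).astype(np.int16)
v = np.around((width - 1) * (0.5 * (yaw / math.pi + 1))).astype(np.int16)

img = np.zeros((nr_scans, width))
img[u, v] = R                                        # range image: one depth value per hit pixel
print(img.shape, img.max())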
jjavier-bm/conkit
[ "9888e40caa1720a9905c551b963ffd4f99f9c913" ]
[ "setup.py" ]
[ "\"\"\"Python Interface for Residue-Residue Contact Predictions\"\"\"\nimport os\nimport sys\n\nfrom distutils.command.build import build\nfrom distutils.util import convert_path\nfrom setuptools import setup, Extension\n\nfrom Cython.Distutils import build_ext\nimport numpy as np\n\n\n# ==============================================================\n# Setup.py command extensions\n# ==============================================================\n\n\n# Credits to http://stackoverflow.com/a/33181352\nclass BuildCommand(build):\n user_options = build.user_options + [\n (\"script-python-path=\", None, \"Path to Python interpreter to be included in the scripts\")\n ]\n\n def initialize_options(self):\n build.initialize_options(self)\n self.script_python_path = None\n\n def finalize_options(self):\n build.finalize_options(self)\n\n def run(self):\n global script_python_path\n script_python_path = self.script_python_path\n build.run(self)\n\n\n# ==============================================================\n# Functions, functions, functions ...\n# ==============================================================\n\n\ndef dependencies():\n with open(\"requirements.txt\", \"r\") as f_in:\n deps = f_in.read().splitlines()\n return deps\n\n\ndef extensions():\n exts = [\"conkit/core/ext/c_contactmap.pyx\", \"conkit/core/ext/c_sequencefile.pyx\", \"conkit/misc/ext/c_bandwidth.pyx\"]\n extensions = []\n for ext in exts:\n extensions.append(\n Extension(\n ext.replace('/', '.').rsplit('.', 1)[0],\n [ext],\n include_dirs=[np.get_include()],\n ))\n return extensions\n\n\ndef readme():\n with open(\"README.rst\", \"r\") as f_in:\n return f_in.read()\n\n\ndef scripts():\n extension = \".bat\" if sys.platform.startswith(\"win\") else \"\"\n header = \"\" if sys.platform.startswith(\"win\") else \"#!/bin/sh\"\n bin_dir = \"bin\"\n command_dir = convert_path(\"conkit/command_line\")\n scripts = []\n for file in os.listdir(command_dir):\n if not file.startswith(\"_\") and file.endswith(\".py\"):\n # Make sure we have a workable name\n f_name = os.path.basename(file).rsplit(\".\", 1)[0]\n for c in [\".\", \"_\"]:\n new_f_name = f_name.replace(c, \"-\")\n # Write the content of the script\n script = os.path.join(bin_dir, new_f_name + extension)\n with open(script, \"w\") as f_out:\n f_out.write(header + os.linesep)\n # BATCH file\n if sys.platform.startswith(\"win\"):\n string = \"@{0} -m conkit.command_line.{1} %*\"\n # BASH file\n else:\n string = '{0} -m conkit.command_line.{1} \"$@\"'\n f_out.write(string.format(PYTHON_EXE, f_name) + os.linesep)\n os.chmod(script, 0o777)\n scripts.append(script)\n return scripts\n\n\ndef version():\n # Credits to http://stackoverflow.com/a/24517154\n main_ns = {}\n ver_path = convert_path(\"conkit/version.py\")\n with open(ver_path) as f_in:\n exec(f_in.read(), main_ns)\n return main_ns[\"__version__\"]\n\n\n# ==============================================================\n# Determine the Python executable\n# ==============================================================\nPYTHON_EXE = None\nfor arg in sys.argv:\n if arg[0:20] == \"--script-python-path\" and len(arg) == 20:\n option, value = arg, sys.argv[sys.argv.index(arg) + 1]\n PYTHON_EXE = value\n elif arg[0:20] == \"--script-python-path\" and arg[20] == \"=\":\n option, value = arg[:20], arg[21:]\n PYTHON_EXE = value\n\nif not PYTHON_EXE:\n PYTHON_EXE = sys.executable\n\n# ==============================================================\n# Define all the relevant options\n# 
==============================================================\nAUTHOR = \"Felix Simkovic\"\nAUTHOR_EMAIL = \"felixsimkovic@me.com\"\nDESCRIPTION = __doc__.replace(\"\\n\", \"\")\nDEPENDENCIES = dependencies()\nEXT_MODULES = extensions()\nLICENSE = \"BSD License\"\nLONG_DESCRIPTION = readme()\nPACKAGE_DIR = \"conkit\"\nPACKAGE_NAME = \"conkit\"\nPLATFORMS = [\"POSIX\", \"Mac OS\", \"Windows\", \"Unix\"]\nSCRIPTS = scripts()\nURL = \"http://www.conkit.org/en/latest/\"\nVERSION = version()\n\nPACKAGES = [\n \"conkit\",\n \"conkit/applications\",\n \"conkit/command_line\",\n \"conkit/core\",\n \"conkit/core/ext\",\n \"conkit/io\",\n \"conkit/misc\",\n \"conkit/misc/ext\",\n \"conkit/plot\",\n]\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\n\nTEST_REQUIREMENTS = [\n \"codecov\",\n \"coverage\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-pep8\",\n \"pytest-helpers-namespace\",\n]\n\nsetup(\n cmdclass={\n 'build': BuildCommand,\n 'build_ext': build_ext,\n },\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n name=PACKAGE_NAME,\n description=DESCRIPTION,\n ext_modules=EXT_MODULES,\n include_dirs=[np.get_include()],\n long_description=LONG_DESCRIPTION,\n license=LICENSE,\n version=VERSION,\n url=URL,\n packages=PACKAGES,\n package_dir={PACKAGE_NAME: PACKAGE_DIR},\n scripts=SCRIPTS,\n platforms=PLATFORMS,\n classifiers=CLASSIFIERS,\n install_requires=DEPENDENCIES,\n tests_require=TEST_REQUIREMENTS,\n setup_requires=['pytest-runner'],\n include_package_data=True,\n zip_safe=False,\n)\n" ]
[ [ "numpy.get_include" ] ]
vuhe/LearnPython
[ "0a081a85456557ae542925cce950b23313c3c9b9" ]
[ "artificial_intelligence/experiment_7.py" ]
[ "# 导入需要的包\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn.datasets\nimport sklearn.linear_model\nimport matplotlib\n\n# Display plots inline and change default figure size\nmatplotlib.rcParams['figure.figsize'] = (10.0, 8.0) # 生成数据集并绘制出来\nnp.random.seed(0)\nX, y = sklearn.datasets.make_moons(200, noise=0.20)\nplt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)\n\n# 训练逻辑回归训练器\nclf = sklearn.linear_model.LogisticRegressionCV()\nclf.fit(X, y)\n\n\n# Helper function to plot a decision boundary.\n# If you don't fully understand this function don't worry, it just generates the contour plot below.\ndef plot_decision_boundary(pred_func):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n\n\n# Plot the decision boundary\nplot_decision_boundary(lambda x: clf.predict(x))\nplt.title(\"Logistic Regression\")\nplt.show()\n\nnum_examples = len(X) # training set size\nnn_input_dim = 2 # input layer dimensionality\nnn_output_dim = 2 # output layer dimensionality\n\n# Gradient descent parameters (I picked these by hand)\nepsilon = 0.01 # learning rate for gradient descent\nreg_lambda = 0.01 # regularization strength\n\n\n# Helper function to evaluate the total loss on the dataset\ndef calculate_loss(model):\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n # Forward propagation to calculate our predictions\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n # Calculating the loss\n corect_logprobs = -np.log(probs[range(num_examples), y])\n data_loss = np.sum(corect_logprobs)\n # Add regulatization term to loss (optional)\n data_loss += reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))\n return 1. / num_examples * data_loss\n\n\n# This function learns parameters for the neural network and returns the model.\n# - nn_hdim: Number of nodes in the hidden layer\n# - num_passes: Number of passes through the training data for gradient descent\n# - print_loss: If True, print the loss every 1000 iterations\ndef build_model(nn_hdim, num_passes=20000, print_loss=False):\n # Initialize the parameters to random values. We need to learn these.\n np.random.seed(0)\n W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)\n b1 = np.zeros((1, nn_hdim))\n W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)\n b2 = np.zeros((1, nn_output_dim))\n\n # This is what we return at the end\n model = {}\n\n # Gradient descent. 
For each batch...\n for i in range(0, num_passes):\n\n # Forward propagation\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n\n # Backpropagation\n delta3 = probs\n delta3[range(num_examples), y] -= 1\n dW2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3, axis=0, keepdims=True)\n delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))\n dW1 = np.dot(X.T, delta2)\n db1 = np.sum(delta2, axis=0)\n\n # Add regularization terms (b1 and b2 don't have regularization terms)\n dW2 += reg_lambda * W2\n dW1 += reg_lambda * W1\n\n # Gradient descent parameter update\n W1 += -epsilon * dW1\n b1 += -epsilon * db1\n W2 += -epsilon * dW2\n b2 += -epsilon * db2\n\n # Assign new parameters to the model\n model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}\n\n # Optionally print the loss.\n # This is expensive because it uses the whole dataset, so we don't want to do it too often.\n if print_loss and i % 1000 == 0:\n print(\"Loss after iteration %i: %f\" % (i, calculate_loss(model)))\n\n return model\n\n\n# Helper function to predict an output (0 or 1)\ndef predict(model, x):\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n # Forward propagation\n z1 = x.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n return np.argmax(probs, axis=1)\n\n\n# Build a model with a 3-dimensional hidden layer\nmodel = build_model(3, print_loss=True)\n\n# Plot the decision boundary\nplot_decision_boundary(lambda x: predict(model, x))\nplt.title(\"Decision Boundary for hidden layer size 3\")\nplt.show()\n" ]
[ [ "numpy.square", "numpy.dot", "matplotlib.pyplot.contourf", "numpy.zeros", "numpy.random.seed", "numpy.sum", "matplotlib.pyplot.title", "numpy.random.randn", "numpy.exp", "numpy.tanh", "matplotlib.pyplot.show", "numpy.argmax", "numpy.arange", "numpy.sqrt", "numpy.power", "matplotlib.pyplot.scatter" ] ]
sjs1178/jandichatbot
[ "bdb20e6f671a2a133c8ad7ea73caefb3c8818fd8" ]
[ "createpickles.py" ]
[ "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Default module to train a xor classifier and write weights to disk.\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nimport keras.optimizers as kop\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import StandardScaler\ntry:\n import cPickle as pickle\nexcept Exception as ex:\n import pickle\n\n\ndef check_dir_exists(dirname='./pickles'):\n \"\"\"Check if given dirname exists This will contain all the pickle files.\"\"\"\n if not os.path.exists(dirname):\n print(\"Directory to store pickes does not exist. Creating one now: ./pickles\")\n os.mkdir(dirname)\n\n\ndef save_x_y_scalar(X_train, Y_train):\n \"\"\"Use a normalization method on your current dataset and save the coefficients.\n\n Args:\n X_train: Input X_train\n Y_train: Lables Y_train\n Returns:\n Normalized X_train,Y_train ( currently using StandardScaler from scikit-learn)\n \"\"\"\n scalar_x = StandardScaler()\n X_train = scalar_x.fit_transform(X_train)\n\n scalar_y = StandardScaler()\n Y_train = scalar_y.fit_transform(Y_train)\n\n print('dumping StandardScaler objects ..')\n pickle.dump(scalar_y,\n open('pickles/scalar_y.pickle', \"wb\"),\n protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(scalar_x,\n open('pickles/scalar_x.pickle', \"wb\"),\n protocol=pickle.HIGHEST_PROTOCOL)\n return X_train, Y_train\n\n\ndef create_model(X_train, Y_train):\n \"\"\"create_model will create a very simple neural net model and save the weights in a predefined directory.\n\n Args:\n X_train: Input X_train\n Y_train: Lables Y_train\n \"\"\"\n xin = X_train.shape[1]\n\n model = Sequential()\n model.add(Dense(units=4, input_shape=(xin, )))\n model.add(Activation('tanh'))\n model.add(Dense(4))\n model.add(Activation('linear'))\n model.add(Dense(1))\n\n rms = kop.RMSprop()\n\n print('compiling now..')\n model.compile(loss='mse', optimizer=rms)\n\n model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)\n score = model.evaluate(X_train, Y_train, batch_size=1)\n print(\"Evaluation results:\", score)\n open('pickles/my_model_architecture.json', 'w').write(model.to_json())\n\n print(\"Saving weights in: ./pickles/my_model_weights.h5\")\n model.save_weights('pickles/my_model_weights.h5')\n\n\nif __name__ == '__main__':\n X_train = np.array([[1., 1.], [1., 0], [0, 1.], [0, 0]])\n Y_train = np.array([[0.], [1.], [1.], [0.]])\n\n check_dir_exists(dirname='./pickles')\n X_train, Y_train = save_x_y_scalar(X_train, Y_train)\n create_model(X_train, Y_train)\n" ]
[ [ "numpy.array", "sklearn.preprocessing.StandardScaler" ] ]
camall3n/focused-macros
[ "113b13cbcf1e2c5ea2817113fa57869cc856b01e" ]
[ "experiments/npuzzle/solve.py" ]
[ "import argparse\nimport copy\nimport os\nimport pickle\nimport random\nimport sys\nfrom types import SimpleNamespace\n\nimport numpy as np\n\nfrom domains.npuzzle import NPuzzle, macros\nfrom experiments import search, iw, bfws\n\ndef parse_args():\n \"\"\"Parse input arguments\n\n Use --help to see a pretty description of the arguments\n \"\"\"\n if 'ipykernel' in sys.argv[0]:\n sys.argv = [sys.argv[0]]\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', type=int, default=15, choices=[8, 15, 24, 35, 48, 63, 80],\n help='Number of tiles')\n parser.add_argument('--random_seed','-s', type=int, default=1,\n help='Seed to use for RNGs')\n parser.add_argument('--macro_type','-m', type=str, default='primitive',\n choices=['primitive','random','learned'],\n help='Type of macro_list to consider during search')\n parser.add_argument('--search_alg', type=str, default='gbfs',\n choices = ['astar', 'gbfs', 'weighted_astar','bfws_r0', 'bfws_rg'],\n help='Search algorithm to run')\n parser.add_argument('--g_weight', type=float, default=None,\n help='Weight for g-score in weighted A*')\n parser.add_argument('--h_weight', type=float, default=None,\n help='Weight for h-score in weighted A*')\n parser.add_argument('--random_goal','-r', action='store_true', default=False,\n help='Generate a random goal instead of the default solve configuration')\n parser.add_argument('--max_transitions', type=lambda x: int(float(x)), default=5e5,\n help='Maximum number of state transitions')\n parser.add_argument('--bfws_precision', type=int, default=3,\n help='The number of width values, w \\in {1,...,P}, to use when the search algorithm is best-first width search')\n return parser.parse_args()\n\n\ndef solve():\n \"\"\"Instantiate an N-Puzzle and solve with the specified macro-actions and search algorithm\"\"\"\n args = parse_args()\n #\n\n # Set up the scramble\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n\n start = NPuzzle(n=args.n).scramble(seed=args.random_seed)\n\n if args.random_goal:\n goal = NPuzzle(n=args.n).scramble(seed=args.random_seed+1000)\n print('Using goal pattern: {:03d}'.format(args.random_seed+1000))\n else:\n goal = NPuzzle(n=args.n)\n\n print('Using seed: {:03d}'.format(args.random_seed))\n print('Start:', start)\n print('Goal:', goal)\n print('Start:', ' '.join(map(str,list(start))))\n print('Goal: ', ' '.join(map(str,list(goal))))\n\n # Define the macros / models\n if args.macro_type == 'random':\n macros.generate_random_macro_set(args.random_seed)\n\n macro_namespace = {\n 'primitive': SimpleNamespace(macros=[], models=[]),\n 'random': macros.random,\n 'learned': macros.learned,\n }[args.macro_type]\n macro_list = macro_namespace.macros\n model_list = macro_namespace.models\n\n # Set up the search problem\n search_fn = {\n 'astar': search.astar,\n 'gbfs': search.gbfs,\n 'weighted_astar': search.weighted_astar,\n 'bfws_r0': bfws.bfws,\n 'bfws_rg': bfws.bfws,\n }[args.search_alg]\n\n def get_successors(puz):\n successors = [(copy.deepcopy(puz).transition(a), [a]) for a in puz.actions()]\n if args.macro_type != 'primitive':\n valid_macros = macro_list[puz.blank_idx]\n valid_models = model_list[puz.blank_idx]\n macro_successors = [(copy.deepcopy(puz).apply_macro(model=model), macro)\n for (macro, model) in zip(valid_macros, valid_models)]\n successors += macro_successors\n return successors\n\n search_dict = {\n 'start': start,\n 'is_goal': lambda node: node.state == goal,\n 'step_cost': lambda macro: 1,\n 'heuristic': lambda puz: 
len(puz.summarize_effects(baseline=goal)[0]),\n 'get_successors': get_successors,\n 'max_transitions': args.max_transitions,\n }\n\n if args.search_alg == 'weighted_astar':\n assert (args.g_weight is not None\n and args.h_weight is not None), 'Must specify weights if using weighted A*.'\n gh_weights = (args.g_weight, args.h_weight)\n search_dict['gh_weights'] = gh_weights\n\n if 'bfws' in args.search_alg:\n search_dict['precision'] = args.bfws_precision\n if args.search_alg == 'bfws_rg':\n goal_fns = [(lambda x, i=i: x.state[i] == goal[i]) for i, _ in enumerate(goal)]\n relevant_atoms = iw.iw(1, start, get_successors, goal_fns)\n if not relevant_atoms:\n relevant_atoms = iw.iw(2, start, get_successors, goal_fns)\n if not relevant_atoms:\n relevant_atoms = start.all_atoms()\n search_dict['R'] = relevant_atoms\n\n #%% Run the search\n search_results = search_fn(**search_dict)\n\n #%% Save the results\n tag = '{}-puzzle/'.format(args.n)\n if args.random_goal:\n tag += 'random_goal/'\n else:\n tag += 'default_goal/'\n tag += args.macro_type\n\n results_dir = 'results/npuzzle/{}/{}/'.format(args.search_alg,tag)\n os.makedirs(results_dir, exist_ok=True)\n with open(results_dir+'seed-{:03d}.pickle'.format(args.random_seed), 'wb') as file:\n pickle.dump(search_results, file)\n\n\nif __name__ == '__main__':\n solve()\n" ]
[ [ "numpy.random.seed" ] ]
Kexin-Wei/spinnup
[ "36e56b1f91538df7dd58ce42f82f809e85b2317d" ]
[ "env_pyrep/utils.py" ]
[ "import os\r\nimport json\r\nimport numpy as np\r\nimport itertools\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d.art3d import Line3DCollection\r\nfrom mpl_toolkits import mplot3d\r\n\r\ndef liver_dump_init(env, name = None):\r\n liver = {'x':[],'Fes':[],'Fis':[],'Ficp':[],'volume':[],'col_p_n':[],'crash':[]} \r\n liver['vtx'] = env.liver.x.copy() \r\n if name is not None:\r\n liver['name'] = name\r\n else:\r\n liver['name'] = f\"_dt{env.timestep}_down_gm{env.liver.gamma}\"\r\n return liver\r\n \r\ndef liver_dump_step(liver,env):\r\n liver['x'].append(env.liver.x)\r\n liver['Fes'].append(env.liver.Fes)\r\n liver['Fis'].append(env.liver.Fis)\r\n liver['Ficp'].append(env.liver.Ficp)\r\n liver['volume'].append(np.round(env.liver.volumes6.sum() / env.liver.init_volume6.sum(),3))\r\n liver['col_p_n'].append(len(env.liver.check_tet_aabb_collision(env.sg.x)))\r\n liver['crash'].append(env.liver.crash_flag)\r\n return liver\r\n\r\ndef liver_dump(liver,ep = None):\r\n liver_save ={}\r\n liver_save['vtx'] = liver['vtx'].tolist()\r\n liver_save['x'] = np.array(liver['x']).tolist()\r\n liver_save['Fes'] = np.array(liver['Fes']).tolist()\r\n liver_save['Fis'] = np.array(liver['Fis']).tolist()\r\n liver_save['Ficp'] = np.array(liver['Ficp']).tolist()\r\n liver_save['volume'] = np.array(liver['volume']).tolist()\r\n liver_save['col_p_n']= np.array(liver['col_p_n']).tolist()\r\n liver_save['crash'] = np.array(liver['crash']).tolist()\r\n if ep is None:\r\n with open(os.path.join('liver_json',f\"liver_record{liver['name']}.json\"),'w') as f:\r\n json.dump(liver_save,f)\r\n else:\r\n with open(os.path.join('liver_json',f\"liver_record_{int(ep)}.json\"),'w') as f:\r\n json.dump(liver_save,f)\r\n\r\ndef liver_dump_load(liver):\r\n vtx = np.array(liver['vtx'])\r\n x = np.array(liver['x'])\r\n Fes = np.array(liver['Fes'])\r\n Fis = np.array(liver['Fis'])\r\n Ficp = np.array(liver['Ficp'])\r\n volume = np.array(liver['volume'])\r\n col_p_n = np.array(liver['col_p_n'])\r\n crash = np.array(liver['crash'])\r\n return vtx, x, Fes, Fis, Ficp, volume, col_p_n, crash\r\n'''\r\ntemp:\r\n 1. collision_response_cotin\r\n 2. 
collision_response_self\r\n'''\r\ndef collision_response_cotin(pair,liver,past_p,current_p):\r\n # check bc_co for all surface tri_element\r\n # add dn to decide\r\n move_v_disp_dict = {}\r\n move_tri_indexs = []\r\n flat_list = [item for sublist in list(pair.values()) for item in sublist]\r\n p_indexs = np.array(flat_list).reshape(-1)\r\n p_n = p_indexs.shape[0]\r\n ray = current_p[p_indexs]-past_p[p_indexs] \r\n ray = ray*(1/np.linalg.norm(ray,axis=-1))[:,None] # p_n x3\r\n \r\n # compute ray and normal vector, d= ray,n=normal_vec\r\n dn = ray@liver.tri_normal_vec.T # p_n x n_tri\r\n ap = liver.x[liver.tri_elements[:,0]][None,:] - past_p[p_indexs][:,None] # p_n x n_tri x 3 #choose first point as a \r\n apn = (ap * liver.tri_normal_vec[None,:]).sum(axis=-1) # p_n x n_tri x 3 -> p_n x n_tri\r\n ts = apn * (1/dn) # p_n x n_tri\r\n int_p = ts[:,:,None]*ray[:,None]+past_p[p_indexs][:,None] # p_n x n_tri x3 <- p_n x n_tri x1 * p_n x1 x3 + p_n x1 x3\r\n\r\n # compute barycentric coordinates of intersection points\r\n v1 = liver.x[liver.tri_elements[:,1]]-liver.x[liver.tri_elements[:,0]] # n_tri x3\r\n v2 = liver.x[liver.tri_elements[:,2]]-liver.x[liver.tri_elements[:,0]]\r\n tri_areax2 = np.linalg.norm(np.cross(v1,v2,axis=-1),axis=-1) # n_tri\r\n \r\n bc_temp = np.zeros((p_n,liver.n_tri,3,3,3))\r\n bc_temp[:] = np.tile(liver.x[liver.tri_elements], 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x n_tri x 3area x 3ps x 3\r\n for itemp in range(p_n):\r\n bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_p[itemp]\r\n v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x n_tri x 3area x 3xyz\r\n v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]\r\n areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x n_tri x 3area\r\n bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,\r\n np.newaxis] # p_n x n_tri x 3area<- p_n x n_tri x 3area * 1 x n_tri x 3area\r\n \r\n for itemp in range(p_n):\r\n # check bc_co\r\n check1 = np.argwhere(abs(bc_co[itemp].sum(axis=-1) - 1) < 1e-3).flatten() # each p should have at least 1\r\n check2 = np.argwhere(dn[itemp] < 0).flatten()\r\n psb_tri_index = np.intersect1d(check1,check2) # all possible tri_elements satisfies the bc_co and the negative normal vector\r\n if psb_tri_index.size!=0:\r\n psb_ts = ts[itemp,psb_tri_index] # n_psb_tri_index\r\n # if np.any(psb_ts<0):\r\n # raise ValueError(\"liver shape error\")\r\n move_tri_index = psb_tri_index[psb_ts.argmin()] # only 1 the tri_elements should move\r\n move_t = current_p[p_indexs[itemp]] - int_p[itemp,move_tri_index]\r\n move_v_index_p = liver.tri_elements[move_tri_index]\r\n for ividx in move_v_index_p: # same points may move multiple times.\r\n if ividx not in move_v_disp_dict.keys(): \r\n move_v_disp_dict[ividx] = move_t # move_t put in for new vindex\r\n else:# compare move_t for old vindex\r\n if np.linalg.norm(np.c_[move_v_disp_dict[ividx],move_t].T,axis=-1).argmax() == 1 : # older move closer than new\r\n move_v_disp_dict[ividx] = move_t\r\n move_tri_indexs.append(move_tri_index.tolist())\r\n print(move_tri_indexs)\r\n return move_v_disp_dict\r\n\r\ndef collision_response_self(pair, liver, tool):\r\n # not so good when the deform is bigger\r\n # change to old fixed to test, problem still, try cotin methods\r\n new_vtx_delta = None\r\n move_tris = {}\r\n nv_aves = {}\r\n new_vtx_deltas = {}\r\n \r\n for key, value in pair.items():\r\n new_vtx_delta = np.zeros(liver.x.shape)\r\n i_tet, p_index = int(key), np.array(value)\r\n p_n = p_index.shape[0]\r\n\r\n # find potential collpaision surface 
tri_element\r\n col_tri_index = np.argwhere(liver.tri_tet[:, 0] == i_tet).flatten()\r\n if col_tri_index.size == 0: raise ValueError(\r\n \"Update time step too big, vertices skip the surface tetrahedron elements\")\r\n col_tri_n = col_tri_index.shape[0]\r\n col_tri_nv = liver.tri_normal_vec[col_tri_index]\r\n col_tri_p = liver.x[liver.tri_elements[col_tri_index].T[0]] # chose the first points\r\n\r\n # compute nv_ave\r\n nv_ave = tool.vtx_normal_vec[p_index].sum(axis=0)\r\n nv_ave = nv_ave / np.linalg.norm(nv_ave)\r\n nv_aves[key] = nv_ave\r\n\r\n # compute ts and intersection points\r\n dn = nv_ave.dot(col_tri_nv.T) # col_tri_n\r\n ap = col_tri_p[np.newaxis, :] - tool.x[p_index, np.newaxis] # p_n x col_tri_n x 3\r\n dotn = np.tile(col_tri_nv, p_n).reshape(-1, p_n, 3).transpose(1, 0, 2)\r\n apn = (ap * dotn).sum(axis=-1) # p_n x col_tri_n\r\n ts = apn * (1 / dn) # p_n x col_tri_n\r\n int_col_p = ts[:, :, np.newaxis] * nv_ave[np.newaxis, np.newaxis, :] \\\r\n + tool.vertices[p_index][:, np.newaxis, :] # p_n x col_tri_n x 1 * 1 x 1 x 3 + p_n x 1 x 3\r\n\r\n # compute barycentric coordinates of intersection points\r\n tri_vertices = liver.x[liver.tri_elements[col_tri_index]] # n_tri x 3 x 3\r\n v1 = tri_vertices[:, 1] - tri_vertices[:, 0]\r\n v2 = tri_vertices[:, 2] - tri_vertices[:, 0]\r\n tri_areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # n_tri\r\n\r\n bc_temp = np.zeros((p_n, col_tri_n, 3, 3, 3))\r\n bc_temp[:] = np.tile(tri_vertices, 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x col_tri_n x 3 x 3 x 3\r\n for itemp in range(p_n):\r\n bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_col_p[itemp]\r\n v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x col_tri_n x 3area x 3xyz\r\n v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]\r\n areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x col_tri_n x 3area\r\n bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,\r\n np.newaxis] # p_n x col_tri_n x 3area * 1 x col_tri_n x 3area = p_n x col_tri_n x 3area\r\n\r\n # Move tri to point with tmax \r\n check1 = np.argwhere(abs(bc_co.sum(axis=-1) - 1) < 1e-3)\r\n check2 = np.argwhere(dn < 0)\r\n inter_tri_index = np.intersect1d(check1[:, 1], check2) # find colliable surface tri_elements index\r\n # no colliable tri_elements\r\n if inter_tri_index.size == 0: \r\n the_best_tri = dn.argmin() # chose one of most collidable tri\r\n move_tri = liver.tri_elements[col_tri_index[the_best_tri]]\r\n tri_nv = liver.tri_normal_vec[col_tri_index[the_best_tri]].flatten()\r\n tri_vtx = liver.x[move_tri].reshape(3, 3)\r\n v = nv_ave - tri_nv # find a new direction, not so sharp as nv_ave\r\n v = v / np.linalg.norm(v)\r\n dn_t = v.dot(tri_nv) # 1\r\n ap_t = tri_vtx[0] - tool.x[p_index]\r\n t_t = ap_t.dot(tri_nv) / dn_t\r\n move_t = t_t.min()\r\n new_vtx_delta[move_tri] += - move_t * v\r\n new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)\r\n move_tris.setdefault(key, []).append(move_tri.flatten())\r\n print(' None ',end='')\r\n else:\r\n # more than 1 colliable tri_elements\r\n if len(inter_tri_index) > 1:\r\n temp_delta = np.zeros((liver.x.shape[0], len(inter_tri_index))) # n_v * n_inter\r\n itemp = 0\r\n for inter_tri_i in inter_tri_index:\r\n part_p_index = check1[ check1[:, 1] == inter_tri_i, 0] # p index of each tri_element that satisfies bc_co condition\r\n move_t = ts[part_p_index, inter_tri_i].min()\r\n move_tri = liver.tri_elements[col_tri_index[inter_tri_i]]\r\n temp_delta[move_tri, itemp] = - move_t # collect all possible move_t for all vertices\r\n 
move_tris.setdefault(key, []).append(move_tri.flatten())\r\n itemp += 1\r\n new_vtx_delta += temp_delta.max(axis=-1)[:, np.newaxis] * nv_ave[np.newaxis,:] # move with the maximal move_t\r\n new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)\r\n print(' Multi ',end='')\r\n else:\r\n # only 1 colliable tri_elements\r\n move_t = ts[:, inter_tri_index].min()\r\n move_tri = liver.tri_elements[col_tri_index[inter_tri_index]]\r\n new_vtx_delta[move_tri] += -move_t * nv_ave\r\n new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)\r\n move_tris.setdefault(key, []).append(move_tri.flatten())\r\n print(' Single ',end='')\r\n return new_vtx_delta, move_tris, nv_aves, new_vtx_deltas\r\n\r\n'''\r\nstatic methods:\r\n 1. lame_param\r\n 2. tri_mid_vec\r\n 3. rotation_matrix\r\n 4. flatten_list\r\n'''\r\ndef lame_param(E, v):\r\n la = E * v / (1 + v) / (1 - 2 * v)\r\n mu = E / 2 / (1 + v)\r\n return la, mu\r\n\r\ndef tri_mid_vec(vertices, tri_elements):\r\n tri_vtx = vertices[tri_elements]\r\n tri_mid = tri_vtx.mean(axis=1)\r\n tri_normal_vec = np.cross(tri_vtx[:, 1] - tri_vtx[:, 0], tri_vtx[:, 2] - tri_vtx[:, 0])\r\n tri_normal_vec = tri_normal_vec * (1.0 / np.linalg.norm(tri_normal_vec, axis=1))[:, np.newaxis]\r\n return tri_mid, tri_normal_vec\r\n\r\ndef rotation_matrix(deg,axis='x'):\r\n rad = np.deg2rad(deg)\r\n s,c = np.sin(rad),np.cos(rad)\r\n if axis=='x':\r\n return np.array([ 1, 0, 0,\r\n 0, c, -s,\r\n 0, s, c]).reshape(-1,3)\r\n elif axis=='y':\r\n return np.array([ c, 0, s,\r\n 0, 1, 0,\r\n -s, 0, c]).reshape(-1,3)\r\n elif axis=='z':\r\n return np.array([ c, -s, 0,\r\n s, c, 0,\r\n 0, 0, 1]).reshape(-1,3)\r\n else:\r\n return np.ones((3,3))\r\n\r\n# def flatten_list(l):\r\n# # not work well\r\n# for el in l:\r\n# if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):\r\n# return flatten_list(el)\r\n# else:\r\n# return el\r\n'''\r\nmatplotlibe subplot\r\n 1. create_axs\r\n 2. draw_liver\r\n 3. 
draw_liver_tool\r\n'''\r\ndef create_axs(subplot_n,block=False,return_fig=False):\r\n r = int(np.floor(np.sqrt(subplot_n)))\r\n c = int(subplot_n/r)\r\n fig = plt.figure(figsize=plt.figaspect(0.5))\r\n axs = {}\r\n for i in range(subplot_n):\r\n axs[i] = fig.add_subplot(r, c, i+1, projection='3d')\r\n if return_fig:\r\n return axs,fig\r\n return axs\r\n\r\ndef draw_liver(liver,ax):\r\n ax.cla()\r\n ax = liver.plt_vtx(ax=ax)\r\n ax = liver.plt_x(ax=ax)\r\n plt_equal(ax)\r\n return ax\r\n\r\ndef draw_liver_F(liver,axs,f_scl = 5e0):\r\n # Fes, Ficp, Fis+ displacement\r\n axs[0].cla() \r\n axs[0] = liver.plt_x(ax=axs[0])\r\n axs[0] = liver.plt_Fes(vec_to_scl=f_scl,ax=axs[0])\r\n plt_equal(axs[0])\r\n axs[1].cla()\r\n axs[1] = liver.plt_x(ax=axs[1])\r\n axs[1] = liver.plt_Ficp(vec_to_scl=f_scl,ax=axs[1])\r\n plt_equal(axs[1])\r\n axs[2].cla()\r\n axs[2] = liver.plt_vtx(ax=axs[2])\r\n axs[2] = liver.plt_x(ax=axs[2])\r\n axs[2] = liver.plt_Fis(vec_to_scl=f_scl,ax=axs[2])\r\n plt_equal(axs[2])\r\n return axs\r\n\r\ndef draw_liver_tool(liver,sg,axs,f_scl=5e0):\r\n axs[0].cla() \r\n axs[0] = liver.plt_x(ax=axs[0])\r\n axs[0] = liver.plt_tri_normal_vec(vec_scl=f_scl/2,ax=axs[0])\r\n plt_equal(axs[0])\r\n axs[1].cla()\r\n axs[1] = sg.plt_sg_x(ax=axs[1])\r\n axs[1] = sg._plt_vtx_normal_vec(sg.x,vec_scl=f_scl/2,ax=axs[1])\r\n plt_equal(axs[1])\r\n axs[2].cla()\r\n axs[2] = liver.plt_x(ax=axs[2])\r\n axs[2] = sg.plt_sg_x(ax=axs[2])\r\n plt_equal(axs[2]) \r\n axs_l = {axs[3],axs[4],axs[5]}\r\n axs_l = draw_liver(liver,axs_l,f_scl=f_scl) \r\n axs[3],axs[4],axs[5] = axs_l[0],axs_l[1],axs_l[2] \r\n plt.draw()#plt.show(block=False)\r\n return axs\r\n\r\n'''\r\naabb\r\n 1. xyzminmax\r\n 2. _plt_AABB\r\n 3. plt_aabb_p\r\n'''\r\ndef xyzminmax(aabb):\r\n # xmin, ymin, zmin, xmax, ymax, zmax = aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]\r\n return aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]\r\n\r\n\r\ndef plt_AABB(aabb, **kwargs):\r\n c_line = '#9467bd'\r\n c_p = '#e377c2'\r\n if 'c' in kwargs.keys():\r\n colors = kwargs['c']\r\n if type(colors) is list:\r\n c_line = colors[0]\r\n c_p = colors[1]\r\n elif type(colors) is str:\r\n c_line = colors\r\n ax = ax3d_handle(**kwargs)\r\n\r\n # aabb: 1x6, xmin, ymin, zmin, xmax, ymax, zmax\r\n xmin, ymin, zmin, xmax, ymax, zmax = xyzminmax(aabb)\r\n xyz = np.array([xmin, ymin, zmin, xmax, ymin, zmin, xmax, ymax, zmin, xmin, ymax, zmin,\r\n xmin, ymin, zmax, xmax, ymin, zmax, xmax, ymax, zmax, xmin, ymax, zmax]).reshape(-1, 3)\r\n line_segs = np.array([1, 2, 2, 3, 3, 4, 4, 1,\r\n 1, 5, 2, 6, 3, 7, 4, 8,\r\n 5, 6, 6, 7, 7, 8, 8, 5]).reshape(-1, 2) - 1\r\n line_vt = np.hstack((xyz[line_segs[:, 0]], xyz[line_segs[:, 1]])).copy()\r\n lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors=c_line, linestyles='--')\r\n ax.add_collection(lc)\r\n ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], marker='o', c=c_p)\r\n return ax\r\n\r\n\r\ndef plt_aabb_p(aabb, p, **kwargs):\r\n ax = ax3d_handle(**kwargs)\r\n ax.scatter(p[0], p[1], p[2], c='#22D8C3')\r\n plt_AABB(aabb, ax=ax)\r\n return ax\r\n\r\n'''\r\nax handle\r\n 1. 1) plt_equal\r\n 2) plt_show_equal\r\n 3) set_axes_equal\r\n 4) _set_axes_radius\r\n 2. ax3d_handle\r\n 3. plt_tet\r\n 4. plt_tet_ps\r\n 5. plt_normal_vecs\r\n 6. plt_tri\r\n 7. 
plt_tri_ps\r\n'''\r\ndef plt_equal(ax,limits = None):\r\n ax.set_box_aspect((1, 1, 1)) # IMPORTANT - this is the new, key line\r\n set_axes_equal(ax,limits=limits) # IMPORTANT - this is also required\r\n\r\ndef plt_show_equal(ax,block=False,limits = None):\r\n plt_equal(ax,limits=limits)\r\n plt.show(block=block)\r\n\r\ndef set_axes_equal(ax: plt.Axes,limits = None):\r\n \"\"\"Set 3D plot axes to equal scale.\r\n\r\n Make axes of 3D plot have equal scale so that spheres appear as\r\n spheres and cubes as cubes. Required since `ax.axis('equal')`\r\n and `ax.set_aspect('equal')` don't work on 3D.\r\n \"\"\"\r\n if limits is None:\r\n limits = np.array([\r\n ax.get_xlim3d(),\r\n ax.get_ylim3d(),\r\n ax.get_zlim3d(),\r\n ])\r\n origin = np.mean(limits, axis=1)\r\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\r\n _set_axes_radius(ax, origin, radius)\r\n\r\ndef _set_axes_radius(ax, origin, radius):\r\n x, y, z = origin\r\n ax.set_xlim3d([x - radius, x + radius])\r\n ax.set_ylim3d([y - radius, y + radius])\r\n ax.set_zlim3d([z - radius, z + radius]) \r\n\r\ndef ax3d_handle(return_fig=False,**kwargs):\r\n if 'ax' in kwargs:\r\n ax = kwargs['ax']\r\n else:\r\n fig = plt.figure(figsize=(8,6))\r\n ax = fig.add_subplot(projection='3d')\r\n if return_fig:\r\n return ax,fig\r\n return ax\r\n\r\n\r\n\r\n\r\n\r\ndef plt_tet(vs, text_opt='off', **kwargs):\r\n ax = ax3d_handle(**kwargs)\r\n ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#BCB6E3')\r\n if text_opt == \"on\":\r\n for i in range(4): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')\r\n line_order = np.array([1, 2, 1, 3, 1, 4, 2, 3, 2, 4, 3, 4]).reshape(-1, 2) - 1\r\n line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))\r\n lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#8A7BFB')\r\n ax.add_collection(lc)\r\n return ax\r\n\r\n\r\ndef plt_tet_ps(vs, p, text_opt='off', **kwargs):\r\n p = np.array(p)\r\n ax = ax3d_handle(**kwargs)\r\n ax = plt_tet(vs, text_opt=text_opt, ax=ax)\r\n if len(p.shape) == 1: p = p.reshape(1, -1)\r\n ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')\r\n return ax\r\n\r\n\r\n\r\n\r\n\r\ndef plt_normal_vecs(base_ps, vecs, scl=1, **kwargs):\r\n vesc_scl = vecs * scl\r\n ax = ax3d_handle(**kwargs)\r\n ax.scatter(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2], c='#1D1788')\r\n ax.quiver(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2],\r\n vesc_scl[:, 0], vesc_scl[:, 1], vesc_scl[:, 2], color='#7D75FE')\r\n return ax\r\n\r\n\r\ndef plt_tet_ps_vecs(vs, p, vec, scl=1, text_opt = 'off', **kwargs):\r\n ax = ax3d_handle(**kwargs)\r\n ax = plt_tet_ps(vs, p, ax=ax, text_opt = text_opt)\r\n if len(p.shape) == 1: p = p.reshape(1, -1)\r\n if len(vec.shape) == 1: vec = vec.reshape(1, -1)\r\n ax = plt_normal_vecs(p, vec, scl=scl, ax=ax)\r\n return ax\r\n\r\n\r\ndef plt_tri(vs, text_opt='off', **kwargs):\r\n ax = ax3d_handle(**kwargs)\r\n ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#ff00ff')\r\n if text_opt == \"on\":\r\n for i in range(3): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')\r\n line_order = np.array([1, 2, 1, 3, 2, 3]).reshape(-1, 2) - 1\r\n line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))\r\n lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#9933ff')\r\n ax.add_collection(lc)\r\n return ax\r\n\r\n\r\ndef plt_tri_ps(vs, p, text_opt='off', **kwargs):\r\n ax = ax3d_handle(**kwargs)\r\n ax = plt_tri(vs, text_opt=text_opt, ax=ax)\r\n if len(p.shape) == 1: p = p.reshape(1, -1)\r\n ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')\r\n return ax\r\n" ]
[ [ "numpy.tile", "numpy.mean", "matplotlib.pyplot.draw", "numpy.cos", "numpy.deg2rad", "numpy.sin", "numpy.linalg.norm", "numpy.sqrt", "numpy.cross", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.intersect1d", "numpy.argwhere", "numpy.hstack", "matplotlib.pyplot.show", "matplotlib.pyplot.figaspect", "numpy.ones", "numpy.abs" ] ]
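Both collision_response functions in the record above rely on the same geometric test: the barycentric coordinates of an intersection point are computed as sub-triangle areas divided by the full triangle area, and the point lies inside the triangle when the three coordinates sum to roughly 1. A standalone numpy check of that area-ratio test; the triangle and query points here are made up for illustration.

import numpy as np

def barycentric(tri, p):
    # tri: (3, 3) triangle vertices; p: (3,) query point assumed to lie in the triangle's plane
    area2 = np.linalg.norm(np.cross(tri[1] - tri[0], tri[2] - tri[0]))
    coords = []
    for i in range(3):
        sub = tri.copy()
        sub[i] = p                      # replace one vertex by the query point, as bc_temp does
        coords.append(np.linalg.norm(np.cross(sub[1] - sub[0], sub[2] - sub[0])) / area2)
    return np.array(coords)

tri = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
inside = np.array([0.2, 0.3, 0.0])
outside = np.array([1.0, 1.0, 0.0])
print(barycentric(tri, inside).sum())    # ~1.0 -> inside the triangle
print(barycentric(tri, outside).sum())   # > 1.0 -> outside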
steffenvan/IMPALA-PopArt
[ "3cd086e85f8d48a4d3d5a3491fde007e1c9ee29d" ]
[ "popart/build_learner.py" ]
[ "import tensorflow as tf\nimport sys\nsys.path.insert(0,'..')\nimport vtrace_popart as vtrace\nnest = tf.contrib.framework.nest\n\nfrom .flags import *\n\ndef compute_baseline_loss(advantages):\n # Loss for the baseline, summed over the time dimension.\n # Multiply by 0.5 to match the standard update rule:\n # d(loss) / d(baseline) = advantage\n return .5 * tf.reduce_sum(tf.square(advantages))\n\ndef compute_entropy_loss(logits):\n policy = tf.nn.softmax(logits)\n log_policy = tf.nn.log_softmax(logits)\n entropy_per_timestep = tf.reduce_sum(-policy * log_policy, axis=-1)\n return -tf.reduce_sum(entropy_per_timestep)\n\ndef compute_policy_gradient_loss(logits, actions, advantages):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=actions, logits=logits)\n advantages = tf.stop_gradient(advantages)\n policy_gradient_loss_per_timestep = cross_entropy * advantages\n return tf.reduce_sum(policy_gradient_loss_per_timestep)\n\ndef build_learner(agent, env_outputs, agent_outputs, env_id):\n \"\"\"Builds the learner loop.\n\n Args:\n agent: A snt.RNNCore module outputting `AgentOutput` named tuples, with an\n `unroll` call for computing the outputs for a whole trajectory.\n agent_state: The initial agent state for each sequence in the batch.\n env_outputs: A `StepOutput` namedtuple where each field is of shape\n [T+1, ...].\n agent_outputs: An `AgentOutput` namedtuple where each field is of shape\n [T+1, ...].\n\n Returns:\n A tuple of (done, infos, and environment frames) where\n the environment frames tensor causes an update.\n \"\"\"\n\n learner_outputs = agent.unroll(agent_outputs.action, env_outputs, env_id)\n\n # Use last baseline value (from the value function) to bootstrap.\n bootstrap_value = learner_outputs.un_normalized_vf[-1]\n \n # At this point, the environment outputs at time step `t` are the inputs that\n # lead to the learner_outputs at time step `t`. After the following shifting,\n # the actions in agent_outputs and learner_outputs at time step `t` is what\n # leads to the environment outputs at time step `t`.\n agent_outputs = nest.map_structure(lambda t: t[1:], agent_outputs)\n rewards, infos, done, _ = nest.map_structure(\n lambda t: t[1:], env_outputs)\n learner_outputs = nest.map_structure(lambda t: t[:-1], learner_outputs)\n\n if FLAGS.reward_clipping == 'abs_one':\n clipped_rewards = tf.clip_by_value(rewards, -1, 1)\n elif FLAGS.reward_clipping == 'soft_asymmetric':\n squeezed = tf.tanh(rewards / 5.0)\n # Negative rewards are given less weight than positive rewards.\n clipped_rewards = tf.where(rewards < 0, .3 * squeezed, squeezed) * 5.\n\n discounts = tf.to_float(~done) * FLAGS.discounting\n game_specific_mean = tf.gather(agent._mean, env_id)\n game_specific_std = tf.gather(agent._std, env_id)\n\n # Compute V-trace returns and weights.\n # Note, this is put on the CPU because it's faster than on GPU. 
It can be\n # improved further with XLA-compilation or with a custom TensorFlow operation.\n with tf.device('/cpu'):\n vtrace_returns = vtrace.from_logits(\n behaviour_policy_logits=agent_outputs.policy_logits,\n target_policy_logits=learner_outputs.policy_logits,\n actions=agent_outputs.action,\n discounts=discounts,\n rewards=clipped_rewards,\n un_normalized_values=learner_outputs.un_normalized_vf,\n normalized_values=learner_outputs.normalized_vf,\n mean=game_specific_mean,\n std=game_specific_std,\n bootstrap_value=bootstrap_value)\n\n # First term of equation (7) in (Hessel et al., 2018)\n normalized_vtrace = (vtrace_returns.vs - game_specific_mean) / game_specific_std\n\n normalized_vtrace = nest.map_structure(tf.stop_gradient, normalized_vtrace)\n\n\n # Compute loss as a weighted sum of the baseline loss, the policy gradient\n # loss and an entropy regularization term.\n total_loss = compute_policy_gradient_loss(\n learner_outputs.policy_logits, agent_outputs.action,\n vtrace_returns.pg_advantages)\n\n baseline_loss = compute_baseline_loss(\n normalized_vtrace - learner_outputs.normalized_vf)\n\n total_loss += FLAGS.baseline_cost * baseline_loss\n total_loss += FLAGS.entropy_cost * compute_entropy_loss(\n learner_outputs.policy_logits)\n\n # Optimization\n num_env_frames = tf.train.get_global_step()\n\n learning_rate = tf.train.polynomial_decay(FLAGS.learning_rate, num_env_frames,\n FLAGS.total_environment_frames, 0)\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate, FLAGS.decay,\n FLAGS.momentum, FLAGS.epsilon)\n\n # Use reward clipping for atari games only \n if FLAGS.gradient_clipping > 0.0:\n variables = tf.trainable_variables()\n gradients = tf.gradients(total_loss, variables)\n gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clipping)\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n else:\n train_op = optimizer.minimize(total_loss)\n\n # Merge updating the network and environment frames into a single tensor.\n with tf.control_dependencies([train_op]):\n num_env_frames_and_train = num_env_frames.assign_add(\n FLAGS.batch_size * FLAGS.unroll_length)\n\n # Adding a few summaries.\n tf.summary.scalar('learning_rate', learning_rate)\n tf.summary.scalar('total_loss', total_loss)\n tf.summary.histogram('action', agent_outputs.action)\n \n # I'm not sure if it's really necessary to put this operation on the CPU. \n with tf.device('/cpu'):\n (mean, mean_squared) = (agent.update_moments(vtrace_returns.vs, env_id))\n return (done, infos, num_env_frames_and_train) + (mean, mean_squared)" ]
[ [ "tensorflow.gradients", "tensorflow.clip_by_value", "tensorflow.to_float", "tensorflow.control_dependencies", "tensorflow.stop_gradient", "tensorflow.nn.softmax", "tensorflow.tanh", "tensorflow.gather", "tensorflow.trainable_variables", "tensorflow.summary.histogram", "tensorflow.nn.log_softmax", "tensorflow.train.get_global_step", "tensorflow.summary.scalar", "tensorflow.where", "tensorflow.train.RMSPropOptimizer", "tensorflow.train.polynomial_decay", "tensorflow.reduce_sum", "tensorflow.clip_by_global_norm", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.device", "tensorflow.square" ] ]
gelareh1985/GraphSAGE
[ "1d90a5c4168c44494dd58fc5dd25f90e566d49af" ]
[ "graphsage/metrics.py" ]
[ "import tensorflow as tf\r\n\r\n# DISCLAIMER:\r\n# Parts of this code file were originally forked from\r\n# https://github.com/tkipf/gcn\r\n# which itself was very inspired by the keras package\r\ndef masked_logit_cross_entropy(preds, labels, mask):\r\n \"\"\"Logit cross-entropy loss with masking.\"\"\"\r\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)\r\n loss = tf.reduce_sum(input_tensor=loss, axis=1)\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.]))\r\n loss *= mask\r\n return tf.reduce_mean(input_tensor=loss)\r\n\r\ndef masked_softmax_cross_entropy(preds, labels, mask):\r\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\r\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=tf.stop_gradient(labels))\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.]))\r\n loss *= mask\r\n return tf.reduce_mean(input_tensor=loss)\r\n\r\n\r\ndef masked_l2(preds, actuals, mask):\r\n \"\"\"L2 loss with masking.\"\"\"\r\n loss = tf.nn.l2_loss(preds, actuals)\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n mask /= tf.reduce_mean(input_tensor=mask)\r\n loss *= mask\r\n return tf.reduce_mean(input_tensor=loss)\r\n\r\ndef masked_accuracy(preds, labels, mask):\r\n \"\"\"Accuracy with masking.\"\"\"\r\n correct_prediction = tf.equal(tf.argmax(input=preds, axis=1), tf.argmax(input=labels, axis=1))\r\n accuracy_all = tf.cast(correct_prediction, tf.float32)\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n mask /= tf.reduce_mean(input_tensor=mask)\r\n accuracy_all *= mask\r\n return tf.reduce_mean(input_tensor=accuracy_all)\r\n" ]
[ [ "tensorflow.argmax", "tensorflow.nn.l2_loss", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.reduce_mean", "tensorflow.stop_gradient", "tensorflow.cast" ] ]
shinianzhihou/ClassyVision
[ "b3f714ef94275b3e9753ab3f3c8256cb852b96fc" ]
[ "classy_vision/tasks/classification_task.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport copy\nimport enum\nimport json\nimport logging\nimport math\nimport multiprocessing as mp\nimport time\nfrom typing import Any, Dict, List, NamedTuple, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom classy_vision.dataset import ClassyDataset, build_dataset\nfrom classy_vision.dataset.transforms.mixup import MixupTransform\nfrom classy_vision.generic.distributed_util import (\n all_reduce_mean,\n barrier,\n init_distributed_data_parallel_model,\n is_distributed_training_run,\n)\nfrom classy_vision.generic.util import (\n Timer,\n copy_model_to_gpu,\n load_and_broadcast_checkpoint,\n master_params,\n recursive_copy_to_gpu,\n split_batchnorm_params,\n update_classy_state,\n)\nfrom classy_vision.generic.util import get_torch_version\nfrom classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks\nfrom classy_vision.losses import ClassyLoss, build_loss\nfrom classy_vision.meters import ClassyMeter, build_meters\nfrom classy_vision.models import ClassyModel, build_model\nfrom classy_vision.optim import (\n ClassyOptimizer,\n build_optimizer,\n build_optimizer_schedulers,\n)\nfrom classy_vision.optim.zero import ZeRO\nfrom torch.distributed import broadcast\n\nfrom . import register_task\nfrom .classy_task import ClassyTask\n\n\ntry:\n import apex\n\n apex_available = True\nexcept ImportError:\n apex_available = False\n\ntry:\n from torch.cuda.amp import GradScaler as TorchGradScaler\n\nexcept ImportError:\n pass\n\ntry:\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\n fairscale_available = True\nexcept ImportError:\n fairscale_available = False\n\n\nclass AmpType(enum.Enum):\n # Automatic Mixed Precision supported types\n APEX = enum.auto()\n PYTORCH = enum.auto()\n\n\nclass BroadcastBuffersMode(enum.Enum):\n DISABLED = enum.auto()\n # Enable DistributedDataParallel's broadcast_buffers option, synchronizing\n # model buffers every forward pass.\n FORWARD_PASS = enum.auto()\n # Similar to FORWARD_PASS, but only synchronizes model buffers once\n # per epoch, between train and test phases. If your motivation for\n # synchronizing buffers is for buffers to be consistent during eval, use\n # this instead of FORWARD_PASS to reduce training overhead.\n BEFORE_EVAL = enum.auto()\n\n\nclass BatchNormSyncMode(enum.Enum):\n DISABLED = enum.auto() # No Synchronized Batch Normalization\n PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm\n APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed\n\n\nclass LastBatchInfo(NamedTuple):\n loss: torch.Tensor\n output: torch.Tensor\n target: torch.Tensor\n sample: Dict[str, Any]\n step_data: Dict[str, Any]\n\n\n@register_task(\"classification_task\")\nclass ClassificationTask(ClassyTask):\n \"\"\"Basic classification training task.\n\n This task encapsultates all of the components and steps needed to\n train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.\n\n Assumes a train / test phase per each epoch and that the datasets\n have the same API as the map-style Dataset class in\n `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html\n #torch.utils.data.Dataset>`_ (in particular, this task makes use of\n the len). 
If you are using an `IterableDataset <https://pytorch.org/docs/\n stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task\n may be appropriate.\n\n\n :var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used\n for computing the loss in each forward pass\n :var datasets: Mapping from a ``phase_type`` in [\"train\", \"test']\n to dataset used for training (or testing)\n :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)\n to calculate during training\n :var num_epochs: Number of epochs (passes over dataset) to train\n :var test_only: Used to only run the test phase\n :var base_model: Model to be trained, unwrapped in DDP or DP wrappers\n :var optimizer: Optimizer used in train step\n :var optimizer_schedulers: Dictionary. Key is the name of the optimizer\n option (e.g. lr), value is a ClassyParamScheduler\n :var checkpoint: Serializable dict which represents state in training\n :var phases: List of phase specific information, e.g. if phase is\n train / test.\n :var hooks: List of hooks to apply during training\n :var train: Phase type, if true it means we are training,\n false means testing\n :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)\n :var phase_idx: Current phase id, first phase is 0, if task has not started\n training then returns -1\n :var train_phase_idx: Only counts train phases\n :var num_updates: Number of total parameter updates applied to model\n by the optimizer\n :var data_iterator: Iterator which can be used to obtain batches\n :var losses: Loss curve\n :var perf_log: list of training speed measurements, to be logged\n :var clip_grad_norm: maximum gradient norm (default None)\n :var simulated_global_batchsize: batch size simulated via gradient accumulation\n :var optimizer_period: apply optimizer after this many steps; derived from\n simulated_global_batchsize, default 1.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructs a ClassificationTask\"\"\"\n super().__init__()\n\n self.base_loss = None\n self.datasets = {}\n self.meters = []\n self.num_epochs = 1\n self.test_phase_period = 1\n self.train_phases_per_epoch = 0\n self.test_only = False\n self.base_model = None\n self.optimizer = None\n self.optimizer_schedulers = {}\n self.checkpoint_dict = None\n self.checkpoint_path = None\n self.phases = []\n self.hooks = []\n self.train = True\n self.distributed_model = None\n self.distributed_loss = None\n self.phase_idx = -1\n self.train_phase_idx = -1\n self.num_updates = 0\n self.dataloader = None\n self.data_iterator = None\n self.losses = []\n self.broadcast_buffers_mode: BroadcastBuffersMode = (\n BroadcastBuffersMode.BEFORE_EVAL\n )\n self.amp_args = None\n self.amp_type = None\n self.amp_grad_scaler = None\n self.mixup_transform = None\n self.perf_log = []\n self.last_batch = None\n self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED\n self.find_unused_parameters = False\n self.use_gpu = torch.cuda.is_available()\n self.dataloader_mp_context = \"spawn\"\n self.bn_weight_decay = False\n self._train_only = True\n self.clip_grad_norm = None\n self.simulated_global_batchsize = None\n self.optimizer_period = 1\n self.ddp_bucket_cap_mb = 25\n self.use_sharded_ddp = False\n self.fp16_grad_compress = False\n\n def set_use_sharded_ddp(self, use_sharded_ddp: bool):\n self.use_sharded_ddp = use_sharded_ddp\n if self.use_sharded_ddp:\n logging.info(\"Using Sharded DDP\")\n return self\n\n def set_use_gpu(self, use_gpu: bool):\n self.use_gpu = use_gpu\n\n assert (\n not self.use_gpu or 
torch.cuda.is_available()\n ), \"CUDA required to train on GPUs\"\n\n return self\n\n def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):\n \"\"\"Sets maximum gradient norm.\n\n None means gradient clipping is disabled. Defaults to None.\"\"\"\n self.clip_grad_norm = clip_grad_norm\n if clip_grad_norm is None:\n logging.info(\"Disabled gradient norm clipping.\")\n else:\n logging.info(\n f\"Enabled gradient norm clipping with threshold: {clip_grad_norm}\"\n )\n return self\n\n def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):\n \"\"\"Sets a simulated batch size by gradient accumulation.\n\n Gradient accumulation adds up gradients from multiple minibatches and\n steps the optimizer every N train_steps, where N is optimizer_period.\n When enabled, the very last train_steps might end up not updating the\n model, depending on the number of total steps. None means gradient\n accumulation is disabled. Defaults to None.\"\"\"\n self.simulated_global_batchsize = simulated_global_batchsize\n return self\n\n def set_checkpoint(self, checkpoint_path: str):\n \"\"\"Sets checkpoint on task.\n\n Args:\n checkpoint_path: The path to load the checkpoint from. Can be a file or a\n directory. See :func:`load_checkpoint` for more information.\n \"\"\"\n self.checkpoint_path = checkpoint_path\n return self\n\n def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):\n \"\"\"Sets the checkpoint dict in the task. Only used for testing.\n\n Args:\n checkpoint_dict: A serializable dict representing current task state\n \"\"\"\n self.checkpoint_dict = checkpoint_dict\n return self\n\n def set_num_epochs(self, num_epochs: Union[int, float]):\n \"\"\"Set number of epochs to be run.\n\n Args:\n num_epochs: Number of epochs to run task\n \"\"\"\n self.num_epochs = num_epochs\n return self\n\n def set_test_phase_period(self, test_phase_period: int):\n \"\"\"Set the period of test phase.\n\n Args:\n test_phase_period: The period of test phase\n \"\"\"\n self.test_phase_period = test_phase_period\n return self\n\n def set_dataset(self, dataset: ClassyDataset, phase_type: str):\n \"\"\"Set dataset for phase type on task\n\n Args:\n dataset: ClassyDataset for returning samples.\n phase_type: str must be one of \"train\" or \"test\"\n \"\"\"\n assert phase_type in [\n \"train\",\n \"test\",\n ], \"phase_type must be in ['train', 'test']\"\n self.datasets[phase_type] = dataset\n if phase_type == \"train\":\n self.train_phases_per_epoch = getattr(dataset, \"phases_per_epoch\", 1)\n else:\n self._train_only = False\n return self\n\n def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):\n \"\"\"Set the multiprocessing context used by the dataloader.\n\n The context can be either 'spawn', 'fork', 'forkserver' or None (uses the\n default context). 
See\n https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context\n for more details.\"\"\"\n\n self.dataloader_mp_context = dataloader_mp_context\n return self\n\n def set_optimizer(self, optimizer: ClassyOptimizer):\n \"\"\"Set optimizer for task\n\n Args:\n optimizer: optimizer for task\n \"\"\"\n self.optimizer = optimizer\n return self\n\n def set_loss(self, loss: ClassyLoss):\n \"\"\"Set loss function for task\n\n Args:\n loss: loss for task\n \"\"\"\n self.base_loss = loss\n return self\n\n def set_meters(self, meters: List[\"ClassyMeter\"]):\n \"\"\"Set meters for task\n\n Args:\n meters: list of meters to compute during training\n \"\"\"\n self.meters = meters\n return self\n\n def set_distributed_options(\n self,\n broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,\n batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,\n batch_norm_sync_group_size: int = 0,\n find_unused_parameters: bool = False,\n bucket_cap_mb: int = 25,\n fp16_grad_compress: bool = False,\n ):\n \"\"\"Set distributed options.\n\n Args:\n broadcast_buffers_mode: Broadcast buffers mode. See\n :class:`BroadcastBuffersMode` for options.\n batch_norm_sync_mode: Batch normalization synchronization mode. See\n :class:`BatchNormSyncMode` for options.\n batch_norm_sync_group_size: Group size to use for synchronized batch norm.\n 0 means that the stats are synchronized across all replicas. For\n efficient synchronization, set it to the number of GPUs in a node (\n usually 8).\n find_unused_parameters: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n bucket_cap_mb: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n Raises:\n RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex\n is not installed.\n \"\"\"\n self.broadcast_buffers_mode = broadcast_buffers_mode\n\n if batch_norm_sync_group_size > 0:\n if not batch_norm_sync_mode == BatchNormSyncMode.APEX:\n # this should ideally work with PyTorch Sync BN as well, but it\n # fails while initializing DDP for some reason.\n raise ValueError(\n \"batch_norm_sync_group_size can be > 0 only when \"\n \"Apex Synchronized Batch Normalization is being used.\"\n )\n self.batch_norm_sync_group_size = batch_norm_sync_group_size\n\n if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:\n logging.info(\"Synchronized Batch Normalization is disabled\")\n else:\n if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:\n raise RuntimeError(\"apex is not installed\")\n msg = f\"Using Synchronized Batch Normalization using {batch_norm_sync_mode}\"\n if self.batch_norm_sync_group_size > 0:\n msg += f\" and group size {batch_norm_sync_group_size}\"\n logging.info(msg)\n self.batch_norm_sync_mode = batch_norm_sync_mode\n\n if find_unused_parameters:\n logging.info(\"Enabling find_unused_parameters in DDP\")\n\n self.find_unused_parameters = find_unused_parameters\n self.ddp_bucket_cap_mb = bucket_cap_mb\n\n if fp16_grad_compress:\n if get_torch_version() < [1, 8, 0]:\n raise RuntimeError(\n \"FP16 grad compression is only supported since PyTorch 1.8\"\n )\n logging.info(\"Enabling FP16 grad compression\")\n self.fp16_grad_compress = fp16_grad_compress\n\n return self\n\n def set_hooks(self, hooks: List[\"ClassyHook\"]):\n \"\"\"Set hooks for task\n\n Args:\n hooks: List of hooks to apply during training\n \"\"\"\n from classy_vision.hooks import ClassyHook\n\n assert isinstance(hooks, list)\n assert all(isinstance(hook, ClassyHook) for 
hook in hooks)\n assert len({hook.name() for hook in hooks}) == len(\n hooks\n ), \"Cannot have repeated hooks of the same class\"\n # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks\n # may change the state of the model, and we want to save changed state in the checkpoint.\n # This is temporary fix.\n non_checkpoint_hooks = [\n hook for hook in hooks if not isinstance(hook, CheckpointHook)\n ]\n checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]\n hooks = non_checkpoint_hooks + checkpoint_hooks\n self.hooks = hooks\n return self\n\n def set_model(self, model: ClassyModel):\n \"\"\"Set model for task\n\n Args:\n model: Model to be trained\n \"\"\"\n self.base_model = model\n return self\n\n def set_test_only(self, test_only: bool):\n \"\"\"Set test only flag\n\n Args:\n test_only: If true, only test phases will be run\n \"\"\"\n self.test_only = test_only\n return self\n\n def set_bn_weight_decay(self, bn_weight_decay: bool):\n assert type(bn_weight_decay) == bool\n\n self.bn_weight_decay = bn_weight_decay\n return self\n\n def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):\n \"\"\"Disable / enable apex.amp and set the automatic mixed precision parameters.\n\n apex.amp can be utilized for mixed / half precision training.\n\n Args:\n amp_args: Dictionary containing arguments to be passed to\n amp.initialize. Set to None to disable amp. To enable mixed\n precision training, pass amp_args={\"opt_level\": \"O1\"} here.\n See https://nvidia.github.io/apex/amp.html for more info.\n\n Raises:\n RuntimeError: If opt_level is not None and apex is not installed.\n\n Warning: apex needs to be installed to utilize this feature.\n \"\"\"\n self.amp_args = amp_args\n\n if amp_args is None:\n logging.info(\"AMP disabled\")\n else:\n # Check that the requested AMP type is known\n try:\n self.amp_type = AmpType[self.amp_args[\"amp_type\"].upper()]\n except KeyError:\n logging.info(\"AMP type not specified, defaulting to Apex\")\n self.amp_type = AmpType.APEX\n\n # Check for CUDA availability, required for both Apex and Pytorch AMP\n if not torch.cuda.is_available():\n raise RuntimeError(\n \"AMP is required but CUDA is not supported, cannot enable AMP\"\n )\n\n # Check for Apex availability\n if self.amp_type == AmpType.APEX and not apex_available:\n raise RuntimeError(\n \"Apex AMP is required but Apex is not installed, cannot enable AMP\"\n )\n\n if self.use_sharded_ddp:\n if self.amp_type == AmpType.APEX:\n raise RuntimeError(\n \"ShardedDDP has been requested, which is incompatible with Apex AMP\"\n )\n\n if not fairscale_available:\n raise RuntimeError(\n \"ShardedDDP has been requested, but fairscale is not installed in the current environment\"\n )\n\n # Set Torch AMP grad scaler, used to prevent gradient underflow\n elif self.amp_type == AmpType.PYTORCH:\n\n if self.use_sharded_ddp:\n logging.info(\"Using ShardedGradScaler to manage Pytorch AMP\")\n self.amp_grad_scaler = ShardedGradScaler()\n else:\n self.amp_grad_scaler = TorchGradScaler()\n\n logging.info(f\"AMP enabled with args {amp_args}\")\n return self\n\n def set_mixup_transform(self, mixup_transform: Optional[\"MixupTransform\"]):\n \"\"\"Disable / enable mixup transform for data augmentation\n\n Args::\n mixup_transform: a callable object which performs mixup data augmentation\n \"\"\"\n self.mixup_transform = mixup_transform\n if mixup_transform is None:\n logging.info(\"mixup disabled\")\n else:\n logging.info(\"mixup enabled\")\n return self\n\n def 
set_optimizer_schedulers(self, schedulers):\n self.optimizer_schedulers = schedulers\n return self\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"ClassificationTask\":\n \"\"\"Instantiates a ClassificationTask from a configuration.\n\n Args:\n config: A configuration for a ClassificationTask.\n See :func:`__init__` for parameters expected in the config.\n\n Returns:\n A ClassificationTask instance.\n \"\"\"\n test_only = config.get(\"test_only\", False)\n if not test_only:\n # TODO Make distinction between epochs and phases in optimizer clear\n train_phases_per_epoch = config[\"dataset\"][\"train\"].get(\n \"phases_per_epoch\", 1\n )\n\n optimizer_config = config[\"optimizer\"]\n optimizer_config[\"num_epochs\"] = (\n config[\"num_epochs\"] * train_phases_per_epoch\n )\n optimizer = build_optimizer(optimizer_config)\n param_schedulers = build_optimizer_schedulers(optimizer_config)\n\n datasets = {}\n phase_types = [\"train\", \"test\"]\n for phase_type in phase_types:\n if phase_type in config[\"dataset\"]:\n datasets[phase_type] = build_dataset(config[\"dataset\"][phase_type])\n loss = build_loss(config[\"loss\"])\n amp_args = config.get(\"amp_args\")\n meters = build_meters(config.get(\"meters\", {}))\n model = build_model(config[\"model\"])\n\n mixup_transform = None\n if config.get(\"mixup\") is not None:\n assert \"alpha\" in config[\"mixup\"], \"key alpha is missing in mixup dict\"\n mixup_transform = MixupTransform(\n config[\"mixup\"][\"alpha\"], config[\"mixup\"].get(\"num_classes\")\n )\n\n # hooks config is optional\n hooks_config = config.get(\"hooks\")\n hooks = []\n if hooks_config is not None:\n hooks = build_hooks(hooks_config)\n\n distributed_config = config.get(\"distributed\", {})\n distributed_options = {\n \"broadcast_buffers_mode\": BroadcastBuffersMode[\n distributed_config.get(\"broadcast_buffers\", \"before_eval\").upper()\n ],\n \"batch_norm_sync_mode\": BatchNormSyncMode[\n distributed_config.get(\"batch_norm_sync_mode\", \"disabled\").upper()\n ],\n \"batch_norm_sync_group_size\": distributed_config.get(\n \"batch_norm_sync_group_size\", 0\n ),\n \"find_unused_parameters\": distributed_config.get(\n \"find_unused_parameters\", False\n ),\n \"bucket_cap_mb\": distributed_config.get(\"bucket_cap_mb\", 25),\n \"fp16_grad_compress\": distributed_config.get(\"fp16_grad_compress\", False),\n }\n\n task = (\n cls()\n .set_num_epochs(config[\"num_epochs\"])\n .set_test_phase_period(config.get(\"test_phase_period\", 1))\n .set_loss(loss)\n .set_test_only(test_only)\n .set_model(model)\n .set_meters(meters)\n .set_amp_args(amp_args)\n .set_mixup_transform(mixup_transform)\n .set_distributed_options(**distributed_options)\n .set_hooks(hooks)\n .set_bn_weight_decay(config.get(\"bn_weight_decay\", False))\n .set_clip_grad_norm(config.get(\"clip_grad_norm\"))\n .set_simulated_global_batchsize(config.get(\"simulated_global_batchsize\"))\n .set_use_sharded_ddp(config.get(\"use_sharded_ddp\", False))\n )\n\n if not test_only:\n task.set_optimizer(optimizer)\n task.set_optimizer_schedulers(param_schedulers)\n\n use_gpu = config.get(\"use_gpu\")\n if use_gpu is not None:\n task.set_use_gpu(use_gpu)\n\n for phase_type in datasets:\n task.set_dataset(datasets[phase_type], phase_type)\n\n # NOTE: this is a private member and only meant to be used for\n # logging/debugging purposes. 
See __repr__ implementation\n task._config = config\n\n return task\n\n @property\n def num_batches_per_phase(self):\n \"\"\"Returns number of batches in current phase iterator\"\"\"\n return len(self.data_iterator)\n\n @property\n def model(self):\n \"\"\"Returns model used in training (can be wrapped with DDP)\"\"\"\n return (\n self.distributed_model if is_distributed_training_run() else self.base_model\n )\n\n @property\n def loss(self):\n \"\"\"Returns loss used in training (can be wrapped with DDP)\"\"\"\n return self.distributed_loss if self.distributed_loss else self.base_loss\n\n @property\n def phase_type(self):\n \"\"\"Returns current phase type. String with value \"train\" or \"test\" \"\"\"\n return \"train\" if self.train else \"test\"\n\n @property\n def eval_phase_idx(self):\n \"\"\"Returns current evaluation phase\"\"\"\n return self.phase_idx - self.train_phase_idx - 1\n\n def get_total_training_phases(self):\n \"\"\"\n Returns the total number of \"train\" phases in the task\n \"\"\"\n num_training_phases = 0\n for phase in self.phases:\n if phase[\"train\"] is True:\n num_training_phases += 1\n return num_training_phases\n\n def get_total_test_phases(self):\n \"\"\"\n Returns the total number of \"test\" phases in the task\n \"\"\"\n num_test_phases = 0\n for phase in self.phases:\n if phase[\"train\"] is False:\n num_test_phases += 1\n return num_test_phases\n\n def _build_phases(self):\n \"\"\"Returns list of phases from config.\n\n These phases will look like:\n {\n train: is this a train or test phase?\n optimizer: optimizer settings\n }\n\n - If this is a test only run, then only test phases will be\n generated\n - If this is a training run with both train and test datasets, then x phases =\n x train phases + x test phases, interleaved. If test_phase_period > 1, test\n phases are only added after test_phase_period train phases. 
The last phase is\n always a test phase.\n - If this is a training run with only a train dataset, then x phases = x train\n phases.\n \"\"\"\n if not self.test_only:\n phases = [\n {\"train\": True}\n for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))\n ]\n\n if self._train_only:\n return phases\n\n final_phases = []\n for i, phase in enumerate(phases):\n final_phases.append(phase)\n if (i + 1) % self.test_phase_period == 0:\n final_phases.append({\"train\": False})\n if final_phases[-1][\"train\"]:\n final_phases.append({\"train\": False})\n return final_phases\n\n return [{\"train\": False} for _ in range(self.num_epochs)]\n\n def build_dataloader_from_dataset(self, dataset, **kwargs):\n \"\"\"Builds a dataloader from the provided dataset\n\n Args:\n dataset: A ClassyDataset\n kwargs: Additional kwargs to pass during dataloader construction for\n derived classes\n \"\"\"\n return dataset.iterator(\n phase_type=self.phase_type,\n current_phase_id=self.train_phase_idx if self.train else 0,\n pin_memory=self.use_gpu and torch.cuda.device_count() > 1,\n multiprocessing_context=mp.get_context(self.dataloader_mp_context),\n **kwargs,\n )\n\n def build_dataloaders_for_current_phase(self):\n \"\"\"Builds dataloader(s) for the current phase.\n\n Deriving classes can override this method to support custom behavior, like\n supporting multiple dataloaders in parallel.\n \"\"\"\n self.dataloader = self.build_dataloader_from_dataset(\n self.datasets[self.phase_type]\n )\n\n def prepare_optimizer(self, optimizer, model, loss=None):\n bn_params, other_params = split_batchnorm_params(model)\n if loss is not None:\n bn_params_loss, params_loss = split_batchnorm_params(loss)\n bn_params = bn_params + bn_params_loss\n other_params = other_params + params_loss\n\n bn_schedulers = self.optimizer_schedulers.copy()\n if not self.bn_weight_decay:\n bn_schedulers[\"weight_decay\"] = 0\n\n param_groups = [{\"params\": other_params, **self.optimizer_schedulers}]\n if len(bn_params) > 0:\n param_groups.append({\"params\": bn_params, **bn_schedulers})\n self.optimizer.set_param_groups(param_groups)\n\n def prepare(self):\n \"\"\"Prepares task for training, populates all derived attributes \"\"\"\n\n self.phases = self._build_phases()\n self.train = False if self.test_only else self.train\n\n if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:\n self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)\n elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:\n sync_bn_process_group = apex.parallel.create_syncbn_process_group(\n self.batch_norm_sync_group_size\n )\n self.base_model = apex.parallel.convert_syncbn_model(\n self.base_model, process_group=sync_bn_process_group\n )\n\n # move the model and loss to the right device\n if self.use_gpu:\n self.base_model, self.base_loss = copy_model_to_gpu(\n self.base_model, self.base_loss\n )\n else:\n self.base_loss.cpu()\n self.base_model.cpu()\n\n if self.optimizer is not None:\n self.prepare_optimizer(\n optimizer=self.optimizer, model=self.base_model, loss=self.base_loss\n )\n\n if self.amp_args is not None:\n if self.amp_type == AmpType.APEX:\n # Initialize apex.amp. 
This updates the model and the PyTorch optimizer (\n # if training, which is wrapped by the ClassyOptimizer in self.optimizer).\n # Please note this must happen before loading the checkpoint, cause\n # there's amp state to be restored.\n if self.optimizer is None:\n self.base_model = apex.amp.initialize(\n self.base_model, optimizers=None, **self.amp_args\n )\n else:\n self.base_model, self.optimizer.optimizer = apex.amp.initialize(\n self.base_model, self.optimizer.optimizer, **self.amp_args\n )\n\n if self.simulated_global_batchsize is not None:\n if self.simulated_global_batchsize % self.get_global_batchsize() != 0:\n raise ValueError(\n f\"Global batch size ({self.get_global_batchsize()}) must divide \"\n f\"simulated_global_batchsize ({self.simulated_global_batchsize})\"\n )\n else:\n self.simulated_global_batchsize = self.get_global_batchsize()\n\n self.optimizer_period = (\n self.simulated_global_batchsize // self.get_global_batchsize()\n )\n if self.optimizer_period > 1:\n logging.info(\n f\"Using gradient accumulation with a period of {self.optimizer_period}\"\n )\n\n if self.checkpoint_path:\n self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)\n\n classy_state_dict = (\n None\n if self.checkpoint_dict is None\n else self.checkpoint_dict[\"classy_state_dict\"]\n )\n\n if classy_state_dict is not None:\n state_load_success = update_classy_state(self, classy_state_dict)\n assert (\n state_load_success\n ), \"Update classy state from checkpoint was unsuccessful.\"\n\n self.init_distributed_data_parallel_model()\n\n def init_distributed_data_parallel_model(self):\n \"\"\"\n Initialize\n `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/\n docs/stable/nn.html#distributeddataparallel>`_.\n\n Needed for distributed training. 
This is where a model should be wrapped by DDP.\n \"\"\"\n if not is_distributed_training_run():\n return\n assert (\n self.distributed_model is None\n ), \"init_ddp_non_elastic must only be called once\"\n\n broadcast_buffers = (\n self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS\n )\n\n if self.use_sharded_ddp:\n if not isinstance(self.optimizer, ZeRO):\n raise ValueError(\n \"ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer\"\n )\n from fairscale.nn.data_parallel import ShardedDataParallel\n\n # Replace the original DDP wrap by the shard-aware ShardedDDP\n self.distributed_model = ShardedDataParallel(\n module=self.base_model,\n sharded_optimizer=self.optimizer.optimizer,\n broadcast_buffers=broadcast_buffers,\n )\n else:\n self.distributed_model = init_distributed_data_parallel_model(\n self.base_model,\n broadcast_buffers=broadcast_buffers,\n find_unused_parameters=self.find_unused_parameters,\n bucket_cap_mb=self.ddp_bucket_cap_mb,\n )\n if self.fp16_grad_compress:\n\n from torch.distributed.algorithms import ddp_comm_hooks\n\n # FP16 hook is stateless and only takes a process group as the state.\n # We use the default process group so we set the state to None.\n process_group = None\n self.distributed_model.register_comm_hook(\n process_group,\n ddp_comm_hooks.default_hooks.fp16_compress_hook,\n )\n if (\n isinstance(self.base_loss, ClassyLoss)\n and self.base_loss.has_learned_parameters()\n ):\n logging.info(\"Initializing distributed loss\")\n self.distributed_loss = init_distributed_data_parallel_model(\n self.base_loss,\n broadcast_buffers=broadcast_buffers,\n find_unused_parameters=self.find_unused_parameters,\n bucket_cap_mb=self.ddp_bucket_cap_mb,\n )\n\n @property\n def where(self):\n \"\"\"Returns the proportion of training that has completed. If in test\n only mode, returns proportion of testing completed\n\n Returned value is a float in the range [0, 1)\n \"\"\"\n current_step = self.num_updates / self.get_global_batchsize()\n num_phases = (\n self.get_total_test_phases()\n if self.test_only\n else self.get_total_training_phases()\n )\n\n if self.num_batches_per_phase <= 0:\n raise RuntimeError(\"No batches to read. 
Is the dataset empty?\")\n\n num_steps = num_phases * self.num_batches_per_phase\n where = current_step / num_steps\n\n return where\n\n def get_classy_state(self, deep_copy: bool = False):\n \"\"\"Returns serialiable state of task\n\n Args:\n deep_copy: If true, does a deep copy of state before returning.\n \"\"\"\n optimizer_state = {}\n if self.optimizer is not None:\n optimizer_state = self.optimizer.get_classy_state()\n\n classy_state_dict = {\n \"train\": self.train,\n \"base_model\": self.base_model.get_classy_state(),\n \"meters\": [meter.get_classy_state() for meter in self.meters],\n \"optimizer\": optimizer_state,\n \"phase_idx\": self.phase_idx,\n \"train_phase_idx\": self.train_phase_idx,\n \"num_updates\": self.num_updates,\n \"losses\": self.losses,\n \"hooks\": {hook.name(): hook.get_classy_state() for hook in self.hooks},\n \"loss\": {},\n }\n if \"train\" in self.datasets and self._is_checkpointable_dataset(\n self.datasets[\"train\"]\n ):\n classy_state_dict[\"train_dataset_iterator\"] = self.datasets[\n \"train\"\n ].get_classy_state()\n\n if isinstance(self.base_loss, ClassyLoss):\n classy_state_dict[\"loss\"] = self.base_loss.get_classy_state()\n if self.amp_args is not None:\n if self.amp_type == AmpType.APEX:\n classy_state_dict[\"amp\"] = apex.amp.state_dict()\n\n elif self.amp_grad_scaler is not None:\n classy_state_dict[\"amp\"] = self.amp_grad_scaler.state_dict()\n\n if deep_copy:\n classy_state_dict = copy.deepcopy(classy_state_dict)\n return classy_state_dict\n\n def set_classy_state(self, state):\n \"\"\"Set task state\n\n Args:\n state: Dict containing state of a task\n \"\"\"\n # some settings are different in test only\n self.train = False if self.test_only else state[\"train\"]\n if not self.test_only:\n self.phase_idx = state[\"phase_idx\"]\n self.num_updates = state[\"num_updates\"]\n self.train_phase_idx = state[\"train_phase_idx\"]\n self.losses = state[\"losses\"]\n for meter, meter_state in zip(self.meters, state[\"meters\"]):\n meter.set_classy_state(meter_state)\n\n self.base_model.set_classy_state(state[\"base_model\"])\n if self.optimizer is not None:\n self.optimizer.set_classy_state(state[\"optimizer\"])\n if state.get(\"loss\") and isinstance(self.base_loss, ClassyLoss):\n self.base_loss.set_classy_state(state[\"loss\"])\n\n if \"amp\" in state:\n if self.amp_type == AmpType.APEX:\n apex.amp.load_state_dict(state[\"amp\"])\n else:\n self.amp_grad_scaler.load_state_dict(state[\"amp\"])\n\n for hook in self.hooks:\n # we still want to be able to run when new hooks are added or old\n # hooks are removed\n if hook.name() in state[\"hooks\"]:\n hook.set_classy_state(state[\"hooks\"][hook.name()])\n else:\n logging.warning(f\"No state found for hook: {hook.name()}\")\n\n if \"train\" in self.datasets and self._is_checkpointable_dataset(\n self.datasets[\"train\"]\n ):\n self.datasets[\"train\"].set_classy_state(state.get(\"train_dataset_iterator\"))\n\n @staticmethod\n def _is_checkpointable_dataset(dataset):\n return hasattr(dataset, \"get_classy_state\") and hasattr(\n dataset, \"set_classy_state\"\n )\n\n def eval_step(self):\n self.last_batch = None\n\n # Process next sample\n with Timer() as timer:\n sample = next(self.data_iterator)\n\n assert isinstance(sample, dict) and \"input\" in sample and \"target\" in sample, (\n f\"Returned sample [{sample}] is not a map with 'input' and\"\n + \"'target' keys\"\n )\n\n target = sample[\"target\"]\n if self.use_gpu:\n sample = recursive_copy_to_gpu(sample, non_blocking=True)\n\n # Optional Pytorch AMP 
context\n torch_amp_context = (\n torch.cuda.amp.autocast()\n if self.amp_type == AmpType.PYTORCH\n else contextlib.suppress()\n )\n\n with torch.no_grad(), torch_amp_context:\n output = self.model(sample[\"input\"])\n\n local_loss = self.compute_loss(output, sample)\n\n loss = local_loss.detach().clone()\n\n self.check_inf_nan(loss)\n\n self.losses.append(loss.data.cpu().item() * target.size(0))\n\n self.update_meters(output, sample)\n\n # Move some data to the task so hooks get a chance to access it\n self.last_batch = LastBatchInfo(\n loss=loss,\n output=output,\n target=target,\n sample=sample,\n step_data={\"sample_fetch_time\": timer.elapsed_time},\n )\n\n def check_inf_nan(self, loss):\n if loss == float(\"inf\") or loss == float(\"-inf\") or loss != loss:\n raise FloatingPointError(f\"Loss is infinity or NaN: {loss}\")\n\n def _should_do_step(self):\n \"\"\"Tells if we will be performing an optimizer step.\n\n Returns True always if there is no gradient accumulation. With gradient\n accumulation returns True only when the gradients will be synchronized and we\n will be performing an optimizer step.\n \"\"\"\n update_idx = self.num_updates // self.get_global_batchsize()\n return (update_idx % self.optimizer_period) == self.optimizer_period - 1\n\n def train_step(self):\n \"\"\"Train step to be executed in train loop.\"\"\"\n\n self.last_batch = None\n\n # Process next sample\n with Timer() as timer:\n sample = next(self.data_iterator)\n\n assert isinstance(sample, dict) and \"input\" in sample and \"target\" in sample, (\n f\"Returned sample [{sample}] is not a map with 'input' and\"\n + \"'target' keys\"\n )\n\n # Copy sample to GPU\n target = sample[\"target\"]\n if self.use_gpu:\n sample = recursive_copy_to_gpu(sample, non_blocking=True)\n\n if self.mixup_transform is not None:\n sample = self.mixup_transform(sample)\n\n # Optional Pytorch AMP context\n torch_amp_context = (\n torch.cuda.amp.autocast()\n if self.amp_type == AmpType.PYTORCH\n else contextlib.suppress()\n )\n\n # only sync with DDP when we need to perform an optimizer step\n # an optimizer step can be skipped if gradient accumulation is enabled\n do_step = self._should_do_step()\n ctx_mgr_model = (\n self.distributed_model.no_sync()\n if self.distributed_model is not None and not do_step\n else contextlib.suppress()\n )\n ctx_mgr_loss = (\n self.distributed_loss.no_sync()\n if self.distributed_loss is not None and not do_step\n else contextlib.suppress()\n )\n\n with ctx_mgr_model, ctx_mgr_loss:\n # Forward pass\n with torch.enable_grad(), torch_amp_context:\n output = self.model(sample[\"input\"])\n\n local_loss = self.compute_loss(output, sample)\n loss = local_loss.detach().clone()\n self.losses.append(loss.data.cpu().item() * target.size(0))\n\n self.update_meters(output, sample)\n\n # Backwards pass + optimizer step\n self.run_optimizer(local_loss)\n\n self.num_updates += self.get_global_batchsize()\n\n # Move some data to the task so hooks get a chance to access it\n self.last_batch = LastBatchInfo(\n loss=loss,\n output=output,\n target=target,\n sample=sample,\n step_data={\"sample_fetch_time\": timer.elapsed_time},\n )\n\n def compute_loss(self, model_output, sample):\n return self.loss(model_output, sample[\"target\"])\n\n def run_optimizer(self, loss):\n \"\"\"Runs backwards pass and update the optimizer\"\"\"\n\n self.check_inf_nan(loss)\n\n # Gradient accumulation logic. We always set optimizer_period, even\n # if gradient accumulation is disabled. 
Assumes all batches have the\n # same size\n update_idx = self.num_updates // self.get_global_batchsize()\n do_zero_grad = (update_idx % self.optimizer_period) == 0\n do_step = self._should_do_step()\n\n if do_zero_grad:\n self.optimizer.zero_grad()\n\n if self.amp_type == AmpType.APEX:\n with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.amp_type == AmpType.PYTORCH:\n self.amp_grad_scaler.scale(loss).backward()\n else:\n loss.backward()\n\n if do_step:\n # Handle gradient accumulation related gradient rescaling\n if self.optimizer_period != 1:\n self._rescale_gradients(1 / self.optimizer_period)\n\n # Clipping must happen after grad accumulation\n if self.clip_grad_norm is not None:\n self._clip_gradients(self.clip_grad_norm)\n\n if self.amp_type == AmpType.PYTORCH:\n # If using mixed precision, handle underflow-related scaling\n # See https://pytorch.org/docs/stable/amp.html#gradient-scaling\n # for context\n self.amp_grad_scaler.step(self.optimizer, where=self.where)\n self.amp_grad_scaler.update()\n else:\n self.optimizer.step(where=self.where)\n\n def _rescale_gradients(self, scale):\n for param in master_params(self.optimizer):\n if param.grad is not None:\n param.grad.data.mul_(scale)\n\n def _clip_gradients(self, max_norm):\n nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)\n\n def update_meters(self, model_output, sample):\n target = sample[\"target\"].detach().cpu()\n model_output = model_output.detach().cpu()\n\n # Update meters\n for meter in self.meters:\n meter.update(model_output, target, is_train=self.train)\n\n def synchronize_losses(self):\n \"\"\"Average the losses across the different replicas\"\"\"\n\n # Average losses across nodes\n losses_tensor = torch.tensor(self.losses)\n synchronized_losses_tensor = all_reduce_mean(losses_tensor)\n self.losses = synchronized_losses_tensor.tolist()\n\n def advance_phase(self):\n \"\"\"Performs bookkeeping / task updates between phases\n\n Increments phase idx, resets meters, resets loss history,\n resets counters, shuffles dataset, rebuilds iterators, and\n sets the train / test state for phase.\n \"\"\"\n logging.debug(\"Advancing phase\")\n # Reset meters for next phase / epoch\n for meter in self.meters:\n meter.reset()\n\n # Reset loss history for next epoch\n self.losses = []\n\n # Setup new phase\n self.phase_idx += 1\n phase = self.phases[self.phase_idx]\n self.train = True if phase[\"train\"] else False\n if self.train:\n self.train_phase_idx += 1\n\n # Re-build dataloader & re-create iterator anytime membership changes.\n self.build_dataloaders_for_current_phase()\n self.create_data_iterators()\n # Set up pytorch module in train vs eval mode, update optimizer.\n self._set_model_train_mode()\n\n def done_training(self):\n \"\"\"Stop condition for training\"\"\"\n return self.phase_idx + 1 >= len(self.phases)\n\n def create_data_iterators(self):\n \"\"\"Creates data iterator(s) for the current phase.\"\"\"\n # Delete iterator explicitly so that all dataloader processes\n # are cleaned up.\n del self.data_iterator\n self.data_iterator = iter(self.dataloader)\n\n def _set_model_train_mode(self):\n \"\"\"Set train mode for model\"\"\"\n phase = self.phases[self.phase_idx]\n self.base_model.train(phase[\"train\"])\n self.base_loss.train(phase[\"train\"])\n\n if (\n self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL\n and not self.train\n ):\n self._broadcast_buffers()\n\n def _broadcast_buffers(self):\n \"\"\"Explicitly synchronize buffers 
across all devices.\"\"\"\n if self.distributed_model is None:\n return\n buffers = list(self.base_model.buffers())\n if len(buffers) > 0:\n logging.info(\"Synchronizing buffers before evaluation.\")\n for buffer in buffers:\n broadcast(buffer, 0, group=self.distributed_model.process_group)\n\n # TODO: Functions below should be better abstracted into the dataloader\n # abstraction\n def get_batchsize_per_replica(self):\n \"\"\"Return local replica's batchsize for dataset (e.g. batchsize per GPU)\"\"\"\n return self.datasets[self.phase_type].get_batchsize_per_replica()\n\n def get_global_batchsize(self):\n \"\"\"Return global batchsize across all trainers\"\"\"\n return self.datasets[self.phase_type].get_global_batchsize()\n\n def on_start(self):\n for hook in self.hooks:\n hook.on_start(self)\n\n def on_phase_start(self):\n self.phase_start_time_total = time.perf_counter()\n\n self.advance_phase()\n\n for hook in self.hooks:\n hook.on_phase_start(self)\n\n self.phase_start_time_train = time.perf_counter()\n\n def on_phase_end(self):\n self.log_phase_end(\"train\")\n\n if self.train:\n self.optimizer.on_epoch(where=self.where)\n\n logging.debug(\"Syncing losses on phase end...\")\n self.synchronize_losses()\n logging.debug(\"...losses synced\")\n\n logging.debug(\"Syncing meters on phase end...\")\n for meter in self.meters:\n meter.sync_state()\n logging.debug(\"...meters synced\")\n barrier()\n\n for hook in self.hooks:\n hook.on_phase_end(self)\n self.perf_log = []\n\n self.log_phase_end(\"total\")\n\n def on_end(self):\n for hook in self.hooks:\n hook.on_end(self)\n\n def log_phase_end(self, tag):\n if not self.train:\n return\n\n start_time = (\n self.phase_start_time_train\n if tag == \"train\"\n else self.phase_start_time_total\n )\n phase_duration = time.perf_counter() - start_time\n im_per_sec = (\n self.get_global_batchsize() * self.num_batches_per_phase\n ) / phase_duration\n self.perf_log.append(\n {\n \"tag\": tag,\n \"phase_idx\": self.train_phase_idx,\n \"epoch_duration\": phase_duration,\n \"im_per_sec\": im_per_sec,\n }\n )\n\n def __repr__(self):\n if hasattr(self, \"_config\"):\n config = json.dumps(self._config, indent=4)\n return f\"{super().__repr__()} initialized with config:\\n{config}\"\n\n return super().__repr__()\n" ]
[ [ "torch.cuda.amp.autocast", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.no_grad", "torch.enable_grad", "torch.cuda.device_count", "torch.cuda.is_available", "torch.tensor", "torch.cuda.amp.GradScaler", "torch.distributed.broadcast" ] ]
varun123455/containermicroservices
[ "04bb2ab4dc8303969caf1047872059436a8c4e7d" ]
[ "load_balancer/docker_acts/app.py" ]
[ "def is_sha1(maybe_sha):\n if len(maybe_sha) != 40:\n return False\n try:\n sha_int = int(maybe_sha, 16)\n except ValueError:\n return False\n return True\n\ndef validate(date_text):\n try:\n datetime.datetime.strptime(date_text, '%d-%m-%Y:%S-%M-%H')\n return True\n except ValueError:\n return False\n\nfrom flask_cors import CORS\nfrom flask import Flask, render_template, Response, request, jsonify\nimport pandas as pd\nimport os\nimport json\nimport shutil\nimport datetime\nimport base64\nimport binascii\nimport datetime\nimport requests as r\n\nLOGIN_FILE_NAME = \"login.csv\"\nDB = \"templates/images\" \nGLOBAL_LIST = \"acts.csv\"\nIP = \"3.208.6.174:80\"\nINSTANCE_IP = \"34.226.230.93\"\ncount_requests = 0\n#IP = \"127.0.0.1:5000\"\n\n\napp = Flask(__name__)\nCORS(app)\n\n@app.errorhandler(405)\ndef method_not_allowed(e):\n global count_requests\n count_requests += 1\n return jsonify({'error': 405}), 405\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n \n@app.route(\"/api/v1/categories\", methods = [\"GET\", \"POST\"])\ndef list_categories():\n global count_requests\n count_requests += 1\n if not os.path.exists(DB):\n os.makedirs(DB, exist_ok = True)\n\n if request.method == 'GET':\n categories = os.listdir(DB)\n if not categories:\n return Response('{}', status=204, mimetype='application/json')\n response_data = {}\n \n for category in categories:\n response_data[category] = len(os.listdir(DB + \"/\" + category)) \n return jsonify(response_data)\n\n\n elif request.method == \"POST\":\n category = json.loads(request.data)[0]\n\n if category in os.listdir(DB):\n return Response('{}', status=400, mimetype='application/json')\n\n os.makedirs(DB + \"/\" + category, exist_ok = True)\n return Response('{}', status=201, mimetype='application/json')\n \n else:\n return Response('{}', status=405, mimetype='application/json')\n\n\n\n\n@app.route(\"/api/v1/categories/<category>\", methods = [\"DELETE\"])\ndef delete_category(category = None):\n global count_requests\n count_requests += 1\n if request.method == 'DELETE':\n categories = os.listdir(DB)\n if category in categories:\n if GLOBAL_LIST in os.listdir():\n data = pd.read_csv(GLOBAL_LIST)\n data = data[data.category != category]\n data.to_csv(GLOBAL_LIST, index = False)\n shutil.rmtree(DB + \"/\" + category)\n return Response('{}', status=200, mimetype='application/json')\n else:\n return Response('{}', status=400, mimetype='application/json')\n else:\n return Response('{}', status=405, mimetype='application/json')\n\n \n@app.route(\"/api/v1/categories/<category>/acts\", methods = [\"GET\"])\ndef list_acts(category = None):\n global count_requests\n count_requests += 1\n if request.method == 'GET':\n temp_path = DB + \"/\" + category + \"/\" + GLOBAL_LIST\n if category not in os.listdir(DB):\n return Response('[]', status=400, mimetype='application/json')\n start = request.args.get('start')\n end = request.args.get(\"end\")\n if start == None and end == None:\n #print(\"This part\")\n if os.path.exists(temp_path):\n data = pd.read_csv(temp_path)\n rows = data.shape[0]\n if rows == 0:\n return Response('[]', status=204, mimetype='application/json')\n elif rows >= 100:\n return Response('[]', status=413, mimetype='application/json')\n else:\n response_data = data.to_json(orient = \"records\")\n return Response(response_data, status=200, mimetype='application/json')\n else:\n return Response('[]', status=204, mimetype='application/json')\n else:\n start = int(start)\n end = int(end)\n temp_path = DB + \"/\" + 
category + \"/\" + GLOBAL_LIST\n if category not in os.listdir(DB):\n return Response('[]', status=400, mimetype='application/json')\n if os.path.exists(temp_path):\n data = pd.read_csv(temp_path)\n data[\"timestamp\"] = pd.to_datetime(data[\"timestamp\"], format = '%d-%m-%Y:%S-%M-%H')\n data[\"actId\"] = data[\"actId\"].astype(int)\n sorted_data = data.sort_values([\"timestamp\", \"actId\"], ascending = [False, False], axis = 0)\n #print(data)\n #print(sorted_data)\n rows = data.shape[0]\n if start < 1 or end > rows:\n return Response('[]', status=400, mimetype='application/json')\n if rows == 0:\n return Response('[]', status=204, mimetype='application/json')\n else:\n required_data = pd.DataFrame(sorted_data.iloc[start-1: end, :])\n #print(required_data)\n if required_data.shape[0] > 100:\n return Response(\"[]\", status=413, mimetype='application/json')\n required_data[\"timestamp\"] = pd.to_datetime(required_data[\"timestamp\"], format = '%d-%m-%Y:%S-%M-%H')\n required_data[\"timestamp\"] = required_data[\"timestamp\"].astype(str)\n response_data = required_data.to_json(orient = \"records\")\n return Response(response_data, status=200, mimetype='application/json')\n else:\n return Response('[]', status=204, mimetype='application/json')\n else:\n return Response('{}', status=405, mimetype='application/json')\n\n \n@app.route(\"/api/v1/categories/<category>/acts/size\", methods = [\"GET\"])\ndef count_acts(category = None):\n global count_requests\n count_requests += 1\n if request.method == 'GET':\n temp_path = DB + \"/\" + category\n if category not in os.listdir(DB):\n return Response('[]', status=400, mimetype='application/json')\n if os.path.exists(temp_path):\n data = pd.read_csv(GLOBAL_LIST)\n count = data[data.category == category].shape[0]\n return Response('[{0}]'.format(str(count)), status=200, mimetype='application/json')\n else:\n return Response('[]', status=204, mimetype='application/json')\n else:\n return Response('{}', status=405, mimetype='application/json')\n\n@app.route(\"/api/v1/acts/upvote\", methods = [\"POST\"])\ndef upvote():\n global count_requests\n count_requests += 1\n if request.method == 'POST':\n act_id = int(json.loads(request.data)[0])\n data_id = pd.read_csv(GLOBAL_LIST)\n if act_id not in data_id[\"act_id\"].tolist():\n return Response('[]', status=400, mimetype='application/json')\n\n category = data_id[data_id[\"act_id\"] == act_id][\"category\"].iloc[0]\n temp_path = DB + \"/\" + category + \"/\" + GLOBAL_LIST\n\n data = pd.read_csv(temp_path)\n data.set_index(\"actId\", inplace = True)\n data.at[act_id, \"upvotes\"] += 1\n data.reset_index(inplace = True)\n data.to_csv(temp_path,index = False)\n \n return Response(\"{}\", status=200, mimetype='application/json')\n \n else:\n return Response('{}', status=405, mimetype='application/json')\n\n@app.route(\"/api/v1/acts/<actId>\", methods = [\"DELETE\"])\ndef delete_act(actId = None):\n global count_requests\n count_requests += 1\n if request.method == 'DELETE':\n act_id = int(actId)\n data_id = pd.read_csv(GLOBAL_LIST)\n \n if act_id not in data_id[\"act_id\"].tolist():\n return Response('[]', status=400, mimetype='application/json')\n\n category = data_id[data_id[\"act_id\"] == act_id][\"category\"].iloc[0]\n temp_path = DB + \"/\" + category + \"/\" + GLOBAL_LIST\n\n data_id = data_id[data_id[\"act_id\"] != act_id]\n data_id.to_csv(GLOBAL_LIST, index = False)\n \n data = pd.read_csv(temp_path)\n data = data[data[\"actId\"] != act_id]\n data.to_csv(temp_path, index = False)\n\n os.remove(DB + \"/\" 
+ category + \"/\" + str(act_id) + \".png\")\n return Response(\"{}\", status=200, mimetype='application/json')\n \n else:\n return Response('{}', status=405, mimetype='application/json')\n\n\n \n# @app.route(\"/api/v1/categories/<category>/acts?start=<startrange>&end=<endrange>\", methods = [\"GET\"])\n# def range_acts(category = None, startrange = 0, endrange = 0):\n# if request.method == 'GET':\n# temp_path = DB + \"/\" + category + \"/\" + GLOBAL_LIST\n# if category not in os.listdir(DB):\n# return Response('[]', status=400, mimetype='application/json')\n# if os.path.exists(temp_path):\n# data = pd.read_csv(temp_path)\n# sorted_data = data.sort(columns = [\"timestamp\"], ascending = False)\n# rows = data.shape[0]\n# if startrange < 1 or endrange > rows:\n# return Response('[]', status=400, mimetype='application/json')\n# if rows == 0:\n# return Response('[]', status=204, mimetype='application/json')\n# else:\n# required_data = sorted_data.ix[startrange-1: endrange-1, :]\n# print(required_data)\n# if required_data.shape[0] > 100:\n# return Response(\"[]\", status=413, mimetype='application/json')\n# response_data = required_data.to_json(orient = \"records\")\n# return Response(response_data, status=200, mimetype='application/json')\n# else:\n# return Response('[]', status=204, mimetype='application/json')\n# else:\n# return Response('{}', status=405, mimetype='application/json')\n\n\n@app.route(\"/api/v1/acts\", methods = [\"POST\"])\ndef upload_act():\n global count_requests\n count_requests += 1\n if request.method == 'POST':\n if not os.path.exists(DB):\n os.makedirs(DB, exist_ok = True)\n \n request_data = json.loads(request.data.decode('utf-8'))\n\n if not GLOBAL_LIST in os.listdir():\n data = pd.DataFrame(columns = ['act_id', \"category\"])\n data.to_csv(GLOBAL_LIST, index = False)\n \n if not LOGIN_FILE_NAME in os.listdir():\n data = pd.DataFrame(columns = ['username', 'password'])\n data.to_csv(LOGIN_FILE_NAME, index = False)\n \n data_acts = pd.read_csv(GLOBAL_LIST)\n #data_users = pd.read_csv(LOGIN_FILE_NAME)\n # Username and actId\n header = {\"origin\": INSTANCE_IP}\n resp = r.get( \"http://\"+ IP + \"/api/v1/users\", \"{}\", headers = header)\n print(\"=============\")\n print(resp.text)\n print(\"=============\")\n data_users = eval(resp.text)\n if request_data['username'] not in data_users or request_data[\"actId\"] in data_acts[\"act_id\"].tolist():\n return Response('{}', status=400, mimetype='application/json')\n # Upvotes field\n if \"upvotes\" in request_data.keys():\n return Response('{}', status=400, mimetype='application/json')\n request_data['upvotes'] = 0\n # category name\n if request_data[\"categoryName\"] not in os.listdir(DB):\n return Response('{}', status=400, mimetype='application/json')\n # Date Validity\n if not validate(request_data[\"timestamp\"]):\n return Response('{}', status=400, mimetype='application/json')\n # Base64 validity\n try:\n base64.b64decode(request_data[\"imgB64\"])\n except binascii.Error:\n return Response('{}', status=400, mimetype='application/json')\n\n data_acts = data_acts.append({\"act_id\": int(request_data[\"actId\"]), \"category\": request_data[\"categoryName\"] }, ignore_index = True)\n data_acts.to_csv(GLOBAL_LIST, index = False)\n\n with open(DB + \"/\" + request_data[\"categoryName\"] + \"/\" +str(request_data[\"actId\"]) + \".png\", \"wb\") as fp:\n fp.write(base64.decodebytes(request_data[\"imgB64\"].encode()))\n\n temp_path = DB + \"/\" + request_data[\"categoryName\"] + \"/\" + GLOBAL_LIST\n if not GLOBAL_LIST in 
os.listdir(DB + \"/\" + request_data[\"categoryName\"]):\n data = pd.DataFrame(columns = list(request_data.keys()))\n data.to_csv(temp_path, index = False)\n\n data = pd.read_csv(temp_path)\n data = data.append(request_data, ignore_index = True)\n data.to_csv(temp_path, index = False)\n\n return Response('{}', status=201, mimetype='application/json')\n else:\n return Response('{}', status=405, mimetype='application/json')\n\n\n@app.route(\"/api/v1/acts/count\", methods = [\"GET\"])\ndef count_act():\n global count_requests\n count_requests += 1\n if request.method == 'GET':\n if not GLOBAL_LIST in os.listdir():\n return Response('[0]', status=200, mimetype='application/json')\n else:\n data_acts = pd.read_csv(GLOBAL_LIST)\n count_acts = data_acts.shape[0]\n return Response('['+ str(count_acts) +']', status=200, mimetype='application/json')\n else:\n return Response('{}', status=405, mimetype='application/json')\n\n@app.route(\"/api/v1/_count\", methods = [\"GET\", \"DELETE\"])\ndef count_request():\n global count_requests\n if request.method == 'GET':\n return Response('['+ str(count_requests) +']', status=200, mimetype='application/json')\n elif request.method == 'DELETE':\n count_requests = 0\n return Response('{}', status=200, mimetype='application/json')\n else:\n return Response('{}', status=405, mimetype='application/json')\n\n \nif __name__ == '__main__':\n app.run(host = '0.0.0.0', port = 80, threaded=True)\n #app.run(threaded = True, debug = True, port = 2000)\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.read_csv" ] ]
vishalbelsare/FEDOT
[ "3a6f06b29cf2f173008d119f7cb5dc705a45f695" ]
[ "cases/industrial/multivariate_forecasting.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom pylab import rcParams\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\n# Additional custom functions\nfrom cases.industrial.processing import multi_automl_fit_forecast, plot_results\nfrom fedot.core.constants import BEST_QUALITY_PRESET_NAME\nfrom fedot.core.data.multi_modal import prepare_multimodal_data\n\nrcParams['figure.figsize'] = 15, 7\n\nif __name__ == '__main__':\n # Below is an example of multivariate time series forecasting.\n # An example of how forecasts can be made is presented and a simple\n # validation is given on a single block which length is equal to the\n # length of the forecast horizon.\n\n # Define forecast horizon and read dataframe\n forecast_length = 20\n df = pd.read_csv('pw_dataset.csv', parse_dates=['datetime'])\n\n # Wrap time series data into InputData class\n features_to_use = ['wind_power_kWh', 'diesel_time_h', 'wind_time_h',\n 'velocity_max_msec', 'velocity_mean_msec', 'tmp_grad',\n 'diesel_fuel_kWh']\n ts = np.array(df['diesel_fuel_kWh'])\n mm_train, mm_test, = prepare_multimodal_data(dataframe=df,\n features=features_to_use,\n forecast_length=forecast_length)\n\n # Prepare parameters for algorithm launch\n # timeout 5 - means that AutoML algorithm will work for 5 minutes\n timeout = 0.5\n composer_params = {'max_depth': 6,\n 'max_arity': 3,\n 'pop_size': 20,\n 'num_of_generations': 20,\n 'preset': BEST_QUALITY_PRESET_NAME,\n 'metric': 'rmse',\n 'cv_folds': None,\n 'validation_blocks': None}\n forecast, obtained_pipeline = multi_automl_fit_forecast(mm_train, mm_test,\n timeout, composer_params,\n ts, forecast_length,\n vis=True)\n\n mse_metric = mean_squared_error(ts[-forecast_length:], forecast, squared=False)\n mae_metric = mean_absolute_error(ts[-forecast_length:], forecast)\n print(f'MAE - {mae_metric:.2f}')\n print(f'RMSE - {mse_metric:.2f}')\n\n # Save obtained pipeline\n obtained_pipeline.save('best')\n\n # Visualise predictions\n plot_results(actual_time_series=ts,\n predicted_values=forecast,\n len_train_data=len(ts) - forecast_length)\n" ]
[ [ "numpy.array", "pandas.read_csv", "sklearn.metrics.mean_squared_error", "sklearn.metrics.mean_absolute_error" ] ]
KiLJ4EdeN/CV_PYTHON
[ "95d17306d2af3ac596429639c1fee99cd4bbe263" ]
[ "CV_PYTHON/IMG_2.py" ]
[ "import cv2\r\nimport numpy as np\r\n\r\ngreen = np.uint8([[[255,0,0]]])\r\nhsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)\r\nprint(hsv_green)\r\n" ]
[ [ "numpy.uint8" ] ]
Agoric/testnet-notes
[ "4072ead3e4b0339a4ef2068a253975f460b1afe8" ]
[ "nb4/slogfiles.py" ]
[ "# -*- coding: utf-8 -*-\n# # How long does a Computron take?\n#\n# - [build model of computron\\-to\\-wallclock relationship · Issue \\#3459 · Agoric/agoric\\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459)\n\n# ## Preface: Python Data Tools\n#\n# See also [shell.nix](shell.nix).\n\n# +\nimport pandas as pd\nimport numpy as np\nimport sqlalchemy as sqla\nimport matplotlib.cm as cm\nimport dask\nimport dask.dataframe as dd\nimport dask.bag as db\n\ndict(pandas=pd.__version__,\n numpy=np.__version__,\n sqlalchemy=sqla.__version__,\n dask=dask.__version__)\n# -\n\n# ### Notebook / Scripting Authority\n#\n# As a nod to OCap discipline, we avoid ambient authority unless we're in a `TOP`-level scripting or notebook context.\n\nTOP = __name__ == '__main__'\n\n# Logging is a bit of an exception to OCap discipline, as is stderr.\n\n# +\nimport logging\nfrom sys import stderr\n\nlogging.basicConfig(level=logging.INFO, stream=stderr,\n format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n\nlog = logging.getLogger(__name__)\nif TOP:\n log.info('notebook start')\n# -\n\n# ### Dask Parallel Scheduler UI\n\n# +\nfrom dask.distributed import Client, LocalCluster\n\nif TOP:\n cluster = LocalCluster(n_workers=8)\n client = Client(cluster)\n\nTOP and client\n# -\n\n# ## Result Store\n\n# +\ndb4_uri = 'sqlite:///slog4.db'\n\nif TOP:\n db4 = sqla.create_engine(db4_uri)\n# -\n\n# ## SLog files\n#\n# [rclone support for Google drive](https://rclone.org/drive/)\n#\n# > This contains 564GB of data from 117 participants, spread across 172 slogfiles ...\n#\n# ```\n# [nix-shell:~/t4]$ rclone sync --progress 'Engineering:/2021-07-04 testnet phase4-stress data/validator slogfiles' ./slogfiles/\n# Transferred: 78.633G / 78.633 GBytes, 100%, 101.302 MBytes/s, ETA 0s\n# Checks: 5 / 5, 100%\n# Transferred: 182 / 182, 100%\n# Elapsed time: 13m16.0s\n# ```\n#\n\n# +\nimport importlib\nimport slogdata\nimportlib.reload(slogdata)\nfrom slogdata import SlogAccess, CLI, show_times\n\nif TOP:\n def _dir(path):\n import pathlib\n return pathlib.Path(path)\n def _cli(bin):\n from subprocess import run, Popen\n return CLI(bin, run, Popen, debug=True)\n _sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),\n _cli('/home/customer/projects/gztool/gztool'))\n\nTOP and show_times(_sa4.get_records('pathrocknetwork/chain-15.pathrocknetwork.slog.gz', 7721, 2))\n# -\n\n_bySize = _sa4.files_by_size()\n_bySize\n\n_bySize[_bySize.parent == 'KingSuper']\n\nTOP and _bySize[::5].set_index('name')[['st_size']].plot.barh(\n title='slogfile sizes (sample)',\n figsize=(10, 8));\n\n# ### random access with `gztool`\n#\n# [gztool](https://github.com/circulosmeos/gztool) `a03c5b4fd5b3` Jul 13 2021.\n#\n#\n# ```\n# ~/projects/gztool/gztool -C -e */*.slog.gz\n# ...\n# ERROR: Compressed data error in 'atlantean/atlantean-agorictest16-chain.slog.gz'.\n# ...\n# Index file 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi' already exists and will be used.\n# Processing 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gz' ...\n# Processing index to 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi'...\n#\n# 172 files processed\n# 1 files processed with errors!\n# ```\n\n# +\n# count lines on all slogfiles in parallel\n# TODO: if it's already in the DB, don't compute it again.\n\nif TOP:\n _withLines = _bySize.assign(\n lines=db.from_sequence(_bySize.values).map(\n lambda v: _sa4.line_count(*v[1:3])).compute())\n\nTOP and _withLines\n# -\n\n_withLines.to_sql('file_meta', db4, index=False, if_exists='replace')\n\n# !sqlite3 slog4.db '.header on' 
'.mode column' 'select * from file_meta limit 3'\n\n_withLines = pd.read_sql_table('file_meta', db4)\n\n\n# +\ndef file_chart(slogdf, sample=5, **plotkw):\n df = slogdf[['name', 'st_size', 'lines']].copy()\n df['b64'] = df.st_size / 64\n df.drop('st_size', axis=1, inplace=True)\n df.set_index('name')[::sample].plot.barh(**plotkw)\n\nTOP and file_chart(_withLines, title='slogfile sizes (sample)', figsize=(10, 8))\n# -\n\n# ## slogfile basics\n\npd.read_sql(\"\"\"\nselect st_size, lines\nfrom file_meta\norder by st_size desc\n\"\"\", db4).describe()\n\n\n# ## Runs, Blocks, and Deliveries\n#\n# > split each slogfile into runs (each beginning with an import-kernel event)\n\n# +\ndef partition_lines(lines, step=1000000):\n \"\"\"Note: line numbers are **1-based**\n \"\"\"\n lo = pd.DataFrame.from_records([\n dict(start=lo, qty=min(lines + 1 - lo, step), lines=lines)\n for lo in range(1, lines + 1, step)])\n return lo\n\npartition_lines(_withLines.lines.iloc[-1])\n\n\n# +\n#client.restart()\n\n# +\n# # !sqlite3 slog4.db 'drop table run'\n\n# +\ndef provide_table(engine, table, todo, chunksize=None, index=True):\n if sqla.inspect(engine).has_table(table):\n return pd.read_sql_table(table, engine, chunksize=chunksize)\n df = todo()\n df.to_sql(table, engine, index=index)\n return df\n\ndef runs_todo(withLines):\n runs = dd.from_delayed([\n dask.delayed(_sa4.provide_runs)(f.parent, f['name'], part.start, part.qty)\n for fid, f in withLines.iterrows()\n for _, part in partition_lines(f.lines).iterrows()\n ]).compute().sort_values(['file_id', 'line'])\n withNames = pd.merge(runs, withLines[['file_id', 'parent', 'name', 'st_size', 'lines']],\n on='file_id')\n # Compute end times\n byFile = withNames.groupby('file_id')\n runs = pd.concat([\n withNames,\n byFile.apply(lambda g: pd.DataFrame(dict(time_end=g.time.shift(-1)))),\n byFile.apply(lambda g: pd.DataFrame(dict(line_end=g.line.shift(-1)))),\n ], axis=1)\n runs.line_end = np.where(runs.line_end.isnull(), runs.lines, runs.line_end)\n return runs.sort_values(['st_size', 'file_id', 'line']).reset_index(drop=True)\n\n_runs = provide_table(db4, 'run', lambda: runs_todo(_withLines))\n# -\n\n# !sqlite3 slog4.db '.schema run'\n\nshow_times(_runs, ['time', 'time_end'])[['st_size', 'line', 'line_end', 'parent', 'file_id', 'time', 'time_end']]\n\n# ### runs per slogfile\n\ndf = _runs.groupby('file_id')[['line']].count()\ndf.describe()\n\n# +\ndf = pd.read_sql(\"\"\"\nselect file_id, count(*) runs, name, st_size, lines\nfrom run r\n-- join file_id s on s.\"index\" = r.slogfile\ngroup by file_id\norder by 2\n\"\"\", db4)\n\ndf.set_index('name')[['runs']][::5].plot.barh(\n log=True,\n title='slogfile runs (sample)',\n figsize=(10, 8));\n# -\n\n# ## agorictest-16 genesis: `2021-07-01 19:00:00`\n\ngen16 = show_times(pd.DataFrame(dict(blockHeight=64628, blockTime=[1625166000], ts=1625166000)), ['blockTime'])\ngen16\n\n# ## Block end start / finish events\n\n# +\nimport importlib\nimport slogdata\nfrom slogdata import SlogAccess\nimportlib.reload(slogdata)\n\n_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),\n _cli('/home/customer/projects/gztool/gztool'))\n\nshow_times(\n _sa4.provide_blocks('ChainodeTech', 'agorictest-16_chain.slog.gz', 1, 1000000)\n)\n\n\n# -\n\n# ## Separate runs by chain\n\n# +\ndef first_block(sa, run,\n head=5000,\n ts=gen16.ts[0]):\n log.info('1st block: %s/%s', run.parent, run['name'])\n qty = min(int(run.line_end) - run.line + 1, head)\n df = sa.get_blocks(f'{run.parent}/{run[\"name\"]}', run.line, qty)[:2]\n if not len(df):\n return 
pd.DataFrame.from_records([dict(\n blockHeight=-1,\n blockTime=-1,\n run=run.name,\n chain=np.nan)], index=[run.name])\n df = df.assign(run=run.name,\n chain=16 if df.blockTime[0] >= ts else 15)\n return df\n\nshow_times(first_block(_sa4, _runs.loc[0]))\n\n\n# +\ndef run2chain(sa, runs):\n df = runs.apply(lambda run: first_block(sa, run).iloc[0][['blockHeight', 'blockTime', 'chain']],\n axis=1)\n return df\n\n_r2c = run2chain(_sa4, _runs)\n_r2c\n# -\n\n_runchain = pd.concat([_runs.drop(columns=['index']), _r2c], axis=1)\n_runchain.to_sql('runchain', db4)\n_runchain.groupby('chain')[['line']].count()\n\n# !sqlite3 slog4.db '.header on' '.mode column' 'select * from runchain limit 3'\n\n_runchain = pd.read_sql('runchain', db4)\n_runchain.groupby('chain')[['line']].count()\n\n_runs['chain'] = _runchain.chain\n_runs.groupby('chain')[['file_id', 'lines']].count()\n\n\n# +\n# # !sqlite3 slog4.db 'drop table blockval;'\n\n# +\ndef blockval_todo(file_meta):\n return dd.from_delayed([\n dask.delayed(_sa4.provide_blocks)(f.parent, f['name'], part.start, part.qty)\n for fid, f in file_meta.iterrows()\n for _, part in partition_lines(f.lines).iterrows()\n ]).compute()\n\n_blockval = provide_table(db4, 'blockval', lambda: blockval_todo(_withLines), index=True)\nshow_times(_blockval)\n# -\n\n# !sqlite3 slog4.db '.schema blockval'\n\npd.read_sql(\"\"\"\nselect file_id, max(blockHeight)\nfrom blockval\nwhere blockTime >= 1625166000\ngroup by file_id\norder by 2 desc\n\"\"\", db4)\n\n# ### Consensus Block-to-Block Time\n\n# +\n# db4.execute(\"\"\"drop table if exists block\"\"\")\n# -\n\ndb4.execute(\"\"\"\ncreate table block as\n select distinct\n case when blockTime >= 1625166000 then 16 else 15 end chain\n , blockHeight, blockTime\n from blockval\n order by blockTime\n\"\"\")\npd.read_sql(\"\"\"\nselect * from block limit 10\n\"\"\", db4)\n\n# ### What is the range of blocks in `agorictest-16`?\n\npd.read_sql(\"\"\"\nselect lo, n, lo + n - 1, hi from (\nselect min(blockHeight) lo, max(blockHeight) hi, count(distinct blockHeight) n\nfrom block\nwhere chain = 16\n)\n\"\"\", db4)\n\n# +\nblk16 = pd.read_sql(\"\"\"\nselect blockHeight, blockTime\nfrom block\nwhere chain = 16\n\"\"\", db4, index_col='blockHeight')\n\nshow_times(blk16).describe(datetime_is_numeric=True)\n# -\n\nb16time = pd.read_sql(\"\"\"\nselect * from block\nwhere chain = 16\n\"\"\", db4, index_col='blockHeight')\nb16time['delta'] = b16time.shift(-1).blockTime - b16time.blockTime\nb16time[['delta']].describe()\n\nb16time[b16time.index < 90527].delta.max()\n\nb16time[b16time.delta == 120]\n\nb16time[['delta']].plot(\n title='agorictest-16 consensus blockTime delta',\n ylabel='sec',\n figsize=(9, 6));\n\nshow_times(b16time, ['blockTime']).set_index('blockTime')[['delta']].plot(\n title='agorictest-16 consensus blockTime delta',\n ylabel='sec',\n figsize=(9, 6));\n\n# histogram of block-to-block time delta for agorictest-16. 
(_Note the log scale on the y axis._)\n\nb16time[['delta']].hist(bins=20, log=True);\n\ndf = show_times(b16time, ['blockTime'])\ndf[df.blockTime <= '2021-07-02 19:00:00'][['delta']].hist(bins=20, log=True);\n\ndf[df.blockTime <= '2021-07-02 19:00:00'][['delta']].describe()\n\n# ### How many validators logged each block in agorictest-16?\n\ndf = pd.read_sql(\"\"\"\nselect blockHeight, count(distinct file_id) qty\nfrom blockval\nwhere sign = -1\nand blockTime >= 1625166000\ngroup by blockHeight\n\"\"\", db4)\ndf.head()\n\ndf.set_index('blockHeight').plot(title='agorictest-16 validator coverage by block', figsize=(9, 6));\n\n# !sqlite3 slog4.db '.schema run'\n\n# +\n# db4.execute('drop table if exists blockrun16')\ndb4.execute(\"\"\"\ncreate table blockrun16 as\nwith b as (\n select *\n from blockval\n where blockTime >= 1625166000\n)\nselect file_id\n , (select r.\"index\"\n from run r\n where r.file_id = b.file_id and r.line <= b.line and b.line < r.line_end) run\n , b.line, b.time\n , b.sign\n , blockHeight, blockTime\nfrom b\n\"\"\")\n\ndf = pd.read_sql(\"\"\"\nselect * from blockrun16\n\"\"\", db4)\n\ndf.tail()\n# -\n\nx = df.groupby('blockHeight')[['run']].count()\nx.plot();\n\nx['blockHeight'].sort_values('max').reset_index(drop=True).plot();\n\n# ## Slow Blocks\n\ndf = show_times(b16time, ['blockTime'])\ndf[(df.blockTime <= '2021-07-02 19:00:00') &\n (df.delta >= 30)]\n\n# Which runs include block 72712, which took 31 sec?\n\nb33 = pd.read_sql(\"\"\"\nselect lo.file_id, lo.run, lo.line, hi.line - lo.line + 1 range, lo.blockHeight\nfrom blockrun16 lo\njoin blockrun16 hi on hi.run = lo.run and hi.blockHeight = lo.blockHeight\nwhere lo.blockHeight in (72712)\nand lo.sign = -1\nand hi.sign = 1\n\"\"\", db4)\nb33\n\n# ## Correlating block start with block end\n\n_blockrun16 = df = pd.read_sql_table('blockrun16', db4)\ndf.tail()\n\nlo = df[df.sign == -1]\nhi = df.shift(-1)\nhi = hi[hi.sign == 1]\ndur = hi.time - lo.time\n# show_times(df, ['time', 'time_end'])\nlo['dur'] = dur\nlo['s_hi'] = hi.file_id\nlo['l_hi'] = hi.line\nlo['t_hi'] = hi.time\ndur = lo[lo.file_id == lo.s_hi]\nshow_times(dur, ['time', 'blockTime'])\n\nshow_times(\n dur.sort_values('dur').dropna().tail(),\n ['time', 'blockTime', 't_hi']\n)\n\ndur[dur.dur.abs() <= 120].plot.scatter(x='blockHeight', y='dur')\n\ndur[['blockHeight', 'dur']].describe()\n\n\n# ## Cranks in a Block\n\n# +\ndef long_runs_including(runs, blockrun, blockHeight):\n runs_matching = blockrun[blockrun.blockHeight == blockHeight].run\n runs = runs.assign(length=runs.line_end - runs.line)\n runs = runs[runs.index.isin(runs_matching)]\n return runs.sort_values('length', ascending=False)\n\n_long16 = long_runs_including(_runs, _blockrun16, 64628)\n_long16.head()\n# -\n\nshow_times(dur[dur.run == _long16.index[0]], ['time', 'blockTime', 't_hi'])\n\n_blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]\n\n\n# +\ndef blockrun_records(blockHeight, run, slogAccess, blockrun,\n target=None, include=None):\n ref = f'{run.parent}/{run[\"name\"]}'\n br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]\n block_start = br.iloc[0] # assert sign == -1?\n block_end = br.iloc[1]\n length = block_end.line - block_start.line + 1\n df = slogAccess.get_records(f'{run.parent}/{run[\"name\"]}', int(block_start.line), int(length),\n target=target, include=include)\n return df.assign(file_id=run.file_id)\n\ndef get_vats(slogAccess, ref, start, qty):\n df = slogAccess.get_records(ref, start, qty,\n 
target='create-vat',\n include=['create-vat'])\n return df\n\ndef vats_in_blockrun(blockHeight, run, slogAccess, blockrun):\n br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]\n block_start = br.iloc[0] # assert sign == -1?\n block_end = br.iloc[1]\n length = block_end.line - block_start.line + 1\n ref = f'{run.parent}/{run[\"name\"]}'\n df = get_vats(slogAccess, ref, int(block_start.line), int(length))\n return df.assign(blockHeight=blockHeight, parent=run.parent)\n\n# _sa4.get_records('Nodeasy.com/Nodeasy.com-agorictest15-chain.slog.gz', 1662497, 1671912 - 1662497)\nvats_in_blockrun(_blockrun16.iloc[0].blockHeight, _runs.loc[_long16.index[0]],\n _sa4, _blockrun16)\n# -\n\nvats_in_blockrun(64629, _runs.loc[_long16.index[0]],\n _sa4, _blockrun16)\n\n\nno_deliveries = pd.DataFrame.from_records([\n {'time': 1625198620.6265895,\n 'type': 'deliver-result',\n 'crankNum': 1291,\n 'vatID': 'v11',\n 'deliveryNum': 124,\n 'kd': object(),\n 'line': 1673077,\n 'dr': object(),\n 'syscalls': 2,\n 'method': 'inbound',\n 'compute': 119496.0, # missing compute is possible... from replay.\n 'dur': 0.1912224292755127,\n }]).iloc[:0]\nno_deliveries.dtypes\n\n# +\nimport json\nimport itertools\n\n# {\"time\":1625059432.2093444,\"type\":\"cosmic-swingset-end-block-start\",\"blockHeight\":58394,\"blockTime\":1625059394}\n# {\"time\":1625059432.2096362,\"type\":\"cosmic-swingset-end-block-finish\",\"blockHeight\":58394,\"blockTime\":1625059394}\n\n\ndef block_cranks(records):\n deliveries = []\n syscalls = 0\n deliver = None\n for record in records:\n ty = record['type']\n if ty == 'deliver':\n deliver = record\n syscalls = 0\n elif ty == 'syscall-result':\n syscalls += 1\n elif ty == 'deliver-result':\n if not deliver:\n log.warn('no deliver? 
%s', record)\n continue\n dur = record['time'] - deliver['time']\n method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None\n compute = record['dr'][2]['compute'] if type(record['dr'][2]) is type({}) else np.nan\n detail = dict(record,\n syscalls=syscalls,\n kd=deliver['kd'],\n method=method,\n compute=compute,\n dur=dur)\n deliveries.append(detail)\n if deliveries:\n return pd.DataFrame.from_records(deliveries)\n else:\n return no_deliveries\n\n\ndef get_deliveries(slogAccess, ref, start, qty):\n if qty <= 2: # just block start, block end\n return no_deliveries\n df = slogAccess.get_records(\n ref, int(start), int(qty),\n target=None, include=['deliver', 'deliver-result', 'syscall-result'])\n if len(df) > 0 and 'syscallNum' in df.columns:\n for c in ['syscallNum', 'ksr', 'vsr', 'vd']:\n df = df.drop(columns=list(set(df.columns) & set(['syscallNum', 'ksr', 'vsr', 'vd'])))\n return block_cranks(df.to_dict('records'))\n else:\n return no_deliveries\n\n_g16 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]\n_run1 = _runs.loc[_long16.index[0]]\n\nget_deliveries(_sa4, f'{_run1.parent}/{_run1[\"name\"]}', _g16.iloc[0].line, _g16.iloc[1].line - _g16.iloc[0].line + 1)\n# -\n\ndf = dur[dur.run == _long16.index[0]].assign(length=dur.l_hi - dur.line + 1)\n# df[df.length > 2].head(10)\ndf[df.dur > 5].head(10)\n\n\n# +\n# https://avi.im/blag/2021/fast-sqlite-inserts/\ndef run_sql(script, engine):\n for stmt in script.strip().split(';\\n'):\n engine.execute(stmt)\n\nrun_sql('''\nPRAGMA journal_mode = OFF;\nPRAGMA synchronous = 0;\nPRAGMA cache_size = 1000000;\nPRAGMA locking_mode = NORMAL;\nPRAGMA temp_store = MEMORY;\n''', db4)\n# -\n\nlen(dur)\n\ndur.to_sql('blockrun16dur', db4, if_exists='replace', chunksize=25000, index=False)\n\n# +\n_br2 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64632)].iloc[:2]\n\nget_deliveries(_sa4, f'{_run1.parent}/{_run1[\"name\"]}',\n _br2.iloc[0].line, _br2.iloc[1].line - _br2.iloc[0].line + 1)\n\n# +\n# chain_id, vatID, deliveryNum -> blockHeight, kd, compute\nimport inspect\n\ndef provide_deliveries(slogAccess, blockHeight, run, blockrun):\n br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]\n if len(br) < 2:\n return no_deliveries.assign(file_id=-1, chain=-1, blockHeight=blockHeight, run=run.name)\n block_start = br.iloc[0] # assert sign == -1?\n block_end = br.iloc[1]\n length = int(block_end.line - block_start.line + 1)\n df = slogAccess.provide_data(run.parent, run['name'], int(block_start.line), length,\n f'deliveries-{blockHeight}', no_deliveries,\n lambda ref, start, qty: get_deliveries(slogAccess, ref, start, qty),\n 'gzip')\n df = df.assign(chain=run.chain, blockHeight=blockHeight, run=run.name)\n if df.dtypes['chain'] not in ['int64', 'float64'] or 'vatID' not in df.columns or 'vd' in df.columns:\n raise NotImplementedError(f'cols: {df.columns} dtypes: {df.dtypes} block {blockHeight, int(block_start.line)}, run\\n{run}')\n return df\n\ndf = provide_deliveries(_sa4, 66371, _run1, _blockrun16)\n\nshow_times(df)\n# -\n\n# Computron rate for just this one block?\n\ndf.compute.sum() / df.dur.sum()\n\n# test empty\nprovide_deliveries(_sa4, 64629, _run1, _blockrun16)\n\n_runs.loc[455:456]\n\n# ## Cranks in one long run starting at agorictest-16 genesis\n\ngen16\n\ndf = pd.read_sql(\"\"\"\nwith lo as (\n select *\n , time - blockTime delta\n from blockrun16\n where blockHeight = 64628\n and blockTime = 1625166000\n and sign = -1\n 
and run is not null\n), hi as (\n select run, max(blockHeight) hi, max(blockTime) t_hi\n from blockrun16\n where run is not null\n and sign = -1\n group by run\n), agg as (\n select lo.*, hi.hi, hi.t_hi\n from lo join hi on lo.run = hi.run\n where abs(delta) < 7\n order by hi.t_hi desc\n)\nselect agg.*, run.parent, run.name\nfrom agg\njoin run on agg.run = run.\"index\"\nlimit 5\n\"\"\", db4)\nshow_times(df, ['time', 'blockTime', 't_hi'])\n\nshow_times(_runs).loc[445]\n\n# +\nimport json\n\n\ndef run1_deliveries(con, sa, lo, hi, run, br,\n json_cols=['kd', 'dr'],\n table='run1'):\n if sqla.inspect(con).has_table(table):\n lo = pd.read_sql(f'select max(blockHeight) + 1 lo from {table}', con).iloc[0].lo\n if_exists = 'append'\n else:\n if_exists = 'replace'\n for blockHeight in range(lo, hi):\n df = provide_deliveries(sa, blockHeight, run, br)\n if not len(df):\n # log.info('block %d: no deliveries', blockHeight)\n continue\n for col in json_cols:\n df[col] = df[col].apply(json.dumps)\n log.info('block %d of %d: %s += %d rows', blockHeight, hi, table, len(df))\n df.to_sql(table, con, if_exists=if_exists, index=False)\n if_exists = 'append'\n\n\nrun1_deliveries(db4, _sa4, 64628, 75000, _runs.loc[445], _blockrun16)\n# run1_deliveries(db4, _sa4, 75000, 90530, _runs.loc[445], _blockrun16, table='run1b')\n# -\n\n_run1 = df = pd.read_sql('select * from run1 union all select * from run1b', db4)\nshow_times(_run1.tail(3))\n\n_run1.blockHeight.describe()\n\n_run1[_run1.blockHeight >= 88296 - 2].sort_values('blockHeight').head(30).drop(columns=['kd', 'dr', 'file_id'])\n\ndf = _run1[_run1.blockHeight == 88295].sort_values('dur', ascending=False).drop(columns=['kd', 'dr', 'file_id'])\ndf.head(10)\n\ndf[df.dur >= 1]\n\n# TODO: compare `getPayout` here (in 88295) vs something earlier... same computrons? same duration?\n#\n# e.g. 
if harden weakset grew, the duration could grow while keeping computrons constant\n\n_run1[_run1.method == 'getPayout'][['compute', 'dur']].describe()\n\n_run1[_run1.method == 'getPayout'].compute.hist()\n\n_run1[(_run1.method == 'getPayout') & (_run1.compute == 31654)].plot.scatter(x='blockHeight', y='dur')\n\nlg = _run1[_run1.blockHeight > 76000]\nlg = lg[lg.dur < 1]\nlg[(lg.method == 'getPayout') & (lg.compute == 31654)].plot.scatter(x='blockHeight', y='dur')\n\n# Things got slower over time.\n#\n# Hypothesis: GC didn't happen -> weak set got big -> weakset access time got big\n\n# So computron model should not be based on this range, but rather on pre-loadgen time.\n\n# When looking at comptron / wallclock, we should look at:\n#\n# - all getCurrentAmount calls\n# - within a narrow range of blockHeight\n# - that all use the same # of computrons\n#\n# (as above)\n#\n\nb16time[b16time.delta == 224]\n\n_run1[['compute', 'dur']].describe()\n\n\n# +\ndef drate(df):\n rate = df.compute / (df.syscalls + 1) / df.dur\n # rate = df.compute / df.dur\n return df.assign(rate=rate)\n\ndf = drate(_run1).groupby('method')[['rate']].aggregate(['count', 'mean', 'std', 'max'])\ndf = df.sort_values(('rate', 'mean'), ascending=False)\ndf\n# -\n\ncommon = _run1.groupby('method')[['line']].count()\ncommon = common[common.line > 20]\ncommon\n\ndrate(_run1[_run1.method.isin(common.index)])[['method', 'rate']].boxplot(by='method', rot=90, figsize=(20, 12))\n\ncommon.sort_values('line', ascending=False).head()\n\n_run1.blockHeight.describe()\n\n_run1.sort_values('dur', ascending=False)\n\n\n# This is an always-busy sim, but **TODO** we'd like to look at the arrival pattern that we have.\n\n# +\ndef sim(df, c_eg, dur_eg, target):\n df = df[df.chain == 16]\n df['running'] = df.compute.cumsum() # try exp\n threshold = target * (c_eg / dur_eg)\n log.info('threshold: %s', threshold)\n df['sim_blk'] = (df.running / threshold).round()\n # df['adj'] = df.sim_blk - df.blockHeight\n return df.reset_index(drop=True)\n\ndf = _run1.drop(columns=['type', 'kd', 'dr', 'file_id', 'line', 'run'])\n# df = df[df.method != 'executeContract']\n# df = df[df.method == 'getCurrentAmount'] # getPayout\n\n# df.blockHeight = df.blockHeight - df.blockHeight.iloc[0]\ndf = sim(df, 48390.0, 0.074363, 5)\ndf = df[df.sim_blk.notnull()]\ndf.sim_blk = df.sim_blk.astype('int64')\nshow_times(df)\n# -\n\npd.read_sql('''\nselect count(distinct run)\nfrom blockrun16\n''', db4)\n\nlen(_runs)\n\n\n# +\ndef nth_block(sa, blockHeight, run, blockrun,\n ts=gen16.ts[0]):\n log.info('%d th block: %s/%s', blockHeight, run.parent, run['name'])\n br = blockrun[(blockrun.blockHeight == blockHeight) & (blockrun.run == run.name)]\n df = provide_deliveries(sa, blockHeight, run, br)\n if not len(df):\n return df\n df = df.assign(run=run.name, chain=run.chain)\n return df\n\n\nm1b1 = pd.concat(\n df\n for _, run in _runs.iterrows()\n for df in [nth_block(_sa4, 80001, run, _blockrun16)]\n if len(df)\n)\nm1b1\n# -\n\nm1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]\n\ndf = m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]\ndf.describe()\n\n# ## Validator speed: 2-4x spread for `getCurrentAmount`\n\ndf[['dur']].hist()\n\n# +\n# df.groupby('method')[['compute']].describe().loc['executeContract']\n# -\n\ndf.compute.hist(log=True);\n\ndf.dur.hist(log=True);\n\ndf[df.dur < .1].dur.hist()\n\n# #### Total delivery duration per block\n\nx = pd.concat([\n 
df.groupby('blockHeight')[['dur']].sum(),\n df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim')),\n], axis=1)\nx.hist(); # log=True);\n\nx.describe()\n\nx.dur.quantile(.9)\n\nxx = df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim'))\n\nxx[xx.dur_sim > 25]\n\ndf[df.blockHeight == 88295].sort_values('dur', ascending=False)\n\ndf[df.sim_blk == 32607].sort_values('dur', ascending=False)\n\n_run1[_run1.compute == 381240].dur.describe()\n\n_run1[_run1.compute == 381240].plot.scatter(x='blockHeight', y='dur')\n\n# This wasn't a big deal during most of the chain (.25sec 75th percentile).\n#\n# We could model this within 2x or 3x by ignoring the spike.\n\n# **TODO**: what happened during that spike? is it consensus-observable? kernel-observable?\n\ndf = _run1[_run1.compute == 381240]\ndf[(df.blockHeight >= 88100) & (df.blockHeight < 88400)].plot.scatter(x='blockHeight', y='dur')\n\ndf[df.sim_blk == 32607].compute.sum()\n\ndf[df.sim_blk == 32607].dur.sum()\n\ndf[df.sim_blk == 32607].syscalls.sum()\n\ndf.groupby('blockHeight')[['syscalls']].sum().describe()\n\n# #### Total compute per block\n\nx = pd.concat([\n df.groupby('blockHeight')[['compute']].sum(),\n df.groupby('sim_blk')[['compute']].sum().rename(columns=dict(compute='cmp_sim')),\n], axis=1)\nx.hist(log=True);\n\nx.describe()\n\ncluster.scale(8)\n\nclient.restart()\n\nf'{12:04}'\n\n\n# +\ndef pick_chain(ht,\n gen=1625166000, hi=16, lo=15):\n return np.where(ht > gen, hi, lo)\n\n\ndef run_deliveries(slogs, sa, run):\n chain_id = f'agorictest-{run.chain}'\n blocks = pd.concat(\n pd.read_csv(blockFile)\n for blockFile in (slogs / run.parent).glob('*-blocks.csv')\n )\n blocks = blocks[(blocks.line >= run.line) &\n (blocks.line < run.line_end)]\n blocks = blocks.assign(run=run.name)\n heights = blocks.blockHeight.unique()\n log.info('run %s %-3d blocks %.16s %s', run.name, len(heights),\n pd.to_datetime(run.time, unit='s'), run['name'])\n tot = 0\n for blockHeight in heights:\n detail = provide_deliveries(sa, blockHeight, run, blocks)\n if not len(detail):\n continue\n tot += len(detail)\n yield detail\n if not tot:\n yield no_deliveries.assign(file_id=-1, chain=-1, blockHeight=-1, run=run.name)\n\n\ndef by_vat(dest, run, detail):\n chain_id = f'agorictest-{run.chain}'\n run_detail = f'{run.name:04}-{run.parent}-{run.file_id}-{run.line}'\n for vatID, g in detail.groupby('vatID'):\n try:\n (dest / chain_id / vatID).mkdir(parents=True)\n except:\n pass\n vat_dir = dest / chain_id / vatID\n f = vat_dir / f'delivery-detail-{run_detail}.csv.gz'\n log.info('saving to %s:\\n%s', f, g.set_index(['vatID', 'deliveryNum'])[['compute', 'dur']].tail(3))\n g.to_csv(f, index=False)\n f = vat_dir / f'delivery-summary-{run_detail}.csv.gz'\n g[['vatID', 'deliveryNum', 'kd', 'syscalls', 'compute']].to_csv(f, index=False)\n return detail.assign(run=run.name).groupby(['run', 'vatID'])[['deliveryNum']].count()\n\n#by_vat(_dir('slogfiles/'), _dir('vat-details/'), _sa4, _runs)\n\nfor df in run_deliveries(_dir('slogfiles/'), _sa4, _runs.loc[58]):\n print(df)\n print(by_vat(_dir('vat-details/'), _runs.loc[58], df))\n break\n\n\n# +\ndef run_deliveries_todo(sa, slogs, dest, runs):\n def do_run(run):\n df = pd.concat(\n detail\n for detail in run_deliveries(slogs, sa, run)\n )\n return by_vat(dest, run, df)\n todo = (\n dask.delayed(do_run)(run)\n for _, run in runs.iterrows()\n )\n return todo\n\nper_run = dd.from_delayed(run_deliveries_todo(_sa4, _dir('slogfiles/'), _dir('vat-details/'), _runs))\nper_run.compute()\n# 
-\n\npd.to_datetime(1625213913.1672082, unit='s')\n\n# +\nimport inspect\nfrom slogdata import show_times\n\ndb4.execute('drop table if exists crankrun') #@@\n\ndef deliveries_todo(sa, blockrun, runs):\n todo = (\n dask.delayed(provide_deliveries)(sa, blockHeight, run,\n blockrun[(blockrun.run == run.name) &\n (blockrun.blockHeight == blockHeight)])\n for run_ix, run in runs.iterrows()\n for heights in [blockrun[blockrun.run == run_ix].blockHeight.unique()]\n for _ in [log.info('run %s %-3d blocks %.16s %s', run_ix, len(heights),\n pd.to_datetime(run.time, unit='s'), run['name'])]\n for blockHeight in heights\n )\n log.info('todo: %s', type(todo))\n df = dd.from_delayed(todo,\n meta=no_deliveries.assign(file_id=1, chain=1, blockHeight=1, run=1))\n return df.compute()\n\n# _dr16 = provide_table(\n# db4, 'crankrun',\n# # 65517\n# lambda: deliveries_todo(_sa4, _blockrun16[_blockrun16.blockHeight <= 65000], _runs.loc[200:275]))\n\n_dr16 = deliveries_todo(_sa4, _blockrun16, # [_blockrun16.blockHeight <= 65000]\n _runs[_runs.chain == 16])\n\n_dr16\n# -\n\n# ## deliveries from batch\n\n_delrun = pd.read_sql('select * from delrun', db4)\n_delrun.groupby('chain')[['line']].count()\n\n\n# ## Are compute meter values consistent?\n\n# +\ndef compute_meter_consistent(df):\n compute_count = df.groupby(['vatID', 'deliveryNum'])[['compute']].nunique()\n dups = compute_count[compute_count['compute'] > 1]\n return pd.merge(dups.reset_index(),\n df[['run', 'vatID', 'deliveryNum', 'compute']],\n how='left', suffixes=['_dup', ''],\n left_on=['vatID', 'deliveryNum'],\n right_on=['vatID', 'deliveryNum'])\n\n# x = compute_meter_consistent(_alld16).compute()\nx = compute_meter_consistent(_delrun[_delrun.chain == 16]).sort_values(['vatID', 'deliveryNum']) # .compute()\nx\n# -\n\ncompute_meter_consistent(_delrun[_delrun.chain == 15]).sort_values(['vatID', 'deliveryNum']) # .compute()\n\n# ## Computrons per block\n\nblockdel = _delrun[_delrun.method != 'executeContract']\nkey = ['chain', 'blockHeight', 'vatID', 'deliveryNum', 'compute']\nblockdel = blockdel.sort_values(key).drop_duplicates()\ndf = blockdel.groupby(['chain', 'blockHeight'])[['deliveryNum']].count().sort_index()\ndf.plot()\n\n_bkcomp = df = blockdel.groupby(['chain', 'blockHeight'])[['compute']].sum()\ndf\n\ndf.plot()\n\n\n# +\ndef type2sign(df):\n df['sign'] = np.where(df.type == 'cosmic-swingset-end-block-start', -1, 1)\n return df\n\ndef byChain(df, gen=gen16.ts[0], hi=16, lo=15):\n return df.assign(chain=np.where(df.blockTime >= gen, hi, lo))\n return df\n\ndef slog_blocks(slogfiles,\n pattern='**/*-blocks.csv'):\n df = pd.concat(type2sign(pd.read_csv(p)[['type', 'blockHeight', 'blockTime']])\n for p in slogfiles.glob(pattern))\n df = byChain(df)\n key = ['chain', 'blockHeight', 'blockTime']\n df = df[key].sort_values(key).drop_duplicates()\n return df.reset_index(drop=True)\n\n_blk = slog_blocks(_dir('slogfiles/')) \n_blk.tail()\n# -\n\n_byChain = _blk.groupby('chain')\ndf = pd.merge(\n _byChain[['blockHeight']].nunique(),\n _byChain[['blockHeight']].aggregate(['min', 'max'])['blockHeight'],\n left_index=True, right_index=True,\n)\ndf['span'] = df['max'] - df['min'] + 1\ndf\n\n\n# +\ndef blockdur(df):\n df = df.set_index(['chain', 'blockHeight'])\n df['dur'] = df.shift(-1).blockTime - df.blockTime\n return df\n\n_bkdur = blockdur(_blk)\n_bkdur\n# -\n\n# compute by block with duration\n_bkcmpdur = _bkcomp.join(_bkdur, lsuffix='_d', rsuffix='_b')\n_bkcmpdur['rate'] = (_bkcmpdur.compute / 
_bkcmpdur.dur).astype(float)\n_bkcmpdur\n\n_bkcmpdur[_bkcmpdur.dur > _bkcmpdur.dur.quantile(0.99)]\n\ndf = _bkcmpdur.loc[16]\ndf[df.dur < 8][['rate']].hist(log=True)\n\n_bkcmpdur[_bkcmpdur.dur < 8][['rate']].describe()\n\n# ## simulation\n\n_delrun.groupby('run')[['line']].count()\n\n_delrun[['crankNum', 'run']].groupby(['crankNum'])[['run']].aggregate(['count']).plot()\n\n\n# +\ndef sim(df, percentile):\n df = df[df.chain == 16]\n df = df[df.method != 'executeContract']\n key = ['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']\n df = df.groupby(key)[['dur']].aggregate(['count', 'mean', 'median', 'sum'])\n return df\n df = df[['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']].sort_values(\n ['blockHeight', 'crankNum', 'vatID', 'deliveryNum']).drop_duplicates()\n threshold = df.compute.quantile(percentile)\n df['running'] = df.compute.cumsum()\n df['sim_block'] = (df.running / threshold).round()\n return df.reset_index(drop=True)\n\ndf = sim(_run1, .99)\ndf\n# -\n\ndf[['blockHeight']].plot()\n\ndf.set_index('blockHeight')[['sim_block']].plot()\n\n# ## Compute rate by vat\n\nplt.cm.rainbow[1]\n\npd.Categorical(_delrun.method.dropna(), ordered=True)\n\n# +\nimport matplotlib as plt\n\ndef cmap_of(df, color,\n cmap=plt.cm.get_cmap('hot')):\n df = df.loc[:, [color]].fillna('???')\n byColor = df.groupby(color).count() #.set_index(color)\n byColor['unit'] = range(len(byColor))\n byColor.unit = byColor.unit / len(byColor)\n byColor['color'] = byColor.unit.apply(cmap)\n return byColor.loc[df[color]].color\n\ncmap_of(_delrun, 'method')\n\n\n# +\ndef vat_rate(df, vatID):\n df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()\n df['rate'] = df.compute / df.dur\n df = df[df.vatID == vatID]\n # df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()\n #df.sort_values('dur', ascending=False)\n #df\n df = df.set_index('deliveryNum').sort_index()\n return df\n\ndef show_rate(df, vatID, figsize=(8, 9)):\n df = vat_rate(df, vatID)\n ax = df.plot(subplots=True, figsize=figsize)\n \ndef fit_line(df, x, y, color=None, figsize=(9, 6)):\n df = df[~df[x].isnull() & ~df[y].isnull()]\n cs = np.polyfit(df[x], df[y], 1)\n f = np.poly1d(cs)\n if color:\n color = cmap_of(df, color)\n ax1 = df[[x, y]].plot.scatter(x=x, y=y, color=color, figsize=figsize)\n df['fit'] = f(df[x])\n df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);\n\n\n# show_rate(start1, 'v10');\n# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')\n# fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']\n# fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')\n# len(fastSlog[fastSlog.vatID == 'v10'])\n# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)\n#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)\n\nfit_line(_delrun[_delrun.chain == 16], 'compute', 'dur', color='method')\n# -\n\n_r = _delrun[['compute', 'dur', 'method']].assign(rate=_delrun.compute / _delrun.dur)\n_r.groupby('method')[['rate']].describe().sort_values(('rate', 'mean'))\n\ndf.sort_values(('compute', 'mean'))\n\ndf = fastSlog[fastSlog.vatID == 'v10']\ndf['rate'] = df.compute / df.dur\ndf[['deliveryNum', 'dur', 'compute', 'rate']].set_index('deliveryNum').plot(subplots=True)\n\ndf.rate.describe()\n\n# ### exclude dynamic vat creation\n\nfastSlog.groupby('method')[['compute']].mean().plot.barh(log=True, figsize=(12, 10))\n\nnoContract = df =fastSlog[fastSlog.method != 
'executeContract'].copy()\ndf['rate'] = df.compute / df.dur\ndf[['dur', 'compute', 'rate']].plot(subplots=True)\n\nfit_line(noContract, 'compute', 'dur')\n\nfit_line(fastSlog, 'compute', 'dur')\n\n# ## Add syscalls to the model\n\ndf = noContract\ncs = np.polyfit(df[['compute', 'syscalls']], df['dur'], 1)\n\ndf = _dr16.assign(chain_id=16)\ndf = df[['chain_id', 'vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']].drop_duplicates()\ndf = df.set_index(['chain_id', 'vatID', 'deliveryNum']).sort_index()\ndf[df.index.duplicated()]\ndf\n\ndf.loc[16].loc['v1'].loc[0]\n\n_dr16.query('(deliveryNum == 0) & (vatID == \"v1\")').groupby('compute')[['line']].count()\n\npd.merge(_dr16,\n df[df.index.duplicated()].reset_index()[['vatID', 'deliveryNum']],\n left_on=['vatID', 'deliveryNum'], right_on=['vatID', 'deliveryNum']\n )[['vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']]\n# _dr16.assign(chain_id=16).set_index(['chain_id', 'vatID', 'deliveryNum'])\n\n\ndall = pd.concat(\n pd.read_csv(f)\n for f in _dir('slogfiles/').glob('**/*-deliveries-*.csv.gz')\n)\ndall\n\n\n# +\ndef load_deliveries(files, con, table):\n if_exists = 'replace'\n for file in files:\n df = pd.read_csv(file)\n df.to_sql(table, con, if_exists=if_exists)\n if_exists = 'append'\n log.info('loaded %d records from %s', len(df), file)\n\nload_deliveries(\n _dir('slogfiles/').glob('**/*-deliveries-*.csv.gz'),\n db4,\n 'delrun3')\n# -\n\n# ### Did we ever do more than 1000 cranks in a block?\n#\n# if not, current policy never fired\n\ndf = _dr16[['blockHeight', 'crankNum']].drop_duplicates()\ndf.groupby('blockHeight')[['crankNum']].count().sort_values('crankNum', ascending=False)\n\n# ## @@ Older approaches\n\n# ## Delivery statistics\n#\n# > For each delivery in the corpus, we want to get statistics on the range of wallclock times taken by these validators.\n\n# +\nimport gzip\nimport itertools\n\n\ndef iter_cranks(path):\n \"\"\"split each slogfile into runs (each beginning with an import-kernel event),\n process each run by finding sequential matching deliver+deliver-result pairs,\n turn each pair into a (crankNum, computrons, wallclock) triple\n \"\"\"\n log.info('iter_cranks: %s', path)\n with gzip.open(path) as f:\n kernel = None\n deliver = None\n block = None\n syscalls = None\n for (ix, line) in enumerate(f):\n try:\n data = json.loads(line)\n except json.JSONDecodeError:\n log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))\n continue\n ty = data['type']\n # print(ix, data['type'], kernel, deliver)\n if ty == 'import-kernel-finish':\n kernel = data\n deliver = None\n syscalls = None\n yield dict(kernel,\n slogfile=path.name, line=ix)\n elif ty == 'create-vat':\n yield dict(slogfile=path.name,\n line=ix,\n time=data['time'],\n type=ty,\n vatID=data['vatID'],\n description=data['description'],\n managerType=data['managerType'],\n time_kernel=kernel['time'])\n# {\"time\":1625059432.2093444,\"type\":\"cosmic-swingset-end-block-start\",\"blockHeight\":58394,\"blockTime\":1625059394}\n# {\"time\":1625059432.2096362,\"type\":\"cosmic-swingset-end-block-finish\",\"blockHeight\":58394,\"blockTime\":1625059394}\n elif ty == 'cosmic-swingset-end-block-start':\n block = data\n elif ty == 'cosmic-swingset-end-block-finish':\n time = data['time']\n time_start = block['time']\n dur = time - time_start\n if kernel:\n time_kernel = kernel['time']\n else:\n log.warning('%s:%d: missing kernel context', path.name, ix)\n time_kernel = np.nan\n yield dict(slogfile=path.name,\n line=ix,\n time=time,\n type=ty,\n 
time_start=time_start,\n dur=dur,\n blockHeight=data['blockHeight'],\n blockTime=data['blockTime'],\n time_kernel=time_kernel)\n block = None\n elif deliver is None:\n if ty == 'deliver':\n deliver = data\n syscalls = 0\n elif data['type'] == 'deliver-result':\n time = data['time']\n time_start = deliver['time']\n dur = time - time_start\n method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None\n compute = data['dr'][2]['compute'] if type(data['dr'][2]) is type({}) else None\n if block:\n blockHeight = block['blockHeight']\n blockTime=block['blockTime']\n else:\n # odd... how do we get here without block info???\n log.warning('%s:%d: missing block context', path.name, ix)\n blockHeight = blockTime = np.nan\n if kernel:\n time_kernel = kernel['time']\n else:\n log.warning('%s:%d: missing kernel context', path.name, ix)\n time_kernel = np.nan\n yield dict(slogfile=path.name,\n line=ix,\n time=time,\n type=ty,\n crankNum=data['crankNum'],\n deliveryNum=data['deliveryNum'],\n vatID=data['vatID'],\n kd=deliver['kd'],\n method=method,\n syscalls=syscalls,\n dr=data['dr'],\n compute=compute,\n time_start=time_start,\n dur=dur,\n blockHeight=blockHeight,\n blockTime=blockTime,\n time_kernel=time_kernel)\n deliver = None\n elif ty == 'syscall-result':\n syscalls += 1\n elif ty in ['clist', 'syscall']:\n continue\n else:\n log.warning(\"%s:%d: expected deliver-result; got: %s\", path.name, ix, ty)\n deliver = None\n\n\ndef sample(files=50, cranks=2000, slogdir=slogdir):\n return pd.DataFrame.from_records(\n r\n for slogfile in itertools.islice(slogdir.glob('**/*.slog.gz'), files)\n for r in itertools.islice(iter_cranks(slogfile), cranks))\n\n# files_top = sample(200, 100)\nc500 = sample()\n# -\n\nshow_times(\nfiles_top[files_top.crankNum == 1][[\n 'slogfile', 'line', 'time', 'vatID', 'deliveryNum', 'syscalls', 'compute', 'time_kernel', 'blockHeight']\n].sort_values('blockHeight').set_index(['slogfile', 'line']),\n ['time'])\n\n\n# +\ndef show_times(df, cols):\n out = df.copy()\n for col in cols:\n out[col] = pd.to_datetime(out[col], unit='s')\n return out\n\ndef slogfile_summary(df):\n g = df.groupby(['slogfile', 'type'])\n out = g[['line']].count()\n out['time_min'] = g[['time']].min().time\n out['time_max'] = g[['time']].max().time\n out['blockHeight_min'] = g[['blockHeight']].min().blockHeight\n # out['blockHeight_max'] = g[['blockHeight']].max().blockHeight\n out['crankNum_min'] = g[['crankNum']].min().crankNum\n return show_times(out, ['time_min', 'time_max'])\n\nslogfile_summary(files_top) # [files_top.type == 'deliver-result']).sort_values('crankNum_min', ascending=False).head(15)\n\n\n# +\ndef stuff(df, slogfile):\n return df[(df.slogfile==slogfile) &\n (df.type == 'deliver-result')][['crankNum', 'vatID', 'deliveryNum', 'kd', 'line', 'blockHeight' ]]\n\n\ncoolex = stuff(c500, 'coolex-agorictest16-chain.slog.gz').set_index('crankNum')\nmym = stuff(c500, 'mymoniker-agorictest16-chain.slog.gz').set_index('crankNum')\nxwalk = pd.merge(coolex, mym, left_index=True, right_index=True)\nxwalk[xwalk.kd_x != xwalk.kd_y]\n# -\n\nxwalk[xwalk.deliveryNum_y == 2801].kd_y.iloc[0]\n\n# warner says: suppose we have 2 deliverInboundAcks\n#\n# when swingset tells mb device, device consults state _in RAM_ for dup ack num...\n# not durable... 
differs between run-from-start and restart\n\n# ## global crankNum -> vatID, deliveryNum\n\ncranks = c500[c500['type'] == 'deliver-result']\ncranks = cranks[['chain_id', 'crankNum', 'vatID', 'deliveryNum']].set_index(['chain_id', 'crankNum']).drop_duplicates().sort_index()\ncranks # .sort_values('deliveryNum')\n\nc500 = c500[~c500.line.isnull()]\nshow_times(c500[c500.blockHeight == 64628], ['time', 'time_start', 'blockTime'])\n\ncranks.pivot(columns='vatID', values='deliveryNum')\n\ncranks.plot(subplots=True)\n\nc500[['kd']].dropna()\n\nc500[['compute']].dropna()\n\n# +\n## reduced data set\n\n# chain-wide deliveries\n# chain_id, crankNum -> blockHeight, vatID, deliveryNum, kd, compute\n\n# chain_id, vatID, deliveryNum -> blockHeight, kd, compute\n# except vatTP?\n\n# per-validator data\n# chain_id, crankNum, run (slogfile, kernel-start) -> dur\n\n\n# +\n# global crankNum -> vatID, deliveryNum\n\nc500[['crankNum', 'vatID', 'deliveryNum']].set_index()\n\n# ignore un-full blocks?\n# histogram of block durations; interval between...\n# {\"time\":1625059432.2093444,\"type\":\"cosmic-swingset-end-block-start\",\"blockHeight\":58394,\"blockTime\":1625059394}\n# {\"time\":1625059432.2096362,\"type\":\"cosmic-swingset-end-block-finish\",\"blockHeight\":58394,\"blockTime\":1625059394}\n\n# \"blockTime\":1625059381 <- consensus block time is median of block times (?) \n\n\n# vatID, deliveryNum -> args / syscalls\n# watch out for GC esp.\n\n# c.run(runPolicy)\n# simple model: kernel says how many computrons\n# refinement: computrons, syscalls\n\n# fitness: block distribution... 10s blocks...\n# blocks that aren't too big (latency, validator variance risk)\n# cpu that isn't idle (throughput)\n# an ideal: median block time 10s\n# 80 20 %ile\n\n\n# importing a contract is an outlier\n\n\n# +\n# median validator - existing distribution of deliveries / compute -> blocks\n# supplement: study wallclock stuff\n# -\n\nshow_times(c500[c500['type'] == 'deliver-result'].set_index(['crankNum', 'vatID', 'deliveryNum', 'slogfile'])\n .drop(['type', 'kd', 'dr', 'time_dr', 'description', 'managerType'], axis=1).sort_index(),\n ['time', 'time_kernel', 'blockTime'])\n\n# ### Missing `compute` meter info?\n\nstart1 = c500\nstart1[(start1['type'] == 'deliver-result') & start1.compute.isnull()]\n\ncompute_ref = start1[(start1.slogfile == 'coolex-agorictest16-chain.slog.gz') &\n (start1['type'] == 'deliver-result')].set_index('crankNum')[['compute']]\ncompute_ref\n\ncompute_delta = start1[['slogfile', 'crankNum', 'compute']]\ncompute_delta = pd.merge(compute_delta, compute_ref,\n left_on='crankNum', right_index=True, suffixes=['', '_ref'])\ncompute_delta['delta'] = (compute_delta.compute - compute_delta.compute_ref).abs()\ncompute_delta.sort_values('delta', ascending=False)\n\n# +\ndf = start1\ncategories = df.vatID.apply(lambda v: int(v[1:]))\ncolors = cm.rainbow(np.linspace(0, 1, categories.max() + 1))\n\ndf.plot.scatter(x='compute', y='dur', c=colors[categories],\n title='Deliveries (colored by vatID)',\n figsize=(12, 9), ylabel=\"dur (sec)\");\n# -\n\nstart1[~start1.compute.isnull()].groupby('vatID')[['crankNum']].count().sort_values('crankNum', ascending=False)\n\n\n# +\ndef vat_rate(df, vatID):\n df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()\n df['rate'] = df.compute / df.dur\n df = df[df.vatID == vatID]\n # df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()\n #df.sort_values('dur', ascending=False)\n #df\n df = df.set_index('deliveryNum').sort_index()\n return df\n\ndef show_rate(df, 
vatID, figsize=(8, 9)):\n df = vat_rate(df, vatID)\n ax = df.plot(subplots=True, figsize=figsize)\n \ndef fit_line(df, x, y, figsize=(9, 6)):\n cs = np.polyfit(df[x], df[y], 1)\n f = np.poly1d(cs)\n ax1 = df[[x, y]].plot.scatter(x=x, y=y, figsize=figsize)\n df['fit'] = f(df[x])\n df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);\n\n\n# show_rate(start1, 'v10');\n# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')\nfastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']\nfit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')\n# len(fastSlog[fastSlog.vatID == 'v10'])\n# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)\n#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)\n# -\n\nvat_rate(start1, 'v16');\n\ndf = start1.pivot(columns='vatID', values=['compute', 'dur'],\n index=['vatID', 'deliveryNum', 'crankNum', 'slogfile', 'line'])\ndf.reset_index().set_index('deliveryNum').drop(['crankNum', 'line'], axis=1) #.plot(figsize=(12, 8));\n\ndf.reset_index().set_index('deliveryNum')[['v23']].sort_index().dropna() #.plot()\n\ndf.describe()\n\ndf[['v14']].dropna()\n\ndf.crankNum.hist();\n\ndf.deliveryNum.hist();\n\ndf.groupby('method')[['compute', 'rate']].describe()\n\ndf.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').head(90).plot(\n subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: bottom 90');\n\ndf.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').tail(8).plot(\n subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: top 8');\n\ndurByMethod.dur.sum()\n\n# +\ndurByMethod = df.groupby('method')[['dur']].sum().sort_values('dur', ascending=False)\n\ndurByMethod.plot.pie(y='dur', figsize=(12, 9), autopct='%1.1f%%')\n# -\n\ndf.groupby('vatID')[['rate']].describe().head(20)\n\ndf.groupby('slogfile')[['rate']].describe().head(20)\n\ndf.plot.scatter(x='deliveryNum', y='rate')\n\nspeed = df.groupby('slogfile')[['rate']].describe()[['rate'][0]][['count', 'mean', 'std']]\nspeed = speed.sort_values('mean', ascending=False)\nspeed['relative'] = speed['mean'] / speed['mean'][0]\nspeed\n\n\n# +\ndef boxplot_sorted(df, by, column, **config):\n df2 = pd.DataFrame({col:vals[column] for col, vals in df.groupby(by)})\n meds = df2.median().sort_values()\n return df2[meds.index].boxplot(**config)\n\nax = boxplot_sorted(df, by=[\"slogfile\"], column=\"rate\", rot=90, figsize=(12, 9))\nax.set_title('Validator Speed: Sample of 20 from Phase 4');\nax.set_ylabel('computrons / sec')\n# -\n\nax = df.sort_values('crankNum').plot.scatter(x='crankNum', y='compute');\nax.set_yscale('log')\n\ndf[(df.dur < df.dur.mean() + df.dur.std()) &\n (df.compute < df.compute.mean() + df.compute.std())][['compute', 'dur']].hist();\n\n# +\ndf = crank_info(c500)\ndf = df[df.crankNum.isin(compute_ref.index)]\n\nrate = np.polyfit(df.compute, df.dur, 1)\nf = np.poly1d(rate)\ndf['rate'] = f(df.compute)\n# df[['compute', 'dur', 'rate']].head()\nprint(f)\n# -\n\nax1 = df[['compute', 'dur']].plot.scatter(x='compute', y='dur', figsize=(9, 6))\ndf.plot(x='compute', y='rate', color='Red', legend=False, ax=ax1);\nax1.set_title(f\"{len(df)} cranks from w3m: Duration vs. 
Compute Meter\");\nax1.set_xlabel(\"compute units\")\nax1.set_ylabel(\"duration (sec)\")\n\nr = df.compute / df.dur\n\nr.max() / r.min()\n\ndf.sort_values('rate', ascending=False).drop(['time', 'type', 'detail', 'detail_dr'], axis=1)\n\n# ## Colophon: jupytext\n#\n# This is a jupyter notebook paired with a python script using [jupytext](https://jupytext.readthedocs.io/en/latest/).\n#\n# We use the [python38Packages.jupytext](https://search.nixos.org/packages?channel=21.05&from=0&size=50&buckets=%7B%22package_attr_set%22%3A%5B%22python38Packages%22%5D%2C%22package_license_set%22%3A%5B%5D%2C%22package_maintainers_set%22%3A%5B%5D%2C%22package_platforms%22%3A%5B%5D%7D&sort=relevance&query=jupytext) nix package; in particular, `/nix/store/a9911qj06dy0ah7fshl39x3w4cjs7bxk-python3.8-jupytext-1.11.2`.\n#\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame.from_records", "pandas.merge", "matplotlib.cm.get_cmap", "numpy.where", "numpy.polyfit", "numpy.poly1d", "pandas.read_sql", "pandas.read_sql_table", "pandas.read_csv" ] ]
mmargenot/edward
[ "e9ed2cdc26ca9146c23b77784c98dd882a587dd8" ]
[ "edward/inferences/sghmc.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport tensorflow as tf\n\nfrom edward.inferences.monte_carlo import MonteCarlo\nfrom edward.models import RandomVariable, Empirical\nfrom edward.util import copy\n\ntry:\n from edward.models import Normal\nexcept Exception as e:\n raise ImportError(\"{0}. Your TensorFlow version is not supported.\".format(e))\n\n\nclass SGHMC(MonteCarlo):\n \"\"\"Stochastic gradient Hamiltonian Monte Carlo (Chen et al., 2014).\n\n #### Notes\n\n In conditional inference, we infer $z$ in $p(z, \\\\beta\n \\mid x)$ while fixing inference over $\\\\beta$ using another\n distribution $q(\\\\beta)$.\n `SGHMC` substitutes the model's log marginal density\n\n $\\log p(x, z) = \\log \\mathbb{E}_{q(\\\\beta)} [ p(x, z, \\\\beta) ]\n \\\\approx \\log p(x, z, \\\\beta^*)$\n\n leveraging a single Monte Carlo sample, where $\\\\beta^* \\sim\n q(\\\\beta)$. This is unbiased (and therefore asymptotically exact as a\n pseudo-marginal method) if $q(\\\\beta) = p(\\\\beta \\mid x)$.\n\n #### Examples\n\n ```python\n mu = Normal(loc=0.0, scale=1.0)\n x = Normal(loc=mu, scale=1.0, sample_shape=10)\n\n qmu = Empirical(tf.Variable(tf.zeros(500)))\n inference = ed.SGHMC({mu: qmu}, {x: np.zeros(10, dtype=np.float32)})\n ```\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(SGHMC, self).__init__(*args, **kwargs)\n\n def initialize(self, step_size=0.25, friction=0.1, *args, **kwargs):\n \"\"\"Initialize inference algorithm.\n\n Args:\n step_size: float, optional.\n Constant scale factor of learning rate.\n friction: float, optional.\n Constant scale on the friction term in the Hamiltonian system.\n \"\"\"\n self.step_size = step_size\n self.friction = friction\n self.v = {z: tf.Variable(tf.zeros(qz.params.shape[1:]))\n for z, qz in six.iteritems(self.latent_vars)}\n return super(SGHMC, self).initialize(*args, **kwargs)\n\n def build_update(self):\n \"\"\"Simulate Hamiltonian dynamics with friction using a discretized\n integrator. Its discretization error goes to zero as the learning\n rate decreases.\n\n Implements the update equations from (15) of Chen et al. (2014).\n \"\"\"\n old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))\n for z, qz in six.iteritems(self.latent_vars)}\n old_v_sample = {z: v for z, v in six.iteritems(self.v)}\n\n # Simulate Hamiltonian dynamics with friction.\n friction = tf.constant(self.friction, dtype=tf.float32)\n learning_rate = tf.constant(self.step_size * 0.01, dtype=tf.float32)\n grad_log_joint = tf.gradients(self._log_joint(old_sample),\n list(six.itervalues(old_sample)))\n\n # v_sample is so named b/c it represents a velocity rather than momentum.\n sample = {}\n v_sample = {}\n for z, grad_log_p in zip(six.iterkeys(old_sample), grad_log_joint):\n qz = self.latent_vars[z]\n event_shape = qz.event_shape\n normal = Normal(loc=tf.zeros(event_shape),\n scale=(tf.sqrt(learning_rate * friction) *\n tf.ones(event_shape)))\n sample[z] = old_sample[z] + old_v_sample[z]\n v_sample[z] = ((1. 
- 0.5 * friction) * old_v_sample[z] +\n learning_rate * tf.convert_to_tensor(grad_log_p) +\n normal.sample())\n\n # Update Empirical random variables.\n assign_ops = []\n for z, qz in six.iteritems(self.latent_vars):\n variable = qz.get_variables()[0]\n assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))\n assign_ops.append(tf.assign(self.v[z], v_sample[z]).op)\n\n # Increment n_accept.\n assign_ops.append(self.n_accept.assign_add(1))\n return tf.group(*assign_ops)\n\n def _log_joint(self, z_sample):\n \"\"\"Utility function to calculate model's log joint density,\n log p(x, z), for inputs z (and fixed data x).\n\n Args:\n z_sample: dict.\n Latent variable keys to samples.\n \"\"\"\n scope = tf.get_default_graph().unique_name(\"inference\")\n # Form dictionary in order to replace conditioning on prior or\n # observed variable with conditioning on a specific value.\n dict_swap = z_sample.copy()\n for x, qx in six.iteritems(self.data):\n if isinstance(x, RandomVariable):\n if isinstance(qx, RandomVariable):\n qx_copy = copy(qx, scope=scope)\n dict_swap[x] = qx_copy.value()\n else:\n dict_swap[x] = qx\n\n log_joint = 0.0\n for z in six.iterkeys(self.latent_vars):\n z_copy = copy(z, dict_swap, scope=scope)\n log_joint += tf.reduce_sum(\n self.scale.get(z, 1.0) * z_copy.log_prob(dict_swap[z]))\n\n for x in six.iterkeys(self.data):\n if isinstance(x, RandomVariable):\n x_copy = copy(x, dict_swap, scope=scope)\n log_joint += tf.reduce_sum(\n self.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))\n\n return log_joint\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.zeros", "tensorflow.assign", "tensorflow.get_default_graph", "tensorflow.group", "tensorflow.ones", "tensorflow.constant", "tensorflow.sqrt", "tensorflow.maximum", "tensorflow.scatter_update" ] ]
TonyMTH/Resume-Ranking
[ "6f560f7219848ddc7ee4bdbfabbd980905af4642", "6f560f7219848ddc7ee4bdbfabbd980905af4642" ]
[ "training/train.py", "training/playground.py" ]
[ "import numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom torch.utils.data import DataLoader\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom parameters import *\nfrom training.evaluation import Evaluate, ClassificationRanker\nfrom training.feature_extraction import FeatureExtraction\nfrom training.train_loop import train_loop\nfrom training.utils import Utils, Datasets\nimport models as md\n\n# Define Processor\nprint(\"1.\\t\" + str(device.type).capitalize() + \" detected\\n\")\n\n# Preprocess Data\nutils = Utils()\n\nfeatureExtraction = FeatureExtraction()\n# validation data\nprint(\"2.\\tProcessing Resume data for validation ...\")\nresume = utils.process_resumes(pth, categories, scores, query_name, feature_name)\nfeatureExtraction.generate_features(resume, query_name, feature_name, resume_path)\n# train data\nprint(\"3.\\tProcessing Train data ...\")\n# utils.clean_save_data(data_train_path, data_test_path, data_valid_path, required_columns, clean_data_path)\n\n# Load Data\nprint(\"4.\\tLoading Data ...\")\nvalid = utils.load_data(resume_path)\ntrain_test = utils.load_data(clean_data_path)\noutput_dim = 1#len(train_test.y.unique())\n\n# Train/Test Split\nprint(\"5.\\tGetting Train/Test/Validation Data ...\")\nx_train, x_test, x_valid, y_train, y_test, y_valid, qid_train, qid_test, qid_valid = \\\n utils.split_data(train_test, valid, .05)\nprint('6.\\tTrain: {}\\tTest: {}\\tValid: {}\\tOutput: {}'.format(x_train.shape, x_test.shape, x_valid.shape, output_dim))\nprint(\n '7.\\tUnique Query Ids (train: {}\\ttest: {}\\tvalid: {})'.format(len(np.unique(qid_train)), len(np.unique(qid_test)),\n len(np.unique(qid_valid))))\n\n# Define Model\n# model = md.RNN(x_train.shape[1], output_dim, hidden2, 2)\n# model = md.Model1(x_train.shape[1], hidden1, hidden2, hidden3, output_dim)\n# model = md.Model2(output_dim)\nmodel = md.Model4(x_train.shape[1], output_dim)\nmodel.to(device)\nprint(\"8.\\tModel defined and moved to \" + str(device.__str__()))\n\n# Parameters\noptimizer = Optimizer(model.parameters())\nscheduler = scheduler(optimizer)\nprint(\"9.\\tCriterion set as \" + str(criterion.__str__()))\nprint(\"10.\\tOptimizer set as \" + str(optimizer.__str__()))\n\n# Data Loader\ntrain_dataset = Datasets(y_train, x_train, qid_train)\ntest_dataset = Datasets(y_test, x_test, qid_test)\nvalid_dataset = Datasets(y_valid, x_valid, qid_valid)\n\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(test_dataset, batch_size=56, shuffle=True)\nvalid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)\ntrain_qid, train_labels, train_features = next(iter(train_loader))\nprint(\"11.\\tDataLoader Shapes-> QID: {}\\tLabel: {}\\tFeatures: {}\".format(train_qid.size(), train_labels.size(),\n train_features.size()))\n\n# NN Model\nprint(\"12.\\tTrain loop\")\n# train_loop(model, epochs, optimizer, criterion, train_loader, test_loader, valid_loader, k_rank,\n# printing_gap, saved_model_device, model_path, device, PIK_plot_data, scheduler)\n\n# Regressor Model\n# rfr = RandomForestRegressor(n_estimators=200, min_samples_split=5, random_state=1, n_jobs=-1)\n# rfr.fit(x_train, y_train)\n# Evaluate().print_evaluation(rfr, x_train, y_train, qid_train, k_rank)\n# Evaluate().print_evaluation(rfr, x_test, y_test, qid_test, k_rank)\n# Evaluate().print_evaluation(rfr, x_valid, y_valid, qid_valid, k_rank)\n# 
Evaluate().save_model(rfr, reg_model_path)\n\n# SVM Model\nsm = svm.SVR()\nsm.fit(x_train, y_train)\nEvaluate().print_evaluation(sm, x_train, y_train, qid_train, k_rank)\nEvaluate().print_evaluation(sm, x_test, y_test, qid_test, k_rank)\nEvaluate().print_evaluation(sm, x_valid, y_valid, qid_valid, k_rank)\nEvaluate().save_model(sm, svm_model_path)\n\n\n# Classifier Model\n# etc = ClassificationRanker(LogisticRegression(C=1000))\n# etc.fit(x_train, y_train)\n# Evaluate().print_evaluation(etc, x_train, y_train, qid_train, k_rank)\n# Evaluate().print_evaluation(etc, x_test, y_test, qid_test, k_rank)\n# Evaluate().print_evaluation(etc, x_valid, y_valid, qid_valid, k_rank)\n#\n# yp = rfr.predict(x_valid)\n# for i, j, k in zip(qid_valid, y_valid, yp):\n# print(i, j, k)\n", "from collections import Counter\n\nimport numpy as np\n\ncorpus = [\n 'This is the first document'.lower().split(),\n 'This document is the second document .'.lower().split(),\n 'And this is the third one.'.lower().split(),\n 'Is this the first document?'.lower().split(),\n]\nquery = \"this is the third one\".lower().split()\n\nprint(np.array([[2, 3], [4, 5]]) / np.array([[2, 3], [2, 3]]))\n" ]
[ [ "sklearn.svm.SVR", "torch.utils.data.DataLoader", "numpy.unique" ], [ "numpy.array" ] ]
Daz-Riza-Seriog/Transport_Phenomena
[ "822b89556fa56ef57494a318cbb03524e3a4d237" ]
[ "Heat_Transfer/3.3-Wire_Isolation_Base__Fin.py" ]
[ "# Code made for Sergio Andrés Díaz Ariza\n# 23 March 2021\n# License MIT\n# Transport Phenomena: Python Program-Assesment 3.3\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nsns.set()\n\n\nclass Wire_Param:\n\n def Power_from_volt(self, V, R_Km, Dis_insul, Diameter_mm):\n As = (np.pi * (Diameter_mm / 1000) ** 2) / 4\n R = R_Km * Dis_insul * As / 1000\n W = (V ** 2) / R # Power generated = Egen\n return W # [w/m^3]\n\n def Param_m(self, h, Kcopper, Diameter_mm):\n m = np.sqrt((4 * h * 0.2) / (Kcopper * (Diameter_mm / 1000)))\n return m\n\n\nclass Coeff_vector_B:\n\n def Eq_1(self, h, Tsurr, Egen, K, L):\n b1 = h * Tsurr - (Egen * (-L / 2)) + (h * (Egen * ((-L / 2) ** 2)) / 2 * K)\n return b1\n\n def Eq_2(self, Tsurr):\n b2 = Tsurr\n return b2\n\n def Eq_3(self):\n b3 = 0\n return b3\n\n def Eq_4(self):\n b4 = 0\n return b4\n\n\nclass T_profile:\n\n def T_z_to_L_2(self, Egen, z1, Kcopper, C1, C2):\n T = ((-Egen * ((z1) ** 2)) / 2 * Kcopper) + C1 * (z1) + C2\n return T\n\n def T_z_from_L_2_to_L(self, Tsurr, C3, C4, Param_m, z2):\n T = Tsurr + (C3 * np.exp((Param_m * z2))) + (C4 * np.exp((-Param_m * z2)))\n return T\n\n\nWire = Wire_Param()\nVect_B = Coeff_vector_B()\nProfile = T_profile()\n\nEgen = Wire.Power_from_volt(30e-6, 5.32, 0.2, 2.32) # [W/m^3]\nParam_m = Wire.Param_m(15, 386, 2.32) # Value L--> must be positive fo calculus\n\nb1 = Vect_B.Eq_1(15, 25 + 273.15, Egen, 386, -0.2)\nb2 = Vect_B.Eq_2(25 + 273.15)\nb3 = Vect_B.Eq_3()\nb4 = Vect_B.Eq_4()\n\nB = np.array([b1, b2, b3, b4])\nA = np.array([[-386 + (15 * (-0.2)), 15, 0, 0], [0, 1, -1, -1], [1, 0, -Param_m, Param_m],\n [0, 0, (np.exp(Param_m * 0.2)) * ((-386 * Param_m) - 15),\n (np.exp(-Param_m * 0.2)) * ((386 * Param_m) - 15)]])\n\n# A_inv = np.linalg.inv(A) ---> We can find the solution directly\nC = np.linalg.solve(A, B)\n\n# solve for Temperature Profile\nT_L_2 = Profile.T_z_to_L_2(Egen, np.arange(-0.2, 0.0, 1e-6), 386, C[0], C[1])\nT_L = Profile.T_z_from_L_2_to_L(25 + 273.15, C[2], C[3], Param_m, np.arange(0.0, 0.2, 1e-6))\n\nprint(T_L_2)\nprint(T_L)\nplt.figure(1)\nplt.plot(np.arange(0, 0.2, 1e-6), T_L_2, label='$0<L<0.2$')\nplt.plot(np.arange(0.2, 0.4, 1e-6), T_L, label='$0.2<L<0.4$')\nplt.title(\"Temperature Profile in a Wie\\n with Insulated part\", fontsize=16)\nplt.ylabel(\"Temperature $[K]$\", fontsize=14)\nplt.xlabel(\"Long of wire $[m]$ \", fontsize=14)\nplt.legend()\nplt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.exp", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.linalg.solve", "numpy.sqrt", "matplotlib.pyplot.show" ] ]
distributed-systems-group-7/TUD-DistributedSystems
[ "8de84791a7f721d8d5487693da4dcc63eb141132" ]
[ "Conflict-free_Replicated_Data_Types/src/benchmarking/OperationTestsGenerator.py" ]
[ "\nimport numpy as np\nimport math \n# number will be decreased by a small amount when some deletions happen \nnumber_of_vertices = 5_000 \n# probability of an arc between any two instances \nprobability_of_an_arc = 0.001 \n# number of reads in the read-heavy test \nread_test_operations = 20_000 \n# probability of removing a random vertex in each after processing each vertex\nremoval_probability = 0.04\n# probability of adding a lookup command after each add arc command in write-heavy test\nrandom_lookup_probability = 0.1\n# probability of adding an add command after each lookup command in read-heavy test \nrandom_add_probability = 0.1\n# used in the write-heavy test. prabability of removing a vertex. Removing an arc has a 1-x probability\nprobability_of_removing_a_vertex = 0.5\n# used in the read-heavy test. prabability of looking up a vertex. Looking up an arc has a 1-x probability\nprobability_of_looking_up_a_vertex = 0.5\n\navg_degree = number_of_vertices * probability_of_an_arc\nstd_deviation = math.sqrt((number_of_vertices-1)*probability_of_an_arc*(1-probability_of_an_arc))\n\nwrite_heavy_test_name = \"operations1.txt\"\nread_heavy_test_name = \"operations2.txt\"\n\nwith open(write_heavy_test_name, \"w\") as file:\n # write the vertices first so you dont get errors in neo4j\n for i in range(0, number_of_vertices):\n file.write(f\"av {i}\\n\")\n print(\"Written vertices\")\n \n # start adding the arcs\n for current_vertex in range(0, number_of_vertices):\n # get the degree of the vertex using the normal distribution\n degree = np.random.normal(avg_degree, std_deviation)\n for j in range(0, int(degree)):\n # select a target and write the operation to the instruction set\n target = np.random.randint(0, number_of_vertices)\n while target == current_vertex:\n target = np.random.randint(0, number_of_vertices)\n \n file.write(f\"aa {current_vertex} {target}\\n\")\n \n # add rare random lookups durring the write-heavy test\n if(np.random.ranf()<random_lookup_probability):\n if(np.random.ranf()<probability_of_looking_up_a_vertex):\n vertex_to_look = np.random.randint(0, number_of_vertices)\n file.write(f\"lv {vertex_to_look}\\n\")\n else:\n source_arc_to_look = np.random.randint(0, number_of_vertices)\n target_arc_to_look = np.random.randint(0, number_of_vertices)\n file.write(f\"la {source_arc_to_look} {target_arc_to_look}\\n\")\n \n \n if(current_vertex % 1000 == 0):\n print(f\"Written arcs for {current_vertex} vertices\")\n \n # after processing the arcs of an vertex add a rare random removal command\n if(np.random.ranf()<removal_probability):\n if(np.random.ranf()<probability_of_removing_a_vertex):\n vertex_to_remove = np.random.randint(0, number_of_vertices)\n file.write(f\"rv {vertex_to_remove}\\n\")\n else:\n source_arc_to_rmv = np.random.randint(0, number_of_vertices)\n target_arc_to_rmv = np.random.randint(0, number_of_vertices)\n file.write(f\"ra {source_arc_to_rmv} {target_arc_to_rmv}\\n\")\n \n print(\"Written arcs\")\n \n \nwith open(read_heavy_test_name, \"w\") as file:\n # write the read_test_operations read operations\n for i in range(0, read_test_operations):\n # before each read operation add a rare random write command\n if(np.random.ranf()<random_add_probability):\n file.write(f\"av x{i}\\n\")\n\n if(np.random.ranf()<probability_of_looking_up_a_vertex):\n vertex_to_look = np.random.randint(0, number_of_vertices)\n file.write(f\"lv {vertex_to_look}\\n\")\n else:\n source_arc_to_look = np.random.randint(0, number_of_vertices)\n target_arc_to_look = np.random.randint(0, 
number_of_vertices)\n file.write(f\"la {source_arc_to_look} {target_arc_to_look}\\n\")\n \n if(i % 10_000 == 0):\n print(f\"Written {i} lookups\")\n \n \n print(\"Written lookups\")\n \n \n" ]
[ [ "numpy.random.normal", "numpy.random.randint", "numpy.random.ranf" ] ]
thinkmoore/das
[ "d9faabf3de987b890a5079b914f5aba597215b14" ]
[ "programs/engine/unit_tests/json_nodes_test.py" ]
[ "#\n#\n# Needs to be expanded to accommodate the common occurrence of sparse.multiSparse objects in the geounitNode class vs pure numpy arrays\n#\n#\n\nimport os\nimport sys\n# If there is __init__.py in the directory where this file is, then Python adds das_decennial directory to sys.path\n# automatically. Not sure why and how it works, therefore, keeping the following line as a double\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))\n\n\nimport programs.engine.nodes as nodes\nimport numpy as np\n\n\ndef buildTestNode():\n \n raw = np.array([0,1,2,3,4])\n syn = np.array([6,7,8,9,5])\n geocode = \"0\"\n geolevel = \"National\"\n \n node = nodes.geounitNode(geocode, geolevel=geolevel, raw=raw, syn=syn, geocodeDict={16: \"Block\", 12: \"Block_Group\", 11: \"Tract\", 5: \"County\", 2: \"State\", 1: \"National\"})\n return node\n\n# NODES NO LONGER HAVE toJSON, as this is not needed\n# def test_slotsToJSON():\n# node = buildTestNode()\n#\n# jsonStr = node.toJSON()\n# assert jsonStr == '{\"geocode\": \"0\", \"geocodeDict\": {\"16\": \"Block\", \"12\": \"Block_Group\", \"11\": \"Tract\", \"5\": \"County\", \"2\": \"State\", \"1\": \"National\"}, \"geolevel\": \"National\", \"parentGeocode\": \"0\", \"raw\": [0, 1, 2, 3, 4], \"dp\": null, \"syn\": [6, 7, 8, 9, 5], \"syn_unrounded\": null, \"cons\": null, \"invar\": null, \"dp_queries\": null, \"congDistGeocode\": null, \"sldlGeocode\": null, \"slduGeocode\": null, \"minimalSchemaArray\": null, \"grbVars\": null, \"grbPenaltyVarsPos\": null, \"grbPenaltyVarsNeg\": null, \"ancestorsDP\": null, \"ancestorsRaw\": null}'\n#\n# jsonStr = node.toJSON(keepAttrs=[\"raw\", \"syn\"])\n# assert jsonStr == '{\"raw\": [0, 1, 2, 3, 4], \"syn\": [6, 7, 8, 9, 5]}'\n#\n# jsontuple = node.toJSON(addClassName=True)\n# assert jsontuple == ('geounitNode', '{\"geocode\": \"0\", \"geocodeDict\": {\"16\": \"Block\", \"12\": \"Block_Group\", \"11\": \"Tract\", \"5\": \"County\", \"2\": \"State\", \"1\": \"National\"}, \"geolevel\": \"National\", \"parentGeocode\": \"0\", \"raw\": [0, 1, 2, 3, 4], \"dp\": null, \"syn\": [6, 7, 8, 9, 5], \"syn_unrounded\": null, \"cons\": null, \"invar\": null, \"dp_queries\": null, \"congDistGeocode\": null, \"sldlGeocode\": null, \"slduGeocode\": null, \"minimalSchemaArray\": null, \"grbVars\": null, \"grbPenaltyVarsPos\": null, \"grbPenaltyVarsNeg\": null, \"ancestorsDP\": null, \"ancestorsRaw\": null}')\n#\n# classname, jsonStr = jsontuple\n# assert classname == 'geounitNode'\n# assert jsonStr == '{\"geocode\": \"0\", \"geocodeDict\": {\"16\": \"Block\", \"12\": \"Block_Group\", \"11\": \"Tract\", \"5\": \"County\", \"2\": \"State\", \"1\": \"National\"}, \"geolevel\": \"National\", \"parentGeocode\": \"0\", \"raw\": [0, 1, 2, 3, 4], \"dp\": null, \"syn\": [6, 7, 8, 9, 5], \"syn_unrounded\": null, \"cons\": null, \"invar\": null, \"dp_queries\": null, \"congDistGeocode\": null, \"sldlGeocode\": null, \"slduGeocode\": null, \"minimalSchemaArray\": null, \"grbVars\": null, \"grbPenaltyVarsPos\": null, \"grbPenaltyVarsNeg\": null, \"ancestorsDP\": null, \"ancestorsRaw\": null}'\n\n\n" ]
[ [ "numpy.array" ] ]
slowbull/leaf
[ "a2eda2b551fb0db8ddf88ae8c9e60adf965c7e85" ]
[ "models/metrics/writer.py" ]
[ "\"\"\"Writes the given metrics in a csv.\"\"\"\n\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\n\nmodels_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(models_dir)\n\nfrom baseline_constants import CLIENT_ID_KEY, NUM_ROUND_KEY, NUM_SAMPLES_KEY\n\n\nCOLUMN_NAMES = [\n CLIENT_ID_KEY, NUM_ROUND_KEY, 'hierarchy', NUM_SAMPLES_KEY]\n\n\ndef print_metrics(\n round_number,\n client_ids,\n metrics,\n hierarchies,\n num_samples,\n path):\n \"\"\"Prints or appends the given metrics in a csv.\n\n The resulting dataframe is of the form:\n client_id, round_number, hierarchy, num_samples, metric1, metric2\n twebbstack, 0, , 18, 0.5, 0.89\n\n Args:\n round_number: Number of the round the metrics correspond to. If\n 0, then the file in path is overwritten. If not 0, we append to\n that file.\n client_ids: Ids of the clients. Not all ids must be in the following\n dicts.\n metrics: Dict keyed by client id. Each element is a dict of metrics\n for that client in the specified round. The dicts for all clients\n are expected to have the same set of keys.\n hierarchies: Dict keyed by client id. Each element is a list of hierarchies\n to which the client belongs.\n num_samples: Dict keyed by client id. Each element is the number of test\n samples for the client.\n \"\"\"\n columns = COLUMN_NAMES + get_metrics_names(metrics)\n client_data = pd.DataFrame(columns=columns)\n for i, c_id in enumerate(client_ids):\n current_client = {\n 'client_id': c_id,\n 'round_number': round_number,\n 'hierarchy': ','.join(hierarchies.get(c_id, [])),\n 'num_samples': num_samples.get(c_id, np.nan)\n }\n\n current_metrics = metrics.get(c_id, {})\n for metric, metric_value in current_metrics.items():\n current_client[metric] = metric_value\n client_data.loc[len(client_data)] = current_client\n\n mode = 'w' if round_number == 0 else 'a'\n print_dataframe(client_data, path, mode)\n\n\ndef print_dataframe(df, path, mode='w'):\n \"\"\"Writes the given dataframe in path as a csv\"\"\"\n header = mode == 'w'\n df.to_csv(path, mode=mode, header=header, index=False)\n\n\ndef get_metrics_names(metrics):\n \"\"\"Gets the names of the metrics.\n\n Args:\n metrics: Dict keyed by client id. Each element is a dict of metrics\n for that client in the specified round. The dicts for all clients\n are expected to have the same set of keys.\"\"\"\n if len(metrics) == 0:\n return []\n metrics_dict = next(iter(metrics.values()))\n return list(metrics_dict.keys())\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
Tacha-S/perception
[ "7a715477b066528b67a8246f28fa8bac79180051" ]
[ "sbpl_perception/src/scripts/tools/fat_dataset/lib/utils/symbol.py" ]
[ "# --------------------------------------------------------\n# Deep Iterative Matching Network\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Written by Yi Li\n# --------------------------------------------------------\nfrom __future__ import print_function, division\nimport numpy as np\n\n\nclass Symbol:\n def __init__(self):\n self.arg_shape_dict = None\n self.out_shape_dict = None\n self.aux_shape_dict = None\n self.sym = None\n\n @property\n def symbol(self):\n return self.sym\n\n def get_symbol(self, cfg, is_train=True):\n \"\"\"\n return a generated symbol, it also need to be assigned to self.sym\n \"\"\"\n raise NotImplementedError()\n\n def init_weights(self, cfg, arg_params, aux_params):\n raise NotImplementedError()\n\n def get_msra_std(self, shape):\n fan_in = float(shape[1])\n if len(shape) > 2:\n fan_in *= np.prod(shape[2:])\n print(np.sqrt(2 / fan_in))\n return np.sqrt(2 / fan_in)\n\n def infer_shape(self, data_shape_dict):\n # infer shape\n arg_shape, out_shape, aux_shape = self.sym.infer_shape(**data_shape_dict)\n self.arg_shape_dict = dict(zip(self.sym.list_arguments(), arg_shape))\n self.out_shape_dict = dict(zip(self.sym.list_outputs(), out_shape))\n self.aux_shape_dict = dict(zip(self.sym.list_auxiliary_states(), aux_shape))\n\n def check_parameter_shapes(\n self, arg_params, aux_params, data_shape_dict, is_train=True\n ):\n for k in self.sym.list_arguments():\n if k in data_shape_dict or (False if is_train else \"label\" in k):\n continue\n assert k in arg_params, k + \" not initialized\"\n assert arg_params[k].shape == self.arg_shape_dict[k], (\n \"shape inconsistent for \"\n + k\n + \" inferred \"\n + str(self.arg_shape_dict[k])\n + \" provided \"\n + str(arg_params[k].shape)\n )\n for k in self.sym.list_auxiliary_states():\n assert k in aux_params, k + \" not initialized\"\n assert aux_params[k].shape == self.aux_shape_dict[k], (\n \"shape inconsistent for \"\n + k\n + \" inferred \"\n + str(self.aux_shape_dict[k])\n + \" provided \"\n + str(aux_params[k].shape)\n )\n" ]
[ [ "numpy.prod", "numpy.sqrt" ] ]
dkurt/NiftyNet
[ "3a4d54544c0886751bacfdbddb42eb90fe0d5b54" ]
[ "niftynet/network/simulator_gan.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom niftynet.layer.activation import ActiLayer\nfrom niftynet.layer.convolution import ConvolutionalLayer\nfrom niftynet.layer.deconvolution import DeconvolutionalLayer\nfrom niftynet.layer.fully_connected import FullyConnectedLayer\nfrom niftynet.layer.gan_blocks import BaseDiscriminator\nfrom niftynet.layer.gan_blocks import BaseGenerator\nfrom niftynet.layer.gan_blocks import GANImageBlock\n\n\nclass SimulatorGAN(GANImageBlock):\n \"\"\"\n implementation of\n Hu et al., \"Freehand Ultrasound Image Simulation with Spatially-Conditioned\n Generative Adversarial Networks\", MICCAI RAMBO 2017\n https://arxiv.org/abs/1707.05392\n \"\"\"\n\n def __init__(self, name='simulator_GAN'):\n super(SimulatorGAN, self).__init__(\n generator=ImageGenerator(name='generator'),\n discriminator=ImageDiscriminator(name='discriminator'),\n clip=None,\n name=name)\n\n\nclass ImageGenerator(BaseGenerator):\n def __init__(self, name):\n super(ImageGenerator, self).__init__(name=name)\n self.initializers = {'w': tf.random_normal_initializer(0, 0.02),\n 'b': tf.constant_initializer(0.001)}\n self.noise_channels_per_layer = 0\n self.with_conditionings = [True, True, True, True, False]\n\n def layer_op(self, random_source, image_size, conditioning, is_training):\n keep_prob_ph = 1 # not passed in as a placeholder\n add_noise = self.noise_channels_per_layer\n if conditioning is not None:\n conditioning_channels = conditioning.shape.as_list()[-1]\n conditioning_channels = conditioning_channels + add_noise\n else:\n conditioning_channels = add_noise\n\n # feature channels design pattern\n ch = [512]\n sz = image_size[:-1]\n for i in range(4):\n # compute output n_feature_channels of i-th layer\n new_ch = ch[-1] + conditioning_channels * self.with_conditionings[i]\n new_ch = round(new_ch / 2)\n ch.append(new_ch)\n # compute output spatial size of i-th layer\n sz = [int(round(spatial_len / 2)) for spatial_len in sz]\n ch.append(1) # last layer single channel image\n\n # resizing utilities\n spatial_rank = len(image_size) - 1\n if spatial_rank == 3:\n def resize_func(x, sz):\n sz_x = x.shape.as_list()\n r1 = tf.image.resize_images(\n tf.reshape(x, sz_x[:3] + [-1]), sz[0:2])\n r2 = tf.image.resize_images(\n tf.reshape(r1, [sz_x[0], sz[0] * sz[1], sz_x[3], -1]),\n [sz[0] * sz[1], sz[2]])\n resized_3d = tf.reshape(r2, [sz_x[0]] + sz + [sz_x[-1]])\n return resized_3d\n elif spatial_rank == 2:\n resize_func = tf.image.resize_bilinear\n\n def concat_cond(x, with_conditioning):\n noise = []\n if add_noise:\n feature_shape = x.shape.as_list()[0:-1]\n noise_shape = feature_shape + [add_noise]\n noise = [tf.random_normal(noise_shape, 0.0, 0.1)]\n\n if with_conditioning and conditioning is not None:\n with tf.name_scope('concat_conditioning'):\n spatial_shape = x.shape.as_list()[1:-1]\n resized_cond = resize_func(conditioning, spatial_shape)\n return tf.concat([x, resized_cond] + noise, axis=-1)\n return x\n\n def conv(ch, x):\n with tf.name_scope('conv'):\n conv_layer = ConvolutionalLayer(\n n_output_chns=ch,\n kernel_size=3,\n feature_normalization='batch',\n with_bias=False,\n acti_func='relu',\n w_initializer=self.initializers['w'])\n return conv_layer(x, is_training=is_training)\n\n def up(ch, x):\n with tf.name_scope('up'):\n deconv_layer = DeconvolutionalLayer(\n n_output_chns=ch,\n kernel_size=3,\n stride=2,\n feature_normalization='batch',\n with_bias=False,\n acti_func='relu',\n 
w_initializer=self.initializers['w'])\n return deconv_layer(x, is_training=is_training)\n\n def up_block(ch, x, with_conditioning):\n with tf.name_scope('up_block'):\n u = up(ch, x)\n cond = concat_cond(u, with_conditioning)\n return conv(cond.shape.as_list()[-1], cond)\n\n def noise_to_image(sz, ch, rand_tensor, with_conditioning):\n batch_size = rand_tensor.shape.as_list()[0]\n output_shape = [batch_size] + sz + [ch]\n with tf.name_scope('noise_to_image'):\n g_no_0 = np.prod(sz) * ch\n fc_layer = FullyConnectedLayer(\n n_output_chns=g_no_0,\n feature_normalization=None,\n with_bias=True,\n w_initializer=self.initializers['w'],\n b_initializer=self.initializers['b'])\n g_h1p = fc_layer(rand_tensor, keep_prob=keep_prob_ph)\n g_h1p = tf.reshape(g_h1p, output_shape)\n g_h1p = concat_cond(g_h1p, with_conditioning)\n return conv(ch + conditioning_channels, g_h1p)\n\n def final_image(n_chns, x):\n with tf.name_scope('final_image'):\n if add_noise > 0:\n feature_shape = x.shape.as_list()[0:-1]\n noise_shape = feature_shape + [add_noise]\n noise = tf.random_normal(noise_shape, 0, .1)\n x = tf.concat([x, noise], axis=3)\n conv_layer = ConvolutionalLayer(\n n_output_chns=n_chns,\n kernel_size=3,\n acti_func='tanh',\n feature_normalization=None,\n with_bias=True,\n w_initializer=self.initializers['w'],\n b_initializer=self.initializers['b'])\n x_sample = conv_layer(\n x, is_training=is_training, keep_prob=keep_prob_ph)\n return tf.image.resize_images(x_sample, image_size[:-1])\n\n # let the tensors flow...\n flow = random_source\n for (idx, chns) in enumerate(ch):\n if idx == 0: # first layer fully-connected\n flow = noise_to_image(\n sz, chns, flow, self.with_conditionings[idx])\n elif idx == len(ch) - 1: # final conv without bn\n return final_image(chns, flow)\n else: # upsampling block\n flow = up_block(chns, flow, self.with_conditionings[idx])\n\n\nclass ImageDiscriminator(BaseDiscriminator):\n def __init__(self, name):\n super(ImageDiscriminator, self).__init__(name=name)\n\n w_init = tf.random_normal_initializer(0, 0.02)\n b_init = tf.constant_initializer(0.001)\n # w_init = tf.contrib.layers.variance_scaling_initializer()\n # b_init = tf.constant_initializer(0)\n\n self.initializers = {'w': w_init, 'b': b_init}\n self.chns = [32, 64, 128, 256, 512, 1024, 1]\n\n def layer_op(self, image, conditioning, is_training):\n\n batch_size = image.shape.as_list()[0]\n\n def down(ch, x):\n with tf.name_scope('downsample'):\n conv_layer = ConvolutionalLayer(\n n_output_chns=ch,\n kernel_size=3,\n stride=2,\n feature_normalization='batch',\n acti_func='selu',\n w_initializer=self.initializers['w'])\n return conv_layer(x, is_training=is_training)\n\n def convr(ch, x):\n conv_layer = ConvolutionalLayer(\n n_output_chns=ch,\n kernel_size=3,\n feature_normalization='batch',\n acti_func='selu',\n w_initializer=self.initializers['w'])\n return conv_layer(x, is_training=is_training)\n\n def conv(ch, x, s):\n conv_layer = ConvolutionalLayer(\n n_output_chns=ch,\n kernel_size=3,\n feature_normalization='batch',\n w_initializer=self.initializers['w'])\n acti_layer = ActiLayer(func='selu')\n\n # combining two flows\n res_flow = conv_layer(x, is_training=is_training) + s\n return acti_layer(res_flow)\n\n def down_block(ch, x):\n with tf.name_scope('down_resnet'):\n s = down(ch, x)\n r = convr(ch, s)\n return conv(ch, r, s)\n\n def feature_block(ch, image):\n with tf.name_scope('feature'):\n conv_layer = ConvolutionalLayer(\n n_output_chns=ch,\n kernel_size=5,\n with_bias=True,\n feature_normalization=None,\n 
acti_func='selu',\n w_initializer=self.initializers['w'],\n b_initializer=self.initializers['b'])\n d_h1s = conv_layer(image, is_training=is_training)\n d_h1r = convr(ch, d_h1s)\n return conv(ch, d_h1r, d_h1s)\n\n def fully_connected(ch, features):\n with tf.name_scope('fully_connected'):\n # with bn?\n fc_layer = FullyConnectedLayer(\n n_output_chns=ch, feature_normalization=None, with_bias=True)\n return fc_layer(features, is_training=is_training)\n\n if conditioning is not None:\n image = tf.concat([image, conditioning], axis=-1)\n\n # let the tensors flow...\n flow = image\n for (idx, n_chns) in enumerate(self.chns):\n if idx == 0: # first layer\n flow = feature_block(n_chns, flow)\n elif idx == len(self.chns) - 1: # last layer\n return fully_connected(n_chns, flow)\n else:\n flow = down_block(n_chns, flow)\n" ]
[ [ "tensorflow.constant_initializer", "tensorflow.concat", "tensorflow.reshape", "numpy.prod", "tensorflow.name_scope", "tensorflow.image.resize_images", "tensorflow.random_normal", "tensorflow.random_normal_initializer" ] ]
SebastianVeile/PreSumm
[ "780c340e04fd5911badb4a8b2af2284c5cdbb8b5" ]
[ "src/models/predictor.py" ]
[ "#!/usr/bin/env python\n\"\"\" Translator Class and builder \"\"\"\nfrom __future__ import print_function\nimport codecs\nimport os\nimport math\n\nimport torch\n\nfrom tensorboardX import SummaryWriter\n\nfrom others.utils import rouge_results_to_str, test_rouge, tile\nfrom translate.beam import GNMTGlobalScorer\n\n\ndef build_predictor(args, tokenizer, symbols, model, logger=None):\n scorer = GNMTGlobalScorer(args.alpha,length_penalty='wu')\n\n translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)\n return translator\n\n\nclass Translator(object):\n \"\"\"\n Uses a model to translate a batch of sentences.\n\n\n Args:\n model (:obj:`onmt.modules.NMTModel`):\n NMT model to use for translation\n fields (dict of Fields): data fields\n beam_size (int): size of beam to use\n n_best (int): number of translations produced\n max_length (int): maximum length output to produce\n global_scores (:obj:`GlobalScorer`):\n object to rescore final translations\n copy_attn (bool): use copy attention during translation\n cuda (bool): use cuda\n beam_trace (bool): trace beam search for debugging\n logger(logging.Logger): logger.\n \"\"\"\n\n def __init__(self,\n args,\n model,\n vocab,\n symbols,\n global_scorer=None,\n logger=None,\n dump_beam=\"\"):\n self.logger = logger\n self.cuda = args.visible_gpus != '-1'\n\n self.args = args\n self.model = model\n self.generator = self.model.generator\n self.vocab = vocab\n self.symbols = symbols\n self.start_token = symbols['BOS']\n self.end_token = symbols['EOS']\n\n self.global_scorer = global_scorer\n self.beam_size = args.beam_size\n self.min_length = args.min_length\n self.max_length = args.max_length\n\n self.dump_beam = dump_beam\n\n # for debugging\n self.beam_trace = self.dump_beam != \"\"\n self.beam_accum = None\n\n tensorboard_log_dir = args.model_path\n\n self.tensorboard_writer = SummaryWriter(tensorboard_log_dir, comment=\"Unmt\")\n\n if self.beam_trace:\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n\n def _build_target_tokens(self, pred):\n # vocab = self.fields[\"tgt\"].vocab\n tokens = []\n for tok in pred:\n tok = int(tok)\n tokens.append(tok)\n if tokens[-1] == self.end_token:\n tokens = tokens[:-1]\n break\n tokens = [t for t in tokens if t < len(self.vocab)]\n tokens = self.vocab.DecodeIds(tokens).split(' ')\n return tokens\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert (len(translation_batch[\"gold_score\"]) ==\n len(translation_batch[\"predictions\"]))\n batch_size = batch.batch_size\n\n preds, pred_score, gold_score, tgt_str, src = translation_batch[\"predictions\"],translation_batch[\"scores\"],translation_batch[\"gold_score\"],batch.tgt_str, batch.src\n\n translations = []\n for b in range(batch_size):\n pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])\n pred_sents = ' '.join(pred_sents).replace(' ##','')\n gold_sent = ' '.join(tgt_str[b].split())\n # translation = Translation(fname[b],src[:, b] if src is not None else None,\n # src_raw, pred_sents,\n # attn[b], pred_score[b], gold_sent,\n # gold_score[b])\n # src = self.spm.DecodeIds([int(t) for t in translation_batch['batch'].src[0][5] if int(t) != len(self.spm)])\n raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]\n raw_src = ' '.join(raw_src)\n translation = (pred_sents, gold_sent, raw_src)\n # translation = (pred_sents[0], gold_sent)\n translations.append(translation)\n\n return 
translations\n\n def translate(self,\n data_iter, step,\n attn_debug=False):\n\n self.model.eval()\n gold_path = self.args.result_path + '.%d.gold' % step\n can_path = self.args.result_path + '.%d.candidate' % step\n self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')\n self.can_out_file = codecs.open(can_path, 'w', 'utf-8')\n\n # raw_gold_path = self.args.result_path + '.%d.raw_gold' % step\n # raw_can_path = self.args.result_path + '.%d.raw_candidate' % step\n self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')\n self.can_out_file = codecs.open(can_path, 'w', 'utf-8')\n\n raw_src_path = self.args.result_path + '.%d.raw_src' % step\n self.src_out_file = codecs.open(raw_src_path, 'w', 'utf-8')\n\n # pred_results, gold_results = [], []\n ct = 0\n with torch.no_grad():\n for batch in data_iter:\n if(self.args.recall_eval):\n gold_tgt_len = batch.tgt.size(1)\n self.min_length = gold_tgt_len + 20\n self.max_length = gold_tgt_len + 60\n batch_data = self.translate_batch(batch)\n translations = self.from_batch(batch_data)\n\n for trans in translations:\n pred, gold, src = trans\n pred_str = pred.replace('[unused1]', '').replace('[unused4]', '').replace('[PAD]', '').replace('[unused2]', '').replace(r' +', ' ').replace(' [unused3] ', '<q>').replace('[unused3]', '').strip()\n gold_str = gold.strip()\n if(self.args.recall_eval):\n _pred_str = ''\n gap = 1e3\n for sent in pred_str.split('<q>'):\n can_pred_str = _pred_str+ '<q>'+sent.strip()\n can_gap = math.fabs(len(_pred_str.split())-len(gold_str.split()))\n # if(can_gap>=gap):\n if(len(can_pred_str.split())>=len(gold_str.split())+10):\n pred_str = _pred_str\n break\n else:\n gap = can_gap\n _pred_str = can_pred_str\n\n\n\n # pred_str = ' '.join(pred_str.split()[:len(gold_str.split())])\n # self.raw_can_out_file.write(' '.join(pred).strip() + '\\n')\n # self.raw_gold_out_file.write(' '.join(gold).strip() + '\\n')\n self.can_out_file.write(pred_str + '\\n')\n self.gold_out_file.write(gold_str + '\\n')\n self.src_out_file.write(src.strip() + '\\n')\n ct += 1\n self.can_out_file.flush()\n self.gold_out_file.flush()\n self.src_out_file.flush()\n\n self.can_out_file.close()\n self.gold_out_file.close()\n self.src_out_file.close()\n\n if (step != -1):\n rouges = self._report_rouge(gold_path, can_path)\n self.logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n if self.tensorboard_writer is not None:\n self.tensorboard_writer.add_scalar('test/rouge1-F', rouges['rouge_1_f_score'], step)\n self.tensorboard_writer.add_scalar('test/rouge2-F', rouges['rouge_2_f_score'], step)\n self.tensorboard_writer.add_scalar('test/rougeL-F', rouges['rouge_l_f_score'], step)\n\n def _report_rouge(self, gold_path, can_path):\n self.logger.info(\"Calculating Rouge\")\n results_dict = test_rouge(self.args.temp_dir, can_path, gold_path)\n return results_dict\n\n def translate_batch(self, batch, fast=False):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.\n \"\"\"\n with torch.no_grad():\n return self._fast_translate_batch(\n batch,\n self.max_length,\n min_length=self.min_length)\n\n def _fast_translate_batch(self,\n batch,\n max_length,\n min_length=0):\n # TODO: faster code path for beam_size == 1.\n\n # TODO: support these blacklisted features.\n assert not self.dump_beam\n\n 
beam_size = self.beam_size\n batch_size = batch.batch_size\n src = batch.src\n segs = batch.segs\n mask_src = batch.mask_src\n\n src_features = self.model.bert(src, segs, mask_src)\n dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)\n device = src_features.device\n\n # Tile states and memory beam_size times.\n dec_states.map_batch_fn(\n lambda state, dim: tile(state, beam_size, dim=dim))\n src_features = tile(src_features, beam_size, dim=0)\n batch_offset = torch.arange(\n batch_size, dtype=torch.long, device=device)\n beam_offset = torch.arange(\n 0,\n batch_size * beam_size,\n step=beam_size,\n dtype=torch.long,\n device=device)\n alive_seq = torch.full(\n [batch_size * beam_size, 1],\n self.start_token,\n dtype=torch.long,\n device=device)\n\n # Give full probability to the first beam on the first step.\n topk_log_probs = (\n torch.tensor([0.0] + [float(\"-inf\")] * (beam_size - 1),\n device=device).repeat(batch_size))\n\n # Structure that holds finished hypotheses.\n hypotheses = [[] for _ in range(batch_size)] # noqa: F812\n\n results = {}\n results[\"predictions\"] = [[] for _ in range(batch_size)] # noqa: F812\n results[\"scores\"] = [[] for _ in range(batch_size)] # noqa: F812\n results[\"gold_score\"] = [0] * batch_size\n results[\"batch\"] = batch\n\n for step in range(max_length):\n decoder_input = alive_seq[:, -1].view(1, -1)\n\n # Decoder forward.\n decoder_input = decoder_input.transpose(0,1)\n\n dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states,\n step=step)\n\n # Generator forward.\n log_probs = self.generator.forward(dec_out.transpose(0,1).squeeze(0))\n vocab_size = log_probs.size(-1)\n\n if step < min_length:\n log_probs[:, self.end_token] = -1e20\n\n # Multiply probs by the beam probability.\n log_probs += topk_log_probs.view(-1).unsqueeze(1)\n\n alpha = self.global_scorer.alpha\n length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha\n\n # Flatten probs into a list of possibilities.\n curr_scores = log_probs / length_penalty\n\n if(self.args.block_trigram):\n cur_len = alive_seq.size(1)\n if(cur_len>3):\n for i in range(alive_seq.size(0)):\n fail = False\n words = [int(w) for w in alive_seq[i]]\n words = [self.vocab.ids_to_tokens[w] for w in words]\n words = ' '.join(words).replace(' ##','').split()\n if(len(words)<=3):\n continue\n trigrams = [(words[i-1],words[i],words[i+1]) for i in range(1,len(words)-1)]\n trigram = tuple(trigrams[-1])\n if trigram in trigrams[:-1]:\n fail = True\n if fail:\n curr_scores[i] = -10e20\n\n curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)\n topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)\n\n # Recover log probs.\n topk_log_probs = topk_scores * length_penalty\n\n # Resolve beam origin and true word ids.\n topk_beam_index = topk_ids.div(vocab_size)\n topk_ids = topk_ids.fmod(vocab_size)\n\n # Map beam_index to batch_index in the flat representation.\n batch_index = (\n topk_beam_index\n + beam_offset[:topk_beam_index.size(0)].unsqueeze(1))\n select_indices = batch_index.view(-1)\n\n # Append last prediction.\n alive_seq = torch.cat(\n [alive_seq.index_select(0, select_indices),\n topk_ids.view(-1, 1)], -1)\n\n is_finished = topk_ids.eq(self.end_token)\n if step + 1 == max_length:\n is_finished.fill_(1)\n # End condition is top beam is finished.\n end_condition = is_finished[:, 0].eq(1)\n # Save finished hypotheses.\n if is_finished.any():\n predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))\n for i in range(is_finished.size(0)):\n b = 
batch_offset[i]\n if end_condition[i]:\n is_finished[i].fill_(1)\n finished_hyp = is_finished[i].nonzero().view(-1)\n # Store finished hypotheses for this batch.\n for j in finished_hyp:\n hypotheses[b].append((\n topk_scores[i, j],\n predictions[i, j, 1:]))\n # If the batch reached the end, save the n_best hypotheses.\n if end_condition[i]:\n best_hyp = sorted(\n hypotheses[b], key=lambda x: x[0], reverse=True)\n score, pred = best_hyp[0]\n\n results[\"scores\"][b].append(score)\n results[\"predictions\"][b].append(pred)\n non_finished = end_condition.eq(0).nonzero().view(-1)\n # If all sentences are translated, no need to go further.\n if len(non_finished) == 0:\n break\n # Remove finished batches for the next step.\n topk_log_probs = topk_log_probs.index_select(0, non_finished)\n batch_index = batch_index.index_select(0, non_finished)\n batch_offset = batch_offset.index_select(0, non_finished)\n alive_seq = predictions.index_select(0, non_finished) \\\n .view(-1, alive_seq.size(-1))\n # Reorder states.\n select_indices = batch_index.view(-1)\n src_features = src_features.index_select(0, select_indices)\n dec_states.map_batch_fn(\n lambda state, dim: state.index_select(dim, select_indices))\n\n return results\n\n\nclass Translation(object):\n \"\"\"\n Container for a translated sentence.\n\n Attributes:\n src (`LongTensor`): src word ids\n src_raw ([str]): raw src words\n\n pred_sents ([[str]]): words from the n-best translations\n pred_scores ([[float]]): log-probs of n-best translations\n attns ([`FloatTensor`]) : attention dist for each translation\n gold_sent ([str]): words from gold translation\n gold_score ([float]): log-prob of gold translation\n\n \"\"\"\n\n def __init__(self, fname, src, src_raw, pred_sents,\n attn, pred_scores, tgt_sent, gold_score):\n self.fname = fname\n self.src = src\n self.src_raw = src_raw\n self.pred_sents = pred_sents\n self.attns = attn\n self.pred_scores = pred_scores\n self.gold_sent = tgt_sent\n self.gold_score = gold_score\n\n def log(self, sent_number):\n \"\"\"\n Log translation.\n \"\"\"\n\n output = '\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n output += 'PRED {}: {}\\n'.format(sent_number, pred_sent)\n output += \"PRED SCORE: {:.4f}\\n\".format(best_score)\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n output += 'GOLD {}: {}\\n'.format(sent_number, tgt_sent)\n output += (\"GOLD SCORE: {:.4f}\\n\".format(self.gold_score))\n if len(self.pred_sents) > 1:\n output += '\\nBEST HYP:\\n'\n for score, sent in zip(self.pred_scores, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n\n return output\n" ]
[ [ "torch.no_grad", "torch.full", "torch.arange" ] ]
aemerick/yt
[ "984484616d75c6d7603e71b9d45c5d617705a0e5", "984484616d75c6d7603e71b9d45c5d617705a0e5", "984484616d75c6d7603e71b9d45c5d617705a0e5" ]
[ "yt/frontends/gdf/io.py", "yt/frontends/athena_pp/data_structures.py", "yt/geometry/particle_geometry_handler.py" ]
[ "import numpy as np\nfrom yt.utilities.on_demand_imports import _h5py as h5py\nfrom yt.funcs import \\\n mylog\nfrom yt.geometry.selection_routines import GridSelector\nfrom yt.utilities.io_handler import \\\n BaseIOHandler\n\n\ndef _grid_dname(grid_id):\n return \"/data/grid_%010i\" % grid_id\n\n\ndef _field_dname(grid_id, field_name):\n return \"%s/%s\" % (_grid_dname(grid_id), field_name)\n\n\n# TODO all particle bits were removed\nclass IOHandlerGDFHDF5(BaseIOHandler):\n _dataset_type = \"grid_data_format\"\n _offset_string = 'data:offsets=0'\n _data_string = 'data:datatype=0'\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n from sys import version\n rv = {}\n chunks = list(chunks)\n\n if isinstance(selector, GridSelector):\n if not (len(chunks) == len(chunks[0].objs) == 1):\n raise RuntimeError\n grid = chunks[0].objs[0]\n h5f = h5py.File(grid.filename, mode='r')\n gds = h5f.get(_grid_dname(grid.id))\n for ftype, fname in fields:\n if self.ds.field_ordering == 1:\n rv[(ftype, fname)] = gds.get(fname)[()].swapaxes(0, 2)\n else:\n rv[(ftype, fname)] = gds.get(fname)[()]\n h5f.close()\n return rv\n if size is None:\n size = sum((grid.count(selector) for chunk in chunks\n for grid in chunk.objs))\n\n if any((ftype != \"gdf\" for ftype, fname in fields)):\n raise NotImplementedError\n\n for field in fields:\n ftype, fname = field\n fsize = size\n # check the dtype instead\n rv[field] = np.empty(fsize, dtype=\"float64\")\n ngrids = sum(len(chunk.objs) for chunk in chunks)\n mylog.debug(\"Reading %s cells of %s fields in %s blocks\",\n size, [fn for ft, fn in fields], ngrids)\n ind = 0\n for chunk in chunks:\n fid = None\n for grid in chunk.objs:\n if grid.filename is None:\n continue\n if fid is None:\n if version < '3':\n fid = h5py.h5f.open(grid.filename,h5py.h5f.ACC_RDONLY)\n else:\n fid = h5py.h5f.open(bytes(grid.filename,'utf-8'),h5py.h5f.ACC_RDONLY)\n if self.ds.field_ordering == 1:\n # check the dtype instead\n data = np.empty(grid.ActiveDimensions[::-1],\n dtype=\"float64\")\n data_view = data.swapaxes(0, 2)\n else:\n # check the dtype instead\n data_view = data = np.empty(grid.ActiveDimensions,\n dtype=\"float64\")\n for field in fields:\n ftype, fname = field\n if version < '3':\n dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))\n else:\n dg = h5py.h5d.open(fid, bytes(_field_dname(grid.id, fname),'utf-8'))\n dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)\n # caches\n nd = grid.select(selector, data_view, rv[field], ind)\n ind += nd # I don't get that part, only last nd is added\n if fid is not None:\n fid.close()\n return rv\n", "import numpy as np\nimport os\nimport weakref\n\nfrom yt.funcs import \\\n mylog, get_pbar, \\\n ensure_tuple\nfrom yt.data_objects.grid_patch import \\\n AMRGridPatch\nfrom yt.geometry.grid_geometry_handler import \\\n GridIndex\nfrom yt.data_objects.static_output import \\\n Dataset\nfrom yt.geometry.geometry_handler import \\\n YTDataChunk\nfrom yt.utilities.file_handler import \\\n HDF5FileHandler\nfrom yt.geometry.unstructured_mesh_handler import \\\n UnstructuredIndex\nfrom yt.data_objects.unstructured_mesh import \\\n SemiStructuredMesh\nfrom itertools import chain, product\nfrom .fields import AthenaPPFieldInfo\nfrom yt.utilities.chemical_formulas import \\\n default_mu\n\ngeom_map = {\"cartesian\": \"cartesian\",\n \"cylindrical\": \"cylindrical\",\n \"spherical_polar\": \"spherical\",\n \"minkowski\": \"cartesian\",\n \"tilted\": \"cartesian\",\n \"sinusoidal\": \"cartesian\",\n \"schwarzschild\": \"spherical\",\n 
\"kerr-schild\": \"spherical\"}\n\n_cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),\n dtype=np.int64, count = 8*3)\n_cis.shape = (8, 3)\n\nclass AthenaPPLogarithmicMesh(SemiStructuredMesh):\n _index_offset = 0\n\n def __init__(self, mesh_id, filename, connectivity_indices,\n connectivity_coords, index, blocks, dims):\n super(AthenaPPLogarithmicMesh, self).__init__(mesh_id, filename, \n connectivity_indices,\n connectivity_coords, index)\n self.mesh_blocks = blocks\n self.mesh_dims = dims\n\nclass AthenaPPLogarithmicIndex(UnstructuredIndex):\n def __init__(self, ds, dataset_type = 'athena_pp'):\n self._handle = ds._handle\n super(AthenaPPLogarithmicIndex, self).__init__(ds, dataset_type)\n self.index_filename = self.dataset.filename\n self.directory = os.path.dirname(self.dataset.filename)\n self.dataset_type = dataset_type\n\n def _initialize_mesh(self):\n mylog.debug(\"Setting up meshes.\")\n num_blocks = self._handle.attrs[\"NumMeshBlocks\"]\n log_loc = self._handle['LogicalLocations']\n levels = self._handle[\"Levels\"]\n x1f = self._handle[\"x1f\"]\n x2f = self._handle[\"x2f\"]\n x3f = self._handle[\"x3f\"]\n nbx, nby, nbz = tuple(np.max(log_loc, axis=0)+1)\n nlevel = self._handle.attrs[\"MaxLevel\"]+1\n\n nb = np.array([nbx, nby, nbz], dtype='int')\n self.mesh_factors = np.ones(3, dtype='int')*((nb > 1).astype(\"int\")+1)\n\n block_grid = -np.ones((nbx,nby,nbz,nlevel), dtype=np.int)\n block_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2],levels[:]] = np.arange(num_blocks)\n\n block_list = np.arange(num_blocks, dtype='int64')\n bc = []\n for i in range(num_blocks):\n if block_list[i] >= 0:\n ii, jj, kk = log_loc[i]\n neigh = block_grid[ii:ii+2,jj:jj+2,kk:kk+2,levels[i]]\n if np.all(neigh > -1):\n loc_ids = neigh.transpose().flatten()\n bc.append(loc_ids)\n block_list[loc_ids] = -1\n else:\n bc.append(np.array(i))\n block_list[i] = -1\n\n num_meshes = len(bc)\n\n self.meshes = []\n pbar = get_pbar(\"Constructing meshes\", num_meshes)\n for i in range(num_meshes):\n ob = bc[i][0]\n x = x1f[ob,:]\n y = x2f[ob,:]\n z = x3f[ob,:]\n if nbx > 1:\n x = np.concatenate([x, x1f[bc[i][1],1:]])\n if nby > 1:\n y = np.concatenate([y, x2f[bc[i][2],1:]])\n if nbz > 1:\n z = np.concatenate([z, x3f[bc[i][4],1:]])\n nxm = x.size\n nym = y.size\n nzm = z.size\n coords = np.zeros((nxm, nym, nzm, 3), dtype=\"float64\", order=\"C\")\n coords[:,:,:,0] = x[:,None,None]\n coords[:,:,:,1] = y[None,:,None]\n coords[:,:,:,2] = z[None,None,:]\n coords.shape = (nxm * nym * nzm, 3)\n cycle = np.rollaxis(np.indices((nxm-1,nym-1,nzm-1)), 0, 4)\n cycle.shape = ((nxm-1)*(nym-1)*(nzm-1), 3)\n off = _cis + cycle[:, np.newaxis]\n connectivity = ((off[:,:,0] * nym) + off[:,:,1]) * nzm + off[:,:,2]\n mesh = AthenaPPLogarithmicMesh(i, self.index_filename, connectivity,\n coords, self, bc[i],\n np.array([nxm-1, nym-1, nzm-1]))\n self.meshes.append(mesh)\n pbar.update(i)\n pbar.finish()\n mylog.debug(\"Done setting up meshes.\")\n\n def _detect_output_fields(self):\n self.field_list = [(\"athena_pp\", k) for k in self.ds._field_map]\n\n def _chunk_io(self, dobj, cache = True, local_only = False):\n gobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for subset in gobjs:\n yield YTDataChunk(dobj, \"io\", [subset],\n self._count_selection(dobj, [subset]),\n cache = cache)\n\nclass AthenaPPGrid(AMRGridPatch):\n _id_offset = 0\n\n def __init__(self, id, index, level):\n AMRGridPatch.__init__(self, id, filename = index.index_filename,\n index = index)\n self.Parent = None\n self.Children = []\n 
self.Level = level\n\n def _setup_dx(self):\n # So first we figure out what the index is. We don't assume\n # that dx=dy=dz , at least here. We probably do elsewhere.\n id = self.id - self._id_offset\n LE, RE = self.index.grid_left_edge[id,:], \\\n self.index.grid_right_edge[id,:]\n self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, \"code_length\")\n if self.ds.dimensionality < 2: self.dds[1] = 1.0\n if self.ds.dimensionality < 3: self.dds[2] = 1.0\n self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds\n\n def __repr__(self):\n return \"AthenaPPGrid_%04i (%s)\" % (self.id, self.ActiveDimensions)\n\nclass AthenaPPHierarchy(GridIndex):\n\n grid = AthenaPPGrid\n _dataset_type='athena_pp'\n _data_file = None\n\n def __init__(self, ds, dataset_type='athena_pp'):\n self.dataset = weakref.proxy(ds)\n self.directory = os.path.dirname(self.dataset.filename)\n self.dataset_type = dataset_type\n # for now, the index file is the dataset!\n self.index_filename = self.dataset.filename\n self._handle = ds._handle\n GridIndex.__init__(self, ds, dataset_type)\n\n def _detect_output_fields(self):\n self.field_list = [(\"athena_pp\", k) for k in self.dataset._field_map]\n\n def _count_grids(self):\n self.num_grids = self._handle.attrs[\"NumMeshBlocks\"]\n\n def _parse_index(self):\n num_grids = self._handle.attrs[\"NumMeshBlocks\"]\n\n self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')\n self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')\n self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')\n\n for i in range(num_grids):\n x = self._handle[\"x1f\"][i,:]\n y = self._handle[\"x2f\"][i,:]\n z = self._handle[\"x3f\"][i,:]\n self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')\n self.grid_right_edge[i] = np.array([x[-1], y[-1], z[-1]], dtype='float64')\n self.grid_dimensions[i] = self._handle.attrs[\"MeshBlockSize\"]\n levels = self._handle[\"Levels\"][:]\n\n self.grid_left_edge = self.ds.arr(self.grid_left_edge, \"code_length\")\n self.grid_right_edge = self.ds.arr(self.grid_right_edge, \"code_length\")\n\n self.grids = np.empty(self.num_grids, dtype='object')\n for i in range(num_grids):\n self.grids[i] = self.grid(i, self, levels[i])\n\n if self.dataset.dimensionality <= 2:\n self.grid_right_edge[:,2] = self.dataset.domain_right_edge[2]\n if self.dataset.dimensionality == 1:\n self.grid_right_edge[:,1:] = self.dataset.domain_right_edge[1:]\n self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')\n\n def _populate_grid_objects(self):\n for g in self.grids:\n g._prepare_grid()\n g._setup_dx()\n self.max_level = self._handle.attrs[\"MaxLevel\"]\n\n def _chunk_io(self, dobj, cache = True, local_only = False):\n gobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for subset in gobjs:\n yield YTDataChunk(dobj, \"io\", [subset],\n self._count_selection(dobj, [subset]),\n cache = cache)\n\nclass AthenaPPDataset(Dataset):\n _field_info_class = AthenaPPFieldInfo\n _dataset_type = \"athena_pp\"\n\n def __init__(self, filename, dataset_type='athena_pp',\n storage_filename=None, parameters=None,\n units_override=None, unit_system=\"code\"):\n self.fluid_types += (\"athena_pp\",)\n if parameters is None:\n parameters = {}\n self.specified_parameters = parameters\n if units_override is None:\n units_override = {}\n self._handle = HDF5FileHandler(filename)\n xrat = self._handle.attrs[\"RootGridX1\"][2]\n yrat = self._handle.attrs[\"RootGridX2\"][2]\n zrat = self._handle.attrs[\"RootGridX3\"][2]\n if xrat != 
1.0 or yrat != 1.0 or zrat != 1.0:\n self._index_class = AthenaPPLogarithmicIndex\n self.logarithmic = True\n else:\n self._index_class = AthenaPPHierarchy\n self.logarithmic = False\n Dataset.__init__(self, filename, dataset_type, units_override=units_override,\n unit_system=unit_system)\n self.filename = filename\n if storage_filename is None:\n storage_filename = '%s.yt' % filename.split('/')[-1]\n self.storage_filename = storage_filename\n self.backup_filename = self.filename[:-4] + \"_backup.gdf\"\n\n def _set_code_unit_attributes(self):\n \"\"\"\n Generates the conversion to various physical _units based on the\n parameter file\n \"\"\"\n if \"length_unit\" not in self.units_override:\n self.no_cgs_equiv_length = True\n for unit, cgs in [(\"length\", \"cm\"), (\"time\", \"s\"), (\"mass\", \"g\"),\n (\"temperature\", \"K\")]:\n # We set these to cgs for now, but they may have been overridden\n if getattr(self, unit+'_unit', None) is not None:\n continue\n mylog.warning(\"Assuming 1.0 = 1.0 %s\", cgs)\n setattr(self, \"%s_unit\" % unit, self.quan(1.0, cgs))\n\n self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /\n (self.time_unit**2 * self.length_unit))\n self.magnetic_unit.convert_to_units(\"gauss\")\n self.velocity_unit = self.length_unit / self.time_unit\n\n def _parse_parameter_file(self):\n\n xmin, xmax = self._handle.attrs[\"RootGridX1\"][:2]\n ymin, ymax = self._handle.attrs[\"RootGridX2\"][:2]\n zmin, zmax = self._handle.attrs[\"RootGridX3\"][:2]\n\n self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')\n self.domain_right_edge = np.array([xmax, ymax, zmax], dtype='float64')\n\n self.geometry = geom_map[self._handle.attrs[\"Coordinates\"].decode('utf-8')]\n self.domain_width = self.domain_right_edge-self.domain_left_edge\n self.domain_dimensions = self._handle.attrs[\"RootGridSize\"]\n\n self._field_map = {}\n k = 0\n for (i, dname), num_var in zip(enumerate(self._handle.attrs[\"DatasetNames\"]),\n self._handle.attrs[\"NumVariables\"]):\n for j in range(num_var):\n fname = self._handle.attrs[\"VariableNames\"][k].decode(\"ascii\",\"ignore\")\n self._field_map[fname] = (dname.decode(\"ascii\",\"ignore\"), j)\n k += 1\n\n self.refine_by = 2\n dimensionality = 3\n if self.domain_dimensions[2] == 1:\n dimensionality = 2\n if self.domain_dimensions[1] == 1:\n dimensionality = 1\n self.dimensionality = dimensionality\n self.current_time = self._handle.attrs[\"Time\"]\n self.unique_identifier = self.parameter_filename.__hash__()\n self.cosmological_simulation = False\n self.num_ghost_zones = 0\n self.field_ordering = 'fortran'\n self.boundary_conditions = [1]*6\n if 'periodicity' in self.specified_parameters:\n self.periodicity = ensure_tuple(self.specified_parameters['periodicity'])\n else:\n self.periodicity = (True,True,True,)\n if 'gamma' in self.specified_parameters:\n self.gamma = float(self.specified_parameters['gamma'])\n else:\n self.gamma = 5./3.\n\n self.current_redshift = self.omega_lambda = self.omega_matter = \\\n self.hubble_constant = self.cosmological_simulation = 0.0\n self.parameters['Time'] = self.current_time # Hardcode time conversion for now.\n self.parameters[\"HydroMethod\"] = 0 # Hardcode for now until field staggering is supported.\n if \"gamma\" in self.specified_parameters:\n self.parameters[\"Gamma\"] = self.specified_parameters[\"gamma\"]\n else:\n self.parameters[\"Gamma\"] = 5./3.\n self.mu = self.specified_parameters.get(\"mu\", default_mu)\n\n @classmethod\n def _is_valid(self, *args, **kwargs):\n try:\n if 
args[0].endswith('athdf'):\n return True\n except Exception: pass\n return False\n\n @property\n def _skip_cache(self):\n return True\n\n def __repr__(self):\n return self.basename.rsplit(\".\", 1)[0]\n", "import collections\nimport errno\nimport numpy as np\nimport os\nimport weakref\nimport struct\n\nfrom yt.funcs import \\\n get_pbar, \\\n only_on_root\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.geometry.geometry_handler import \\\n Index, \\\n YTDataChunk\nfrom yt.geometry.particle_oct_container import ParticleBitmap\nfrom yt.data_objects.particle_container import ParticleContainer\nfrom yt.utilities.lib.fnv_hash import fnv_hash\n\nCHUNKSIZE = 64**3\n\nclass ParticleIndex(Index):\n \"\"\"The Index subclass for particle datasets\"\"\"\n def __init__(self, ds, dataset_type):\n self.dataset_type = dataset_type\n self.dataset = weakref.proxy(ds)\n self.float_type = np.float64\n super(ParticleIndex, self).__init__(ds, dataset_type)\n self._initialize_index()\n\n def _setup_geometry(self):\n self.regions = None\n\n def get_smallest_dx(self):\n \"\"\"\n Returns (in code units) the smallest cell size in the simulation.\n \"\"\"\n return self.ds.arr(0, 'code_length')\n\n def _get_particle_type_counts(self):\n result = collections.defaultdict(lambda: 0)\n for df in self.data_files:\n for k in df.total_particles.keys():\n result[k] += df.total_particles[k]\n return dict(result)\n\n def convert(self, unit):\n return self.dataset.conversion_factors[unit]\n\n def _setup_filenames(self):\n template = self.dataset.filename_template\n ndoms = self.dataset.file_count\n cls = self.dataset._file_class\n self.data_files = []\n fi = 0\n for i in range(int(ndoms)):\n start = 0\n end = start + CHUNKSIZE\n while 1:\n df = cls(self.dataset, self.io, template % {'num':i}, fi, (start, end))\n if max(df.total_particles.values()) == 0:\n break\n fi += 1\n self.data_files.append(df)\n start = end\n end += CHUNKSIZE\n self.total_particles = sum(\n sum(d.total_particles.values()) for d in self.data_files)\n\n def _initialize_index(self):\n ds = self.dataset\n only_on_root(mylog.info, \"Allocating for %0.3e particles\",\n self.total_particles, global_rootonly = True)\n\n # if we have not yet set domain_left_edge and domain_right_edge then do\n # an I/O pass over the particle coordinates to determine a bounding box\n if self.ds.domain_left_edge is None:\n min_ppos = np.empty(3, dtype='float64')\n min_ppos[:] = np.nan\n max_ppos = np.empty(3, dtype='float64')\n max_ppos[:] = np.nan\n only_on_root(\n mylog.info,\n 'Bounding box cannot be inferred from metadata, reading '\n 'particle positions to infer bounding box')\n for df in self.data_files:\n for _, ppos in self.io._yield_coordinates(df):\n min_ppos = np.nanmin(np.vstack([min_ppos, ppos]), axis=0)\n max_ppos = np.nanmax(np.vstack([max_ppos, ppos]), axis=0)\n only_on_root(\n mylog.info,\n 'Load this dataset with bounding_box=[%s, %s] to avoid I/O '\n 'overhead from inferring bounding_box.' 
% (min_ppos, max_ppos))\n ds.domain_left_edge = ds.arr(1.05*min_ppos, 'code_length')\n ds.domain_right_edge = ds.arr(1.05*max_ppos, 'code_length')\n ds.domain_width = ds.domain_right_edge - ds.domain_left_edge\n\n # use a trivial morton index for datasets containing a single chunk\n if len(self.data_files) == 1:\n order1 = 1\n order2 = 1\n else:\n order1 = ds.index_order[0]\n order2 = ds.index_order[1]\n\n if order1 == 1 and order2 == 1:\n dont_cache = True\n else:\n dont_cache = False\n\n # If we have applied a bounding box then we can't cache the\n # ParticleBitmap because it is doman dependent\n if getattr(ds, \"_domain_override\", False):\n dont_cache = True\n\n if not hasattr(self.ds, '_file_hash'):\n self.ds._file_hash = self._generate_hash()\n\n self.regions = ParticleBitmap(\n ds.domain_left_edge, ds.domain_right_edge,\n ds.periodicity, self.ds._file_hash,\n len(self.data_files), \n index_order1=order1,\n index_order2=order2)\n\n # Load Morton index from file if provided\n if getattr(ds, 'index_filename', None) is None:\n fname = ds.parameter_filename + \".index{}_{}.ewah\".format(\n self.regions.index_order1, self.regions.index_order2)\n else:\n fname = ds.index_filename\n\n dont_load = dont_cache and not hasattr(ds, 'index_filename')\n try:\n if dont_load:\n raise OSError\n rflag = self.regions.load_bitmasks(fname)\n rflag = self.regions.check_bitmasks()\n self._initialize_frontend_specific()\n if rflag == 0:\n raise OSError\n except (OSError, struct.error):\n self.regions.reset_bitmasks()\n self._initialize_coarse_index()\n self._initialize_refined_index()\n wdir = os.path.dirname(fname)\n if not dont_cache and os.access(wdir, os.W_OK):\n # Sometimes os mis-reports whether a directory is writable,\n # So pass if writing the bitmask file fails.\n try:\n self.regions.save_bitmasks(fname)\n except OSError:\n pass\n rflag = self.regions.check_bitmasks()\n \n def _initialize_coarse_index(self):\n pb = get_pbar(\"Initializing coarse index \", len(self.data_files))\n for i, data_file in enumerate(self.data_files):\n pb.update(i)\n for ptype, pos in self.io._yield_coordinates(data_file):\n ds = self.ds\n if hasattr(ds, '_sph_ptypes') and ptype == ds._sph_ptypes[0]:\n hsml = self.io._get_smoothing_length(\n data_file, pos.dtype, pos.shape)\n else:\n hsml = None\n self.regions._coarse_index_data_file(\n pos, hsml, data_file.file_id)\n self.regions._set_coarse_index_data_file(data_file.file_id)\n pb.finish()\n self.regions.find_collisions_coarse()\n\n def _initialize_refined_index(self):\n mask = self.regions.masks.sum(axis=1).astype('uint8')\n max_npart = max(sum(d.total_particles.values())\n for d in self.data_files) * 28\n sub_mi1 = np.zeros(max_npart, \"uint64\")\n sub_mi2 = np.zeros(max_npart, \"uint64\")\n pb = get_pbar(\"Initializing refined index\", len(self.data_files))\n mask_threshold = getattr(self, '_index_mask_threshold', 2)\n count_threshold = getattr(self, '_index_count_threshold', 256)\n mylog.debug(\"Using estimated thresholds of %s and %s for refinement\", mask_threshold, count_threshold)\n total_refined = 0\n total_coarse_refined = ((mask >= 2) & (self.regions.particle_counts > count_threshold)).sum()\n mylog.debug(\"This should produce roughly %s zones, for %s of the domain\",\n total_coarse_refined, 100 * total_coarse_refined / mask.size)\n for i, data_file in enumerate(self.data_files):\n coll = None\n pb.update(i)\n nsub_mi = 0\n for ptype, pos in self.io._yield_coordinates(data_file):\n if pos.size == 0: continue\n if hasattr(self.ds, '_sph_ptypes') and ptype == 
self.ds._sph_ptypes[0]:\n hsml = self.io._get_smoothing_length(\n data_file, pos.dtype, pos.shape)\n else:\n hsml = None\n nsub_mi, coll = self.regions._refined_index_data_file(\n coll, pos, hsml, mask, sub_mi1, sub_mi2,\n data_file.file_id, nsub_mi, count_threshold = count_threshold,\n mask_threshold = mask_threshold)\n total_refined += nsub_mi\n self.regions.bitmasks.append(data_file.file_id, coll)\n pb.finish()\n self.regions.find_collisions_refined()\n\n def _detect_output_fields(self):\n # TODO: Add additional fields\n self._setup_filenames()\n dsl = []\n units = {}\n pcounts = self._get_particle_type_counts()\n field_cache = {}\n for dom in self.data_files:\n if dom.filename in field_cache:\n fl, _units = field_cache[dom.filename]\n else:\n fl, _units = self.io._identify_fields(dom)\n field_cache[dom.filename] = fl, _units\n units.update(_units)\n dom._calculate_offsets(fl, pcounts)\n for f in fl:\n if f not in dsl: dsl.append(f)\n self.field_list = dsl\n ds = self.dataset\n ds.particle_types = tuple(set(pt for pt, ds in dsl))\n # This is an attribute that means these particle types *actually*\n # exist. As in, they are real, in the dataset.\n ds.field_units.update(units)\n ds.particle_types_raw = ds.particle_types\n\n def _identify_base_chunk(self, dobj):\n # Must check that chunk_info contains the right number of ghost zones\n if getattr(dobj, \"_chunk_info\", None) is None:\n if isinstance(dobj, ParticleContainer):\n dobj._chunk_info = [dobj]\n else:\n # TODO: only return files\n if getattr(dobj.selector, 'is_all_data', False):\n nfiles = self.regions.nfiles\n dfi = np.arange(nfiles)\n else:\n dfi, file_masks, addfi = self.regions.identify_file_masks(\n dobj.selector)\n nfiles = len(file_masks)\n dobj._chunk_info = [None for _ in range(nfiles)]\n for i in range(nfiles):\n domain_id = i+1\n dobj._chunk_info[i] = ParticleContainer(\n dobj, [self.data_files[dfi[i]]],\n domain_id = domain_id)\n # NOTE: One fun thing about the way IO works is that it\n # consolidates things quite nicely. So we should feel free to\n # create as many objects as part of the chunk as we want, since\n # it'll take the set() of them. 
So if we break stuff up like\n # this here, we end up in a situation where we have the ability\n # to break things down further later on for buffer zones and the\n # like.\n dobj._current_chunk, = self._chunk_all(dobj)\n\n def _chunk_all(self, dobj):\n oobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n yield YTDataChunk(dobj, \"all\", oobjs, None)\n\n def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):\n sobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for og in sobjs:\n with og._expand_data_files():\n if ngz > 0:\n g = og.retrieve_ghost_zones(ngz, [], smoothed=True)\n else:\n g = og\n yield YTDataChunk(dobj, \"spatial\", [g])\n\n def _chunk_io(self, dobj, cache = True, local_only = False):\n oobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for container in oobjs:\n yield YTDataChunk(dobj, \"io\", [container], None, cache = cache)\n\n def _generate_hash(self):\n # Generate an FNV hash by creating a byte array containing the\n # modification time of as well as the first and last 1 MB of data in\n # every output file\n ret = bytearray()\n for pfile in self.data_files:\n\n # only look at \"real\" files, not \"fake\" files generated by the\n # chunking system\n if pfile.start not in (0, None):\n continue\n try:\n mtime = os.path.getmtime(pfile.filename)\n except OSError as e:\n if e.errno == errno.ENOENT:\n # this is an in-memory file so we return with a dummy\n # value\n return -1\n else:\n raise\n ret.extend(str(mtime).encode('utf-8'))\n size = os.path.getsize(pfile.filename)\n if size > 1e6:\n size = int(1e6)\n with open(pfile.filename, 'rb') as fh:\n # read in first and last 1 MB of data\n data = fh.read(size)\n fh.seek(-size, os.SEEK_END)\n data = fh.read(size)\n ret.extend(data)\n return fnv_hash(ret)\n\n def _initialize_frontend_specific(self):\n \"\"\"This is for frontend-specific initialization code\n\n If there are frontend-specific things that need to be set while \n creating the index, this function forces these operations to happen\n in cases where we are reloading the index from a sidecar file.\n \"\"\"\n pass\n" ]
[ [ "numpy.empty" ], [ "numpy.max", "numpy.concatenate", "numpy.array", "numpy.empty", "numpy.zeros", "numpy.ones", "numpy.arange", "numpy.sqrt", "numpy.all", "numpy.indices" ], [ "numpy.vstack", "numpy.arange", "numpy.empty", "numpy.zeros" ] ]
YeongHyeon/ReXNet-TF2
[ "a391f4372d628044be7cc49641d096cb3f320255" ]
[ "source/layers.py" ]
[ "import tensorflow as tf\n\nclass Layers(object):\n\n def __init__(self):\n\n self.name_bank, self.params_trainable = [], []\n self.num_params = 0\n self.initializer_xavier = tf.initializers.glorot_normal()\n\n def elu(self, inputs): return tf.nn.elu(inputs)\n def relu(self, inputs): return tf.nn.relu(inputs)\n def sigmoid(self, inputs): return tf.nn.sigmoid(inputs)\n def softmax(self, inputs): return tf.nn.softmax(inputs)\n def swish(self, inputs): return tf.nn.swish(inputs)\n def relu6(self, inputs): return tf.nn.relu6(inputs)\n\n def dropout(self, inputs, rate): return tf.nn.dropout(inputs, rate=rate)\n\n def maxpool(self, inputs, pool_size, stride_size):\n\n return tf.nn.max_pool2d(inputs, ksize=[1, pool_size, pool_size, 1], \\\n padding='VALID', strides=[1, stride_size, stride_size, 1])\n\n def avgpool(self, inputs, pool_size, stride_size):\n\n return tf.nn.avg_pool2d(inputs, ksize=[1, pool_size, pool_size, 1], \\\n padding='VALID', strides=[1, stride_size, stride_size, 1])\n\n def get_weight(self, vshape, transpose=False, bias=True, name=\"\"):\n\n try:\n idx_w = self.name_bank.index(\"%s_w\" %(name))\n if(bias): idx_b = self.name_bank.index(\"%s_b\" %(name))\n except:\n w = tf.Variable(self.initializer_xavier(vshape), \\\n name=\"%s_w\" %(name), trainable=True, dtype=tf.float32)\n self.name_bank.append(\"%s_w\" %(name))\n self.params_trainable.append(w)\n\n tmpparams = 1\n for d in vshape: tmpparams *= d\n self.num_params += tmpparams\n\n if(bias):\n if(transpose): b = tf.Variable(self.initializer_xavier([vshape[-2]]), \\\n name=\"%s_b\" %(name), trainable=True, dtype=tf.float32)\n else: b = tf.Variable(self.initializer_xavier([vshape[-1]]), \\\n name=\"%s_b\" %(name), trainable=True, dtype=tf.float32)\n self.name_bank.append(\"%s_b\" %(name))\n self.params_trainable.append(b)\n\n self.num_params += vshape[-2]\n else:\n w = self.params_trainable[idx_w]\n if(bias): b = self.params_trainable[idx_b]\n\n if(bias): return w, b\n else: return w\n\n def fullcon(self, inputs, variables):\n\n [weights, biasis] = variables\n out = tf.matmul(inputs, weights) + biasis\n\n return out\n\n def conv2d(self, inputs, variables, stride_size, padding):\n\n [weights, biasis] = variables\n out = tf.nn.conv2d(inputs, weights, \\\n strides=[1, stride_size, stride_size, 1], padding=padding) + biasis\n\n return out\n\n def dwconv2d(self, inputs, variables, stride_size, padding):\n\n [weights, biasis] = variables\n\n out = tf.nn.depthwise_conv2d(inputs, weights, \\\n strides=[1, stride_size, stride_size, 1], padding=padding) + biasis\n\n return out\n\n def batch_norm(self, inputs, name=\"\"):\n\n # https://arxiv.org/pdf/1502.03167.pdf\n\n mean = tf.reduce_mean(inputs)\n std = tf.math.reduce_std(inputs)\n var = std**2\n\n try:\n idx_offset = self.name_bank.index(\"%s_offset\" %(name))\n idx_scale = self.name_bank.index(\"%s_scale\" %(name))\n except:\n offset = tf.Variable(0, \\\n name=\"%s_offset\" %(name), trainable=True, dtype=tf.float32)\n self.name_bank.append(\"%s_offset\" %(name))\n self.params_trainable.append(offset)\n self.num_params += 1\n scale = tf.Variable(1, \\\n name=\"%s_scale\" %(name), trainable=True, dtype=tf.float32)\n self.name_bank.append(\"%s_scale\" %(name))\n self.params_trainable.append(scale)\n self.num_params += 1\n else:\n offset = self.params_trainable[idx_offset]\n scale = self.params_trainable[idx_scale]\n\n offset # zero\n scale # one\n out = tf.nn.batch_normalization(\n x = inputs,\n mean=mean,\n variance=var,\n offset=offset,\n scale=scale,\n variance_epsilon=1e-12,\n 
name=name\n )\n\n return out\n" ]
[ [ "tensorflow.nn.swish", "tensorflow.nn.batch_normalization", "tensorflow.initializers.glorot_normal", "tensorflow.nn.relu", "tensorflow.nn.conv2d", "tensorflow.math.reduce_std", "tensorflow.matmul", "tensorflow.nn.relu6", "tensorflow.Variable", "tensorflow.nn.depthwise_conv2d", "tensorflow.nn.max_pool2d", "tensorflow.nn.elu", "tensorflow.nn.softmax", "tensorflow.nn.sigmoid", "tensorflow.reduce_mean", "tensorflow.nn.avg_pool2d", "tensorflow.nn.dropout" ] ]
b-safwat/multi_action_recognition
[ "1a85da64cf236b9fb7c9a58ae75bdd092d05fab8", "1a85da64cf236b9fb7c9a58ae75bdd092d05fab8" ]
[ "c3d_model/predict_c3d_ucf101.py", "Miscillanious/random_split_train_val.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Trains and Evaluates the MNIST network using a feed dictionary.\"\"\"\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport time\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport input_data\nimport c3d_model\nimport numpy as np\n\n# Basic model parameters as external flags.\nflags = tf.app.flags\ngpu_num = 1\n\n\ndef placeholder_inputs(batch_size):\n \"\"\"Generate placeholder variables to represent the input tensors.\n These placeholders are used as inputs by the rest of the model building\n code and will be fed from the downloaded data in the .run() loop, below.\n Args:\n batch_size: The batch size will be baked into both placeholders.\n Returns:\n images_placeholder: Images placeholder.\n labels_placeholder: Labels placeholder.\n \"\"\"\n # Note that the shapes of the placeholders match the shapes of the full\n # image and label tensors, except the first dimension is now batch_size\n # rather than the full size of the train or test data sets.\n images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,c3d_model.NUM_FRAMES_PER_CLIP,c3d_model.CROP_SIZE,\n c3d_model.CROP_SIZE,c3d_model.CHANNELS))\n\n labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))\n return images_placeholder, labels_placeholder\n\ndef _variable_on_cpu(name, shape, initializer):\n #with tf.device('/cpu:%d' % cpu_id):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var\n\ndef _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))\n if wd is not None:\n weight_decay = tf.nn.l2_loss(var) * wd\n tf.add_to_collection('losses', weight_decay)\n return var\n\ndef run_test(ds_dir, mean_file, model_name, test_list_file, batch_size):\n tf.reset_default_graph()\n try:\n FLAGS = flags.FLAGS\n FLAGS.batch_size = batch_size\n except:\n flags.DEFINE_integer('batch_size', batch_size, 'Batch size.')\n FLAGS = flags.FLAGS\n\n #model_name = \"./models-5sec/c3d_ucf_model-4999\"\n #model_name = \"./models.5sec/c3d_ucf_model-75450\"\n #model_name = \"./models-1sec/c3d_ucf_model-4999\"\n #model_name = \"./models.5sec.summarized.1sec/c3d_ucf_model-4999\"\n #model_name = \"./models-multi-5sec-5sec_sum_1/c3d_ucf_model-4999\"\n #model_name = \"./models-multi-5-5sum1/c3d_ucf_model-9999\"\n\n num_test_videos = len(list(open(test_list_file,'r')))\n print(\"Number of test videos={}\".format(num_test_videos))\n\n # max_bt_sz = -1;min\n #\n # for factor in range(1, 31):\n # if num_test_videos%factor==0:\n # max_bt_sz=factor\n # if max_bt_sz == 1:\n # print(\"no good batchsize available, setting to 25\")\n # max_bt_sz = 20\n\n # 
FLAGS.batch_size = max_bt_sz\n # print(\"batch size:\", FLAGS.batch_size)\n\n # Get the sets of images and labels for testing\n images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size * gpu_num)\n\n with tf.variable_scope('var_name') as var_scope:\n weights = {\n 'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),\n 'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),\n 'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04, 0.00),\n 'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04, 0.00),\n 'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04, 0.00),\n 'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04, 0.00),\n 'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04, 0.00),\n 'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04, 0.00),\n 'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),\n 'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),\n 'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.04, 0.005)\n }\n biases = {\n 'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),\n 'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),\n 'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),\n 'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),\n 'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),\n 'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),\n 'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),\n 'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),\n 'bd1': _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),\n 'bd2': _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),\n 'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04, 0.0),\n }\n\n logits = []\n\n for gpu_index in range(0, gpu_num):\n with tf.device('/gpu:%d' % gpu_index):\n logit = c3d_model.inference_c3d(images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1)\n * FLAGS.batch_size,:,:,:,:],\n 0,\n FLAGS.batch_size,\n weights,\n biases)\n logits.append(logit)\n\n logits = tf.concat(logits,0)\n norm_score = tf.nn.softmax(logits)\n\n saver = tf.train.Saver()\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Restoring a saved model.\n if not model_name.__contains__(\".meta\"):\n saver = tf.train.import_meta_graph(model_name+'.meta')\n else:\n # saver = tf.train.import_meta_graph(model_name)\n var_list = [v for v in tf.trainable_variables()]\n saver = tf.train.Saver(weights.values() + biases.values())\n\n saver.restore(sess, model_name)\n\n # And then after everything is built, start the testing loop.\n bufsize = 0\n write_file = open(\"predict_ret.txt\", \"w+\", bufsize)\n next_start_pos = 0\n all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)\n\n print (\"num_test_videos, batch_size, gpu_num,all steps\", num_test_videos, FLAGS.batch_size, gpu_num, all_steps)\n\n total_testing_duration = 0\n\n for step in range(all_steps):\n # Fill a feed dictionary with the actual set of images and labels\n # for this particular testing step.\n start_time = time.time()\n# try:\n test_images, test_labels, next_start_pos, _, valid_len = \\\n input_data.read_clip_and_label(\n ds_dir,\n mean_file,\n test_list_file,\n FLAGS.batch_size * gpu_num,\n start_pos=next_start_pos,\n 
num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP\n )\n# except:\n# print(\"exception occured loading at step:\", step)\n # try:\n predict_score = norm_score.eval(\n session=sess,\n feed_dict={images_placeholder: test_images}\n )\n # except:\n # print(\"exception occured prediction at step:\", step)\n\n duration = time.time() - start_time\n print('Step %d: %.3f sec' % (step, duration), 'next start index:', next_start_pos)\n total_testing_duration += duration\n\n# try:\n for i in range(0, valid_len):\n true_label = test_labels[i],\n top1_predicted_label = np.argmax(predict_score[i])\n\n # Write results: true label, class prob for true label, predicted label, class prob for predicted label\n write_file.write('{}, {}, {}, {}\\n'.format(\n true_label[0],\n predict_score[i][true_label],\n top1_predicted_label,\n predict_score[i][top1_predicted_label]))\n# except:\n# print (\"exception occured saving predictions at step:\", step)\n # break # test only 1 batch\n\n print('Prediction time taken =', total_testing_duration)\n\n import datetime\n now = datetime.datetime.now()\n\n with open('stats.txt', 'a') as f:\n f.write(now.strftime(\"%Y-%m-%d %H:%M\\n\"))\n f.write(\" testing time:\"+ str(total_testing_duration) + \"\\n\")\n\n write_file.close()\n print(\"done\")\n\nimport sys\n\ndef main(_):\n # run_test(sys.argv[1])\n ds_dir = \"/home/bassel/data/office-actions/office_actions_19/short_clips/resized_frms\"\n mean_file = \"../c3d_data_preprocessing/oa_kinetics_calculated_mean.npy\"\n model_name = \"c3d_ucf_model-14698\"\n testing_file = \"\"\n TESTING_BATCH_SIZE = 16\n run_test(ds_dir, mean_file, \"model/\" + model_name, testing_file, TESTING_BATCH_SIZE)\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "import numpy as np\n\n\ndef save_list_to_file(z_list, z_file):\n with open(z_file, 'w') as fw:\n fw.writelines(z_list)\n\n\ndef random_split_train_test(train_file, out_train_file, out_test_file, train_percentage=0.8):\n with open(train_file) as fr:\n lines = fr.readlines()\n\n np.random.shuffle(lines)\n\n train_data, test_data = lines[0:int(train_percentage*len(lines))], lines[int(train_percentage*len(lines)):]\n\n save_list_to_file(train_data, out_train_file)\n save_list_to_file(test_data, out_test_file)\n\nrandom_split_train_test(\"/home/bassel/data/oa_kinetics/lbls/actions_stack_list.txt\",\n \"/home/bassel/data/oa_kinetics/lbls/action_train_stacks_list.txt\",\n \"/home/bassel/data/oa_kinetics/lbls/action_test_stacks_list.txt\")" ]
[ [ "tensorflow.trainable_variables", "tensorflow.truncated_normal_initializer", "tensorflow.concat", "tensorflow.train.import_meta_graph", "tensorflow.reset_default_graph", "tensorflow.train.Saver", "tensorflow.nn.l2_loss", "tensorflow.ConfigProto", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.get_variable", "numpy.argmax", "tensorflow.device", "tensorflow.nn.softmax", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.add_to_collection" ], [ "numpy.random.shuffle" ] ]
RonSherfey/data-act-broker-backend
[ "d287abda2cac06dd479ecf0127e789cb8e59387d" ]
[ "tests/integration/error_warning_file_tests.py" ]
[ "import os\nimport csv\nimport logging\nimport itertools\nimport pandas as pd\nimport psutil as ps\nfrom _pytest.monkeypatch import MonkeyPatch\n\nfrom dataactcore.interfaces.db import GlobalDB\nfrom dataactcore.config import CONFIG_SERVICES\nfrom dataactcore.models.domainModels import concat_tas_dict\nfrom dataactcore.models.lookups import (FILE_TYPE_DICT, JOB_TYPE_DICT, JOB_STATUS_DICT, RULE_SEVERITY_DICT)\nfrom dataactcore.models.jobModels import Submission, Job, FileType\nfrom dataactcore.models.userModel import User\nfrom dataactcore.models.errorModels import ErrorMetadata\nfrom dataactcore.models.stagingModels import (\n Appropriation, ObjectClassProgramActivity, AwardFinancial, FlexField, TotalObligations)\nfrom dataactvalidator.health_check import create_app\nimport dataactvalidator.validation_handlers.validationManager\nfrom dataactvalidator.validation_handlers.validationManager import (\n ValidationManager, FileColumn, CsvReader, parse_fields\n)\nimport dataactvalidator.validation_handlers.validator\nfrom dataactbroker.handlers.fileHandler import report_file_name\n\nfrom tests.unit.dataactcore.factories.domain import SF133Factory, TASFactory\nfrom tests.integration.baseTestValidator import BaseTestValidator\nfrom tests.integration.integration_test_helper import insert_submission, insert_job\n\nFILES_DIR = os.path.join('tests', 'integration', 'data')\n\n# Valid Files\nAPPROP_FILE = os.path.join(FILES_DIR, 'appropValid.csv')\nAFINANCIAL_FILE = os.path.join(FILES_DIR, 'awardFinancialValid.csv')\nCROSS_FILE_A = os.path.join(FILES_DIR, 'cross_file_A.csv')\nCROSS_FILE_B = os.path.join(FILES_DIR, 'cross_file_B.csv')\n\n# Invalid Files\nHEADER_ERROR = os.path.join(FILES_DIR, 'appropHeaderError.csv')\nREAD_ERROR = os.path.join(FILES_DIR, 'appropReadError.csv')\nLENGTH_ERROR = os.path.join(FILES_DIR, 'appropLengthError.csv')\nTYPE_ERROR = os.path.join(FILES_DIR, 'appropTypeError.csv')\nREQUIRED_ERROR = os.path.join(FILES_DIR, 'appropRequiredError.csv')\nRULE_FAILED_WARNING = os.path.join(FILES_DIR, 'appropInvalidWarning.csv')\nRULE_FAILED_ERROR = os.path.join(FILES_DIR, 'appropInvalidError.csv')\nINVALID_CROSS_A = os.path.join(FILES_DIR, 'invalid_cross_file_A.csv')\nINVALID_CROSS_B = os.path.join(FILES_DIR, 'invalid_cross_file_B.csv')\nBLANK_C = os.path.join(FILES_DIR, 'awardFinancialBlank.csv')\n\n\nclass ErrorWarningTests(BaseTestValidator):\n \"\"\" Overall integration tests for error/warning reports.\n\n For each file type (single-file, cross-file, errors, warnings), test if each has\n - the correct structure\n - each column's content is correct after testing each possible type of error:\n - formatting\n - length\n - types\n - required/optional\n - SQL validation\n\n Attributes:\n session: the database session connection\n validator: validator instance to be used for the tests\n submission_id: the id of the submission foundation\n submission: the submission foundation to be used for all the tests\n val_job: the validation job to be used for all the tests\n \"\"\"\n CHUNK_SIZES = [4]\n PARALLEL_OPTIONS = [True, False]\n BATCH_SQL_OPTIONS = [True, False]\n CONFIGS = list(itertools.product(CHUNK_SIZES, PARALLEL_OPTIONS, BATCH_SQL_OPTIONS))\n\n @classmethod\n def setUpClass(cls):\n \"\"\" Set up class-wide resources (test data) \"\"\"\n super(ErrorWarningTests, cls).setUpClass()\n\n logging.getLogger('dataactcore').setLevel(logging.ERROR)\n logging.getLogger('dataactvalidator').setLevel(logging.ERROR)\n\n with create_app().app_context():\n cls.monkeypatch = MonkeyPatch()\n\n # get the 
submission test users\n sess = GlobalDB.db().session\n cls.session = sess\n\n # set up default e-mails for tests\n admin_user = sess.query(User).filter(User.email == cls.test_users['admin_user']).one()\n\n cls.validator = ValidationManager(directory=CONFIG_SERVICES['error_report_path'])\n\n # Just have one valid submission and then keep on reloading files\n cls.submission_id = insert_submission(sess, admin_user.user_id, cgac_code='SYS', start_date='01/2001',\n end_date='03/2001', is_quarter=True)\n cls.submission = sess.query(Submission).filter_by(submission_id=cls.submission_id).one()\n cls.val_job = insert_job(cls.session, FILE_TYPE_DICT['appropriations'], JOB_STATUS_DICT['ready'],\n JOB_TYPE_DICT['csv_record_validation'], cls.submission_id,\n filename=JOB_TYPE_DICT['csv_record_validation'])\n cls.original_reports = set(os.listdir(CONFIG_SERVICES['error_report_path']))\n\n # adding TAS to ensure valid file is valid\n tas1 = TASFactory(account_num=1, allocation_transfer_agency='019', agency_identifier='072',\n beginning_period_of_availa=None, ending_period_of_availabil=None,\n availability_type_code='X', main_account_code='0306', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas2 = TASFactory(account_num=2, allocation_transfer_agency=None, agency_identifier='019',\n beginning_period_of_availa='2016', ending_period_of_availabil='2016',\n availability_type_code=None, main_account_code='0113', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas3 = TASFactory(account_num=3, allocation_transfer_agency=None, agency_identifier='028',\n beginning_period_of_availa=None, ending_period_of_availabil=None,\n availability_type_code='X', main_account_code='0406', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas4 = TASFactory(account_num=4, allocation_transfer_agency=None, agency_identifier='028',\n beginning_period_of_availa='2010', ending_period_of_availabil='2011',\n availability_type_code=None, main_account_code='0406', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas5 = TASFactory(account_num=5, allocation_transfer_agency='069', agency_identifier='013',\n beginning_period_of_availa=None, ending_period_of_availabil=None,\n availability_type_code='X', main_account_code='2050', sub_account_code='005',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas6 = TASFactory(account_num=6, allocation_transfer_agency='028', agency_identifier='028',\n beginning_period_of_availa=None, ending_period_of_availabil=None,\n availability_type_code='X', main_account_code='8007', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas7 = TASFactory(account_num=7, allocation_transfer_agency=None, agency_identifier='049',\n beginning_period_of_availa=None, ending_period_of_availabil=None,\n availability_type_code='X', main_account_code='0100', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas8 = TASFactory(account_num=8, allocation_transfer_agency=None, agency_identifier='049',\n beginning_period_of_availa='2010', ending_period_of_availabil='2011',\n availability_type_code=None, main_account_code='0100', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas9 = TASFactory(account_num=9, allocation_transfer_agency=None, agency_identifier='049',\n beginning_period_of_availa='2014', ending_period_of_availabil='2015',\n 
availability_type_code=None, main_account_code='0100', sub_account_code='000',\n internal_start_date='01-01-2000', financial_indicator2='F')\n tas10 = TASFactory(account_num=10, allocation_transfer_agency=None, agency_identifier='049',\n beginning_period_of_availa='2015', ending_period_of_availabil='2016',\n availability_type_code=None, main_account_code='0100', sub_account_code='000',\n internal_start_date='01-01-2000')\n sess.add_all([tas1, tas2, tas3, tas4, tas5, tas6, tas7, tas8, tas9, tas10])\n\n # adding GTAS to ensure valid file is valid\n gtas1 = SF133Factory(tas=concat_tas_dict(tas1.component_dict()), allocation_transfer_agency='019',\n agency_identifier='072', beginning_period_of_availa=None, line=1009,\n ending_period_of_availabil=None, availability_type_code='X',\n main_account_code='0306', sub_account_code='000', period=6, fiscal_year=2001)\n gtas2 = SF133Factory(tas=concat_tas_dict(tas2.component_dict()), allocation_transfer_agency=None,\n agency_identifier='019', beginning_period_of_availa='2016', line=1009,\n ending_period_of_availabil='2016', availability_type_code=None,\n main_account_code='0113', sub_account_code='000', period=6, fiscal_year=2001)\n gtas3 = SF133Factory(tas=concat_tas_dict(tas3.component_dict()), allocation_transfer_agency=None,\n agency_identifier='028', beginning_period_of_availa=None, line=1009,\n ending_period_of_availabil=None, availability_type_code='X',\n main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)\n gtas4 = SF133Factory(tas=concat_tas_dict(tas4.component_dict()), allocation_transfer_agency=None,\n agency_identifier='028', beginning_period_of_availa='2010', line=1009,\n ending_period_of_availabil='2011', availability_type_code=None,\n main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)\n gtas5 = SF133Factory(tas=concat_tas_dict(tas5.component_dict()), allocation_transfer_agency='069',\n agency_identifier='013', beginning_period_of_availa=None, line=1009,\n ending_period_of_availabil=None, availability_type_code='X',\n main_account_code='2050', sub_account_code='005', period=6, fiscal_year=2001)\n gtas6 = SF133Factory(tas=concat_tas_dict(tas6.component_dict()), allocation_transfer_agency='028',\n agency_identifier='028', beginning_period_of_availa=None, line=1009,\n ending_period_of_availabil=None, availability_type_code='X',\n main_account_code='8007', sub_account_code='000', period=6, fiscal_year=2001)\n gtas7 = SF133Factory(tas=concat_tas_dict(tas7.component_dict()), allocation_transfer_agency=None,\n agency_identifier='049', beginning_period_of_availa=None, line=1009,\n ending_period_of_availabil=None, availability_type_code='X',\n main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)\n gtas8 = SF133Factory(tas=concat_tas_dict(tas8.component_dict()), allocation_transfer_agency=None,\n agency_identifier='049', beginning_period_of_availa='2010', line=1009,\n ending_period_of_availabil='2011', availability_type_code=None,\n main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)\n gtas9 = SF133Factory(tas=concat_tas_dict(tas9.component_dict()), allocation_transfer_agency=None,\n agency_identifier='049', beginning_period_of_availa='2014', line=1009,\n ending_period_of_availabil='2015', availability_type_code=None,\n main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)\n gtas10 = SF133Factory(tas=concat_tas_dict(tas10.component_dict()), allocation_transfer_agency=None,\n agency_identifier='049', 
beginning_period_of_availa='2015', line=1009,\n ending_period_of_availabil='2016', availability_type_code=None,\n main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)\n sess.add_all([gtas1, gtas2, gtas3, gtas4, gtas5, gtas6, gtas7, gtas8, gtas9, gtas10])\n sess.commit()\n\n def setUp(self):\n \"\"\"Test set-up.\"\"\"\n super(ErrorWarningTests, self).setUp()\n\n def get_report_path(self, file_type, warning=False, cross_type=None):\n filename = report_file_name(self.submission_id, warning, file_type, cross_type)\n return os.path.join(CONFIG_SERVICES['error_report_path'], filename)\n\n def setup_csv_record_validation(self, file, file_type):\n self.session.query(Job).delete(synchronize_session='fetch')\n self.val_job = insert_job(self.session, FILE_TYPE_DICT[file_type], JOB_STATUS_DICT['ready'],\n JOB_TYPE_DICT['csv_record_validation'], self.submission_id,\n filename=file)\n\n def setup_validation(self):\n self.session.query(Job).delete(synchronize_session='fetch')\n self.val_job = insert_job(self.session, None, JOB_STATUS_DICT['ready'],\n JOB_TYPE_DICT['validation'], self.submission_id,\n filename=None)\n\n def get_report_content(self, report_path, cross_file=False):\n report_content = []\n report_headers = None\n with open(report_path, 'r') as report_csv:\n reader = csv.DictReader(report_csv)\n for row in reader:\n report_content.append(row)\n report_headers = reader.fieldnames\n row_number_col = 'Row Number' if not cross_file else 'Source Row Number'\n if row_number_col in report_headers:\n report_content = list(sorted(report_content, key=lambda x: int(x[row_number_col] or 0)))\n return report_headers, report_content\n\n def generate_file_report(self, file, file_type, warning=False, ignore_error=False):\n self.setup_csv_record_validation(file, file_type)\n if ignore_error:\n try:\n self.validator.validate_job(self.val_job.job_id)\n except Exception:\n pass\n else:\n self.validator.validate_job(self.val_job.job_id)\n report_path = self.get_report_path(file_type, warning=warning)\n report_content = self.get_report_content(report_path, cross_file=False)\n return report_content\n\n def generate_cross_file_report(self, cross_files, warning=False, ignore_error=False):\n cross_types = []\n for cross_file in cross_files:\n cross_types.append(cross_file[1])\n self.generate_file_report(cross_file[0], cross_file[1], warning=warning, ignore_error=ignore_error)\n\n self.setup_validation()\n if ignore_error:\n try:\n self.validator.validate_job(self.val_job.job_id)\n except Exception:\n pass\n else:\n self.validator.validate_job(self.val_job.job_id)\n report_path = self.get_report_path(cross_types[0], cross_type=cross_types[1], warning=warning)\n report_content = self.get_report_content(report_path, cross_file=True)\n return report_content\n\n def cleanup(self):\n new_reports = set(os.listdir(CONFIG_SERVICES['error_report_path'])) - self.original_reports\n for new_report in new_reports:\n os.remove(os.path.join(CONFIG_SERVICES['error_report_path'], new_report))\n self.session.query(Appropriation).delete(synchronize_session='fetch')\n self.session.query(ObjectClassProgramActivity).delete(synchronize_session='fetch')\n self.session.query(AwardFinancial).delete(synchronize_session='fetch')\n self.session.query(ErrorMetadata).delete(synchronize_session='fetch')\n self.session.query(FlexField).delete(synchronize_session='fetch')\n self.session.commit()\n\n def test_single_file_warnings(self):\n for chunk_size, parallel, batch_sql in self.CONFIGS:\n 
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',\n batch_sql)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',\n chunk_size)\n self.single_file_warnings()\n\n def single_file_warnings(self):\n self.cleanup()\n # Valid\n report_headers, report_content = self.generate_file_report(APPROP_FILE, 'appropriations', warning=True)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 10\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 20\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['warning']).count()\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 10\n assert error_count == 0\n assert report_headers == self.validator.report_headers\n assert len(report_content) == 0\n self.cleanup()\n\n # Blank File\n report_headers, report_content = self.generate_file_report(BLANK_C, 'award_financial', warning=True)\n awfin_count = self.session.query(AwardFinancial).filter_by(submission_id=self.submission_id).count()\n assert awfin_count == 0\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 0\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['warning']).count()\n assert self.validator.job.number_of_rows == 1\n assert self.validator.job.number_of_rows_valid == 2\n assert error_count == 1\n assert report_headers == self.validator.report_headers\n expected_values = [\n {\n 'Unique ID': '',\n 'Field Name': 'Blank File',\n 'Rule Message': 'File does not contain data. For files A and B, this must be addressed prior to'\n ' publication/certification. 
Blank file C does not prevent publication/certification.',\n 'Value Provided': '',\n 'Expected Value': '',\n 'Difference': '',\n 'Flex Field': '',\n 'Row Number': '',\n 'Rule Label': 'DABSBLANK'\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n # SQL Validation\n report_headers, report_content = self.generate_file_report(RULE_FAILED_WARNING, 'appropriations', warning=True)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 10\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 20\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['warning']).count()\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 10\n assert error_count == 1\n assert report_headers == self.validator.report_headers\n expected_values = [\n {\n 'Unique ID': 'TAS: 028-2010/2011-0406-000',\n 'Field Name': 'budgetauthorityunobligatedbalancebroughtforward_fyb',\n 'Rule Message': 'All the elements that have FYB in file A are expected in the first submission'\n ' for a fiscal year',\n 'Value Provided': 'budgetauthorityunobligatedbalancebroughtforward_fyb: ',\n 'Expected Value': 'If the reporting period is Quarter 1, a non-null amount should be submitted for the'\n ' following elements: BudgetAuthorityUnobligatedBalanceBroughtForward_FYB',\n 'Difference': '',\n 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Row Number': '5',\n 'Rule Label': 'A16.1'\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n def test_single_file_errors(self):\n for chunk_size, parallel, batch_sql in self.CONFIGS:\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',\n batch_sql)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',\n chunk_size)\n self.single_file_errors()\n\n def single_file_errors(self):\n self.cleanup()\n\n # Valid\n report_headers, report_content = self.generate_file_report(APPROP_FILE, 'appropriations', warning=False)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 10\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 20\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 10\n assert error_count == 0\n assert report_headers == self.validator.report_headers\n assert len(report_content) == 0\n self.cleanup()\n\n # Header Error\n report_headers, report_content = self.generate_file_report(HEADER_ERROR, 'appropriations', warning=False,\n ignore_error=True)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 0\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 0\n error_count = 
self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert self.validator.job.number_of_rows is None\n assert self.validator.job.number_of_rows_valid == 0\n # Header errors do not get saved to the database\n assert error_count == 0\n assert report_headers == ['Error type', 'Header name']\n expected_values = [\n {\n 'Error type': 'Duplicated header',\n 'Header name': 'AllocationTransferAgencyIdentifier'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'AdjustmentsToUnobligatedBalanceBroughtForward_CPE'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'AgencyIdentifier'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'BudgetAuthorityUnobligatedBalanceBroughtForward_FYB'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'DeobligationsRecoveriesRefundsByTAS_CPE'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'GrossOutlayAmountByTAS_CPE'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'ObligationsIncurredTotalByTAS_CPE'\n },\n {\n 'Error type': 'Missing header',\n 'Header name': 'StatusOfBudgetaryResourcesTotal_CPE'\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n # Read Error\n report_headers, report_content = self.generate_file_report(READ_ERROR, 'appropriations', warning=False)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 6\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 12\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 6\n format_errors = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).one()\n format_error_count = format_errors.occurrences\n assert format_error_count == 4\n assert report_headers == self.validator.report_headers\n expected_values = [\n {\n 'Unique ID': '',\n 'Field Name': 'Formatting Error',\n 'Rule Message': 'Could not parse this record correctly.',\n 'Value Provided': '',\n 'Expected Value': '',\n 'Difference': '',\n 'Flex Field': '',\n 'Row Number': '2',\n 'Rule Label': ''\n },\n {\n 'Unique ID': '',\n 'Field Name': 'Formatting Error',\n 'Rule Message': 'Could not parse this record correctly.',\n 'Value Provided': '',\n 'Expected Value': '',\n 'Difference': '',\n 'Flex Field': '',\n 'Row Number': '3',\n 'Rule Label': ''\n },\n {\n 'Unique ID': '',\n 'Field Name': 'Formatting Error',\n 'Rule Message': 'Could not parse this record correctly.',\n 'Value Provided': '',\n 'Expected Value': '',\n 'Difference': '',\n 'Flex Field': '',\n 'Row Number': '5',\n 'Rule Label': ''\n },\n {\n 'Unique ID': '',\n 'Field Name': 'Formatting Error',\n 'Rule Message': 'Could not parse this record correctly.',\n 'Value Provided': '',\n 'Expected Value': '',\n 'Difference': '',\n 'Flex Field': '',\n 'Row Number': '7',\n 'Rule Label': ''\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n # Type Error\n report_headers, report_content = self.generate_file_report(TYPE_ERROR, 'appropriations', warning=False)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 9\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 18\n assert self.validator.job.number_of_rows == 11\n assert 
self.validator.job.number_of_rows_valid == 9\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert error_count == 1\n assert report_headers == self.validator.report_headers\n expected_values = [\n {\n 'Unique ID': 'TAS: 069-013-X-2050-005',\n 'Field Name': 'statusofbudgetaryresourcestotal_cpe',\n 'Rule Message': 'The value provided was of the wrong type. Note that all type errors in a line must be'\n ' fixed before the rest of the validation logic is applied to that line.',\n 'Value Provided': 'statusofbudgetaryresourcestotal_cpe: A',\n 'Expected Value': 'This field must be a decimal',\n 'Difference': '',\n 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Row Number': '6',\n 'Rule Label': ''\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n # Length Error\n report_headers, report_content = self.generate_file_report(LENGTH_ERROR, 'appropriations', warning=False)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 10\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 20\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 9\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert error_count == 1\n assert report_headers == self.validator.report_headers\n expected_values = [\n {\n 'Unique ID': 'TAS: 069-013-X-2050-005',\n 'Field Name': 'grossoutlayamountbytas_cpe',\n 'Rule Message': 'Value was longer than maximum length for this field.',\n 'Value Provided': 'grossoutlayamountbytas_cpe: 35000000000000000000000000',\n 'Expected Value': 'Max length: 21',\n 'Difference': '',\n 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Row Number': '6',\n 'Rule Label': ''\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n # Required Error + SQL Validation\n report_headers, report_content = self.generate_file_report(REQUIRED_ERROR, 'appropriations', warning=False)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 10\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 20\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 9\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert error_count == 3\n assert report_headers == self.validator.report_headers\n expected_values = [\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Field Name': 'statusofbudgetaryresourcestotal_cpe',\n 'Rule Message': 'This field is required for all submissions but was not provided in this row.',\n 'Value Provided': '',\n 'Expected Value': '(not blank)',\n 'Difference': '',\n 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Row Number': '3',\n 'Rule Label': ''\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Field Name': 'statusofbudgetaryresourcestotal_cpe, obligationsincurredtotalbytas_cpe,'\n ' unobligatedbalance_cpe',\n 'Rule Message': 'StatusOfBudgetaryResourcesTotal_CPE= ObligationsIncurredTotalByTAS_CPE'\n ' + UnobligatedBalance_CPE',\n 'Value Provided': 
'statusofbudgetaryresourcestotal_cpe: , obligationsincurredtotalbytas_cpe: 8.08,'\n ' unobligatedbalance_cpe: 2.02',\n 'Expected Value': 'StatusOfBudgetaryResourcesTotal_CPE must equal the sum of these elements:'\n ' ObligationsIncurredTotalByTAS_CPE + UnobligatedBalance_CPE. The Broker cannot'\n ' distinguish which item is incorrect for this rule. Refer to related rule errors'\n ' and warnings in this report (rules A15, A22, A23) to distinguish which elements'\n ' may be incorrect.',\n 'Difference': '-10.10',\n 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Row Number': '3',\n 'Rule Label': 'A4'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Field Name': 'statusofbudgetaryresourcestotal_cpe, totalbudgetaryresources_cpe',\n 'Rule Message': 'StatusOfBudgetaryResourcesTotal_CPE = TotalBudgetaryResources_CPE',\n 'Value Provided': 'statusofbudgetaryresourcestotal_cpe: , totalbudgetaryresources_cpe: 10.1',\n 'Expected Value': 'StatusOfBudgetaryResourcesTotal_CPE must equal TotalBudgetaryResources_CPE. The'\n ' Broker cannot distinguish which side of the equation is correct for this rule.'\n ' Refer to related rule errors and warnings in this report (rules A6, A23) to'\n ' distinguish which elements may be incorrect.',\n 'Difference': '-10.1',\n 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Row Number': '3',\n 'Rule Label': 'A24'\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n # SQL Validation (with difference)\n report_headers, report_content = self.generate_file_report(RULE_FAILED_ERROR, 'appropriations', warning=False)\n appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()\n assert appro_count == 10\n flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()\n assert flex_count == 20\n assert self.validator.job.number_of_rows == 11\n assert self.validator.job.number_of_rows_valid == 10\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert error_count == 0\n assert report_headers == self.validator.report_headers\n # TODO put this back when we put A2 back\n # expected_values = [\n # {\n # 'Unique ID': 'TAS: 049-2014/2015-0100-000',\n # 'Field Name': 'totalbudgetaryresources_cpe, budgetauthorityappropriatedamount_cpe,'\n # ' budgetauthorityunobligatedbalancebroughtforward_fyb,'\n # ' adjustmentstounobligatedbalancebroughtforward_cpe, otherbudgetaryresourcesamount_cpe',\n # 'Rule Message': 'TotalBudgetaryResources_CPE = BudgetAuthorityAppropriatedAmount_CPE +'\n # ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +'\n # ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +'\n # ' OtherBudgetaryResourcesAmount_CPE',\n # 'Value Provided': 'totalbudgetaryresources_cpe: 10.1, budgetauthorityappropriatedamount_cpe: 0.01,'\n # ' budgetauthorityunobligatedbalancebroughtforward_fyb: 3.03,'\n # ' adjustmentstounobligatedbalancebroughtforward_cpe: 2.02,'\n # ' otherbudgetaryresourcesamount_cpe: 4.04',\n # 'Expected Value': 'TotalBudgetaryResources_CPE must equal the sum of these elements:'\n # ' BudgetAuthorityAppropriatedAmount_CPE +'\n # ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +'\n # ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +'\n # ' OtherBudgetaryResourcesAmount_CPE. The Broker cannot distinguish which item is'\n # ' incorrect for this rule. 
Refer to related rule errors and warnings in this report'\n # ' (rules A3, A6, A7, A8, A12) to distinguish which elements may be incorrect.',\n # 'Difference': '1.00',\n # 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n # 'Row Number': '10',\n # 'Rule Label': 'A2'\n # }\n # ]\n # assert report_content == expected_values\n self.cleanup()\n\n # Ensure total_obligations are being calculated correctly\n self.generate_file_report(AFINANCIAL_FILE, 'award_financial', warning=False)\n totals = self.session.query(TotalObligations).filter_by(submission_id=self.submission_id).one()\n assert totals.total_obligations == 12000.00\n assert totals.total_proc_obligations == 8000.00\n assert totals.total_asst_obligations == 4000.00\n self.cleanup()\n\n def test_cross_file_warnings(self):\n for chunk_size, parallel, batch_sql in self.CONFIGS:\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',\n batch_sql)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',\n chunk_size)\n self.cross_file_warnings()\n\n def cross_file_warnings(self):\n self.cleanup()\n\n # Valid\n report_headers, report_content = self.generate_cross_file_report([(CROSS_FILE_A, 'appropriations'),\n (CROSS_FILE_B, 'program_activity')],\n warning=True)\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['warning']).count()\n assert error_count == 0\n assert report_headers == self.validator.cross_file_report_headers\n assert len(report_content) == 0\n self.cleanup()\n\n # SQL Validation\n report_headers, report_content = self.generate_cross_file_report([(INVALID_CROSS_A, 'appropriations'),\n (INVALID_CROSS_B, 'program_activity')],\n warning=True)\n warnings = list(self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['warning']).all())\n assert len(warnings) == 3\n assert warnings[0].occurrences == 3\n assert warnings[1].occurrences == 3\n assert warnings[2].occurrences == 3\n assert report_headers == self.validator.cross_file_report_headers\n expected_values = [\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'grossoutlayamountbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',\n 'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'\n ' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'\n ' award financial file (B). 
{This value is the sum of all Gross Outlay Amounts reported'\n ' in file B, to indicate year-to-date activity by TAS/Subaccount.}',\n 'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',\n 'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',\n 'Difference': '4000',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '5',\n 'Rule Label': 'A18'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'obligationsincurredtotalbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',\n 'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'\n ' equal the negative sum of the corresponding'\n ' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',\n 'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',\n 'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',\n 'Difference': '18000',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '5',\n 'Rule Label': 'A19'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'\n ' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',\n 'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'\n ' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',\n 'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',\n 'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'\n ' ussgl497100_downward_adjus_cpe_sum: 2000,'\n ' ussgl487200_downward_adjus_cpe_sum: 400,'\n ' ussgl497200_downward_adjus_cpe_sum: 2000',\n 'Difference': '9600',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '5',\n 'Rule Label': 'A35'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'grossoutlayamountbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',\n 'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'\n ' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'\n ' award financial file (B). 
{This value is the sum of all Gross Outlay Amounts reported'\n ' in file B, to indicate year-to-date activity by TAS/Subaccount.}',\n 'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',\n 'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',\n 'Difference': '4000',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '10',\n 'Rule Label': 'A18'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'obligationsincurredtotalbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',\n 'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'\n ' equal the negative sum of the corresponding'\n ' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',\n 'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',\n 'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',\n 'Difference': '18000',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '10',\n 'Rule Label': 'A19'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'\n ' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',\n 'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'\n ' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',\n 'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',\n 'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'\n ' ussgl497100_downward_adjus_cpe_sum: 2000,'\n ' ussgl487200_downward_adjus_cpe_sum: 400,'\n ' ussgl497200_downward_adjus_cpe_sum: 2000',\n 'Difference': '9600',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '10',\n 'Rule Label': 'A35'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'grossoutlayamountbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',\n 'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'\n ' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'\n ' award financial file (B). 
{This value is the sum of all Gross Outlay Amounts reported'\n ' in file B, to indicate year-to-date activity by TAS/Subaccount.}',\n 'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',\n 'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',\n 'Difference': '4000',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '15',\n 'Rule Label': 'A18'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'obligationsincurredtotalbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',\n 'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'\n ' equal the negative sum of the corresponding'\n ' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',\n 'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',\n 'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',\n 'Difference': '18000',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '15',\n 'Rule Label': 'A19'\n },\n {\n 'Unique ID': 'TAS: 019-2016/2016-0113-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',\n 'Target File': 'program_activity',\n 'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'\n ' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',\n 'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'\n ' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',\n 'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',\n 'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'\n ' ussgl497100_downward_adjus_cpe_sum: 2000,'\n ' ussgl487200_downward_adjus_cpe_sum: 400,'\n ' ussgl497200_downward_adjus_cpe_sum: 2000',\n 'Difference': '9600',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '15',\n 'Rule Label': 'A35'\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n def test_cross_file_errors(self):\n for chunk_size, parallel, batch_sql in self.CONFIGS:\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',\n batch_sql)\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',\n chunk_size)\n self.cross_file_errors()\n\n def cross_file_errors(self):\n self.cleanup()\n\n # Valid\n report_headers, report_content = self.generate_cross_file_report([(CROSS_FILE_A, 'appropriations'),\n (CROSS_FILE_B, 'program_activity')],\n warning=False)\n error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).count()\n assert error_count == 0\n assert report_headers == self.validator.cross_file_report_headers\n assert len(report_content) == 0\n self.cleanup()\n\n # SQL Validation\n report_headers, report_content = self.generate_cross_file_report([(INVALID_CROSS_A, 'appropriations'),\n (INVALID_CROSS_B, 'program_activity')],\n warning=False)\n warnings = 
list(self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,\n severity_id=RULE_SEVERITY_DICT['fatal']).all())\n assert len(warnings) == 1\n assert warnings[0].occurrences == 3\n assert report_headers == self.validator.cross_file_report_headers\n expected_values = [\n {\n 'Unique ID': 'TAS: 019-072-X-0306-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'\n ' beginningperiodofavailability, endingperiodofavailability,'\n ' availabilitytypecode, mainaccountcode, subaccountcode',\n 'Target File': 'program_activity',\n 'Target Field Name': '',\n 'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'\n ' (object class program activity)',\n 'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'\n ' beginningperiodofavailability: , endingperiodofavailability: ,'\n ' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',\n 'Target Value Provided': '',\n 'Difference': '',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '2',\n 'Rule Label': 'A30.1'\n },\n {\n 'Unique ID': 'TAS: 019-072-X-0306-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'\n ' beginningperiodofavailability, endingperiodofavailability,'\n ' availabilitytypecode, mainaccountcode, subaccountcode',\n 'Target File': 'program_activity',\n 'Target Field Name': '',\n 'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'\n ' (object class program activity)',\n 'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'\n ' beginningperiodofavailability: , endingperiodofavailability: ,'\n ' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',\n 'Target Value Provided': '',\n 'Difference': '',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '7',\n 'Rule Label': 'A30.1'\n },\n {\n 'Unique ID': 'TAS: 019-072-X-0306-000',\n 'Source File': 'appropriations',\n 'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'\n ' beginningperiodofavailability, endingperiodofavailability,'\n ' availabilitytypecode, mainaccountcode, subaccountcode',\n 'Target File': 'program_activity',\n 'Target Field Name': '',\n 'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'\n ' (object class program activity)',\n 'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'\n ' beginningperiodofavailability: , endingperiodofavailability: ,'\n ' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',\n 'Target Value Provided': '',\n 'Difference': '',\n 'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',\n 'Source Row Number': '12',\n 'Rule Label': 'A30.1'\n }\n ]\n assert report_content == expected_values\n self.cleanup()\n\n def test_validation_parallelize_error(self):\n # Test the parallelize function with a broken call to see if the process is properly cleaned up\n self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'MULTIPROCESSING_POOLS', 2)\n\n # Setting up all the other elements of the validator to simulate the integration test\n self.validator.submission_id = 1\n self.validator.file_type = self.session.query(FileType).filter_by(\n file_type_id=FILE_TYPE_DICT['appropriations']).one()\n self.validator.file_name = APPROP_FILE\n 
self.setup_csv_record_validation(APPROP_FILE, 'appropriations')\n self.validator.is_fabs = False\n self.validator.reader = CsvReader()\n self.validator.error_list = {}\n self.validator.error_rows = []\n self.validator.total_rows = 1\n self.validator.total_data_rows = 0\n self.validator.short_rows = []\n self.validator.long_rows = []\n self.validator.has_data = False\n self.validator.model = Appropriation\n\n self.validator.error_file_name = report_file_name(self.validator.submission_id, False,\n self.validator.file_type.name)\n self.validator.error_file_path = ''.join([CONFIG_SERVICES['error_report_path'],\n self.validator.error_file_name])\n self.validator.warning_file_name = report_file_name(self.validator.submission_id, True,\n self.validator.file_type.name)\n self.validator.warning_file_path = ''.join([CONFIG_SERVICES['error_report_path'],\n self.validator.warning_file_name])\n\n self.validator.fields = self.session.query(FileColumn) \\\n .filter(FileColumn.file_id == FILE_TYPE_DICT[self.validator.file_type.name]) \\\n .order_by(FileColumn.daims_name.asc()).all()\n self.validator.expected_headers, self.validator.parsed_fields = parse_fields(self.session,\n self.validator.fields)\n self.validator.csv_schema = {row.name_short: row for row in self.validator.fields}\n\n with open(self.validator.error_file_path, 'w', newline='') as error_file, \\\n open(self.validator.warning_file_path, 'w', newline='') as warning_file:\n error_csv = csv.writer(error_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n warning_csv = csv.writer(warning_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n error_csv.writerow(self.validator.report_headers)\n warning_csv.writerow(self.validator.report_headers)\n\n # Finally open the file for loading into the database with baseline validations\n self.validator.filename = self.validator.reader.get_filename(None, None, self.validator.file_name)\n self.validator.reader.open_file(None, None, self.validator.file_name, self.validator.fields, None,\n self.validator.get_file_name(self.validator.error_file_name),\n self.validator.daims_to_short_dict[self.validator.file_type.file_type_id],\n self.validator.short_to_daims_dict[self.validator.file_type.file_type_id],\n is_local=self.validator.is_local)\n\n # Going back to reprocess the header row\n self.validator.reader.file.seek(0)\n reader_obj = pd.read_csv(self.validator.reader.file, dtype=str, delimiter=',', error_bad_lines=False,\n na_filter=False, chunksize=2, warn_bad_lines=False)\n # Setting this outside of reader/file type objects which may not be used during processing\n self.validator.flex_fields = ['flex_field_a', 'flex_field_b']\n self.validator.header_dict = self.validator.reader.header_dict\n self.validator.file_type_name = self.validator.file_type.name\n self.validator.file_type_id = self.validator.file_type.file_type_id\n self.validator.job_id = 2\n\n # Making a broken list of chunks (one that should process fine, another with an error, another fine)\n # This way we can tell that the latter chunks processed later are ignored due to the error\n normal_chunks = list(reader_obj)\n broken_chunks = [normal_chunks[0], 'BREAK', normal_chunks[1], normal_chunks[2]]\n\n with self.assertRaises(Exception) as val_except:\n # making the reader object a list of strings instead, causing the inner function to break\n self.validator.parallel_data_loading(self.session, broken_chunks)\n self.assertTrue(type(val_except.exception) == AttributeError)\n self.assertTrue(str(val_except.exception) 
== \"'str' object has no attribute 'empty'\")\n\n # Check to see the processes are killed\n job = ps.Process(os.getpid())\n assert len(job.children(recursive=True)) == 0\n" ]
[ [ "pandas.read_csv" ] ]
zzheng93/code_uhws
[ "37e9c992066ad77c9c028c84d5f231ef442ca207" ]
[ "3_model_valid/pred/apply_model_members.py" ]
[ "import xarray as xr\nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\nimport time\nimport pickle\nimport sys\n\nfrom xgboost import XGBRegressor\n\n# load dataframe with maximal temp\ndef load_df_max_TREFHT(member, start_date, end_date):\n path = \"/glade/scratch/zhonghua/CESM-LE-members-csv/\"\n print(\"***************Start loading member\",member,\"***************\")\n t0 = time.time()\n df = pd.read_csv(path+member+\"_\"+start_date+\"_\"+end_date+\".csv\")\n elapsed_time = time.time() - t0\n print(\"It takes elapsed_time\", elapsed_time, \"to read csv\")\n print(\"***************Start convert lat/lon to string***************\")\n t1=time.time()\n df[[\"lat\",\"lon\"]]=df[[\"lat\",\"lon\"]].round(4).astype(str)\n elapsed_time = time.time() - t1\n print(\"It takes elapsed_time\", elapsed_time, \"to convert lat/lon to string\")\n print(\"***************Start One Hot Encoding***************\")\n # https://stackoverflow.com/questions/44124436/python-datetime-to-season\n t2=time.time()\n df[\"time\"]=pd.to_datetime(df[\"time\"],errors=\"coerce\")\n #df = df.dropna(subset=['time'])\n months = [\"Jan\",\"Feb\", \"Mar\", \"Apr\", \"May\", \"June\", \"July\", \"Aug\", \"Sept\", \"Oct\", \"Nov\", \"Dec\"]\n month_to_months = dict(zip(range(1,13), months))\n df = pd.concat([df,pd.get_dummies(df[\"time\"].dt.month.map(month_to_months).astype('category'))],axis=1)\n elapsed_time = time.time() - t2\n print(\"It takes elapsed_time\", elapsed_time, \"to finish the one hot encoding\")\n return df\n\ndef XGB_test(df,year,lat,lon,member):\n t_0=time.time()\n #df_temp = df[(df[\"lat\"]==lat) & (df[\"lon\"]==lon)].reset_index()\n df_lat = df[df[\"lat\"]==lat]\n df_temp = df_lat[df_lat[\"lon\"]==lon]\n \n vari_ls = [\"QBOT\",\"UBOT\",\"VBOT\",\n \"TREFHT\",\n \"FLNS\",\"FSNS\",\n \"PRECT\",\"PRSN\",\n \"Jan\",\"Feb\", \"Mar\", \n \"Apr\", \"May\", \"June\", \n \"July\", \"Aug\", \"Sept\", \n \"Oct\", \"Nov\", \"Dec\"]\n \n\n XGBreg = pickle.load(open(\"/glade/scratch/zhonghua/ensem_model/\"+year+\"/\"+\"MX_\"+lat+\"_\"+lon+\".dat\",\"rb\"))\n df_temp[member]=XGBreg.predict(df_temp[vari_ls])\n \n #print(\"rmse:\",np.sqrt(mean_squared_error(df_temp[member],df_temp[pred])))\n #print(\"mae:\",mean_absolute_error(df_temp[member],df_temp[pred]))\n df_return=df_temp[[\"lat\",\"lon\",\"time\",member,\"TREFMXAV_U\"]]\n df_return[[\"lat\",\"lon\"]]=df_return[[\"lat\",\"lon\"]].astype(np.float32)\n \n elapsed_time = time.time() - t_0\n print(\"It takes elapsed_time\", elapsed_time, \"to apply the model\")\n \n return df_return.set_index([\"lat\",\"lon\",\"time\"])\n\n\n#########################################################\nlat_lon_dict=pickle.load(open(\"/glade/scratch/zhonghua/lat_lon_dict.dat\",\"rb\"))\n\nmember=sys.argv[1]\nstart_date=sys.argv[2]\nend_date=sys.argv[3]\n\ndf = load_df_max_TREFHT(member, start_date, end_date)\n\ni=1\ndf_final_ls=[]\nfor lat in lat_lon_dict:\n print(lat)\n for lon in lat_lon_dict[lat]:\n df_final_ls.append(XGB_test(df,start_date,lat,lon,member))\n i+=1\n if (i%10==0):\n print(i)\npd.concat(df_final_ls).to_csv(\"/glade/scratch/zhonghua/CESM_validation/\"+start_date+\"/\"+member+\"_ens.csv\")\n" ]
[ [ "pandas.to_datetime", "pandas.read_csv", "pandas.concat" ] ]
shiyipaisizuo/tensorflow-project
[ "ce111d2def54bce4f1777f7a8921d60bdd0c668c" ]
[ "cats_dogs/base.py" ]
[ "\"\"\"\n Training data and validation accuracy.\n\"\"\"\n\n# Author: Changyu Liu <Shiyipaisizuo@gmail.com>\n# Last modified: 2018-07-06\n# LICENSE: MIT\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\nimport train_test_split\nimport cnn\n\n\nN_CLASSES = 2 # dogs and cats\nIMG_W = 208 # resize the image, if the input image is too large, training will be very slow\nIMG_H = 208\nBATCH_SIZE = 16\nCAPACITY = 2000\nMAX_STEP = 15000\n# with current parameters, it is suggested to use learning rate<0.0001\nlearning_rate = 0.0001\n\n\ndef run_training():\n # Set there directories .\n train_dir = './data/train/'\n logs_train_dir = './logs/train/'\n\n train, train_label = train_test_split.get_files(train_dir)\n\n train_batch, train_label_batch = train_test_split.get_batch(train,\n train_label,\n IMG_W,\n IMG_H,\n BATCH_SIZE,\n CAPACITY)\n train_logits = cnn.inference(train_batch, BATCH_SIZE, N_CLASSES)\n train_loss = cnn.losses(train_logits, train_label_batch)\n train_op = cnn.training(train_loss, learning_rate)\n train__acc = cnn.evaluation(train_logits, train_label_batch)\n\n summary_op = tf.summary.merge_all()\n sess = tf.Session()\n train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)\n saver = tf.train.Saver()\n\n sess.run(tf.global_variables_initializer())\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n try:\n for step in np.arange(MAX_STEP):\n if coord.should_stop():\n break\n _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])\n\n if step % 50 == 0:\n print(\n \"Step {}, \".format(step),\n \"train loss = {:.2f}, \".format(tra_loss),\n \"train accuracy = {:.2f}%\".format(tra_acc * 100.0))\n summary_str = sess.run(summary_op)\n train_writer.add_summary(summary_str, step)\n\n if step % 2000 == 0 or (step + 1) == MAX_STEP:\n checkpoint_path = os.path.join(logs_train_dir, \"model.ckpt\")\n saver.save(sess, checkpoint_path, global_step=step)\n\n except tf.errors.OutOfRangeError:\n print(\"Done training -- epoch limit reached\")\n finally:\n coord.request_stop()\n\n coord.join(threads)\n sess.close()\n\n\ndef get_image(train):\n \"\"\"\n Randomly pick one image from training data\n ====================\n Args:\n train: train data\n ====================\n Return:\n image\n \"\"\"\n n = len(train)\n ind = np.random.randint(0, n)\n img_dir = train[ind]\n\n image = Image.open(img_dir)\n image = image.resize([208, 208])\n image = np.array(image)\n return image\n\n\ndef evaluate():\n \"\"\"\n Test one image against the saved models and parameters\n \"\"\"\n\n # you need to change the directories to yours.\n train_dir = './data/train/'\n train, train_label = train_test_split.get_files(train_dir)\n image_array = get_image(train)\n\n with tf.Graph().as_default():\n batch_size = 1\n n_classes = 2\n\n image = tf.cast(image_array, tf.float32)\n image = tf.image.per_image_standardization(image)\n image = tf.reshape(image, [1, 208, 208, 3])\n logits = cnn.inference(image, batch_size, n_classes)\n\n logits = tf.nn.softmax(logits)\n\n X = tf.placeholder(tf.float32, shape=[208, 208, 3])\n\n # you need to change the directories to yours.\n logs_train_dir = './logs/train/'\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n\n print(\"Reading checkpoints...\")\n ckpt = tf.train.get_checkpoint_state(logs_train_dir)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = ckpt.model_checkpoint_path.split(\n '/')[-1].split('-')[-1]\n saver.restore(sess, ckpt.model_checkpoint_path)\n 
print(\"Loading success, global_step is %s\".format(global_step))\n else:\n print(\"No checkpoint file found\")\n\n prediction = sess.run(logits, feed_dict={X: image_array})\n max_index = np.argmax(prediction)\n if max_index == 0:\n print(\"This is a cat with possibility {:.6f}\".format(\n prediction[:, 0]))\n else:\n print(\"This is a dog with possibility {:.6f}\".format(\n prediction[:, 1]))\n" ]
[ [ "tensorflow.train.start_queue_runners", "numpy.array", "tensorflow.train.Coordinator", "tensorflow.Graph", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.train.get_checkpoint_state", "tensorflow.reshape", "numpy.random.randint", "numpy.arange", "tensorflow.image.per_image_standardization", "tensorflow.nn.softmax", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.argmax", "tensorflow.cast" ] ]
tmsick/deep-learning-from-scratch
[ "96c52aeb74eb77bbfce505a47bbdcdba70e47559", "96c52aeb74eb77bbfce505a47bbdcdba70e47559" ]
[ "ch05/two_layer_net.py", "ch04/gradient_simplenet.py" ]
[ "# coding: utf-8\nimport sys, os\n\nsys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nfrom common.layers import *\nfrom common.gradient import numerical_gradient\nfrom collections import OrderedDict\n\n\nclass TwoLayerNet:\n def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n # 重みの初期化\n self.params = {}\n self.params[\"W1\"] = weight_init_std * np.random.randn(input_size, hidden_size)\n self.params[\"b1\"] = np.zeros(hidden_size)\n self.params[\"W2\"] = weight_init_std * np.random.randn(hidden_size, output_size)\n self.params[\"b2\"] = np.zeros(output_size)\n\n # レイヤの生成\n self.layers = OrderedDict()\n self.layers[\"Affine1\"] = Affine(self.params[\"W1\"], self.params[\"b1\"])\n self.layers[\"Relu1\"] = Relu()\n self.layers[\"Affine2\"] = Affine(self.params[\"W2\"], self.params[\"b2\"])\n\n self.lastLayer = SoftmaxWithLoss()\n\n def predict(self, x):\n for layer in self.layers.values():\n x = layer.forward(x)\n\n return x\n\n # x:入力データ, t:教師データ\n def loss(self, x, t):\n y = self.predict(x)\n return self.lastLayer.forward(y, t)\n\n def accuracy(self, x, t):\n y = self.predict(x)\n y = np.argmax(y, axis=1)\n if t.ndim != 1:\n t = np.argmax(t, axis=1)\n\n accuracy = np.sum(y == t) / float(x.shape[0])\n return accuracy\n\n # x:入力データ, t:教師データ\n def numerical_gradient(self, x, t):\n loss_W = lambda W: self.loss(x, t)\n\n grads = {}\n grads[\"W1\"] = numerical_gradient(loss_W, self.params[\"W1\"])\n grads[\"b1\"] = numerical_gradient(loss_W, self.params[\"b1\"])\n grads[\"W2\"] = numerical_gradient(loss_W, self.params[\"W2\"])\n grads[\"b2\"] = numerical_gradient(loss_W, self.params[\"b2\"])\n\n return grads\n\n def gradient(self, x, t):\n # forward\n self.loss(x, t)\n\n # backward\n dout = 1\n dout = self.lastLayer.backward(dout)\n\n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n\n # 設定\n grads = {}\n grads[\"W1\"], grads[\"b1\"] = self.layers[\"Affine1\"].dW, self.layers[\"Affine1\"].db\n grads[\"W2\"], grads[\"b2\"] = self.layers[\"Affine2\"].dW, self.layers[\"Affine2\"].db\n\n return grads\n", "# coding: utf-8\nimport sys, os\n\nsys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nfrom common.functions import softmax, cross_entropy_error\nfrom common.gradient import numerical_gradient\n\n\nclass simpleNet:\n def __init__(self):\n self.W = np.random.randn(2, 3)\n\n def predict(self, x):\n return np.dot(x, self.W)\n\n def loss(self, x, t):\n z = self.predict(x)\n y = softmax(z)\n loss = cross_entropy_error(y, t)\n\n return loss\n\n\nx = np.array([0.6, 0.9])\nt = np.array([0, 0, 1])\n\nnet = simpleNet()\n\nf = lambda w: net.loss(x, t)\ndW = numerical_gradient(f, net.W)\n\nprint(dW)\n" ]
[ [ "numpy.sum", "numpy.random.randn", "numpy.argmax", "numpy.zeros" ], [ "numpy.array", "numpy.dot", "numpy.random.randn" ] ]
gidden/cyclopts
[ "e346b1721c8d8722af2862823844ab2e7864141b" ]
[ "cyclopts/functionals.py" ]
[ "\"\"\"A module for useful functions. \n\n:author: Matthew Gidden <matthew.gidden _at_ gmail.com>\n\"\"\"\nimport numpy as np \n\nrms = lambda a, axis=None: np.sqrt(np.mean(np.square(a), axis=axis))\n" ]
[ [ "numpy.square" ] ]
samstern/MSc-Project
[ "79e38803589d4169a11ae96e6bb90f373e8ed1db" ]
[ "pybrain/rl/environments/timeseries/performanceEvaluation.py" ]
[ "import pandas as pd\nfrom math import exp, log,sqrt\nfrom numpy import cumsum,std,sum, mean\n\ndef outData(ts,actionHist,indx,startIndex=0):\n out=pd.DataFrame(ts,index=indx,columns=['ts']).applymap(lambda x: x/100)\n out=out[startIndex:]\n out['cum_log_ts']=cumsum([log(1+i) for i in out['ts']])\n out['Action_Hist']=actionHist[startIndex:]\n out['trading rets']=calculateTradingReturn(out['Action_Hist'],out['ts'])\n out['cum_log_rets']=cumsum([log(1+x) for x in out['trading rets']])\n return out\n\ndef calculateTradingReturn(actionHistory,tsReturn,delta=0):\n if ((type(tsReturn)==pd.core.frame.DataFrame) or (type(tsReturn)==pd.core.frame.Series)):\n rets=pd.Series(index=tsReturn.index)\n else:\n rets=[0 for i in range(len(tsReturn))]\n for t in range(len(tsReturn)-1):\n cost=delta*abs(actionHistory[t+1]-actionHistory[t])\n rets[t]=(1+(actionHistory[t]*tsReturn[t]))*(1-cost)-1\n return rets\n\ndef maximumDrawdown(ts):\n return min(ts)\n\ndef annualisedSharpe(rs,rf=0):\n rs=rs[:-1]\n if (type(rf)==int)|(type(rf)==float):\n rf=[rf for i in rs]\n mean_ann_ret=mean([(rs[i]*252)-rf[i] for i in range(len(rs))])\n stand= std(rs)*sqrt(252)\n return (mean_ann_ret)/stand\n\ndef percentOfOutperformedMonths(tradingRets,tsRets):\n monthlyTrating=tradingRets.resample('M').apply(logCumSum)\n monthlyMkt=tsRets.resample('M',how=logCumSum)\n numOutperform=0\n for i in range(len(monthlyMkt)):\n if monthlyTrating[i]>monthlyMkt[i]:\n numOutperform+=1\n return 100*((1.0*numOutperform)/len(monthlyMkt))\n\ndef numTradesPerYear(actionHistory):\n count=0\n for i in range(1,len(actionHistory)):\n if actionHistory[i]!=actionHistory[i-1]:\n count+=1\n return count/252\n\ndef totalReturn(log_returns):\n return exp(sum(log_returns+1))-1\n\ndef logCumSum(ts):\n return sum([log(1+t) for t in ts])\n pass\n" ]
[ [ "numpy.std", "pandas.DataFrame", "numpy.sum", "pandas.Series" ] ]
whilemind/subtitle
[ "f911d97232c602b091f5eabb643667e9b72199df" ]
[ "src/subtitle.py" ]
[ "import time\nimport scipy.io.wavfile as wavfile\nimport numpy as np\nimport speech_recognition as sr\nimport librosa\nimport argparse\nimport os\nfrom glob import glob\n\nfrom pydub import AudioSegment\nfrom pydub.silence import split_on_silence, detect_nonsilent\nfrom pydub.playback import play\nimport pysrt\nimport math\nimport shutil\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--video', type=str, required=True, help='Path to video *.mp4 file')\n parser.add_argument('-o', '--output', type=str, default='output/', help='Output file location')\n parser.add_argument('-l', '--lang', type=str, default='en', help='Language of the video file')\n arguments = parser.parse_args()\n return arguments\n\ndef recognize(wav_filename, lang):\n data, s = librosa.load(wav_filename)\n librosa.output.write_wav('output/tmp.wav', data, s)\n y = (np.iinfo(np.int32).max * (data/np.abs(data).max())).astype(np.int32)\n wavfile.write('output/tmp_32.wav', s, y)\n\n r = sr.Recognizer()\n with sr.AudioFile('output/tmp_32.wav') as source:\n audio = r.record(source) \n\n print('Audio file has been loaded')\n\n try:\n result = r.recognize_google(audio, language = lang).lower()\n except sr.UnknownValueError:\n print(\"Failed to determine audio file\")\n result = ''\n # finally: \n # os.remove(wav_filename) \n\n return result\n\ndef get_audio(videofile, audiofile):\n os.system('ffmpeg -y -threads 4 -i {} -f wav -ab 192000 -vn {}'.format(videofile, audiofile))\n\ndef split_into_frames(audiofile, samplesLocation):\n os.system('rm {}/*'.format(samplesLocation))\n time.sleep(2.0)\n data, sr = librosa.load(audiofile)\n duration = librosa.get_duration(data, sr)\n print('video duration, hours: {}'.format(duration/3600))\n for i in range(0,int(duration-1),20):\n tmp_batch = data[(i)*sr:sr*(i+20)]\n librosa.output.write_wav('{}/{}.wav'.format(samplesLocation, chr(int(i/20)+65)), tmp_batch, sr)\n\ndef separate_music_voice(audioFile, outputLocation):\n os.system('spleeter separate -i {} -p spleeter:2stems -o {}'.format(audioFile, outputLocation))\n\n\n# Define a function to normalize a chunk to a target amplitude.\ndef match_target_amplitude(aChunk, target_dBFS):\n ''' Normalize given audio chunk '''\n change_in_dBFS = target_dBFS - aChunk.dBFS\n return aChunk.apply_gain(change_in_dBFS)\n\ndef get_timestamp(duration):\n hr = math.floor(duration / 3600000)\n total_min = duration % 3600000\n \n mins = math.floor(total_min / 60000)\n total_secs = total_min % 60000\n\n secs = math.floor(total_secs / 1000)\n milisecs = total_min % 1000\n\n return \"{:02d}:{:02d}:{:02d},{:03d}\".format(hr, mins, secs, milisecs)\n\n\ndef gen_subtitle(wavFile, samplesLocation, srtFile, lang):\n srt_file = pysrt.SubRipFile()\n\n # Load your audio.\n print(\"loading wav file...\")\n # song = AudioSegment.from_mp3(\"your_audio.mp3\")\n #song = AudioSegment.from_wav(\"vocals.wav\")\n song = AudioSegment.from_file(wavFile, format=\"wav\")\n # play(song)\n dBFS = song.dBFS\n\n\n # Nonsilence track start and end positions.\n nonsilence = detect_nonsilent(\n song,\n min_silence_len = 500,\n silence_thresh = dBFS-16\n )\n file_count = len(nonsilence)\n print(\"Nonsilence chunk length {}\".format(str(file_count)))\n\n # for [start, end] in nonsilence:\n # print(\"start: {0} end: {1}\".format(get_timestamp(start), get_timestamp(end)))\n\n # Split track where the silence is 2 seconds or more and get chunks using \n # the imported function.\n print(\"Start spliting file...\")\n chunks = split_on_silence(\n song, \n 
min_silence_len = 500,\n silence_thresh = dBFS-16,\n # optional\n keep_silence = 250\n )\n\n print(\"Spliting done...\" + str(len(chunks)))\n # Process each chunk with your parameters\n for i, chunk in enumerate(chunks):\n # Create a silence chunk that's 0.5 seconds (or 500 ms) long for padding.\n silence_chunk = AudioSegment.silent(duration=1000)\n\n # Add the padding chunk to beginning and end of the entire chunk.\n audio_chunk = silence_chunk + chunk + silence_chunk\n # audio_chunk = chunk\n\n # Normalize the entire chunk.\n normalized_chunk = match_target_amplitude(audio_chunk, -20.0)\n\n # Export the audio chunk with new bitrate.\n starttime = nonsilence[i][0]\n endtime = nonsilence[i][1]\n print(\"\\n>>{} of {}, Exporting {}chunk{}.wav start: {} end: {}\".format(i, file_count, samplesLocation, i, starttime, endtime))\n\n chunk_file_path = \"{}chunk{}.wav\".format(samplesLocation, str(i))\n normalized_chunk.export(\n chunk_file_path,\n bitrate = \"192k\",\n format = \"wav\"\n )\n \n time.sleep(2)\n print(\"Going to generete the dialogs of file {}\".format(chunk_file_path))\n dialogs = recognize(chunk_file_path, lang)\n print(\"{} file dialog is: {}\".format(chunk_file_path, dialogs))\n \n start_time = get_timestamp(starttime)\n end_time = get_timestamp(endtime)\n sub = pysrt.SubRipItem((i+1), start=start_time, end=end_time, text=\"{} {}\".format(str(i+1), dialogs))\n srt_file.append(sub)\n\n srt_file.save(srtFile)\n\n\nif __name__ == '__main__':\n outputLoc = 'output/'\n inputWaveFile = 'current.wav'\n vocals_file = 'current/vocals.wav'\n samples_location = 'samples/'\n srt_file = '.srt'\n\n start = time.time()\n\n args = get_arguments()\n\n outputLoc = args.output\n shutil.rmtree(outputLoc)\n time.sleep(2)\n os.makedirs(outputLoc, exist_ok=True)\n inputWaveFile = outputLoc + inputWaveFile\n vocals_file = outputLoc + vocals_file\n samples_location = outputLoc + samples_location\n os.makedirs(samples_location, exist_ok=True)\n srt_file = os.path.splitext(args.video)[0] + srt_file\n print('srt file will be {}'.format(srt_file))\n time.sleep(2)\n\n get_audio(args.video, inputWaveFile)\n separate_music_voice(inputWaveFile, outputLoc)\n gen_subtitle(vocals_file, samples_location, srt_file, args.lang)\n \n end = time.time()\n print('elapsed time: {}'.format(end - start))\n # shutil.rmtree(outputLoc)\n" ]
[ [ "numpy.abs", "numpy.iinfo", "scipy.io.wavfile.write" ] ]
facebookresearch/beanmachine
[ "225114d9964b90c3a49adddc4387b4a47d1b4262", "225114d9964b90c3a49adddc4387b4a47d1b4262" ]
[ "src/beanmachine/ppl/diagnostics/common_plots.py", "src/beanmachine/ppl/compiler/special_function_caller.py" ]
[ "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Callable, List, NamedTuple, Tuple\n\nimport numpy as np\nimport plotly.graph_objs as go\nimport torch\nfrom torch import Tensor\n\n\nclass SamplesSummary(NamedTuple):\n num_chain: int\n num_samples: int\n single_sample_sz: Tensor\n\n\ndef _samples_info(query_samples: Tensor) -> SamplesSummary:\n return SamplesSummary(\n num_chain=query_samples.size(0),\n num_samples=query_samples.size(1),\n # pyre-fixme[6]: For 3rd param expected `Tensor` but got `Size`.\n single_sample_sz=query_samples.size()[2:],\n )\n\n\ndef trace_helper(\n x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]\n) -> Tuple[List[go.Scatter], List[str]]:\n \"\"\"\n this function gets results prepared by a plot-related function and\n outputs a tuple including plotly object and its corresponding legend.\n \"\"\"\n all_traces = []\n num_chains = len(x)\n num_indices = len(x[0])\n for index in range(num_indices):\n trace = []\n for chain in range(num_chains):\n trace.append(\n go.Scatter(\n x=x[chain][index],\n y=y[chain][index],\n mode=\"lines\",\n name=\"chain\" + str(chain),\n )\n )\n all_traces.append(trace)\n return (all_traces, labels)\n\n\ndef plot_helper(\n query_samples: Tensor, func: Callable\n) -> Tuple[List[go.Scatter], List[str]]:\n \"\"\"\n this function executes a plot-related function, passed as input parameter func, and\n outputs a tuple including plotly object and its corresponding legend.\n \"\"\"\n num_chain, num_samples, single_sample_sz = _samples_info(query_samples)\n\n x_axis, y_axis, all_labels = [], [], []\n for chain in range(num_chain):\n flattened_data = query_samples[chain].reshape(num_samples, -1)\n numel = flattened_data[0].numel()\n x_axis_data, y_axis_data, labels = [], [], []\n for i in range(numel):\n index = np.unravel_index(i, single_sample_sz)\n data = flattened_data[:, i]\n partial_label = f\" for {list(index)}\"\n\n x_data, y_data = func(data.detach())\n x_axis_data.append(x_data)\n y_axis_data.append(y_data)\n labels.append(partial_label)\n x_axis.append(x_axis_data)\n y_axis.append(y_axis_data)\n all_labels.append(labels)\n return trace_helper(x_axis, y_axis, all_labels[0])\n\n\ndef autocorr(x: Tensor) -> Tuple[List[int], List[float]]:\n def autocorr_calculation(x: Tensor, lag: int) -> Tensor:\n y1 = x[: (len(x) - lag)]\n y2 = x[lag:]\n\n sum_product = (\n (y1 - (x.mean(dim=0).expand(y1.size())))\n * (y2 - (x.mean(dim=0).expand(y2.size())))\n ).sum(0)\n return sum_product / ((len(x) - lag) * torch.var(x, dim=0))\n\n max_lag = x.size(0)\n y_axis_data = [autocorr_calculation(x, lag).item() for lag in range(max_lag)]\n x_axis_data = list(range(max_lag))\n return (x_axis_data, y_axis_data)\n\n\ndef trace_plot(x: Tensor) -> Tuple[List[int], Tensor]:\n return (list(range(x.size(0))), x)\n", "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport inspect\nimport math\nimport operator\nfrom types import MethodType\nfrom typing import Any, Callable, Dict, List, NoReturn, Optional, Set, Tuple\n\nimport beanmachine.ppl.compiler.bmg_nodes as bn\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.compiler.beanstalk_common import allowed_functions\nfrom beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder\nfrom beanmachine.ppl.compiler.bmg_nodes import BMGNode\nfrom beanmachine.ppl.compiler.hint import log1mexp, math_log1mexp\n\n\n_in_place_operator_names = {\n operator.iadd: \"__iadd__\",\n operator.iand: \"__iand__\",\n operator.ifloordiv: \"__ifloordiv__\",\n operator.ilshift: \"__ilshift__\",\n operator.imatmul: \"__imatmul__\",\n operator.imod: \"__imod__\",\n operator.imul: \"__imul__\",\n operator.ior: \"__ior__\",\n operator.ipow: \"__ipow__\",\n operator.irshift: \"__irshift__\",\n operator.isub: \"__isub__\",\n operator.itruediv: \"__idiv__\",\n operator.ixor: \"__ixor__\",\n}\n\n_in_place_to_regular = {\n operator.iadd: operator.add,\n operator.iand: operator.and_,\n operator.ifloordiv: operator.floordiv,\n operator.ilshift: operator.lshift,\n operator.imatmul: operator.matmul,\n operator.imod: operator.mod,\n operator.imul: operator.mul,\n operator.ior: operator.or_,\n operator.ipow: operator.pow,\n operator.irshift: operator.rshift,\n operator.isub: operator.sub,\n operator.itruediv: operator.truediv,\n operator.ixor: operator.xor,\n}\n\n\ndef _raise_unsupported(func: Any) -> NoReturn:\n if inspect.ismethoddescriptor(func) or isinstance(\n func, _builtin_function_or_method\n ):\n func = func.__name__\n\n raise ValueError(f\"Function {func} is not supported by Bean Machine Graph.\")\n\n\ndef _is_in_place_operator(func: Callable) -> bool:\n return func in _in_place_to_regular\n\n\ndef _ordinary_arg_or_const(arg: Any) -> bool:\n return isinstance(arg, bn.ConstantNode) or not isinstance(arg, BMGNode)\n\n\ndef only_ordinary_arguments(args, kwargs) -> bool:\n if any(isinstance(arg, BMGNode) for arg in args):\n return False\n if any(isinstance(arg, BMGNode) for arg in kwargs.values()):\n return False\n return True\n\n\ndef _only_ordinary_arguments_or_constants(\n args: List[Any], kwargs: Dict[str, Any]\n) -> bool:\n return all(_ordinary_arg_or_const(arg) for arg in args) and all(\n _ordinary_arg_or_const(arg) for arg in kwargs.values()\n )\n\n\ndef _get_ordinary_value(x: Any) -> Any:\n return x.value if isinstance(x, bn.ConstantNode) else x\n\n\ndef _is_standard_normal(x: Any) -> bool:\n return isinstance(x, dist.Normal) and x.mean == 0.0 and x.stddev == 1.0\n\n\ndef _is_phi_bound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:\n # Is this Normal(0.0, 1.0).cdf(x) ?\n # TODO: Support kwargs\n return (\n isinstance(f, MethodType)\n and f.__func__ is dist.Normal.cdf\n and len(arguments) == 1\n and _is_standard_normal(f.__self__)\n )\n\n\ndef _is_phi_unbound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:\n # Is this Normal.cdf(Normal(0.0, 1.0), x)?\n # TODO: Support kwargs\n return (\n f is dist.Normal.cdf\n and len(arguments) == 2\n and _is_standard_normal(arguments[0])\n )\n\n\ndef _is_phi(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:\n return _is_phi_unbound(f, arguments, kwargs) or _is_phi_bound(f, arguments, kwargs)\n\n\ndef _flatten_all_lists(xs):\n \"\"\"Takes a list-of-lists, with arbitrary 
nesting level;\n returns an iteration of all elements.\"\"\"\n if isinstance(xs, list):\n for x in xs:\n yield from _flatten_all_lists(x)\n else:\n yield xs\n\n\ndef _list_to_zeros(xs):\n \"\"\"Takes a list-of-lists, with arbitrary nesting level;\n returns a list-of-lists of the same shape but with every non-list\n element replaced with zero.\"\"\"\n if isinstance(xs, list):\n return [_list_to_zeros(x) for x in xs]\n return 0\n\n\ndef _hashable(x: Any) -> bool:\n # Oddly enough, Python does not allow you to test for set inclusion\n # if the object is not hashable. Since it is impossible for an unhashable\n # object to be in a set, Python could simply say no when asked if a set\n # contains any unhashable object. It does not, so we are forced to do so.\n\n # All hashable objects have a callable __hash__ attribute.\n if not hasattr(x, \"__hash__\"):\n return False\n if not isinstance(x.__hash__, Callable):\n return False\n\n # It is possible that callable __hash__ exists but throws, which makes it\n # unhashable. Eliminate that possibility as well.\n try:\n hash(x)\n except Exception:\n return False\n\n return True\n\n\n_empty_args = []\n_empty_kwargs = {}\n\n# Oddly enough there does not appear to be an easy way to obtain the type\n# of builtin methods.\n_builtin_function_or_method = type(abs)\n\n\ndef _is_any_torch_function(f: Callable) -> bool:\n # Torch functions we either know about or we reject them immediately;\n # we do not attempt to extract a graph of a model which contains\n # a call to an unknown torch function with stochastic arguments.\n #\n # Given a reference to a function, how can we know if it is\n # a torch function? Torch does not make it very easy on us to figure\n # out what module a function is from. Let's choose some typical\n # methods as examples, like arccos or erf:\n #\n # * torch.Tensor.arccos has no __module__ attribute.\n # * torch.arccos.__module__ is None but .__objclass__ has a module string.\n # * torch.special.erf.__module__ is the string \"torch.special.erf.__module__\"\n # * torch.tensor(1).arccos.__module__ is None and has no .__objclass__, but\n # does have a __self__ with a module.\n #\n # Our first step then is to see if we have a module.\n m = getattr(f, \"__module__\", None)\n if m is None:\n # We don't have a module. Do we have an __objclass__ with a module?\n oc = getattr(f, \"__objclass__\", None)\n if oc is not None:\n m = getattr(oc, \"__module__\", None)\n\n if m is None:\n # We still don't have a module. Maybe __self__ has a module.\n s = getattr(f, \"__self__\", None)\n if s is not None:\n m = getattr(s, \"__module__\", None)\n\n if m is not None:\n return isinstance(m, str) and (m == \"torch\" or m.startswith(\"torch.\"))\n\n # We don't have a module or an objclass.\n #\n # If we have something like torch.arccos then we can simply\n # check the torch module to see if we can find this exact reference.\n return any(item is f for _, item in torch.__dict__.items())\n\n\ndef _is_tensor_unbound_instance_method(f: Callable) -> bool:\n # This identifies if a function object is a method *descriptor*\n # such as torch.Tensor.add; that is, the method before it is bound\n # to a particular self. This function does NOT identify if a function\n # is a bound instance method, such as torch.tensor(1.0).add. 
See below.\n if not inspect.ismethoddescriptor(f):\n return False\n objc = getattr(f, \"__objclass__\", None)\n return objc is torch.Tensor or objc in torch.Tensor.__bases__\n\n\ndef _is_tensor_bound_instance_method(f: Callable) -> bool:\n # This identifies if a function object is an instance method of\n # a tensor already bound to a particular self. All such functions\n # in torch are marked as builtin.\n return isinstance(f, _builtin_function_or_method) and isinstance(\n getattr(f, \"__self__\", None), torch.Tensor\n )\n\n\ndef _get_unbound_tensor_method(f: Callable) -> Callable:\n # Given a bound-to-self tensor instance method, obtain its corresponding\n # unbound descriptor. In normal Python, the protocol is that the bound\n # method has attribute __func__ pointing back to the descriptor but\n # torch does not follow this protocol. Rather, we'll look it up by name.\n assert _is_tensor_bound_instance_method(f)\n unbound = getattr(torch.Tensor, f.__name__, None)\n assert _is_tensor_unbound_instance_method(unbound)\n return unbound\n\n\ndef canonicalize_function(\n function: Any, arguments: List[Any]\n) -> Tuple[Callable, List[Any]]:\n # In Python a function that is a member of a class can be in either a \"bound\"\n # or \"unbound\" form. Suppose c is of type C and we are calling foo with argument\n # x. We could have:\n #\n # bound: c.foo(x)\n # unbound: C.foo(c, x)\n #\n # The bound version calls the unbound version. How? In the bound case the fetch\n # of c.foo returns a method object with attribute __self__ set to c and attribute\n # __func__ set to C.foo. The call on the method object then invokes\n # __func__(__self__, x).\n #\n # Unfortunately, calls to torch tensor methods do not follow this convention;\n # instead of returning a method object with __func__ and __self__, it returns\n # a builtin method object with __self__ but no __func__, so we call special helpers\n # for those.\n #\n # It is useful when analyzing calls to have them in a consistent form. This function\n # turns bound function calls into the equivalent unbound function call.\n if isinstance(function, MethodType):\n f = function.__func__\n args = [function.__self__] + arguments\n assert isinstance(f, Callable)\n elif _is_tensor_bound_instance_method(function):\n f = _get_unbound_tensor_method(function)\n args = [function.__self__] + arguments\n elif isinstance(function, Callable):\n f = function\n args = arguments\n else:\n _raise_unsupported(function)\n assert isinstance(f, Callable), ( # pyre-ignore\n \"_canonicalize_function should return callable \"\n + f\"but got {type(f)} {str(f)}\" # pyre-ignore\n )\n return (f, args) # pyre-ignore\n\n\n# This helper class is to solve a problem in the simulated\n# execution of the model during graph accumulation. Consider\n# a model fragment like:\n#\n# n = normal()\n# y = n.exp()\n#\n# During graph construction, n will be a SampleNode whose\n# operand is a NormalNode, but SampleNode does not have a\n# method \"exp\".\n#\n# The lifted program we execute will be something like:\n#\n# n = bmg.handle_function(normal, [])\n# func = bmg.handle_dot(n, \"exp\")\n# y = bmg.handle_function(func, [])\n#\n# The \"func\" that is returned is one of these KnownFunction\n# objects, which captures the notion \"I am an invocation\n# of known function Tensor.exp on a receiver that is a BMG\n# node\". 
We then turn that into a exp node in handle_function.\n\n\nclass KnownFunction:\n receiver: BMGNode\n function: Callable\n\n def __init__(self, receiver: BMGNode, function: Callable) -> None:\n if not isinstance(receiver, BMGNode):\n raise TypeError(\n f\"KnownFunction receiver must be BMGNode but is {type(receiver)}\"\n )\n if not isinstance(function, Callable):\n raise TypeError(\n f\"KnownFunction function must be Callable but is {type(function)}\"\n )\n\n self.receiver = receiver\n self.function = function\n\n\nclass SpecialFunctionCaller:\n # As we execute the lifted program, we accumulate graph nodes in the\n # graph builder,and the program passes around graph nodes instead of\n # regular values. What happens when a graph node is passed to a\n # function, or used as the receiver of a function? That function will be\n # expecting a regular value as its argument or receiver.\n #\n # Certain function calls are special because they call graph nodes to\n # be created; we have a dictionary here that maps Python function objects\n # to the graph builder method that knows how to create the appropriate\n # node type.\n #\n # There are also some functions which we know can be passed a graph node\n # and will treat it correctly even though it is a graph node and not\n # a value. For example, the function which constructs a dictionary\n # or the function which constructs a list. When we encounter one of\n # these functions in the lifted program, we do not create a graph node\n # or call a special helper function; we simply allow it to be called normally.\n\n _bmg: BMGraphBuilder\n _function_map: Dict[Callable, Callable]\n _special_tensor_instance_function_names: Set[str]\n\n def __init__(self, bmg: BMGraphBuilder) -> None:\n self._bmg = bmg\n self._function_map = {\n #\n # Built-in functions\n #\n float: self._builtin_float,\n #\n # Math functions\n #\n math.exp: self._math_exp,\n math.log: self._math_log,\n #\n # Hints\n #\n log1mexp: self._hint_log1mexp,\n math_log1mexp: self._hint_log1mexp,\n #\n # Operators as functions\n #\n operator.add: self._operator_add,\n operator.and_: self._operator_and,\n operator.contains: self._operator_contains,\n operator.eq: self._operator_eq,\n operator.floordiv: self._operator_floordiv,\n operator.ge: self._operator_ge,\n operator.gt: self._operator_gt,\n operator.inv: self._operator_inv,\n operator.is_: self._operator_is,\n operator.is_not: self._operator_is_not,\n operator.le: self._operator_le,\n operator.lshift: self._operator_lshift,\n operator.lt: self._operator_lt,\n operator.matmul: self._operator_matmul,\n operator.mod: self._operator_mod,\n operator.mul: self._operator_mul,\n operator.ne: self._operator_ne,\n operator.neg: self._operator_neg,\n operator.not_: self._operator_not,\n operator.or_: self._operator_or,\n operator.pos: self._operator_pos,\n operator.pow: self._operator_pow,\n operator.rshift: self._operator_rshift,\n operator.sub: self._operator_sub,\n operator.truediv: self._operator_truediv,\n operator.xor: self._operator_xor,\n #\n #\n # Torch distributions\n #\n # (Remember to add a case to distribution_to_node.)\n #\n dist.Bernoulli: self._dist_bernoulli,\n dist.Beta: self._dist_beta,\n dist.Binomial: self._dist_binomial,\n dist.Categorical: self._dist_categorical,\n # TODO: Cauchy\n dist.Chi2: self._dist_chi2,\n # TODO: ContinuousBernoulli\n dist.Dirichlet: self._dist_dirichlet,\n # TODO: Exponential\n # TODO: FisherSnedecor\n dist.Gamma: self._dist_gamma,\n # TODO: Geometric\n # TODO: Gumbel\n dist.HalfCauchy: 
self._dist_halfcauchy,\n dist.HalfNormal: self._dist_halfnormal,\n # TODO: Independent\n # TODO: Kumaraswamy\n # TODO: LKJCholesky\n # TODO: Laplace\n # TODO: LogNormal\n # TODO: LowRankMultivariateNormal\n # TODO: MixtureSameFamily\n # TODO: Multinomial\n # TODO: MultivariateNormal\n # TODO: NegativeBinomial\n dist.Normal: self._dist_normal,\n # TODO: OneHotCategorical\n # TODO: Pareto\n # TODO: Poisson\n dist.Poisson: self._dist_poisson,\n # TODO: RelaxedBernoulli\n # TODO: LogitRelaxedBernoulli\n # TODO: RelaxedOneHotCategorical\n dist.StudentT: self._dist_studentt,\n # TODO: TransformedDistribution\n dist.Uniform: self._dist_uniform,\n # TODO: VonMises\n # TODO: Weibull\n #\n # Torch functions\n #\n torch.Tensor.add: self._torch_add,\n torch.add: self._torch_add,\n torch.Tensor.bitwise_and: self._torch_bitwise_and,\n torch.bitwise_and: self._torch_bitwise_and,\n torch.Tensor.bitwise_not: self._torch_bitwise_not,\n torch.bitwise_not: self._torch_bitwise_not,\n torch.Tensor.bitwise_or: self._torch_bitwise_or,\n torch.bitwise_or: self._torch_bitwise_or,\n torch.Tensor.bitwise_xor: self._torch_bitwise_xor,\n torch.bitwise_xor: self._torch_bitwise_xor,\n torch.Tensor.bitwise_left_shift: self._torch_bitwise_left_shift,\n torch.bitwise_left_shift: self._torch_bitwise_left_shift,\n torch.Tensor.bitwise_right_shift: self._torch_bitwise_right_shift,\n torch.bitwise_right_shift: self._torch_bitwise_right_shift,\n torch.Tensor.cholesky: self._torch_cholesky,\n torch.linalg.cholesky: self._torch_cholesky,\n torch.Tensor.div: self._torch_div,\n torch.div: self._torch_div,\n torch.Tensor.divide: self._torch_div,\n torch.divide: self._torch_div,\n torch.Tensor.eq: self._torch_eq,\n torch.eq: self._torch_eq,\n torch.Tensor.equal: self._torch_eq,\n torch.equal: self._torch_eq,\n torch.Tensor.exp: self._torch_exp,\n torch.exp: self._torch_exp,\n torch.Tensor.exp2: self._torch_exp2,\n torch.exp2: self._torch_exp2,\n torch.special.exp2: self._torch_exp2,\n torch.Tensor.expm1: self._torch_expm1,\n torch.expm1: self._torch_expm1,\n torch.special.expm1: self._torch_expm1,\n torch.Tensor.float: self._torch_float,\n # TODO: float_power\n torch.Tensor.floor_divide: self._torch_floor_divide,\n torch.floor_divide: self._torch_floor_divide,\n torch.Tensor.fmod: self._torch_fmod,\n torch.fmod: self._torch_fmod,\n torch.Tensor.ge: self._torch_ge,\n torch.ge: self._torch_ge,\n torch.Tensor.greater: self._torch_gt,\n torch.greater: self._torch_gt,\n torch.Tensor.greater_equal: self._torch_ge,\n torch.greater_equal: self._torch_ge,\n torch.Tensor.gt: self._torch_gt,\n torch.gt: self._torch_gt,\n torch.Tensor.int: self._torch_int,\n torch.Tensor.item: self._torch_item,\n torch.Tensor.le: self._torch_le,\n torch.le: self._torch_le,\n torch.Tensor.less: self._torch_lt,\n torch.less: self._torch_lt,\n torch.Tensor.less_equal: self._torch_le,\n torch.less_equal: self._torch_le,\n torch.Tensor.log: self._torch_log,\n torch.log: self._torch_log,\n torch.Tensor.log10: self._torch_log10,\n torch.log10: self._torch_log10,\n torch.Tensor.log1p: self._torch_log1p,\n torch.log1p: self._torch_log1p,\n torch.special.log1p: self._torch_log1p,\n torch.Tensor.log2: self._torch_log2,\n torch.log2: self._torch_log2,\n # TODO: logical_and\n # TODO: special.logit\n torch.Tensor.logical_not: self._torch_logical_not,\n torch.logical_not: self._torch_logical_not,\n # TODO: logical_or\n # TODO: logical_xor\n torch.Tensor.logsumexp: self._torch_logsumexp,\n torch.logsumexp: self._torch_logsumexp,\n torch.special.logsumexp: 
self._torch_logsumexp,\n torch.Tensor.lt: self._torch_lt,\n torch.lt: self._torch_lt,\n torch.Tensor.matmul: self._torch_matmul,\n torch.matmul: self._torch_matmul,\n torch.Tensor.mm: self._torch_mm,\n torch.mm: self._torch_mm,\n torch.Tensor.mul: self._torch_mul,\n torch.mul: self._torch_mul,\n torch.Tensor.multiply: self._torch_mul,\n torch.multiply: self._torch_mul,\n torch.Tensor.ne: self._torch_ne,\n torch.ne: self._torch_ne,\n torch.Tensor.not_equal: self._torch_ne,\n torch.not_equal: self._torch_ne,\n torch.Tensor.neg: self._torch_neg,\n torch.neg: self._torch_neg,\n torch.Tensor.negative: self._torch_neg,\n torch.negative: self._torch_neg,\n torch.Tensor.pow: self._torch_pow,\n torch.pow: self._torch_pow,\n torch.Tensor.remainder: self._torch_fmod,\n torch.remainder: self._torch_fmod,\n torch.sigmoid: self._torch_sigmoid,\n torch.Tensor.sigmoid: self._torch_sigmoid,\n torch.special.expit: self._torch_sigmoid,\n torch.Tensor.sqrt: self._torch_sqrt,\n torch.sqrt: self._torch_sqrt,\n torch.Tensor.sub: self._torch_sub,\n torch.sub: self._torch_sub,\n torch.Tensor.subtract: self._torch_sub,\n torch.subtract: self._torch_sub,\n torch.Tensor.sum: self._torch_sum,\n torch.sum: self._torch_sum,\n torch.Tensor.true_divide: self._torch_div,\n torch.true_divide: self._torch_div,\n }\n self._special_tensor_instance_function_names = {\n f.__name__\n for f in self._function_map\n if _is_tensor_unbound_instance_method(f)\n }\n\n def _is_special_tensor_bound_instance_method_name(self, name: str) -> bool:\n return name in self._special_tensor_instance_function_names\n\n def bind_tensor_instance_function(\n self, receiver: BMGNode, name: str\n ) -> KnownFunction:\n # TODO: What if the node represents a distribution, not a tensor?\n # Should we produce a better error message?\n if hasattr(torch.Tensor, name):\n return KnownFunction(receiver, getattr(torch.Tensor, name))\n _raise_unsupported(name)\n\n def is_special_tensor_bound_instance_method(self, f: Callable) -> bool:\n return self._is_special_tensor_bound_instance_method_name(\n f.__name__\n ) and _is_tensor_bound_instance_method(f)\n\n def get_special_tensor_unbound_instance_method(self, f: Callable) -> Callable:\n assert self.is_special_tensor_bound_instance_method(f)\n return _get_unbound_tensor_method(f)\n\n def _make_constant(self, arg: Any) -> BMGNode:\n return arg if isinstance(arg, BMGNode) else self._bmg.add_constant(arg)\n\n def is_special_function(\n self,\n func: Callable,\n args: List[Any] = _empty_args, # TODO: Unused\n kwargs: Dict[str, Any] = _empty_kwargs, # TODO: Unused\n ) -> bool:\n if isinstance(func, KnownFunction):\n return True\n if _is_any_torch_function(func):\n return True\n if not _hashable(func):\n return False\n if func in allowed_functions:\n return True\n if func in self._function_map:\n return True\n # All in-place operators are special functions.\n if _is_in_place_operator(func):\n return True\n return False\n\n def _canonicalize_function(\n self, func: Callable, args: List[Any]\n ) -> Tuple[Callable, List[Any]]:\n if isinstance(func, KnownFunction):\n args = [func.receiver] + args\n func = func.function\n else:\n func, args = canonicalize_function(func, args)\n return func, args\n\n def do_special_call_maybe_stochastic(\n self,\n func: Any,\n args: List[Any],\n kwargs: Dict[str, Any] = _empty_kwargs,\n ) -> Any:\n # If we possibly can, just call the original function with ordinary arguments.\n # Otherwise, convert everything to a graph node and call our helper which\n # does node construction.\n\n assert 
self.is_special_function(func, args, kwargs)\n func, args = self._canonicalize_function(func, args)\n if func is torch.tensor:\n return self._tensor_constructor(*args, **kwargs)\n if (\n _only_ordinary_arguments_or_constants(args, kwargs)\n or func in allowed_functions\n ):\n new_args = (_get_ordinary_value(arg) for arg in args)\n new_kwargs = {key: _get_ordinary_value(arg) for key, arg in kwargs.items()}\n return func(*new_args, **new_kwargs)\n\n if _is_in_place_operator(func):\n return self._in_place_operator(func, *args)\n\n return self.do_special_call_always_stochastic(func, args, kwargs)\n\n def do_special_call_always_stochastic(\n self,\n func: Callable,\n args: List[Any],\n kwargs: Dict[str, Any] = _empty_kwargs,\n ) -> BMGNode:\n # Never call the original function with ordinary arguments. Convert everything\n # to a graph node and call our helper which does node construction.\n assert self.is_special_function(func, args, kwargs)\n # We should never call do_special_call_always_stochastic on (1) a tensor\n # constructor, or (2) a function known to be allowed to take any values.\n assert func not in allowed_functions\n assert func is not torch.tensor\n func, args = self._canonicalize_function(func, args)\n\n if _is_phi_unbound(func, args, kwargs):\n args = args[1:]\n node_constructor = self._phi\n elif _hashable(func) and func in self._function_map:\n node_constructor = self._function_map[func]\n else:\n # We are trying to do an always-stochastic call on a function that\n # we do not yet know how to handle.\n _raise_unsupported(func)\n new_args = (self._make_constant(arg) for arg in args)\n new_kwargs = {key: self._make_constant(arg) for key, arg in kwargs.items()}\n return node_constructor(*new_args, **new_kwargs) # pyre-ignore\n\n #\n # Builtins; these must have the same signature as their corresponding\n # builtin functions.\n #\n\n def _builtin_float(self, input: BMGNode) -> BMGNode:\n # TODO: Do we want to do this at all? Why should float(t) insert a\n # TO_REAL node into the graph? We can simply insert TO_REAL where required\n # by the BMG type system.\n return self._bmg.add_to_real(input)\n\n #\n # Math functions\n #\n def _math_exp(self, input: BMGNode) -> BMGNode:\n # TODO: Right signature?\n return self._bmg.add_exp(input)\n\n def _math_log(self, input: BMGNode) -> BMGNode:\n return self._bmg.add_log(input)\n\n #\n # Hints\n # TODO: Eliminate this hack. 
Write a problem fixer which detects these\n # patterns and rewrites them into the more efficient operator.\n #\n\n def _hint_log1mexp(self, x: BMGNode) -> BMGNode:\n return self._bmg.add_log1mexp(x)\n\n #\n # Distributions; these must have the same signature as the corresponding\n # constructor.\n #\n def distribution_to_node( # noqa\n self, distribution: dist.Distribution\n ) -> bn.DistributionNode:\n t = type(distribution)\n\n if isinstance(distribution, dist.Bernoulli):\n args = [distribution.probs]\n elif isinstance(distribution, dist.Beta):\n args = [distribution.concentration1, distribution.concentration0]\n elif isinstance(distribution, dist.Binomial):\n args = [distribution.total_count, distribution.probs]\n elif isinstance(distribution, dist.Categorical):\n args = [distribution.probs]\n elif isinstance(distribution, dist.Chi2):\n args = [distribution.df]\n elif isinstance(distribution, dist.Dirichlet):\n args = [distribution.concentration]\n elif isinstance(distribution, dist.Gamma):\n args = [distribution.concentration, distribution.rate]\n elif isinstance(distribution, dist.HalfCauchy):\n args = [distribution.scale]\n elif isinstance(distribution, dist.HalfNormal):\n args = [distribution.scale]\n elif isinstance(distribution, dist.Normal):\n args = [distribution.mean, distribution.stddev]\n elif isinstance(distribution, dist.Poisson):\n args = [distribution.rate]\n elif isinstance(distribution, dist.StudentT):\n args = [distribution.df, distribution.loc, distribution.scale]\n elif isinstance(distribution, dist.Uniform):\n args = [distribution.low, distribution.high]\n else:\n # TODO: Better error\n raise TypeError(\n f\"Distribution '{t.__name__}' is not supported by Bean Machine Graph.\"\n )\n\n d = self.do_special_call_always_stochastic(t, args, {})\n assert isinstance(d, bn.DistributionNode)\n return d\n\n def _dist_bernoulli(\n self,\n probs: Optional[BMGNode] = None,\n logits: Optional[BMGNode] = None,\n validate_args: Any = None,\n ) -> BMGNode:\n if (probs is None and logits is None) or (\n probs is not None and logits is not None\n ):\n raise ValueError(\"Bernoulli requires exactly one of probs or logits\")\n if logits is not None:\n return self._bmg.add_bernoulli_logit(logits)\n return self._bmg.add_bernoulli(probs)\n\n def _dist_beta(\n self,\n concentration1: BMGNode,\n concentration0: BMGNode,\n validate_args: Any = None,\n ) -> BMGNode:\n return self._bmg.add_beta(concentration1, concentration0)\n\n def _dist_binomial(\n self,\n total_count: Optional[BMGNode] = None,\n probs: Optional[BMGNode] = None,\n logits: Optional[BMGNode] = None,\n validate_args: Any = None,\n ) -> BMGNode:\n if (probs is None and logits is None) or (\n probs is not None and logits is not None\n ):\n raise ValueError(\"Binomial requires exactly one of probs or logits\")\n\n # TODO: Create a test case for Binomial(probs=0.5) where total_count\n # is omitted.\n if total_count is None:\n total_count = self._make_constant(1)\n\n if logits is not None:\n return self._bmg.add_binomial_logit(total_count, logits)\n return self._bmg.add_binomial(total_count, probs)\n\n def _dist_categorical(\n self,\n probs: Optional[BMGNode] = None,\n logits: Optional[BMGNode] = None,\n validate_args: Any = None,\n ) -> BMGNode:\n if (probs is None and logits is None) or (\n probs is not None and logits is not None\n ):\n raise ValueError(\"Categorical requires exactly one of probs or logits\")\n if logits is not None:\n return self._bmg.add_categorical_logit(logits)\n return self._bmg.add_categorical(probs)\n\n def 
_dist_chi2(self, df: BMGNode, validate_args: Any = None) -> BMGNode:\n return self._bmg.add_chi2(df)\n\n def _dist_dirichlet(self, concentration: BMGNode, validate_args=None) -> BMGNode:\n return self._bmg.add_dirichlet(concentration)\n\n def _dist_gamma(\n self, concentration: BMGNode, rate: BMGNode, validate_args=None\n ) -> BMGNode:\n return self._bmg.add_gamma(concentration, rate)\n\n def _dist_halfcauchy(self, scale: BMGNode, validate_args=None) -> BMGNode:\n return self._bmg.add_halfcauchy(scale)\n\n def _dist_halfnormal(self, scale: Any, validate_args=None) -> BMGNode:\n return self._bmg.add_halfnormal(scale)\n\n def _dist_normal(self, loc: BMGNode, scale: BMGNode, validate_args=None) -> BMGNode:\n return self._bmg.add_normal(loc, scale)\n\n def _dist_poisson(self, rate: BMGNode) -> BMGNode:\n return self._bmg.add_poisson(rate)\n\n def _dist_studentt(\n self,\n df: BMGNode,\n loc: Optional[BMGNode] = None,\n scale: Optional[BMGNode] = None,\n validate_args=None,\n ) -> BMGNode:\n if loc is None:\n loc = self._make_constant(0)\n if scale is None:\n scale = self._make_constant(1)\n return self._bmg.add_studentt(df, loc, scale)\n\n def _dist_uniform(self, low: BMGNode, high: BMGNode, validate_args=None) -> BMGNode:\n return self._bmg.add_uniform(low, high)\n\n #\n # Tensor constructor\n #\n\n def _tensor_constructor(self, data: Any) -> Any:\n\n # The tensor constructor is a bit tricky because it takes a single\n # argument that is either a value or a list of values. We need:\n # (1) a flattened list of all the arguments, and\n # (2) the size of the original tensor.\n\n flattened_args = list(_flatten_all_lists(data))\n if not any(isinstance(arg, BMGNode) for arg in flattened_args):\n # None of the arguments are graph nodes. We can just\n # construct the tensor normally.\n return torch.tensor(data)\n # At least one of the arguments is a graph node.\n #\n # If we're constructing a singleton tensor and the single value\n # is a graph node, we can just keep it as that graph node.\n if len(flattened_args) == 1:\n return flattened_args[0]\n\n # We have two or more arguments and at least one is a graph node.\n # Convert them all to graph nodes.\n for index, arg in enumerate(flattened_args):\n if not isinstance(arg, BMGNode):\n flattened_args[index] = self._bmg.add_constant(arg)\n\n # What shape is this tensor? Rather than duplicating the logic in the\n # tensor class, let's just construct the same shape made of entirely\n # zeros and then ask what shape it is.\n size = torch.tensor(_list_to_zeros(data)).size()\n return self._bmg.add_tensor(size, *flattened_args)\n\n #\n # Tensor functions; these must have the same signature as the\n # corresponding torch function.\n #\n # TODO: We do not support mutation of stochastic tensors; we should produce an\n # error if there are any \"out\" values.\n\n def _phi(self, value: BMGNode) -> BMGNode:\n return self._bmg.add_phi(value)\n\n def _torch_add(\n self,\n input: BMGNode,\n other: BMGNode,\n alpha: Optional[BMGNode] = None,\n out: Any = None,\n ) -> BMGNode:\n # TODO: tensor add has the semantics input + alpha * other; if alpha is present\n # then we need to generate a multiply and an addition.\n return self._bmg.add_addition(input, other)\n\n def _torch_bitwise_and(\n self, input: BMGNode, other: BMGNode, out: Any = None\n ) -> BMGNode:\n return self._bmg.add_bitand(input, other)\n\n def _torch_bitwise_left_shift(\n self, input: BMGNode, other: BMGNode, out: Any = None\n ) -> BMGNode:\n # TODO: In torch, a << b is not bitwise at all. 
Rather it is simply an\n # an alias for a * (2 ** b). Make a rewriter that turns shifts into\n # this operation.\n return self._bmg.add_lshift(input, other)\n\n def _torch_bitwise_not(self, input: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_invert(input)\n\n def _torch_bitwise_or(\n self, input: BMGNode, other: BMGNode, out: Any = None\n ) -> BMGNode:\n return self._bmg.add_bitor(input, other)\n\n def _torch_bitwise_right_shift(\n self, input: BMGNode, other: BMGNode, out: Any = None\n ) -> BMGNode:\n # TODO: In torch, a >> b is not bitwise at all. Rather it is simply an\n # an alias for a * (2 ** -b). Make a rewriter that turns shifts into\n # this operation.\n return self._bmg.add_rshift(input, other)\n\n def _torch_bitwise_xor(\n self, input: BMGNode, other: BMGNode, out: Any = None\n ) -> BMGNode:\n return self._bmg.add_bitxor(input, other)\n\n def _torch_cholesky(\n self,\n input: BMGNode,\n upper: Optional[BMGNode] = None,\n out: Any = None,\n ) -> BMGNode:\n # TODO: What to do with upper?\n return self._bmg.add_cholesky(input)\n\n def _torch_div(\n self,\n input: BMGNode,\n other: BMGNode,\n rounding_mode: Optional[BMGNode] = None,\n out: Any = None,\n ) -> BMGNode:\n # TODO: Should we give an error if there is a rounding mode?\n return self._bmg.add_division(input, other)\n\n def _torch_eq(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_equal(input, other)\n\n def _torch_exp(self, input: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_exp(input)\n\n def _torch_exp2(self, input: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_exp2(input)\n\n def _torch_expm1(self, input: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_expm1(input)\n\n def _torch_float(\n self, input: BMGNode, memory_format: Optional[BMGNode] = None\n ) -> BMGNode:\n # TODO: Do we want to do this at all? Why should t.float() insert a\n # TO_REAL node into the graph? 
We can simply insert TO_REAL where required\n # by the BMG type system.\n # TODO: If we do keep this, what should we do with memory_format?\n return self._bmg.add_to_real(input)\n\n def _torch_floor_divide(\n self,\n input: BMGNode,\n other: BMGNode,\n out: Any = None,\n ) -> BMGNode:\n return self._bmg.add_floordiv(input, other)\n\n def _torch_fmod(\n self,\n input: BMGNode,\n other: BMGNode,\n out: Any = None,\n ) -> BMGNode:\n return self._bmg.add_mod(input, other)\n\n def _torch_ge(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_greater_than_equal(input, other)\n\n def _torch_gt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_greater_than(input, other)\n\n def _torch_int(\n self, input: BMGNode, memory_format: Optional[BMGNode] = None\n ) -> BMGNode:\n # TODO: What should we do with memory_format?\n return self._bmg.add_to_int(input)\n\n def _torch_item(self, input: BMGNode) -> Any:\n return self._bmg.add_item(input)\n\n def _torch_le(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_less_than_equal(input, other)\n\n def _torch_log(self, input: BMGNode, out: Any = None) -> Any:\n return self._bmg.add_log(input)\n\n def _torch_log10(self, input: BMGNode, out: Any = None) -> Any:\n return self._bmg.add_log10(input)\n\n def _torch_log1p(self, input: BMGNode, out: Any = None) -> Any:\n return self._bmg.add_log1p(input)\n\n def _torch_log2(self, input: BMGNode, out: Any = None) -> Any:\n return self._bmg.add_log2(input)\n\n def _torch_logical_not(self, input: BMGNode, out: Any = None) -> Any:\n return self._bmg.add_not(input)\n\n def _torch_logsumexp(\n self,\n input: BMGNode,\n dim: BMGNode,\n keepdim: Optional[BMGNode] = None,\n out: Any = None,\n ) -> Any:\n if keepdim is None:\n keepdim = self._make_constant(False)\n return self._bmg.add_logsumexp_torch(input, dim, keepdim)\n\n def _torch_lt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_less_than(input, other)\n\n def _torch_matmul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n # TODO: mm and matmul have different behavior; we probably need to make\n # a distinction here.\n return self._bmg.add_matrix_multiplication(input, other)\n\n def _torch_mm(self, input: BMGNode, mat2: BMGNode, out: Any = None) -> BMGNode:\n # TODO: mm and matmul have different behavior; we probably need to make\n # a distinction here.\n return self._bmg.add_matrix_multiplication(input, mat2)\n\n def _torch_mul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_multiplication(input, other)\n\n def _torch_ne(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_not_equal(input, other)\n\n def _torch_neg(self, input: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_negate(input)\n\n def _torch_pow(self, input: BMGNode, exponent: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_power(input, exponent)\n\n def _torch_sigmoid(self, input: BMGNode, out: Any = None) -> BMGNode:\n return self._bmg.add_logistic(input)\n\n def _torch_sqrt(self, input: BMGNode, out: Any = None) -> Any:\n return self._bmg.add_squareroot(input)\n\n def _torch_sub(\n self,\n input: BMGNode,\n other: BMGNode,\n alpha: Optional[BMGNode] = None,\n out: Any = None,\n ) -> BMGNode:\n # TODO: tensor sub has the semantics input - alpha * other; if alpha is present\n # then we need to generate a multiply and an 
subtraction\n return self._bmg.add_subtraction(input, other)\n\n def _torch_sum(\n self,\n input: BMGNode,\n dtype: Any = None,\n ) -> Any:\n return self._bmg.add_sum(input)\n\n #\n # Operators as functions\n #\n\n def _operator_add(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_addition(a, b)\n\n def _operator_and(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_bitand(a, b)\n\n def _operator_contains(self, a: BMGNode, b: BMGNode) -> BMGNode:\n # Note that \"a\" is the container and \"b\" is the query. That is,\n # this means \"b in a\", NOT \"a in b\"\n return self._bmg.add_in(b, a)\n\n def _operator_eq(self, a: Any, b: Any) -> Any:\n return self._bmg.add_equal(a, b)\n\n def _operator_floordiv(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_floordiv(a, b)\n\n def _operator_ge(self, a: Any, b: Any) -> Any:\n return self._bmg.add_greater_than_equal(a, b)\n\n def _operator_gt(self, a: Any, b: Any) -> Any:\n return self._bmg.add_greater_than(a, b)\n\n def _operator_inv(self, obj: BMGNode) -> BMGNode:\n return self._bmg.add_invert(obj)\n\n def _operator_is(self, a: Any, b: Any) -> Any:\n return self._bmg.add_is(a, b)\n\n def _operator_is_not(self, a: Any, b: Any) -> Any:\n return self._bmg.add_is_not(a, b)\n\n def _operator_le(self, a: Any, b: Any) -> Any:\n return self._bmg.add_less_than_equal(a, b)\n\n def _operator_lshift(self, a: BMGNode, b: BMGNode) -> BMGNode:\n # TODO: In torch, a << b is not bitwise at all. Rather it is simply an\n # an alias for a * (2 ** b). Make a rewriter that turns shifts into\n # this operation.\n return self._bmg.add_lshift(a, b)\n\n def _operator_lt(self, a: Any, b: Any) -> Any:\n return self._bmg.add_less_than(a, b)\n\n def _operator_matmul(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_matrix_multiplication(a, b)\n\n def _operator_mod(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_mod(a, b)\n\n def _operator_mul(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_multiplication(a, b)\n\n def _operator_ne(self, a: Any, b: Any) -> Any:\n return self._bmg.add_not_equal(a, b)\n\n def _operator_neg(self, obj: BMGNode) -> BMGNode:\n return self._bmg.add_negate(obj)\n\n def _operator_not(self, obj: BMGNode) -> BMGNode:\n return self._bmg.add_not(obj)\n\n def _operator_or(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_bitor(a, b)\n\n def _operator_pos(self, obj: BMGNode) -> BMGNode:\n # unary + is an identity on graph nodes\n return obj\n\n def _operator_pow(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_power(a, b)\n\n def _operator_rshift(self, a: BMGNode, b: BMGNode) -> BMGNode:\n # TODO: In torch, a >> b is not bitwise at all. Rather it is simply an\n # an alias for a * (2 ** -b). Make a rewriter that turns shifts into\n # this operation.\n return self._bmg.add_rshift(a, b)\n\n def _operator_sub(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_subtraction(a, b)\n\n def _operator_truediv(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_division(a, b)\n\n def _operator_xor(self, a: BMGNode, b: BMGNode) -> BMGNode:\n return self._bmg.add_bitxor(a, b)\n\n #\n # Augmented assignment operators\n #\n\n def _in_place_operator(\n self,\n native_in_place: Callable, # operator.iadd, for example\n left: Any,\n right: Any,\n ) -> Any:\n # Handling augmented assignments (+=, -=, *=, and so on) has a lot of cases;\n # to cut down on code duplication we call this higher-level method. 
Throughout\n # the comments below we assume that we're handling a +=; the logic is the same\n # for all the operators.\n\n # TODO: We have a problem that we need to resolve regarding compilation of models\n # which have mutations of aliased tensors. Compare the action of these two similar:\n # models in the original Bean Machine implementation:\n #\n # @functional def foo():\n # x = flip() # 0 or 1\n # y = x # y is an alias for x\n # y += 1 # y is mutated in place and continues to alias x\n # return x # returns 1 or 2\n #\n # vs\n #\n # @functional def foo():\n # x = flip() # 0 or 1\n # y = x # y is an alias for x\n # y = y + 1 # y no longer aliases x; y is 1 or 2\n # return x # returns 0 or 1\n #\n # Suppose we are asked to compile the first model; how should we execute\n # the rewritten form of it so as to accumulate the correct graph? Unlike\n # tensors, graph nodes are not mutable!\n #\n # Here's what we're going to do for now:\n #\n # If neither operand is a graph node then do exactly what the model would\n # normally do:\n #\n if not isinstance(left, BMGNode) and not isinstance(right, BMGNode):\n return native_in_place(left, right)\n\n assert native_in_place in _in_place_to_regular\n native_regular = _in_place_to_regular[native_in_place]\n\n # At least one operand is a graph node. If we have tensor += graph_node\n # or graph_node += anything then optimistically assume that there\n # is NOT any alias of the mutated left side, and treat the += as though\n # it is a normal addition.\n #\n # TODO: Should we produce some sort of warning here telling the user that\n # the compiled model semantics might be different than the original model?\n # Or is that too noisy? There are going to be a lot of models with += where\n # one of the operands is an ordinary tensor and one is a graph node, but which\n # do not have any aliasing problem.\n\n if isinstance(left, torch.Tensor) or isinstance(left, BMGNode):\n return self.do_special_call_always_stochastic(\n native_regular, [left, right], {}\n )\n\n # If we've made it here then we have x += graph_node, where x is not a\n # tensor. There are two possibilities: either x is some type which implements\n # mutating in-place +=, or it is not. If it is, then just call the mutator\n # and hope for the best.\n #\n # TODO: This scenario is another opportunity for a warning or error, since\n # the model is probably not one that can be compiled if it is depending on\n # in-place mutation of an object which has a stochastic quantity added to it.\n\n assert isinstance(right, BMGNode)\n assert native_in_place in _in_place_operator_names\n if hasattr(left, _in_place_operator_names[native_in_place]):\n # It is possible that the operator exists but either returns\n # NotImplemented or raises NotImplementedError. In either case,\n # assume that we can fall back to non-mutating addition.\n try:\n result = native_in_place(left, right)\n if result is not NotImplemented:\n return result\n except NotImplementedError:\n pass\n\n # We have x += graph_node, and x is not mutating in place, so just\n # do x + graph_node:\n return self.do_special_call_maybe_stochastic(native_regular, [left, right], {})\n" ]
[ [ "numpy.unravel_index", "torch.var" ], [ "torch.__dict__.items", "torch.tensor" ] ]
richarai9/FastSpeech2
[ "d044c00a44cbfa3e1c89a22c8285a374a00e27a9", "d044c00a44cbfa3e1c89a22c8285a374a00e27a9" ]
[ "model/optimizer.py", "transformer/Layers.py" ]
[ "import torch\r\nimport numpy as np\r\n\r\n\r\nclass ScheduledOptim:\r\n \"\"\" A simple wrapper class for learning rate scheduling \"\"\"\r\n\r\n def __init__(self, model, train_config, model_config, current_step):\r\n\r\n self._optimizer = torch.optim.Adam(\r\n model.parameters(),\r\n betas=train_config[\"optimizer\"][\"betas\"],\r\n eps=train_config[\"optimizer\"][\"eps\"],\r\n weight_decay=train_config[\"optimizer\"][\"weight_decay\"],\r\n )\r\n self.n_warmup_steps = train_config[\"optimizer\"][\"warm_up_step\"]\r\n self.anneal_steps = train_config[\"optimizer\"][\"anneal_steps\"]\r\n self.anneal_rate = train_config[\"optimizer\"][\"anneal_rate\"]\r\n self.current_step = current_step\r\n self.init_lr = np.power(model_config[\"transformer\"][\"encoder_hidden\"], -0.5)\r\n\r\n def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()\r\n\r\n def zero_grad(self):\r\n # print(self.init_lr)\r\n self._optimizer.zero_grad()\r\n\r\n def load_state_dict(self, path):\r\n self._optimizer.load_state_dict(path)\r\n\r\n def _get_lr_scale(self):\r\n lr = np.min(\r\n [\r\n np.power(self.current_step, -0.5),\r\n np.power(self.n_warmup_steps, -1.5) * self.current_step,\r\n ]\r\n )\r\n for s in self.anneal_steps:\r\n if self.current_step > s:\r\n lr = lr * self.anneal_rate\r\n return lr\r\n\r\n def _update_learning_rate(self):\r\n \"\"\" Learning rate scheduling per step \"\"\"\r\n self.current_step += 1\r\n lr = self.init_lr * self._get_lr_scale()\r\n\r\n for param_group in self._optimizer.param_groups:\r\n param_group[\"lr\"] = lr\r\n", "from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.nn import functional as F\n\nfrom .SubLayers import MultiHeadAttention, PositionwiseFeedForward\n\n\nclass FFTBlock(torch.nn.Module):\n \"\"\"FFT Block\"\"\"\n\n def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1):\n super(FFTBlock, self).__init__()\n self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(\n d_model, d_inner, kernel_size, dropout=dropout\n )\n\n def forward(self, enc_input, mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input, mask=slf_attn_mask\n )\n enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)\n\n enc_output = self.pos_ffn(enc_output)\n enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)\n\n return enc_output, enc_slf_attn\n\n\nclass ConvNorm(torch.nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=None,\n dilation=1,\n bias=True,\n w_init_gain=\"linear\",\n ):\n super(ConvNorm, self).__init__()\n\n if padding is None:\n assert kernel_size % 2 == 1\n padding = int(dilation * (kernel_size - 1) / 2)\n\n self.conv = torch.nn.Conv1d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=bias,\n )\n\n def forward(self, signal):\n conv_signal = self.conv(signal)\n\n return conv_signal\n\n\nclass PostNet(nn.Module):\n \"\"\"\n PostNet: Five 1-d convolution with 512 channels and kernel size 5\n \"\"\"\n\n def __init__(\n self,\n n_mel_channels=80,\n postnet_embedding_dim=512,\n postnet_kernel_size=5,\n postnet_n_convolutions=5,\n ):\n\n super(PostNet, self).__init__()\n self.convolutions = nn.ModuleList()\n\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(\n n_mel_channels,\n postnet_embedding_dim,\n 
kernel_size=postnet_kernel_size,\n stride=1,\n padding=int((postnet_kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n ),\n nn.BatchNorm1d(postnet_embedding_dim),\n )\n )\n\n for i in range(1, postnet_n_convolutions - 1):\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(\n postnet_embedding_dim,\n postnet_embedding_dim,\n kernel_size=postnet_kernel_size,\n stride=1,\n padding=int((postnet_kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n ),\n nn.BatchNorm1d(postnet_embedding_dim),\n )\n )\n\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(\n postnet_embedding_dim,\n n_mel_channels,\n kernel_size=postnet_kernel_size,\n stride=1,\n padding=int((postnet_kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"linear\",\n ),\n nn.BatchNorm1d(n_mel_channels),\n )\n )\n\n def forward(self, x):\n x = x.contiguous().transpose(1, 2)\n\n for i in range(len(self.convolutions) - 1):\n x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)\n x = F.dropout(self.convolutions[-1](x), 0.5, self.training)\n\n x = x.contiguous().transpose(1, 2)\n return x\n" ]
[ [ "numpy.power" ], [ "torch.nn.BatchNorm1d", "torch.nn.ModuleList", "torch.nn.Conv1d" ] ]
xxworkspace/Paddle
[ "74ad4b6a700795d5edce8dd49d6c2df6f15e8935", "74ad4b6a700795d5edce8dd49d6c2df6f15e8935" ]
[ "python/paddle/fluid/trainer_factory.py", "python/paddle/fluid/dataloader/worker.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Defination of TrainerFactory.\"\"\"\n\nimport threading\nimport time\nimport logging\nimport numpy as np\nfrom paddle.fluid.log_helper import get_logger\n\nlocal_logger = get_logger(\n __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')\n\nfrom .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer, PSGPUTrainer\nfrom .device_worker import Hogwild, DownpourSGD, Section, DownpourSGDOPT\nfrom .framework import Variable\nfrom multiprocessing import Process, Manager\n\n__all__ = [\"TrainerFactory\", \"FetchHandlerMonitor\"]\n\n\nclass TrainerFactory(object):\n \"\"\"\n Create trainer and device worker.\n If opt_info is not None, it will get configs from opt_info,\n otherwise create MultiTrainer and Hogwild.\n \"\"\"\n\n def __init__(self):\n pass\n\n def _create_trainer(self, opt_info=None):\n trainer = None\n device_worker = None\n if not opt_info:\n # default is MultiTrainer + Hogwild\n trainer = MultiTrainer()\n device_worker = Hogwild()\n trainer._set_device_worker(device_worker)\n else:\n trainer_class = opt_info.get(\"trainer\", \"MultiTrainer\")\n device_worker_class = opt_info.get(\"device_worker\", \"Hogwild\")\n trainer = globals()[trainer_class]()\n device_worker = globals()[device_worker_class]()\n\n # for debug tools\n if opt_info is not None:\n if opt_info.get(\"dump_slot\") is not None:\n trainer._set_dump_slot(opt_info[\"dump_slot\"])\n if opt_info.get(\"mpi_rank\") is not None:\n trainer._set_mpi_rank(opt_info[\"mpi_rank\"])\n if opt_info.get(\"mpi_size\") is not None:\n trainer._set_mpi_size(opt_info[\"mpi_size\"])\n if opt_info.get(\"dump_fields\") is not None and len(\n opt_info.get(\"dump_fields\")) != 0:\n trainer._set_dump_fields(opt_info[\"dump_fields\"])\n if opt_info.get(\"dump_fields_path\") is not None and len(\n opt_info.get(\"dump_fields_path\")) != 0:\n trainer._set_dump_fields_path(opt_info[\"dump_fields_path\"])\n if opt_info.get(\"dump_file_num\") is not None:\n trainer._set_dump_file_num(opt_info[\"dump_file_num\"])\n if opt_info.get(\"dump_converter\") is not None:\n trainer._set_dump_converter(opt_info[\"dump_converter\"])\n if opt_info.get(\"dump_param\") is not None and len(\n opt_info.get(\"dump_param\")) != 0:\n trainer._set_dump_param(opt_info[\"dump_param\"])\n if opt_info.get(\"worker_places\") is not None:\n trainer._set_worker_places(opt_info[\"worker_places\"])\n if opt_info.get(\"use_ps_gpu\") is not None:\n trainer._set_use_ps_gpu(opt_info[\"use_ps_gpu\"])\n if opt_info.get(\"enable_random_dump\") is not None:\n trainer._set_enable_random_dump(opt_info[\n \"enable_random_dump\"])\n if opt_info.get(\"dump_interval\") is not None:\n trainer._set_dump_interval(opt_info[\"dump_interval\"])\n if opt_info.get(\"random_with_lineid\") is not None:\n trainer._set_random_with_lineid(opt_info[\n \"random_with_lineid\"])\n\n if \"fleet_desc\" in opt_info:\n 
device_worker._set_fleet_desc(opt_info[\"fleet_desc\"])\n trainer._set_fleet_desc(opt_info[\"fleet_desc\"])\n if opt_info.get(\"use_cvm\") is not None:\n trainer._set_use_cvm(opt_info[\"use_cvm\"])\n if opt_info.get(\"no_cvm\") is not None:\n trainer._set_no_cvm(opt_info[\"no_cvm\"])\n if opt_info.get(\"scale_datanorm\") is not None:\n trainer._set_scale_datanorm(opt_info[\"scale_datanorm\"])\n if opt_info.get(\"adjust_ins_weight\") is not None:\n trainer._set_adjust_ins_weight(opt_info[\n \"adjust_ins_weight\"])\n if opt_info.get(\"copy_table\") is not None:\n trainer._set_copy_table_config(opt_info[\"copy_table\"])\n if opt_info.get(\"check_nan_var_names\") is not None:\n trainer._set_check_nan_var_names(opt_info[\n \"check_nan_var_names\"])\n if opt_info.get(\"loss_names\") is not None:\n trainer._set_loss_names(opt_info[\"loss_names\"])\n trainer._set_device_worker(device_worker)\n return trainer\n\n\nclass FetchHandlerMonitor(object):\n \"\"\"\n Defination of FetchHandlerMonitor class,\n it's for fetch handler.\n \"\"\"\n\n def __init__(self, scope, handler):\n self.fetch_instance = handler\n self.fetch_thread = threading.Thread(\n target=self.handler_launch_func, args=(scope, self.fetch_instance))\n self.running_lock = threading.Lock()\n self.running = False\n\n def handler_launch_func(self, scope, handler):\n fetch_instance = handler\n period_secs = fetch_instance.period_secs\n var_name_to_key = {}\n for key in fetch_instance.var_dict:\n if isinstance(fetch_instance.var_dict[key], Variable):\n var_name_to_key[fetch_instance.var_dict[key].name] = key\n else:\n local_logger.warning(\"the value of {} is not a Variable\".format(\n key))\n var_name_to_key[\"None.var\"] = key\n elapsed_secs = 0\n while True:\n self.running_lock.acquire()\n if self.running == False:\n break\n if elapsed_secs < period_secs:\n # TODO(guru4elephant): needs customized condition\n time.sleep(1)\n elapsed_secs += 1\n else:\n elapsed_secs = 0\n fetch_dict = {}\n for key in var_name_to_key:\n var = scope.find_var(key)\n fetch_dict[key] = var\n if var == None:\n local_logger.warning(\"{} value currently not available\".\n format(var_name_to_key[key]))\n res_dict = {}\n for key in fetch_dict:\n user_name = var_name_to_key[key]\n if fetch_dict[key] == None:\n res_dict[user_name] = None\n continue\n else:\n res_dict[user_name] = fetch_dict[key].get_tensor()\n\n lod = res_dict[user_name].lod()\n if len(lod) > 0:\n raise RuntimeError(\"Some of your fetched tensors \\\n hold LoD information. \\\n They can not be completely cast \\\n to Python ndarray. We can \\\n not return LoDTensor itself directly, \\\n please choose another targets\")\n if res_dict[user_name]._is_initialized():\n res_dict[user_name] = np.array(res_dict[user_name])\n else:\n res_dict[user_name] = None\n fetch_instance.handler(res_dict)\n self.running_lock.release()\n\n def start(self):\n \"\"\"\n start monitor,\n it will start a monitor thread.\n \"\"\"\n self.running_lock.acquire()\n self.running = True\n self.running_lock.release()\n self.fetch_thread.setDaemon(True)\n self.fetch_thread.start()\n\n def stop(self):\n self.running_lock.acquire()\n self.running = False\n self.running_lock.release()\n", "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport six\nimport sys\nimport paddle\nimport numpy as np\nimport traceback\nfrom collections import namedtuple\nfrom .. import core\nfrom .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher\nfrom ..multiprocess_utils import _cleanup_mmap, CleanupFuncRegistrar, MP_STATUS_CHECK_INTERVAL\nfrom ..framework import in_dygraph_mode\nfrom .flat import _flatten_batch\n\n# NOTE: queue has a different name in python2 and python3\nimport queue\n\n__all__ = ['get_worker_info']\n\n\nclass _IterableDatasetStopIteration(object):\n def __init__(self, worker_id):\n self.worker_id = worker_id\n\n\nclass _DatasetKind(object):\n MAP = 0\n ITER = 1\n\n @staticmethod\n def create_fetcher(kind, dataset, auto_collate_batch, collate_fn,\n drop_last):\n if kind == _DatasetKind.MAP:\n return _MapDatasetFetcher(dataset, auto_collate_batch, collate_fn,\n drop_last)\n elif kind == _DatasetKind.ITER:\n return _IterableDatasetFetcher(dataset, auto_collate_batch,\n collate_fn, drop_last)\n else:\n raise NotImplementedError(\"unknown Dataset kind {}\".format(kind))\n\n\nclass ParentWatchDog(object):\n def __init__(self):\n self._parent_pid = os.getppid()\n self._parent_alive = True\n\n def is_alive(self):\n if self._parent_alive:\n self._parent_alive = os.getppid() == self._parent_pid\n return self._parent_alive\n\n\n# worker information for each workers, used for splitting data copy\n# for IteratorDataset in worker processes.\n_worker_info = None\n\n\ndef get_worker_info():\n \"\"\"\n Get DataLoader worker process information function, this function is\n used to split data copy in worker process for IterableDataset\n (see :code:`paddle.io.IterableDataset`), worker information contains\n following fields:\n\n :attr:`num_workers`: total worker process number, see `paddle.io.DataLoader`\n\n :attr:`id`: the worker processs id, count from 0 to :attr:`num_workers - 1`\n\n :attr:`dataset`: the dataset object in this worker process\n\n Returns:\n WorkerInfo: an instance of WorkerInfo which contains fields above.\n\n .. note::\n For more usage and examples, please see :code:`paddle.io.IterableDataset`\n\n Example:\n\n .. 
code-block:: python\n\n import math\n import paddle\n import numpy as np\n from paddle.io import IterableDataset, DataLoader, get_worker_info\n\n class SplitedIterableDataset(IterableDataset):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\n def __iter__(self):\n worker_info = get_worker_info()\n if worker_info is None:\n iter_start = self.start\n iter_end = self.end\n else:\n per_worker = int(\n math.ceil((self.end - self.start) / float(\n worker_info.num_workers)))\n worker_id = worker_info.id\n iter_start = self.start + worker_id * per_worker\n iter_end = min(iter_start + per_worker, self.end)\n\n for i in range(iter_start, iter_end):\n yield np.array([i])\n\n place = paddle.CPUPlace()\n dataset = SplitedIterableDataset(start=2, end=9)\n dataloader = DataLoader(\n dataset,\n places=place,\n num_workers=2,\n batch_size=1,\n drop_last=True)\n\n for data in dataloader:\n print(data)\n # outputs: [2, 5, 3, 6, 4, 7]\n\n \"\"\"\n return _worker_info\n\n\nclass WorkerInfo(object):\n __initialized = False\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n self.__initialized = True\n\n def __setattr__(self, key, val):\n if self.__initialized:\n raise RuntimeError(\"Cannot assign attributes to {} objects\".format(\n self.__class__.__name__))\n return super(WorkerInfo, self).__setattr__(key, val)\n\n\nclass _WorkerException(object):\n def __init__(self, worker_id, exc_info=None):\n self.worker_id = worker_id\n exc_info = exc_info or sys.exc_info()\n self.exc_type = exc_info[0]\n self.exc_msg = \"\".join(traceback.format_exception(*exc_info))\n\n def reraise(self):\n msg = \"DataLoader worker({}) caught {} with message:\\n{}\".format(\n self.worker_id, self.exc_type.__name__, self.exc_msg)\n if getattr(self.exc_type, \"message\", None):\n raise self.exc_type(message=msg)\n raise self.exc_type(msg)\n\n\n# The function `_generate_states` is adapted from `numpy.random.SeedSequence`\n# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx\n# Here is the copyright:\n\n# SeedSequence is derived from Melissa E. O'Neill's C++11 `std::seed_seq`\n# implementation, as it has a lot of nice properties that we want.\n# https://gist.github.com/imneme/540829265469e673d045\n# http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html\n\n# The MIT License (MIT)\n\n# Copyright (c) 2015 Melissa E. O'Neill\n# Copyright (c) 2019 NumPy Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nINIT_A = 0x43b0d7e5\nMULT_A = 0x931e8875\nINIT_B = 0x8b51f9dd\nMULT_B = 0x58f38ded\nMIX_MULT_L = 0xca01f9dd\nMIX_MULT_R = 0x4973f715\nXSHIFT = np.dtype(np.uint32).itemsize * 8 // 2\nMASK32 = 0xFFFFFFFF\n\n\ndef _generate_states(base_seed=0, worker_id=0):\n # init hash constant\n hash_const_A = INIT_A\n hash_const_B = INIT_B\n\n def hash(value):\n nonlocal hash_const_A\n value = (value ^ hash_const_A) & MASK32\n hash_const_A = (hash_const_A * MULT_A) & MASK32\n value = (value * hash_const_A) & MASK32\n value = (value ^ (value >> XSHIFT)) & MASK32\n return value\n\n def mix(x, y):\n result_x = (MIX_MULT_L * x) & MASK32\n result_y = (MIX_MULT_R * y) & MASK32\n result = (result_x - result_y) & MASK32\n result = (result ^ (result >> XSHIFT)) & MASK32\n return result\n\n # init entropys with based_seed and worker_id and calculate pool\n entropys = [worker_id, base_seed & MASK32, base_seed >> 32, 0]\n pool = [hash(entropy) for entropy in entropys]\n\n # mix all bits together\n for i in range(len(pool)):\n for j in range(len(pool)):\n if i != j:\n pool[j] = mix(pool[j], hash(pool[i]))\n\n states = []\n for p in pool:\n state = (p ^ hash_const_B) & MASK32\n hash_const_B = (hash_const_B * MULT_B) & MASK32\n state = (state * hash_const_B) & MASK32\n state = (state ^ (state >> XSHIFT)) & MASK32\n states.append(state)\n\n return states\n\n\ndef _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event,\n auto_collate_batch, collate_fn, init_fn, worker_id,\n num_workers, use_shared_memory):\n try:\n # NOTE: [ mmap files clear ] When the child process exits unexpectedly,\n # some shared memory objects may have been applied for but have not yet\n # been put into the inter-process Queue. 
This part of the object needs\n # to be cleaned up when the process ends.\n CleanupFuncRegistrar.register(_cleanup_mmap)\n\n # set signal handler\n core._set_process_signal_handler()\n\n # set different numpy seed for each worker\n try:\n import numpy as np\n import time\n except ImportError:\n pass\n else:\n np.random.seed(_generate_states(int(time.time()), worker_id))\n\n global _worker_info\n _worker_info = WorkerInfo(\n id=worker_id, num_workers=num_workers, dataset=dataset)\n\n init_exception = None\n try:\n if init_fn is not None:\n init_fn(worker_id)\n fetcher = _DatasetKind.create_fetcher(\n dataset_kind, dataset, auto_collate_batch, collate_fn, True)\n except:\n init_exception = _WorkerException(worker_id)\n\n iterator_drained = False\n parent_watch_dog = ParentWatchDog()\n\n while parent_watch_dog.is_alive():\n try:\n data = indices_queue.get(MP_STATUS_CHECK_INTERVAL)\n except queue.Empty:\n continue\n\n # None as poison piil, so worker event should be set\n if data is None:\n assert done_event.is_set() or iterator_drained, \\\n \"get None when worker done_event set\"\n break\n # If worker done event is set but get still get data in\n # indices_queue, remaining data should be get and skipped.\n if done_event.is_set() or iterator_drained:\n continue\n\n idx, indices = data\n try:\n if init_exception is not None:\n batch = init_exception\n init_exception = None\n else:\n # NOTE: GPU tensor operation is not supported in sub-process\n # but default device is GPU in paddle-gpu version, which\n # may copy CPU tensor to GPU even if users want to use\n # CPU tensor operation, so we add CPUPlace guard here\n # to make sure tensor will be operated only on CPU\n with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()):\n batch = fetcher.fetch(indices)\n except Exception as e:\n if isinstance(\n e, StopIteration) and dataset_kind == _DatasetKind.ITER:\n out_queue.put(_IterableDatasetStopIteration(worker_id))\n iterator_drained = True\n else:\n out_queue.put((idx, _WorkerException(worker_id), None))\n else:\n if isinstance(batch, _WorkerException):\n out_queue.put((idx, batch, None))\n batch, structure = _flatten_batch(batch)\n if use_shared_memory:\n tensor_list = [\n core._array_to_share_memory_tensor(b)\n if isinstance(b, np.ndarray) else b._share_memory()\n for b in batch\n ]\n out_queue.put((idx, tensor_list, structure))\n core._remove_tensor_list_mmap_fds(tensor_list)\n else:\n out_queue.put((idx, batch, structure))\n except KeyboardInterrupt:\n # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process\n pass\n except:\n six.reraise(*sys.exc_info())\n finally:\n if use_shared_memory:\n _cleanup_mmap()\n" ]
[ [ "numpy.array" ], [ "numpy.dtype" ] ]
Viktour19/adversarial-robustness-toolbox
[ "96ba28170d0de1b5db6a16330a312248cc3b8972" ]
[ "art/attacks/elastic_net.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport numpy as np\nimport six\n\nfrom art import NUMPY_DTYPE\nfrom art.attacks.attack import Attack\nfrom art.utils import compute_success, get_labels_np_array\n\nlogger = logging.getLogger(__name__)\n\n\nclass ElasticNet(Attack):\n \"\"\"\n The elastic net attack of Pin-Yu Chen et al. (2018). Paper link: https://arxiv.org/abs/1709.04114.\n \"\"\"\n attack_params = Attack.attack_params + ['confidence', 'targeted', 'learning_rate', 'max_iter', 'beta',\n 'binary_search_steps', 'initial_const', 'batch_size', 'decision_rule']\n\n def __init__(self, classifier, confidence=0.0, targeted=True, learning_rate=1e-2, binary_search_steps=9,\n max_iter=10000, beta=1e-3, initial_const=1e-3, batch_size=128, decision_rule='EN'):\n \"\"\"\n Create an ElasticNet attack instance.\n\n :param classifier: A trained model.\n :type classifier: :class:`.Classifier`\n :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther\n away, from the original input, but classified with higher confidence as the target class.\n :type confidence: `float`\n :param targeted: Should the attack target one specific class.\n :type targeted: `bool`\n :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better\n results but are slower to converge.\n :type learning_rate: `float`\n :param binary_search_steps: Number of times to adjust constant with binary search (positive value).\n :type binary_search_steps: `int`\n :param max_iter: The maximum number of iterations.\n :type max_iter: `int`\n :param beta: Hyperparameter trading off L2 minimization for L1 minimization.\n :type beta: `float`\n :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance\n and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in\n Carlini and Wagner (2016).\n :type initial_const: `float`\n :param batch_size: Internal size of batches on which adversarial samples are generated.\n :type batch_size: `int`\n :param decision_rule: Decision rule. 
'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.\n :type decision_rule: `string`\n \"\"\"\n super(ElasticNet, self).__init__(classifier)\n\n kwargs = {'confidence': confidence,\n 'targeted': targeted,\n 'learning_rate': learning_rate,\n 'binary_search_steps': binary_search_steps,\n 'max_iter': max_iter,\n 'beta': beta,\n 'initial_const': initial_const,\n 'batch_size': batch_size,\n 'decision_rule': decision_rule\n }\n assert self.set_params(**kwargs)\n\n def _loss(self, x, x_adv):\n \"\"\"\n Compute the loss function values.\n\n :param x: An array with the original input.\n :type x: `np.ndarray`\n :param x_adv: An array with the adversarial input.\n :type x_adv: `np.ndarray`\n :return: A tuple holding the current logits, l1 distance, l2 distance and elastic net loss.\n :rtype: `(np.ndarray, float, float, float)`\n \"\"\"\n l1dist = np.sum(np.abs(x - x_adv).reshape(x.shape[0], -1), axis=1)\n l2dist = np.sum(np.square(x - x_adv).reshape(x.shape[0], -1), axis=1)\n endist = self.beta * l1dist + l2dist\n z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)\n\n return np.argmax(z, axis=1), l1dist, l2dist, endist\n\n def _gradient_of_loss(self, target, x, x_adv, c):\n \"\"\"\n Compute the gradient of the loss function.\n\n :param target: An array with the target class (one-hot encoded).\n :type target: `np.ndarray`\n :param x: An array with the original input.\n :type x: `np.ndarray`\n :param x_adv: An array with the adversarial input.\n :type x_adv: `np.ndarray`\n :param c: Weight of the loss term aiming for classification as target.\n :type c: `float`\n :return: An array with the gradient of the loss function.\n :type target: `np.ndarray`\n \"\"\"\n # Compute the current logits\n z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)\n\n if self.targeted:\n i_sub = np.argmax(target, axis=1)\n i_add = np.argmax(z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1)\n else:\n i_add = np.argmax(target, axis=1)\n i_sub = np.argmax(z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1)\n\n loss_gradient = self.classifier.class_gradient(x_adv, label=i_add, logits=True)\n loss_gradient -= self.classifier.class_gradient(x_adv, label=i_sub, logits=True)\n loss_gradient = loss_gradient.reshape(x.shape)\n\n c_mult = c\n for _ in range(len(x.shape)-1):\n c_mult = c_mult[:, np.newaxis]\n\n loss_gradient *= c_mult\n loss_gradient += 2 * (x_adv - x)\n\n return loss_gradient\n\n def _decay_learning_rate(self, global_step, end_learning_rate, decay_steps):\n \"\"\"\n Applies a square-root decay to the learning rate.\n\n :param global_step: Global step to use for the decay computation.\n :type global_step: `int`\n :param end_learning_rate: The minimal end learning rate.\n :type end_learning_rate: `float`\n :param decay_steps: Number of decayed steps.\n :type decay_steps: `int`\n :return: The decayed learning rate\n :rtype: `float`\n \"\"\"\n decayed_learning_rate = (self.learning_rate - end_learning_rate) * (1 - global_step / decay_steps)**2 + \\\n end_learning_rate\n\n return decayed_learning_rate\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs to be attacked.\n :type x: `np.ndarray`\n :param y: If `self.targeted` is true, then `y` represents the target labels. 
Otherwise, the targets are the\n original class labels.\n :type y: `np.ndarray`\n :return: An array holding the adversarial examples.\n :rtype: `np.ndarray`\n \"\"\"\n x_adv = x.astype(NUMPY_DTYPE)\n (clip_min, clip_max) = self.classifier.clip_values\n\n # Parse and save attack-specific parameters\n params_cpy = dict(kwargs)\n y = params_cpy.pop(str('y'), None)\n self.set_params(**params_cpy)\n\n # Assert that, if attack is targeted, y is provided:\n if self.targeted and y is None:\n raise ValueError('Target labels `y` need to be provided for a targeted attack.')\n\n # No labels provided, use model prediction as correct class\n if y is None:\n y = get_labels_np_array(self.classifier.predict(x, logits=False))\n\n # Compute adversarial examples with implicit batching\n nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))\n for batch_id in range(nb_batches):\n logger.debug('Processing batch %i out of %i', batch_id, nb_batches)\n\n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size\n x_batch = x_adv[batch_index_1:batch_index_2]\n y_batch = y[batch_index_1:batch_index_2]\n x_adv[batch_index_1:batch_index_2] = self._generate_batch(x_batch, y_batch)\n\n # Apply clip\n x_adv = np.clip(x_adv, clip_min, clip_max)\n\n # Compute success rate of the EAD attack\n logger.info('Success rate of EAD attack: %.2f%%',\n 100 * compute_success(self.classifier, x, y, x_adv, self.targeted))\n\n return x_adv\n\n def _generate_batch(self, x_batch, y_batch):\n \"\"\"\n Run the attack on a batch of images and labels.\n\n :param x_batch: A batch of original examples.\n :type x_batch: `np.ndarray`\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :return: A batch of adversarial examples.\n :rtype: `np.ndarray`\n \"\"\"\n # Initialize binary search:\n c = self.initial_const * np.ones(x_batch.shape[0])\n c_lower_bound = np.zeros(x_batch.shape[0])\n c_upper_bound = 10e10 * np.ones(x_batch.shape[0])\n\n # Initialize best distortions and best attacks globally\n o_best_dist = np.inf * np.ones(x_batch.shape[0])\n o_best_attack = x_batch.copy()\n\n # Start with a binary search\n for bss in range(self.binary_search_steps):\n logger.debug('Binary search step %i out of %i (c_mean==%f)', bss, self.binary_search_steps, np.mean(c))\n\n # Run with 1 specific binary search step\n best_dist, best_label, best_attack = self._generate_bss(x_batch, y_batch, c)\n\n # Update best results so far\n o_best_attack[best_dist < o_best_dist] = best_attack[best_dist < o_best_dist]\n o_best_dist[best_dist < o_best_dist] = best_dist[best_dist < o_best_dist]\n\n # Adjust the constant as needed\n c, c_lower_bound, c_upper_bound = self._update_const(y_batch, best_label, c, c_lower_bound, c_upper_bound)\n\n return o_best_attack\n\n def _update_const(self, y_batch, best_label, c, c_lower_bound, c_upper_bound):\n \"\"\"\n Update constants.\n\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :param best_label: A batch of best labels.\n :type best_label: `np.ndarray`\n :param c: A batch of constants.\n :type c: `np.ndarray`\n :param c_lower_bound: A batch of lower bound constants.\n :type c_lower_bound: `np.ndarray`\n :param c_upper_bound: A batch of upper bound constants.\n :type c_upper_bound: `np.ndarray`\n :return: A tuple of three batches of updated constants and lower/upper bounds.\n :rtype: `tuple`\n \"\"\"\n def compare(o1, o2):\n if self.targeted:\n return o1 == o2\n else:\n return o1 != o2\n\n for i in range(len(c)):\n if 
compare(best_label[i], np.argmax(y_batch[i])) and best_label[i] != -np.inf:\n # Successful attack\n c_upper_bound[i] = min(c_upper_bound[i], c[i])\n if c_upper_bound[i] < 1e9:\n c[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0\n\n else:\n # Failure attack\n c_lower_bound[i] = max(c_lower_bound[i], c[i])\n if c_upper_bound[i] < 1e9:\n c[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0\n else:\n c[i] *= 10\n\n return c, c_lower_bound, c_upper_bound\n\n def _generate_bss(self, x_batch, y_batch, c):\n \"\"\"\n Generate adversarial examples for a batch of inputs with a specific batch of constants.\n\n :param x_batch: A batch of original examples.\n :type x_batch: `np.ndarray`\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :param c: A batch of constants.\n :type c: `np.ndarray`\n :return: A tuple of best elastic distances, best labels, best attacks\n :rtype: `tuple`\n \"\"\"\n def compare(o1, o2):\n if self.targeted:\n return o1 == o2\n else:\n return o1 != o2\n\n # Initialize best distortions and best changed labels and best attacks\n best_dist = np.inf * np.ones(x_batch.shape[0])\n best_label = [-np.inf] * x_batch.shape[0]\n best_attack = x_batch.copy()\n\n # Implement the algorithm 1 in the EAD paper\n x_adv = x_batch.copy()\n y_adv = x_batch.copy()\n for it in range(self.max_iter):\n logger.debug('Iteration step %i out of %i', it, self.max_iter)\n\n # Update learning rate\n lr = self._decay_learning_rate(global_step=it, end_learning_rate=0, decay_steps=self.max_iter)\n\n # Compute adversarial examples\n grad = self._gradient_of_loss(target=y_batch, x=x_batch, x_adv=y_adv, c=c)\n x_adv_next = self._shrinkage_threshold(y_adv - lr * grad, x_batch, self.beta)\n y_adv = x_adv_next + (1.0 * it / (it + 3)) * (x_adv_next - x_adv)\n x_adv = x_adv_next\n\n # Adjust the best result\n (z, l1dist, l2dist, endist) = self._loss(x=x_batch, x_adv=x_adv)\n\n if self.decision_rule == 'EN':\n zip_set = zip(endist, z)\n elif self.decision_rule == 'L1':\n zip_set = zip(l1dist, z)\n elif self.decision_rule == 'L2':\n zip_set = zip(l2dist, z)\n else:\n raise ValueError(\"The decision rule only supports `EN`, `L1`, `L2`.\")\n\n for j, (d, s) in enumerate(zip_set):\n if d < best_dist[j] and compare(s, np.argmax(y_batch[j])):\n best_dist[j] = d\n best_attack[j] = x_adv[j]\n best_label[j] = s\n\n return best_dist, best_label, best_attack\n\n @staticmethod\n def _shrinkage_threshold(z, x, beta):\n \"\"\"\n Implement the element-wise projected shrinkage-threshold function.\n\n :param z: a batch of examples.\n :type z: `np.ndarray`\n :param x: a batch of original examples.\n :type x: `np.ndarray`\n :param beta: the shrink parameter.\n :type beta: `float`\n :return: a shrinked version of z.\n :rtype: `np.ndarray`\n \"\"\"\n cond1 = (z - x) > beta\n cond2 = np.abs(z - x) <= beta\n cond3 = (z - x) < -beta\n\n upper = np.minimum(z - beta, 1.0)\n lower = np.maximum(z + beta, 0.0)\n\n result = cond1 * upper + cond2 * x + cond3 * lower\n\n return result\n\n def set_params(self, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.\n\n :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther\n away, from the original input, but classified with higher confidence as the target class.\n :type confidence: `float`\n :param targeted: Should the attack target one specific class.\n :type targeted: `bool`\n :param learning_rate: The initial learning rate for the attack algorithm. 
Smaller values produce better\n results but are slower to converge.\n :type learning_rate: `float`\n :param binary_search_steps: Number of times to adjust constant with binary search (positive value).\n :type binary_search_steps: `int`\n :param max_iter: The maximum number of iterations.\n :type max_iter: `int`\n :param beta: Hyperparameter trading off L2 minimization for L1 minimization.\n :type beta: `float`\n :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance\n and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in\n Carlini and Wagner (2016).\n :type initial_const: `float`\n :param batch_size: Internal size of batches on which adversarial samples are generated.\n :type batch_size: `int`\n :param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.\n :type decision_rule: `string`\n \"\"\"\n # Save attack-specific parameters\n super(ElasticNet, self).set_params(**kwargs)\n\n if type(self.binary_search_steps) is not int or self.binary_search_steps < 0:\n raise ValueError(\"The number of binary search steps must be a non-negative integer.\")\n\n if type(self.max_iter) is not int or self.max_iter < 0:\n raise ValueError(\"The number of iterations must be a non-negative integer.\")\n\n if type(self.batch_size) is not int or self.batch_size < 1:\n raise ValueError(\"The batch size must be an integer greater than zero.\")\n\n if not isinstance(self.decision_rule, six.string_types) or self.decision_rule not in ['EN', 'L1', 'L2']:\n raise ValueError(\"The decision rule only supports `EN`, `L1`, `L2`.\")\n\n return True\n" ]
[ [ "numpy.square", "numpy.array", "numpy.zeros", "numpy.minimum", "numpy.ones", "numpy.min", "numpy.mean", "numpy.argmax", "numpy.abs", "numpy.clip", "numpy.maximum" ] ]
kothiga/N-Smallest
[ "6e46fe1e4872449c1c918bae41dfa5e17008cda0" ]
[ "main.py" ]
[ "import numpy as np\n\nimport os\nimport time\n\nimport argparse\nimport PruneAndSearch as algs\n\ndef get_args():\n parser = argparse.ArgumentParser (\n prog='PruneAndSearch', \n description='Implementation of the Prune and Search Algorithm. ',\n usage='python main.py { --rand RAND | --file FILE | --list LIST | --test [--trial TRIAL] [--vals VALS] [--verb] } [--seed SEED]'\n )\n parser.add_argument('-n', '--small', default=None, type=int, help='The N-th smallest element to find in the values. (default: {})'.format('MEDIAN'))\n parser.add_argument('-r', '--rand', default=None, type=int, help='Generate N random numbers in range 1 - 10,000. (default: {})'.format('DISABLED'))\n parser.add_argument('-f', '--file', default=None, help='Read in a list from a text file. (default: {})'.format('DISABLED'))\n parser.add_argument('-l', '--list', default=None, type=int, nargs='+', help='Provide input as a list from the command line. (default: {})'.format('DISABLED'))\n parser.add_argument('-x', '--seed', default=123, type=int, help='Seed for Numpy RNG. (default: {})'.format(123))\n parser.add_argument('-t', '--test', default=False, action='store_true', help='Perform a timed test, random trials T times. (default: {})'.format('DISABLED'))\n parser.add_argument('-T', '--trial', default=1000, type=int, help='Number of timed trials to conduct. (default: {})'.format(1000))\n parser.add_argument('-v', '--vals', default=100, type=int, help='Number of random values to during testing. (default: {})'.format(100))\n parser.add_argument('-V', '--verb', default=False, action='store_true', help='Verbose output. (default: {})'.format('DISABLED'))\n args = parser.parse_args()\n\n count = 0\n if args.rand != None: count += 1\n if args.file != None: count += 1\n if args.list != None: count += 1\n if args.test: count += 1\n\n if count > 1: print(\"\\n[ERROR] Too many arguments provided!!\\n\")\n if count == 0: print(\"\\n[ERROR] No arguments provided!!\\n\")\n if count != 1:\n parser.print_help()\n print(\"\\n Please provide the program with an argument using one of the following:\\n\")\n print(\"\\t python main.py --rand 20\")\n print(\"\\t python main.py --file a.data\")\n print(\"\\t python main.py --list 1 2 3 4 5 6 7 8\")\n print(\"\\t python main.py --test --trial 300 --vals 100 --verb --seed 123\")\n print(\" \")\n exit()\n \n return args\n\n\n\ndef get_list(args):\n\n # Simple getter function to get some list\n # based on the arguments passed in.\n\n if args.rand != None:\n values = np.random.randint(1, 10000, size=args.rand)\n print(\"Generated {} random values between 1 - 10,000.\".format(args.rand))\n return values\n \n\n if args.file != None:\n if not os.path.exists(args.file):\n print(\"[ERROR] File ``{}`` does not exist!!\".format(args.file))\n print(\"\\t Please provide the path to a file.\")\n exit()\n \n values = np.loadtxt(args.file, dtype=np.int32)\n return values\n\n\n if args.list != None:\n values = np.asarray(args.list, dtype=np.int32)\n return values\n\n\n\ndef test_algorithm(seed, numTrials=1000, numVals=100, maxVal=10000, verbose=True):\n\n # Run a series of trials on both algorithms.\n\n numVals = int(numVals) # 1e6\n maxVal = int(maxVal) # 1e10\n\n if verbose:\n print(\"\\n\")\n print(\" -- Prune and Search Algorithm -- \")\n print(\" ================================ \")\n print(\" Random Numbers Seed = {} \".format(seed) )\n print(\" Number of Trials = {} \".format(numTrials))\n print(\" Number of Values in List = {} \".format(numVals) )\n print(\" Maximum Value in List = {} 
\".format(maxVal) )\n print(\"\\n\")\n \n\n # Seed The first trial for consistency.\n np.random.seed( seed )\n\n # Keep a buffer of the returned finds for later comparison.\n SortAndSearchAnsBuffer = []\n SortAndSearchTimeBuffer = []\n\n # Begin the trials!\n print(\"Beginning {} Trial on {} elements for Sort And Search . . . \".format(numTrials, numVals), end='', flush=True)\n for _ in range(numTrials):\n randomList = np.random.randint(maxVal, size=numVals)\n findVal = np.random.randint(1, numVals+1)\n \n startTime = time.time()\n ansVal = algs.SortAndSearch(randomList, findVal)\n endTime = time.time()\n \n SortAndSearchAnsBuffer.append(ansVal)\n SortAndSearchTimeBuffer.append( endTime - startTime )\n \n\n print(\"\\u0394 : {:.4f}, \\u03bc : {:.6f} \\u00B1 {:.6f} \".format( \n np.sum( SortAndSearchTimeBuffer ), \n np.mean( SortAndSearchTimeBuffer ), \n np.std( SortAndSearchTimeBuffer ) \n ))\n\n \n\n # Seed The first trial for consistency.\n np.random.seed( seed )\n\n # Keep a buffer of the returned finds for later comparison.\n PruneAndSearchAnsBuffer = []\n PruneAndSearchTimeBuffer = []\n\n # Begin the trials!\n print(\"Beginning {} Trial on {} elements for Prune And Search . . . \".format(numTrials, numVals), end='', flush=True)\n for _ in range(numTrials):\n randomList = np.random.randint(maxVal, size=numVals)\n findVal = np.random.randint(1, numVals+1)\n\n startTime = time.time()\n ansVal = algs.PruneAndSearch(randomList, findVal)\n endTime = time.time()\n\n PruneAndSearchAnsBuffer.append(ansVal)\n PruneAndSearchTimeBuffer.append( endTime - startTime )\n \n\n print(\"\\u0394 : {:.4f}, \\u03bc : {:.6f} \\u00B1 {:.6f} \".format( \n np.sum( PruneAndSearchTimeBuffer ), \n np.mean( PruneAndSearchTimeBuffer ), \n np.std( PruneAndSearchTimeBuffer ) \n ))\n \n #for a,b in zip(SortAndSearchAnsBuffer, PruneAndSearchAnsBuffer):\n # print(a, b, \" \" if a == b else \"\\t!!X!!\")\n print(\"\\nDid the Algorithms find the same solutions? ==> {}\\n\".format(PruneAndSearchAnsBuffer == SortAndSearchAnsBuffer))\n\n return\n \n\n\ndef main():\n\n # Fetch Arguments.\n args = get_args()\n\n # Seed the RNG.\n np.random.seed(args.seed)\n\n # Perform a timed trial and return.\n if args.test:\n test_algorithm(args.seed, numTrials=args.trial, numVals=args.vals, verbose=args.verb)\n return\n\n # From the args get the list.\n values = get_list(args)\n\n # Sent the n-value to find, median if small was not set.\n findVal = args.small if args.small != None else len(values) // 2\n \n\n print(\"\\n\")\n print(\" -- Prune and Search Algorithm -- \")\n print(\" ================================ \")\n print(\" Find The {}-Smallest Value \".format(findVal))\n print(\" In The List = \")\n elPerRow = 5\n for idx in range(0, len(values), elPerRow):\n print(\" \", *values[ idx : idx+elPerRow ])\n print(\"\\n\")\n \n # Naive solution in O( n log n ).\n print(\"Beginning Sort And Search . . . \", end='', flush=True)\n startTime = time.time()\n ansVal_A = algs.SortAndSearch(values, findVal)\n endTime = time.time()\n print(\"\\u0394 : {:.6f}\".format( endTime - startTime ))\n\n print(\"Beginning Prune And Search . . . \", end='', flush=True)\n startTime = time.time()\n ansVal_B = algs.PruneAndSearch(values, findVal)\n endTime = time.time()\n print(\"\\u0394 : {:.6f}\".format( endTime - startTime ))\n \n print(\"\\nDid the Algorithms find the same solutions? ==> {}\\n\".format(ansVal_A == ansVal_B))\n print(\"The {}-Smallest Value is {}\".format(findVal, ansVal_A))\n\n return \n \n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.asarray", "numpy.random.seed", "numpy.sum", "numpy.mean", "numpy.std", "numpy.loadtxt", "numpy.random.randint" ] ]
4uiiurz1/kaggle-tgs-salt-identification-challenge
[ "aad93eeaac0f04d80428e098d582008205d1e99c" ]
[ "losses.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge\n\n\nclass SoftDiceLoss(nn.Module):\n def __init__(self):\n super(SoftDiceLoss, self).__init__()\n\n def forward(self, input, target):\n smooth = 1e-5\n input = F.sigmoid(input)\n num = target.size(0)\n input = input.view(num, -1)\n target = target.view(num, -1)\n intersection = (input * target)\n score = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)\n score = 1 - score.sum() / num\n return score\n\n\nclass BCEDiceLoss(nn.Module):\n def __init__(self):\n super(BCEDiceLoss, self).__init__()\n\n def forward(self, input, target):\n bce = F.binary_cross_entropy_with_logits(input, target)\n smooth = 1e-5\n input = F.sigmoid(input)\n num = target.size(0)\n input = input.view(num, -1)\n target = target.view(num, -1)\n intersection = (input * target)\n dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)\n dice = 1 - dice.sum() / num\n return 0.5 * bce + 0.5 * dice\n\n\nclass LovaszHingeLoss(nn.Module):\n def __init__(self):\n super(LovaszHingeLoss, self).__init__()\n\n def forward(self, input, target):\n input = input.squeeze(1)\n target = target.squeeze(1)\n loss = lovasz_hinge(input, target, per_image=True)\n\n return loss\n\n\nclass DSVLovaszHingeLoss(nn.Module):\n def __init__(self):\n super(DSVLovaszHingeLoss, self).__init__()\n\n def forward(self, input, target):\n for i in range(target.shape[0]):\n if not torch.sum(target[i]).data.cpu().numpy() > 1:\n target[i] = -1\n\n input = input.squeeze(1)\n target = target.squeeze(1)\n loss = lovasz_hinge(input, target, per_image=True, ignore=-1)\n\n return loss\n" ]
[ [ "torch.nn.functional.binary_cross_entropy_with_logits", "torch.nn.functional.sigmoid", "torch.sum" ] ]
HiroshiKERA/monomial-agnostic-vanishing-ideal
[ "ddfb53ded0ee87f129ec029603e8245565f653d2" ]
[ "mavi/jax/util/plot.py" ]
[ "import jax.numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot(vi, X, \n target='vanishing', \n n=1000, scale=1.5, x_max=1.0, y_max=1.0,\n z_func=lambda x_, y_: 0.0,\n show=False, splitshow=False):\n\n nvars = X.shape[-1]\n if nvars == 2:\n _plot2d(vi, X, target=target, \n n=n, scale=scale, x_max=x_max, y_max=y_max,\n show=show, splitshow=splitshow)\n elif nvars == 3:\n _plot3d(vi, X, z_func, target=target, \n n=n, scale=scale, x_max=x_max, y_max=y_max,\n show=show, splitshow=splitshow)\n else: \n print(f'Cannot plot {nvars}-variate polynomials')\n\ndef _plot2d(vi, X, target='vanishing', n=1000, scale=1.5, x_max=1.0, y_max=1.0, show=False, splitshow=False):\n\n ## set plot range\n m = np.mean(X, axis=0)\n x_max = y_max = np.max(np.abs(X))\n # x = np.arange(-scale*x_max, scale*x_max, resolution)\n # y = np.arange(-scale*y_max, scale*y_max, resolution)\n x = np.linspace(-scale*x_max, scale*x_max, 50)\n y = np.linspace(-scale*y_max, scale*y_max, 50)\n Z1, Z2 = np.meshgrid(x, y)\n\n ## set plot setting\n npolys = 0\n if target == 'vanishing':\n # npolys = sum([Gt.shape[-1] for Gt in vi.basis.vanishings()])\n npolys = sum([Bt.n_vanishings() for Bt in vi.basis])\n # npolys = sum([len(Gt) for Gt in vi.basis.vanishings()])\n elif target == 'nonvanishing':\n npolys = sum([Bt.n_nonvanishings() for Bt in vi.basis])\n\n colors = plt.cm.Dark2(np.linspace(0,1,8))\n linestyles = ['solid','dashed','dashdot', 'dotted']\n nfigs = min(npolys, n)\n\n for i in range(nfigs):\n f = lambda x_, y_: vi.evaluate(np.array([[x_,y_]]), target=target)[0,i]\n f = np.vectorize(f)\n plt.contour(Z1,Z2,f(Z1, Z2), levels=[0], colors=[colors[i%len(colors)]], linewidths=[1.], linestyles=[linestyles[i%4]])\n\n if splitshow:\n plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n if not splitshow:\n plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)\n plt.gca().set_aspect('equal', adjustable='box') \n# plt.savefig('graph_Z.pdf') \n \n if not splitshow and show: \n plt.show()\n\n\ndef _plot3d(vi, X, z_func, target='vanishing', n=1000, scale=1.5, x_max=1.0, y_max=1.0, show=False, splitshow=False):\n\n ## set plot range\n m = np.mean(X, axis=0)\n x_max = y_max = np.max(np.abs(X))\n x = np.linspace(-scale*x_max, scale*x_max, 50)\n y = np.linspace(-scale*y_max, scale*y_max, 50)\n Z1, Z2 = np.meshgrid(x, y)\n\n ## set plot setting\n npolys = 0\n if target == 'vanishing':\n npolys = sum([np.asarray(Gt).shape[-1] for Gt in vi.basis.vanishings()])\n # npolys = sum([len(Gt) for Gt in vi.basis.vanishings()])\n elif target == 'nonvanishing':\n npolys = sum([np.asarray(Ft).shape[-1] for Ft in vi.basis.nonvanishings()])\n else:\n print('unknown target: %s' % target)\n\n colors = plt.cm.Dark2(np.linspace(0,1,8))\n linestyles = ['solid','dashed','dashdot', 'dotted']\n nfigs = min(npolys, n)\n\n for i in range(nfigs):\n f = lambda x_, y_: vi.evaluate(np.array([[x_,y_, z_func(x_,y_)]]), target=target)[0,i]\n f = np.vectorize(f)\n plt.contour(Z1,Z2,f(Z1, Z2), levels=[0], colors=[colors[i%len(colors)]], linewidths=[1.], linestyles=[linestyles[i%4]])\n\n if splitshow:\n plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n if not splitshow:\n plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)\n plt.gca().set_aspect('equal', adjustable='box') \n# plt.savefig('graph_Z.pdf') \n \n if not splitshow and show: \n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.gca", "matplotlib.pyplot.plot" ] ]
coresresearch/p2d_li_ion_battery
[ "7ea1a2332eb885bea65e47e82ea231f80d28ca18" ]
[ "li_ion_battery_p2d_functions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 6 11:37:06 2018\n\n@author: dkorff\n\"\"\"\n\nimport numpy as np\nimport cantera as ct\nfrom assimulo.problem import Implicit_Problem\n\nfrom li_ion_battery_p2d_init import anode as an\nfrom li_ion_battery_p2d_init import cathode as cat\nfrom li_ion_battery_p2d_init import separator as sep\nfrom li_ion_battery_p2d_init import Inputs\nfrom li_ion_battery_p2d_init import anode_obj as anode\nfrom li_ion_battery_p2d_init import anode_surf_obj as anode_s\nfrom li_ion_battery_p2d_init import elyte_obj as elyte\nfrom li_ion_battery_p2d_init import cathode_surf_obj as cathode_s\nfrom li_ion_battery_p2d_init import cathode_obj as cathode\nfrom li_ion_battery_p2d_init import conductor_obj as conductor\n\n\n\n\nclass Extended_Problem(Implicit_Problem):\n sw0 = True\n def Battery_Func(t, SV, SV_dot, sw):\n\n \"\"\"=================================================================\"\"\"\n \"\"\"==========================INITIALIZE=============================\"\"\"\n \"\"\"=================================================================\"\"\"\n\n print(t)\n nSV = len(SV)\n res = np.zeros([nSV])\n\n offset_vec = sep.offsets\n\n \"\"\" anode = an.obj['electrode']\n anode_s = an.obj['surf']\n elyte = an.obj['elyte']\n cathode = cat.obj['electrode']\n cathode_s = cat.obj['surf']\"\"\"\n\n nsp_an = anode.n_species; nsp_cat = cathode.n_species\n\n F = ct.faraday; R = ct.gas_constant; T = Inputs.T\n #sigma_eff_an = an.params['sigma_eff_ed']; dyInv = an.geom['dyInv']\n #u_Li_elyte = an.params['u_Li_elyte']; D_Li_an = an.params['D_Li_ed']\n #dr = an.dr\n# %%\n \"\"\"=================================================================\"\"\"\n \"\"\"============================ANODE================================\"\"\"\n \"\"\"=================================================================\"\"\"\n # --------------------------------\n # ANODE CURRENT COLLECTOR BOUNDARY\n # --------------------------------\n\n # Looking at node 1, j=0, set THIS node conditions\n offset = an.offsets\n ptr = an.ptr\n j = 0\n\n N_io_m = 0\n i_io_m = 0\n i_el_m = an.i_ext\n\n X_an_1 = SV[offset[j] + ptr['X_ed'][-1]]\n rho_k_elyte_1 = SV[offset[j] + ptr['rho_k_elyte']]\n\n phi_elec_an_1 = SV[offset[j] + ptr['Phi_ed']]\n phi_elec_elyte_1 = phi_elec_an_1 - SV[offset[j] + ptr['Phi_dl']]\n\n anode.X = [X_an_1, 1-X_an_1]\n anode.electric_potential = phi_elec_an_1\n conductor.electric_potential = phi_elec_an_1\n\n #elyte.TDY = Inputs.T, np.sum(rho_k_elyte_1), rho_k_elyte_1\n elyte.Y = rho_k_elyte_1/np.sum(rho_k_elyte_1)\n elyte.electric_potential = phi_elec_elyte_1\n\n sdot_1 = anode_s.net_production_rates\n\n # Shift forward to node 2, j=1, set NEXT node conditions\n j = 1; offset = int(offset_vec[j])\n\n X_an_2 = SV[offset + an.ptr['X_ed'][-1]]\n rho_k_elyte_2 = SV[offset + an.ptr['rho_k_elyte']]\n\n phi_elec_an_2 = SV[offset + an.ptr['Phi_ed']]\n phi_elec_elyte_2 = phi_elec_an_2 - SV[offset + an.ptr['Phi_dl']]\n\n anode.X = [X_an_2, 1-X_an_2]\n conductor.electric_potential = phi_elec_an_2\n anode.electric_potential = phi_elec_an_2\n\n #elyte.TDY = Inputs.T, np.sum(rho_k_elyte_2), rho_k_elyte_2\n elyte.Y = rho_k_elyte_2/np.sum(rho_k_elyte_2)\n elyte.electric_potential = phi_elec_elyte_2\n\n sdot_2 = anode_s.net_production_rates\n\n # Shift back to node 1, j=0, set THIS node outlet conditions\n j = 0; offset = int(offset_vec[j])\n\n i_el_p = an.sigma_eff_ed*(phi_elec_an_1-phi_elec_an_2)*an.dyInv\n\n N_io_p = (-an.u_Li_elyte*elyte.density_mole*(R*T*(rho_k_elyte_2 - rho_k_elyte_1)\n + 
F*(phi_elec_elyte_2 - phi_elec_elyte_1))*an.dyInv)\n\n i_io_p = np.dot(N_io_p,Inputs.z_k_elyte)*F\n\n i_Far_1 = sdot_1[an.ptr['iFar']]*F*an.A_surf/an.dyInv\n\n X_Li = 1 - SV[offset + an.ptr['X_ed']]\n DiffFlux = np.zeros([an.nshells+1])\n DiffFlux[1:-1] = an.D_Li_ed*(X_Li[0:-1] - X_Li[1:])/an.dr\n DiffFlux[-1] = sdot_1[0]/anode.density_mole\n\n k_m = np.arange(0, an.nshells)/an.nshells\n k_p = np.arange(1, an.nshells+1)/an.nshells\n\n# print(anode_s.forward_rate_constants, phi_elec_an_1, sdot_1[an.ptr['iFar']])\n\n \"\"\"Calculate the change in X_C6 in the particle interior.\n Note that the DiffFlux is the diffusion of lithium\n toward the particle surface, and that diffusion of Li\n into the shell decreases the amount of C6. The fluxes\n must be scaled by the shell interface surface area\n relative to the total particle surface area\"\"\"\n res[offset + an.ptr['X_ed']] = (SV_dot[offset + an.ptr['X_ed']]\n - ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)\n * an.A_surf/an.eps_ed/an.V_shell))\n\n \"\"\"Change in electrolyte_composition\"\"\"\n res[offset + an.ptr['rho_k_elyte']] = (SV_dot[offset + an.ptr['rho_k_elyte']]\n - (((N_io_m - N_io_p)*an.dyInv + sdot_1[nsp_an]*an.A_surf)\n /elyte.density_mole/an.eps_elyte))\n\n \"\"\"Double-layer voltage\"\"\"\n res[offset + an.ptr['Phi_dl']] = (SV_dot[offset + an.ptr['Phi_dl']]\n - (i_Far_1 + i_el_m - i_el_p)*an.dyInv/an.C_dl/an.A_surf)\n\n \"\"\"Algebraic equation for ANODE electric potential boundary condition\"\"\"\n res[offset + an.ptr['Phi_ed']] = SV[offset + an.ptr['Phi_ed']]\n# (i_el_m - i_el_p + i_io_m - i_io_p)\n# SV_dot[offset + an.ptr['V_ed']]\n# %%\n \"\"\"============================ANODE================================\"\"\"\n \"\"\"INTERIOR NODES\"\"\"\n for j in np.arange(2, an.npoints):\n\n # Save previous node outlet conditions as new inlet conditions\n N_io_m = N_io_p\n i_io_m = i_io_p\n i_el_m = i_el_p\n X_an_1 = X_an_2\n rho_k_elyte_1 = rho_k_elyte_2\n phi_elec_an_1 = phi_elec_an_2\n phi_elec_elyte_1 = phi_elec_elyte_2\n sdot_1 = sdot_2\n\n # Shift forward to NEXT node\n offset = int(an.offsets[j])\n\n X_an_2 = SV[offset + an.ptr['X_ed'][-1]]\n rho_k_elyte_2 = SV[offset + an.ptr['rho_k_elyte']]\n\n phi_elec_an_2 = SV[offset + an.ptr['Phi_ed']]\n phi_elec_elyte_2 = phi_elec_an_2 - SV[offset + an.ptr['Phi_dl']]\n\n anode.X = [X_an_2, 1-X_an_2]\n anode.electric_potential = phi_elec_an_2\n conductor.electric_potential = phi_elec_an_2\n\n elyte.Y = rho_k_elyte_2/np.sum(rho_k_elyte_2)\n elyte.electric_potential = phi_elec_elyte_2\n\n sdot_2 = anode_s.net_production_rates\n\n # Shift back to THIS node, set THIS node outlet conditions\n offset = int(an.offsets[j - 1])\n\n i_el_p = an.sigma_eff_ed*(phi_elec_an_1-phi_elec_an_2)*an.dyInv\n\n N_io_p = (-an.u_Li_elyte*elyte.density_mole*(R*T*(rho_k_elyte_2 - rho_k_elyte_1)\n + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*an.dyInv)\n\n i_io_p = np.dot(N_io_p,Inputs.z_k_elyte)*F\n\n i_Far_1 = sdot_1[an.ptr['iFar']]*F*an.A_surf/an.dyInv\n\n X_Li = 1 - SV[offset + an.ptr['X_ed']]\n DiffFlux = np.zeros([an.nshells+1])\n DiffFlux[1:-1] = an.D_Li_ed*(X_Li[0:-1] - X_Li[1:])/an.dr\n DiffFlux[-1] = sdot_1[0]/anode.density_mole\n\n \"\"\"Calculate the change in X_C6 in the particle interior.\"\"\"\n res[offset + an.ptr['X_ed']] = (SV_dot[offset + an.ptr['X_ed']]\n - ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)\n * an.A_surf/an.eps_ed/an.V_shell))\n\n \"\"\"Change in electrolyte_composition\"\"\"\n res[offset + an.ptr['rho_k_elyte']] = (SV_dot[offset + an.ptr['rho_k_elyte']]\n - (((N_io_m - 
N_io_p)*an.dyInv + sdot_1[nsp_an]*an.A_surf)\n /elyte.density_mole/an.eps_elyte))\n\n \"\"\"Double-layer voltage\"\"\"\n res[offset + an.ptr['Phi_dl']] = (SV_dot[offset + an.ptr['Phi_dl']]\n - (i_Far_1 + i_el_m - i_el_p)*an.dyInv/an.C_dl/an.A_surf)\n\n \"\"\"Algebraic equation for ANODE electric potential boundary condition\"\"\"\n res[offset + an.ptr['Phi_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)\n# %%\n \"\"\"============================ANODE================================\"\"\"\n \"\"\"Separator boundary\"\"\"\n # Save previous node outlet conditions as new inlet conditions\n N_io_m = N_io_p\n i_io_m = i_io_p\n i_el_m = i_el_p\n X_an_1 = X_an_2\n rho_k_elyte_1 = rho_k_elyte_2\n phi_elec_an_1 = phi_elec_an_2\n phi_elec_elyte_1 = phi_elec_elyte_2\n sdot_1 = sdot_2\n\n # Shift forward to NEXT node (first separator node)\n# j = an.npoints; offset = int(offset_vec[j])\n#\n# X_elyte_2 = SV[offset + sep.ptr['X_elyte']]\n#\n# phi_elec_elyte_2 = SV[offset + sep.ptr['V_elyte']]\n\n # Shift back to THIS node, set THIS node outlet conditions\n i_el_p = 0\n\n# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)\n# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*dyInv)\n#\n# i_io_p = N_io_p*F\n\n # Set j to final ANODE node\n j = an.npoints-1; offset = int(an.offsets[j])\n\n i_Far_1 = sdot_1[an.ptr['iFar']]*F*an.A_surf/an.dyInv\n\n i_io_p = an.i_ext\n #THIS IS TEMPORARY, NON-GENERALIZED CODE:\n N_io_p = np.zeros_like(N_io_p)\n N_io_p[2] = i_io_p/F\n\n X_Li = 1 - SV[offset + an.ptr['X_ed']]\n DiffFlux = np.zeros([an.nshells+1])\n DiffFlux[1:-1] = an.D_Li_ed*(X_Li[0:-1] - X_Li[1:])/an.dr\n DiffFlux[-1] = sdot_1[0]/anode.density_mole\n\n \"\"\"Calculate the change in X_C6 in the particle interior.\"\"\"\n res[offset + an.ptr['X_ed']] = (SV_dot[offset + an.ptr['X_ed']]\n - ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)\n * an.A_surf/an.eps_ed/an.V_shell))\n\n \"\"\"Change in electrolyte_composition\"\"\"\n res[offset + an.ptr['rho_k_elyte']] = (SV_dot[offset + an.ptr['rho_k_elyte']]\n - (((N_io_m - N_io_p)*an.dyInv + sdot_1[nsp_an]*an.A_surf)\n /elyte.density_mole/an.eps_elyte))\n\n \"\"\"Double-layer voltage\"\"\"\n res[offset + an.ptr['Phi_dl']] = (SV_dot[offset + an.ptr['Phi_dl']]\n - (i_Far_1 + i_el_m - i_el_p)*an.dyInv/an.C_dl/an.A_surf)\n\n \"\"\"Algebraic equation for ANODE electric potential boundary condition\"\"\"\n res[offset + an.ptr['Phi_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)\n# %%\n \"\"\"=================================================================\"\"\"\n \"\"\"==========================SEPARATOR==============================\"\"\"\n \"\"\"=================================================================\"\"\"\n# for j in np.arange(an.npoints+1, sep.sep_max):\n#\n# X_elyte_1 = X_elyte_2\n# phi_elec_elyte_1 = phi_elec_elyte_2\n# N_io_m = N_io_p\n# i_io_m = i_io_p\n\n # Shift forward to NEXT node\n# offset = int(offset_vec[j])\n#\n# X_elyte_2 = SV[offset + sep.ptr['X_elyte']]\n# phi_elec_elyte_2 = SV[offset + sep.ptr['V_elyte']]\n\n # Step back to THIS node to calculate outlet flux\n# offset = int(offset_vec[j-1])\n\n# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)\n# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*sep.geom['dyInv'])\n#\n# i_io_p = N_io_p*F\n\n# i_io_p = an.params['i_ext']\n# N_io_p = i_io_p/F\n#\n# \"\"\"Change in electrolyte_composition\"\"\"\n# res[offset + sep.ptr['X_elyte']] = (SV_dot[offset + sep.ptr['X_elyte']]\n# - (((N_io_m - N_io_p)*dyInv)/elyte.density_mole/sep.geom['phi_elyte']))\n#\n# \"\"\"Charge neutrality 
enforced\"\"\"\n# res[offset + sep.ptr['V_elyte']] = (i_io_m - i_io_p)\n# %%\n # Looking at LAST node in separator\n# X_elyte_1 = X_elyte_2\n# phi_elec_elyte_1 = phi_elec_elyte_2\n# N_io_m = N_io_p\n# i_io_m = i_io_p\n\n # Shift forward to NEXT node, first cathode node\n# j = sep.sep_max; offset = int(offset_vec[j])\n#\n# X_cat_2 = SV[offset + cat.ptr['X_ed'][-1]]\n# X_elyte_2 = SV[offset + cat.ptr['X_elyte']]\n#\n# phi_elec_cat_2 = SV[offset + cat.ptr['V_ed']]\n# phi_elec_elyte_2 = phi_elec_cat_2 - SV[offset + cat.ptr['V_dl']]\n#\n# cathode.X = [1-X_cat_2, X_cat_2]\n# cathode.electric_potential = phi_elec_cat_2\n#\n# elyte.X = [X_elyte_2, 7.8730103237e-2, 2.8328131770e-1]\n# elyte.electric_potential = phi_elec_elyte_2\n#\n# sdot_2 = cathode_s.net_production_rates\n\n # Shift back to THIS node (last separator node)\n# j = sep.sep_max-1; offset = int(offset_vec[j])\n#\n# i_el_p = 0\n\n# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)\n# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*sep.geom['dyInv'])\n#\n# i_io_p = N_io_p*F\n\n# i_io_p = an.params['i_ext']\n# N_io_p = i_io_p/F\n#\n# \"\"\"Change in electrolyte_composition\"\"\"\n# res[offset + sep.ptr['X_elyte']] = (SV_dot[offset + sep.ptr['X_elyte']]\n# - (((N_io_m - N_io_p)*dyInv)/elyte.density_mole/sep.geom['phi_elyte']))\n#\n# \"\"\"Charge neutrality enforced\"\"\"\n# res[offset + sep.ptr['V_elyte']] = (i_io_m - i_io_p)\n# print(SV, res)\n# SV[offset + sep.ptr['V_elyte']]\n# (i_io_m - i_io_p)\n\n# %%\n \"\"\"=================================================================\"\"\"\n \"\"\"===========================CATHODE===============================\"\"\"\n \"\"\"=================================================================\"\"\"\n\n # Alrighty, CATHODE time\n\n# sigma_eff_cat = cat.params['sigma_eff_ed']; dyInv = cat.geom['dyInv']\n# D_Li_cat = cat.params['D_Li_ed']\n#\n# i_io_m = i_io_p\n# N_io_m = N_io_p\n# i_el_m = i_el_p\n# X_cat_1 = X_cat_2\n# X_elyte_1 = X_elyte_2\n# phi_elec_cat_1 = phi_elec_cat_2\n# phi_elec_elyte_1 = phi_elec_elyte_2\n# sdot_1 = sdot_2\n# j = sep.cat_max-1; offset = int(offset_vec[j])\n# i_el_p = -an.params['i_ext']\n# i_io_p = 0\n# N_io_p = i_io_p/F\n#\n# i_Far_1 = sdot_1[cat.ptr['iFar']]*F*cat.geom['A_surf']/dyInv\n# print(cathode_s.forward_rate_constants, phi_elec_cat_1, sdot_1[cat.ptr['iFar']])\n# X_Li = 1 - SV[offset + cat.ptr['X_ed']]\n# DiffFlux = np.zeros([cat.nshells+1])\n# DiffFlux[1:-1] = D_Li_cat*(X_Li[0:-1] - X_Li[1:])/dr\n# DiffFlux[-1] = sdot_1[0]/cathode.density_mole\n#\n# \"\"\"Calculate the change in CoO2 in the particle interior.\"\"\"\n# res[offset + cat.ptr['X_ed']] = (SV_dot[offset + cat.ptr['X_ed']]\n# - ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)\n# * cat.geom['A_surf']/cat.geom['phi_ed']/cat.params['V_shell']))\n#\n# \"\"\"Change in electrolyte_composition\"\"\"\n# res[offset + cat.ptr['X_elyte']] = (SV_dot[offset + cat.ptr['X_elyte']]\n# - (((N_io_m - N_io_p)*dyInv + sdot_1[nsp_cat]*cat.geom['A_surf'])\n# /elyte.density_mole/cat.geom['phi_elyte']))\n#\n# \"\"\"Double-layer voltage\"\"\"\n# res[offset + cat.ptr['V_dl']] = (SV_dot[offset + cat.ptr['V_dl']]\n# - (i_Far_1 + i_el_m - i_el_p)*dyInv/cat.params['C_dl']/cat.geom['A_surf'])\n#\n# \"\"\"Algebraic equation for CATHODE electric potential boundary condition\"\"\"\n# res[offset + cat.ptr['V_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)\n# print(SV, res)\n\n\n# for j in np.arange(an.npoints + sep.npoints, sep.cat_max-1):\n# N_io_m = N_io_p\n# i_io_m = i_io_p\n# i_el_m = i_el_p\n# X_cat_1 = X_cat_2\n# 
X_elyte_1 = X_elyte_2\n# phi_elec_cat_1 = phi_elec_cat_2\n# phi_elec_elyte_1 = phi_elec_elyte_2\n# sdot_1 = sdot_2\n#\n# # Look at NEXT node\n# offset = int(offset_vec[j])\n#\n# X_cat_2 = SV[offset + cat.ptr['X_ed'][-1]]\n# X_elyte_2 = SV[offset + cat.ptr['X_elyte']]\n#\n# phi_elec_cat_2 = SV[offset + cat.ptr['V_ed']]\n# phi_elec_elyte_2 = phi_elec_cat_2 - SV[offset + cat.ptr['V_dl']]\n#\n# cathode.X = [1-X_cat_2, X_cat_2]\n# cathode.electric_potential = phi_elec_cat_2\n#\n# elyte.X = [X_elyte_2, 1-X_elyte_2]\n# elyte.electric_potential = phi_elec_elyte_2\n#\n# sdot_2 = cathode_s.net_production_rates\n#\n# # Shift back to THIS node, set THIS node outlet conditions\n# offset = int(offset_vec[j-1])\n#\n# i_el_p = sigma_eff_cat*(phi_elec_cat_1 - phi_elec_cat_2)*dyInv\n#\n# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)\n# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*dyInv)\n#\n# i_io_p = N_io_p*F\n#\n# i_Far_1 = sdot_1[cat.ptr['iFar']]*F*cat.geom['A_surf']/dyInv\n#\n# X_Li = 1 - SV[offset + cat.ptr['X_ed']]\n# DiffFlux = np.zeros([cat.nshells+1])\n# DiffFlux[1:-1] = D_Li_cat*(X_Li[0:-1] - X_Li[1:])/dr\n# DiffFlux[-1] = sdot_1[1]/cathode.density_mole\n#\n# \"\"\"Calculate the change in CoO2 in the particle interior.\"\"\"\n# res[offset + cat.ptr['X_ed']] = (SV_dot[offset + cat.ptr['X_ed']])\n# \"\"\"- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)\n# * cat.geom['A_surf']/cat.geom['phi_ed']/cat.params['V_shell']))\"\"\"\n#\n# \"\"\"Change in electrolyte_composition\"\"\"\n# res[offset + cat.ptr['X_elyte']] = (SV_dot[offset + cat.ptr['X_elyte']])\n# \"\"\"- (((N_io_m - N_io_p)*dyInv + sdot_1[nsp_cat]*cat.geom['A_surf'])\n# /elyte.density_mole/cat.geom['phi_elyte']))\"\"\"\n#\n# \"\"\"Double-layer voltage\"\"\"\n# res[offset + cat.ptr['V_dl']] = (SV_dot[offset + cat.ptr['V_dl']]\n# - (i_Far_1 + i_el_m - i_el_p)*dyInv/cat.params['C_dl']/cat.geom['A_surf'])\n#\n# \"\"\"Algebraic equation for CATHODE electric potential boundary condition\"\"\"\n# res[offset + cat.ptr['V_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)\n\n# %%\n \"\"\"=========================CATHODE=============================\"\"\"\n \"\"\"current collector boundary\"\"\"\n\n# N_io_m = N_io_p\n# i_io_m = i_io_p\n# i_el_m = i_el_p\n# X_cat_1 = X_cat_2\n# X_elyte_1 = X_elyte_2\n# phi_elec_cat_1 = phi_elec_cat_2\n# phi_elec_elyte_1 = phi_elec_elyte_2\n# sdot_1 = sdot_2\n#\n# # Set THIS node outlet conditions (last node BCs)\n# j = sep.cat_max - 1; offset = int(offset_vec[j])\n# i_io_p = 0\n# N_io_p = 0\n# i_el_p = cat.params['i_ext']\n#\n# i_Far_1 = sdot_1[cat.ptr['iFar']]*F*cat.geom['A_surf']/dyInv\n#\n# X_Li = 1 - SV[offset + cat.ptr['X_ed']]\n# DiffFlux = np.zeros([cat.nshells+1])\n# DiffFlux[1:-1] = D_Li_cat*(X_Li[0:-1] - X_Li[1:])/dr\n# DiffFlux[-1] = sdot_1[1]/cathode.density_mole\n#\n# \"\"\"Calculate the change in CoO2 in the particle interior.\"\"\"\n# res[offset + cat.ptr['X_ed']] = (SV_dot[offset + cat.ptr['X_ed']])\n# \"\"\"- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)\n# * cat.geom['A_surf']/cat.geom['phi_ed']/cat.params['V_shell']))\"\"\"\n#\n# \"\"\"Change in electrolyte_composition\"\"\"\n# res[offset + cat.ptr['X_elyte']] = (SV_dot[offset + cat.ptr['X_elyte']])\n# \"\"\"- (((N_io_m - N_io_p)*dyInv + sdot_1[nsp_cat]*cat.geom['A_surf'])\n# /elyte.density_mole/cat.geom['phi_elyte']))\"\"\"\n#\n# \"\"\"Double-layer voltage\"\"\"\n# res[offset + cat.ptr['V_dl']] = (SV_dot[offset + cat.ptr['V_dl']]\n# - (i_Far_1 + i_el_m - i_el_p)*dyInv/cat.params['C_dl']/cat.geom['A_surf'])\n#\n# \"\"\"Algebraic 
equation for CATHODE electric potential boundary condition\"\"\"\n# res[offset + cat.ptr['V_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)\n\n return res\n\n \"\"\"=====================================================================\"\"\"\n \"\"\"=====================================================================\"\"\"\n \"\"\"=====================================================================\"\"\"\n# %%\n def state_events(self, t, y, yd, sw):\n event1 = np.zeros([an.params['npoints']])\n event2 = np.zeros([an.params['npoints']])\n event3 = np.zeros([an.params['nshells']])\n event4 = np.zeros([an.params['nshells']])\n\n for j in np.arange(0, an.params['npoints']):\n offset = j*an.params['nVars']\n\n event1[j] = (y[offset + an.ptr['V_dl']])\n event2[j] = (1 - y[offset + an.ptr['V_dl']])\n\n for i in np.arange(0, an.params['nshells']):\n event3[i] = y[offset + an.ptr['X_ed'][i]] - (1 - an.params['X_Li_max'])\n event4[i] = (((1 - an.params['X_Li_min']) - y[offset + an.ptr['X_ed'][i]]))\n\n event5 = np.zeros([cat.params['npoints']])\n event6 = np.zeros([cat.params['npoints']])\n event7 = np.zeros([cat.params['nshells']])\n event8 = np.zeros([cat.params['nshells']])\n\n for j in np.arange(0, cat.params['npoints']):\n offset = j*cat.params['nVars'] + an.npoints*an.nVars + sep.npoints*sep.nVars\n\n event5[j] = (y[offset + cat.ptr['V_dl']])\n event6[j] = (y[offset + cat.ptr['V_dl']] - 5)\n\n for i in np.arange(0, cat.params['nshells']):\n event7[i] = y[offset + cat.ptr['X_ed'][i]] - (1 - cat.params['X_Li_max'])\n event8[i] = (1 - cat.params['X_Li_min']) - y[offset + cat.ptr['X_ed'][i]]\n\n event9 = np.zeros([sep.npoints])\n event10 = np.zeros([sep.npoints])\n for j in np.arange(0, sep.npoints):\n offset = an.npoints*an.nVars\n event9[j] = 1 - y[offset + sep.ptr['X_elyte']]\n event10[j] = y[offset + sep.ptr['X_elyte']]\n\n events = np.concatenate((event1, event2, event3, event4, event5, event6,\n event7, event8, event9, event10))\n\n return events\n\n \"\"\"=====================================================================\"\"\"\n\n def handle_event(self, solver, event_info):\n while True:\n self.event_switch(solver, event_info)\n self.init_mode(solver)\n\n if not True in event_info:\n break\n\n def event_switch(self, solver, event_info):\n if not all(event_info):\n solver.sw = [not solver.sw]\n\n def init_mode(self, solver):\n an.t_flag = solver.t\n if an.params['i_ext'] != 0:\n an.params['i_ext'] = 0\n cat.params['i_ext'] = 0\n" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.dot", "numpy.zeros", "numpy.sum", "numpy.arange" ] ]
nayyarv/CodeANet
[ "30c8d95fff96bdca72b49de551f38e33cd59a5f6" ]
[ "tests/test_layers4.py" ]
[ "#!/usr/bin/env py.test\n# -*- coding: utf-8 -*-\n__author__ = \"Varun Nayyar <nayyarv@gmail.com>\"\n\nimport numpy as np\nimport pytest\n\nimport NN.layerversions.layers4 as layer\n\n\ndef test_fc():\n l1 = layer.FullyConnected(5, 10)\n x = np.ones((100, 5))\n y, c = l1.forward(x)\n assert y.shape == (100, 10)\n assert np.all(c == x)\n\n\ndef test_tanh():\n l = layer.Tanh()\n x = np.ones((100, 5))\n y, c = l.forward(x)\n assert y.shape == (100, 5)\n assert np.all(c == y)\n\n\n@pytest.fixture()\ndef optim():\n return layer.sgd_optimiser(0.01)\n\n\ndef test_back_fc(optim):\n l1 = layer.FullyConnected(5, 10)\n\n x = np.ones((100, 5))\n dldy = np.random.randn(100, 10)\n\n dldx = l1.backward(dldy, x, optim)\n assert dldx.shape == (100, 5)\n\n\ndef test_back_tanh():\n l1 = layer.Tanh()\n x = np.random.randn(100, 5)\n dldy = np.random.randn(100, 5)\n\n dldx = l1.backward(dldy, np.tanh(x), optim)\n assert dldx.shape == (100, 5)\n\n\ndef test_network():\n from NN.loss import MSELoss\n x = np.random.randn(100, 10)\n y = np.random.randn(100, 3)\n net = layer.Network(\n layer.FullyConnected(10, 20),\n layer.Tanh(),\n layer.FullyConnected(20, 3),\n layer.Tanh()\n )\n\n mse = MSELoss()\n\n layer.train(net, (x, y), 10)\n\n yhat, _ = net.forward(x)\n initloss = mse.loss(y, yhat)\n layer.train(net, (x, y), 10)\n yhat, _ = net.forward(x)\n finloss = mse.loss(yhat, y)\n\n assert initloss > finloss\n" ]
[ [ "numpy.all", "numpy.tanh", "numpy.ones", "numpy.random.randn" ] ]
ryflect/CS683-xcom
[ "37db4ab3bc8996780de6485a79898dd1395bddd0" ]
[ "helper.py" ]
[ "import numpy as np\nimport math\nfrom arena import Arena\nfrom agent import HAgent, AAgent\nimport random\n# np.random.seed(1234)\n\n\n# place the humans on the arena\ndef place_soldiers(n, arena, agents):\n x = 0\n y = 0\n\n for i in range(n):\n agents[i + 1] = HAgent([x, y])\n arena.arena[x, y] = 1\n y += 2\n\n return arena, agents\n\n\n# place the alien agents on the arena\ndef place_targets(n, arena, targets, pos=None):\n if pos is not None:\n orig_pos = pos\n for i in range(n):\n targets[i + 1] = AAgent(pos[i])\n arena.arena[pos[i][0], pos[i][1]] = 2\n else:\n orig_pos = []\n for i in range(n):\n while True:\n x = np.rint(np.array([(arena.size - 1) * np.random.rand(1),\n (arena.size - 1) * np.random.rand(1)]))\n if x[0] > 7 or x[1] > 7:\n break\n x = [int(i) for i in x]\n # x = [19, 19]\n targets[i + 1] = AAgent(x)\n arena.arena[x[0], x[1]] = 2\n orig_pos.append([x[0], x[1]])\n return arena, targets, orig_pos\n\n\n# adds half-cover tiles in random locations in the arena\n# At most n cover tiles added, though potentially fewer\ndef place_half_cover(n, arena):\n for i in range(n):\n x = np.random.randint(0, (arena.size - 1))\n y = np.random.randint(0, (arena.size - 1))\n if arena.arena[x, y] == 0:\n arena.arena[x, y] = 3\n return arena\n\n\n# movement for agents\ndef move(agent, arena, loc):\n # Check that agent has movement, if not, do nothing\n if agent.moves <= 0:\n # print('unsuccessful move')\n return agent, arena\n # Check if in movement range\n elif abs((loc[0] - agent.pos[0]) + (loc[1] - agent.pos[1])) <= agent.move_range:\n # print('successful move')\n # update the arena matrix\n arena.arena[agent.pos[0], agent.pos[1]] = 0\n arena.arena[loc[0], loc[1]] = 1\n # update agent location, number of moves\n agent.moves -= 1\n agent.pos = loc\n arena.time += 1\n return agent, arena\n # if not in movement range, do nothing\n else:\n # print('unsuccessful move')\n return agent, arena\n\n\n# reload action\ndef reload(agent):\n if agent.moves > 0:\n agent.moves -= 1\n agent.ammo = 5\n return agent\n\n\ndef fire(agent, arena, target):\n # for the moment, assume anything can be fired on\n # set firing agent's moves to zero\n agent.moves = 0\n agent.ammo -= 1\n cover = 0\n # check if target is in (half) cover\n if agent.pos[0] + 1 > target.pos[0]:\n if arena.arena[target.pos[0] - 1, target.pos[1]] == 3:\n cover = 20\n if agent.pos[0] - 1 < target.pos[0]:\n if arena.arena[target.pos[0] + 1, target.pos[1]] == 3:\n cover = 20\n if agent.pos[1] + 1 > target.pos[1]:\n if arena.arena[target.pos[0], target.pos[1] - 1] == 3:\n cover = 20\n if agent.pos[1] - 1 < target.pos[1]:\n if arena.arena[target.pos[0], target.pos[1] + 1] == 3:\n cover = 20\n # for distance equation, see\n # https://www.ufopaedia.org/index.php/Chance_to_Hit_(EU2012)\n\n diff = [agent.pos[0] - target.pos[0], agent.pos[1] - target.pos[1]]\n distance_chance = 42 - 4.5 * (np.linalg.norm(diff))\n # Hit chance is base aim, less cover, plus distance modifier\n to_hit = agent.aim - cover + distance_chance\n if np.random.randint(100) >= to_hit:\n # miss, so no change\n arena.time += 1\n return agent, arena, target\n else:\n flanking = 0\n crit_modifier = 1\n # check if critical\n if cover == 0:\n flanking = 50\n crit_chance = agent.base_crit + flanking\n # crit modifier in xcom is 1.5x damage\n if np.random.randint(100) < crit_chance:\n crit_modifier = 1.5\n # slight random variance from base damage, +1 to -1\n damage = math.floor(crit_modifier * (np.random.randint(-1, 2) + agent.damage))\n # apply damage and return\n target.health -= 
damage\n # check if damage causes death\n arena, target = check_death_enemy(arena, target)\n arena.time += 1\n return agent, arena, target\n\n\n# check to see if character is dead, update arena information if so\ndef check_death_enemy(arena, target):\n if target.health <= 0:\n target.moves = 0\n arena.arena[target.pos] = 0\n arena.targets -= 1\n if arena.targets <= 0:\n arena.targets = 0\n return arena, target\n\n\n# refresh movement for non-dead characters\ndef new_turn(arena, agents, targets):\n for i in agents:\n if i.health > 0:\n i.moves = 2\n for j in targets:\n if j.health > 0:\n j.moves = 2\n\n return arena, agents, targets\n\n\n# get a valid move\ndef get_valid_move(agent):\n x_old = agent.pos[0]\n y_old = agent.pos[1]\n # print(x_old, y_old)\n x = int(random.randint(x_old - 3, x_old + 3))\n y = int(random.randint(y_old - 3, y_old + 3))\n if x < 0:\n x = x * -1\n if y < 0:\n y = y * -1\n if x > 19:\n x = 19\n if y > 19:\n y = 19\n # print(x, y)\n return x, y\n" ]
[ [ "numpy.linalg.norm", "numpy.random.randint", "numpy.random.rand" ] ]
RogelioHiguera/Python-2.0
[ "af4e471da86d132c5bece8c93756e29a6bc6d437" ]
[ "IibreriaNumpy2.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 26 20:21:07 2019\nTecnológico Nacional de México (TECNM)\nTecnológico de Estudios Superiores de Ixtapaluca (TESI)\nDivisión de ingeniería electrónica\nIntroducción a la librería Numpy 2\nM. en C. Rogelio Manuel Higuera Gonzalez\n\"\"\"\nimport numpy as np\n##################################################################################\nages = np.array([34,14,37,5,13]) #Crea un arreglo de edades\nsorted_ages = np.sort(ages) #Acomoda los elementos del arreglo ages del menor al mayor\n#ages.sort() #Acomoda los elementos del arreglo original ages del menor al mayor\nargages = ages.argsort() #Indica el indice que clasifica a cada uno de los elementos del arreglo ages (del menor al mayor)\nages1 = ages[ages.argsort()] #Crea un arreglo ages ordenado dependiendo de su indice\n##################################################################################\npersons = np.array(['Johnny','Mary','Peter','Will','Joe'])\nheights = np.array([1.76,1.2,1.68,0.5,1.25])\nsort_indices = np.argsort(ages) #Realiza una clasificación basada en edades\n#print(persons[sort_indices]) #Imprime la lista de personas clasificadas por su edad\n#print(heights[sort_indices]) #Imprime la lista de altura clasificadas por su esdad\n#print(ages[sort_indices]) #Imprime la lista de edad clasificadas por su edad\nsort_indices1 = np.argsort(persons)\n#print(persons[sort_indices1])\n#print(ages[sort_indices1])\n#print(heights[sort_indices1])\n#Para ordenar en orden desendente las estaturas usar la notación en Python [::-1]\nsort_indices2 = np.argsort(heights)[::-1]\n#print(persons[sort_indices2])\n#print(ages[sort_indices2])\n#print(heights[sort_indices2])\n##################################################################################\nlist1 = [[1,2,3,4],[5,6,7,8]]\na1 = np.array(list1)\na2 = a1\na2[0][0] = 11 #Hacer un cambio en a2 afecta a a1\na1.shape = 1,-1 #a2 tambien cambia su forma\n##################################################################################\nlist2 = [[10,11,12,13],[14,15,16,17]]\na3 = np.array(list2)\na4 = a3.view() #Copia superficial, cuando cambias la forma de a3, a4 no es afectado\na3.shape = 1,-1\n##################################################################################\nlist3 = [[20,21,22,23],[24,25,26,27]]\na5 = np.array(list3)\na6 = a5.copy() #La función copy() crea una copia profunda del arreglo \na5[0][0] = 10 #El cambio no es reflejado en a6\na5.shape = 1,-1 #a6 no cambia su forma \n\n" ]
[ [ "numpy.array", "numpy.sort", "numpy.argsort" ] ]
zijiewu3/mbuild
[ "dc6a1053ddec7b5682b0413bd5b2d2a187cd24e8" ]
[ "mbuild/coordinate_transform.py" ]
[ "\"\"\"Coordinate transformation functions.\"\"\"\nfrom warnings import simplefilter, warn\n\nsimplefilter(\"always\", DeprecationWarning)\n\nimport numpy as np\nfrom numpy.linalg import inv, norm, svd\n\nfrom mbuild.utils.exceptions import RemovedFuncError\n\n__all__ = [\n \"force_overlap\",\n \"x_axis_transform\",\n \"y_axis_transform\",\n \"z_axis_transform\",\n # Deprecated\n \"equivalence_transform\",\n \"rotate\",\n \"rotate_around_x\",\n \"rotate_around_y\",\n \"rotate_around_z\",\n \"spin\",\n \"spin_x\",\n \"spin_y\",\n \"spin_z\",\n \"translate\",\n \"translate_to\",\n]\n\n\ndef force_overlap(move_this, from_positions, to_positions, add_bond=True):\n \"\"\"Move a Compound such that a position overlaps with another.\n\n Computes an affine transformation that maps the from_positions to the\n respective to_positions, and applies this transformation to the compound.\n\n Parameters\n ----------\n move_this : mb.Compound\n The Compound to be moved.\n from_positions : np.ndarray, shape=(n, 3), dtype=float\n Original positions.\n to_positions : np.ndarray, shape=(n, 3), dtype=float\n New positions.\n add_bond : bool, optional, default=True\n If `from_positions` and `to_positions` are `Ports`, create a bond\n between the two anchor atoms.\n \"\"\"\n from mbuild.port import Port\n\n T = None\n if isinstance(from_positions, (list, tuple)) and isinstance(\n to_positions, (list, tuple)\n ):\n equivalence_pairs = zip(from_positions, to_positions)\n elif isinstance(from_positions, Port) and isinstance(to_positions, Port):\n equivalence_pairs, T = _choose_correct_port(\n from_positions, to_positions\n )\n from_positions.used = True\n to_positions.used = True\n else:\n equivalence_pairs = [(from_positions, to_positions)]\n\n if not T:\n T = _create_equivalence_transform(equivalence_pairs)\n atom_positions = move_this.xyz_with_ports\n atom_positions = T.apply_to(atom_positions)\n move_this.xyz_with_ports = atom_positions\n\n if add_bond:\n if isinstance(from_positions, Port) and isinstance(to_positions, Port):\n if not from_positions.anchor or not to_positions.anchor:\n warn(\"Attempting to form bond from port that has no anchor\")\n else:\n from_positions.anchor.parent.add_bond(\n (from_positions.anchor, to_positions.anchor)\n )\n to_positions.anchor.parent.add_bond(\n (from_positions.anchor, to_positions.anchor)\n )\n from_positions.anchor.parent.remove(from_positions)\n to_positions.anchor.parent.remove(to_positions)\n\n\nclass CoordinateTransform(object):\n \"\"\"Coordinate transforms.\"\"\"\n\n def __init__(self, T=None):\n if T is None:\n T = np.eye(4)\n\n self.T = T\n self.Tinv = inv(T)\n\n def apply_to(self, A):\n \"\"\"Apply the coordinate transformation to points in A.\"\"\"\n if A.ndim == 1:\n A = np.expand_dims(A, axis=0)\n rows, cols = A.shape\n A_new = np.hstack([A, np.ones((rows, 1))])\n\n A_new = np.transpose(self.T.dot(np.transpose(A_new)))\n return A_new[:, 0:cols]\n\n\nclass Translation(CoordinateTransform):\n \"\"\"Cartesian translation.\"\"\"\n\n def __init__(self, P):\n T = np.eye(4)\n T[0, 3] = P[0]\n T[1, 3] = P[1]\n T[2, 3] = P[2]\n super(Translation, self).__init__(T)\n\n\nclass RotationAroundZ(CoordinateTransform):\n \"\"\"Rotation around the z-axis.\"\"\"\n\n def __init__(self, theta):\n T = np.eye(4)\n T[0, 0] = np.cos(theta)\n T[0, 1] = -np.sin(theta)\n T[1, 0] = np.sin(theta)\n T[1, 1] = np.cos(theta)\n super(RotationAroundZ, self).__init__(T)\n\n\nclass RotationAroundY(CoordinateTransform):\n \"\"\"Rotation around the y-axis.\"\"\"\n\n def __init__(self, theta):\n 
T = np.eye(4)\n T[0, 0] = np.cos(theta)\n T[0, 2] = np.sin(theta)\n T[2, 0] = -np.sin(theta)\n T[2, 2] = np.cos(theta)\n super(RotationAroundY, self).__init__(T)\n\n\nclass RotationAroundX(CoordinateTransform):\n \"\"\"Rotation around the x-axis.\"\"\"\n\n def __init__(self, theta):\n T = np.eye(4)\n T[1, 1] = np.cos(theta)\n T[1, 2] = -np.sin(theta)\n T[2, 1] = np.sin(theta)\n T[2, 2] = np.cos(theta)\n super(RotationAroundX, self).__init__(T)\n\n\nclass Rotation(CoordinateTransform):\n \"\"\"Rotation around vector by angle theta.\"\"\"\n\n def __init__(self, theta, around):\n assert around.size == 3\n\n T = np.eye(4)\n\n s = np.sin(theta)\n c = np.cos(theta)\n t = 1 - c\n\n n = around / norm(around)\n\n x = n[0]\n y = n[1]\n z = n[2]\n m = np.array(\n [\n [t * x * x + c, t * x * y - s * z, t * x * z + s * y],\n [t * x * y + s * z, t * y * y + c, t * y * z - s * x],\n [t * x * z - s * y, t * y * z + s * x, t * z * z + c],\n ]\n )\n T[0:3, 0:3] = m\n super(Rotation, self).__init__(T)\n\n\nclass ChangeOfBasis(CoordinateTransform):\n \"\"\"Convert the basis of coordinates to another basis.\"\"\"\n\n def __init__(self, basis, origin=None):\n assert np.shape(basis) == (3, 3)\n if origin is None:\n origin = np.array([0.0, 0.0, 0.0])\n\n T = np.eye(4)\n\n T[0:3, 0:3] = basis\n T = inv(T)\n\n T[0:3, 3:4] = -np.array([origin]).transpose()\n super(ChangeOfBasis, self).__init__(T)\n\n\nclass AxisTransform(CoordinateTransform):\n \"\"\"Axis transform.\"\"\"\n\n def __init__(\n self, new_origin=None, point_on_x_axis=None, point_on_xy_plane=None\n ):\n if new_origin is None:\n new_origin = np.array([0.0, 0.0, 0.0])\n if point_on_x_axis is None:\n point_on_x_axis = np.array([1.0, 0.0, 0.0])\n if point_on_xy_plane is None:\n point_on_xy_plane = np.array([1.0, 1.0, 0.0])\n # Change the basis such that p1 is the origin, p2 is on the x axis and\n # p3 is in the xy plane.\n p1 = new_origin\n p2 = point_on_x_axis # positive x axis\n p3 = point_on_xy_plane # positive y part of the x axis\n\n # The direction vector of our new x axis.\n newx = unit_vector(p2 - p1)\n p3_u = unit_vector(p3 - p1)\n newz = unit_vector(np.cross(newx, p3_u))\n newy = np.cross(newz, newx)\n\n # Translation that moves new_origin to the origin.\n T_tr = np.eye(4)\n T_tr[0:3, 3:4] = -np.array([p1]).transpose()\n\n # Rotation that moves newx to the x axis, newy to the y axis, newz to\n # the z axis.\n B = np.eye(4)\n B[0:3, 0:3] = np.vstack((newx, newy, newz))\n\n # The concatentaion of translation and rotation.\n B_tr = np.dot(B, T_tr)\n\n super(AxisTransform, self).__init__(B_tr)\n\n\nclass RigidTransform(CoordinateTransform):\n \"\"\"Computes the rigid transformation that maps points A to points B.\n\n See http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.173.2196&rep=\n rep1&type=pdf\n\n Parameters\n ----------\n A : np.ndarray, shape=(n, 3), dtype=float\n Points in source coordinate system.\n B : np.ndarray, shape=(n, 3), dtype=float\n Points in destination coordinate system.\n \"\"\"\n\n def __init__(self, A, B):\n rows, _ = np.shape(A)\n centroid_A = np.mean(A, axis=0)\n centroid_B = np.mean(B, axis=0)\n centroid_A.shape = (1, 3)\n centroid_B.shape = (1, 3)\n\n H = np.zeros((3, 3), dtype=float)\n\n for i in range(rows):\n H = H + np.transpose(A[i, :] - centroid_A).dot(\n (B[i, :] - centroid_B)\n )\n\n U, _, V = svd(H)\n V = np.transpose(V)\n R = V.dot(np.transpose(U))\n\n C_A = np.eye(3)\n C_A = np.vstack(\n [\n np.hstack([C_A, np.transpose(centroid_A) * -1.0]),\n np.array([0, 0, 0, 1]),\n ]\n )\n\n R_new = np.vstack(\n 
[np.hstack([R, np.array([[0], [0], [0]])]), np.array([0, 0, 0, 1])]\n )\n\n C_B = np.eye(3)\n C_B = np.vstack(\n [np.hstack([C_B, np.transpose(centroid_B)]), np.array([0, 0, 0, 1])]\n )\n\n T = C_B.dot(R_new).dot(C_A)\n\n super(RigidTransform, self).__init__(T)\n\n\ndef unit_vector(v):\n \"\"\"Return the unit vector of the vector.\"\"\"\n return v / norm(v)\n\n\ndef angle(u, v, w=None):\n \"\"\"Return the angle in radians between two vectors.\"\"\"\n if w is not None:\n u = u - v\n v = w - v\n c = np.dot(u, v) / norm(u) / norm(v)\n return np.arccos(np.clip(c, -1, 1))\n\n\ndef _create_equivalence_transform(equiv):\n \"\"\"Compute an equivalence transformation.\n\n Transforms this compound to another compound's coordinate system.\n\n Parameters\n ----------\n equiv : np.ndarray, shape=(n, 3), dtype=float\n Array of equivalent points.\n\n Returns\n -------\n T : CoordinateTransform\n Transform that maps this point cloud to the other point cloud's\n coordinates system.\n \"\"\"\n from mbuild.compound import Compound\n\n self_points = np.array([])\n self_points.shape = (0, 3)\n other_points = np.array([])\n other_points.shape = (0, 3)\n\n for pair in equiv:\n if not isinstance(pair, tuple) or len(pair) != 2:\n raise ValueError(\"Equivalence pair not a 2-tuple\")\n if not (\n isinstance(pair[0], Compound) and isinstance(pair[1], Compound)\n ):\n raise ValueError(\n \"Equivalence pair type mismatch: pair[0] is a {0} \"\n \"and pair[1] is a {1}\".format(type(pair[0]), type(pair[1]))\n )\n\n if not pair[0].children:\n self_points = np.vstack([self_points, pair[0].pos])\n other_points = np.vstack([other_points, pair[1].pos])\n else:\n for atom0 in pair[0]._particles(include_ports=True):\n self_points = np.vstack([self_points, atom0.pos])\n for atom1 in pair[1]._particles(include_ports=True):\n other_points = np.vstack([other_points, atom1.pos])\n T = RigidTransform(self_points, other_points)\n return T\n\n\ndef equivalence_transform(\n compound, from_positions, to_positions, add_bond=True\n):\n \"\"\"Compute an affine transformation.\n\n Maps the from_positions to the respective to_positions, and applies this\n transformation to the compound.\n\n Parameters\n ----------\n compound : mb.Compound\n The Compound to be transformed.\n from_positions : np.ndarray, shape=(n, 3), dtype=float\n Original positions.\n to_positions : np.ndarray, shape=(n, 3), dtype=float\n New positions.\n \"\"\"\n warn(\n \"The `equivalence_transform` function is being phased out in favor of\"\n \" `force_overlap`.\",\n DeprecationWarning,\n )\n from mbuild.port import Port\n\n T = None\n if isinstance(from_positions, (list, tuple)) and isinstance(\n to_positions, (list, tuple)\n ):\n equivalence_pairs = zip(from_positions, to_positions)\n elif isinstance(from_positions, Port) and isinstance(to_positions, Port):\n equivalence_pairs, T = _choose_correct_port(\n from_positions, to_positions\n )\n from_positions.used = True\n to_positions.used = True\n else:\n equivalence_pairs = [(from_positions, to_positions)]\n\n if not T:\n T = _create_equivalence_transform(equivalence_pairs)\n atom_positions = compound.xyz_with_ports\n atom_positions = T.apply_to(atom_positions)\n compound.xyz_with_ports = atom_positions\n\n if add_bond:\n if isinstance(from_positions, Port) and isinstance(to_positions, Port):\n if not from_positions.anchor or not to_positions.anchor:\n warn(\"Attempting to form bond from port that has no anchor\")\n else:\n from_positions.anchor.parent.add_bond(\n (from_positions.anchor, to_positions.anchor)\n )\n 
to_positions.anchor.parent.add_bond(\n (from_positions.anchor, to_positions.anchor)\n )\n\n\ndef _choose_correct_port(from_port, to_port):\n \"\"\"Chooses the direction when using an equivalence transform on two Ports.\n\n Each Port object actually contains 2 sets of 4 atoms, either of which can be\n used to make a connection with an equivalence transform. This function\n chooses the set of 4 atoms that makes the anchor atoms not overlap which is\n the intended behavior for most use-cases.\n\n Parameters\n ----------\n from_port : mb.Port\n to_port : mb.Port\n\n Returns\n -------\n equivalence_pairs : tuple of Ports, shape=(2,)\n Technically, a tuple of the Ports' sub-Compounds ('up' or 'down') that\n are used to make the correct connection between components.\n \"\"\"\n # First we try matching the two 'up' ports.\n T1 = _create_equivalence_transform([(from_port[\"up\"], to_port[\"up\"])])\n new_position = T1.apply_to(np.array(from_port.anchor.pos, ndmin=2))\n\n dist_between_anchors_up_up = norm(new_position[0] - to_port.anchor.pos)\n\n # Then matching a 'down' with an 'up' port.\n T2 = _create_equivalence_transform([(from_port[\"down\"], to_port[\"up\"])])\n new_position = T2.apply_to(np.array(from_port.anchor.pos, ndmin=2))\n\n # Determine which transform places the anchors further away from each other.\n dist_between_anchors_down_up = norm(new_position[0] - to_port.anchor.pos)\n difference_between_distances = (\n dist_between_anchors_down_up - dist_between_anchors_up_up\n )\n\n if difference_between_distances > 0:\n correct_port = from_port[\"down\"]\n T = T2\n else:\n correct_port = from_port[\"up\"]\n T = T1\n return [(correct_port, to_port[\"up\"])], T\n\n\ndef translate(compound, pos):\n \"\"\"Translate a compound by a vector.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being translated.\n pos : np.ndarray, shape=(3,), dtype=float\n The vector to translate the compound by.\n \"\"\"\n raise RemovedFuncError(\n \"translate()\", \"Compound.translate()\", \"0.7.0\", \"0.11.0\"\n )\n\n\ndef translate_to(compound, pos):\n \"\"\"Translate a compound to a coordinate.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being translated.\n pos : np.ndarray, shape=(3,), dtype=float\n The coordinate to translate the compound to.\n \"\"\"\n raise RemovedFuncError(\n \"translate_to()\", \"Compound.translate_to()\", \"0.7.0\", \"0.11.0\"\n )\n\n\ndef _translate(coordinates, by):\n \"\"\"Translate a set of coordinates by a vector.\n\n Parameters\n ----------\n coordinates : np.ndarray, shape=(n,3), dtype=float\n The coordinates being translated.\n by : np.ndarray, shape=(3,), dtype=float\n The vector to translate the coordinates by.\n \"\"\"\n return Translation(by).apply_to(coordinates)\n\n\ndef _translate_to(coordinates, to):\n \"\"\"Translate a set of coordinates to a location.\n\n Parameters\n ----------\n coordinates : np.ndarray, shape=(n,3), dtype=float\n The coordinates being translated.\n to : np.ndarray, shape=(3,), dtype=float\n The new average position of the coordinates.\n \"\"\"\n coordinates -= np.mean(coordinates, axis=0)\n return Translation(to).apply_to(coordinates)\n\n\ndef _rotate(coordinates, theta, around):\n \"\"\"Rotate a set of coordinates around an arbitrary vector.\n\n Parameters\n ----------\n coordinates : np.ndarray, shape=(n,3), dtype=float\n The coordinates being rotated.\n theta : float\n The angle by which to rotate the coordinates, in radians.\n around : np.ndarray, shape=(3,), dtype=float\n The vector about which to rotate 
the coordinates.\n \"\"\"\n around = np.asarray(around).reshape(3)\n if np.array_equal(around, np.zeros(3)):\n raise ValueError(\"Cannot rotate around a zero vector\")\n return Rotation(theta, around).apply_to(coordinates)\n\n\ndef rotate(compound, theta, around):\n \"\"\"Rotate a compound around an arbitrary vector.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound, in radians.\n around : np.ndarray, shape=(3,), dtype=float\n The vector about which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\"rotate()\", \"Compound.rotate()\", \"0.7.0\", \"0.11.0\")\n\n\ndef rotate_around_x(compound, theta):\n \"\"\"Rotate a compound around the x axis.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\n \"rotate_around_x()\", \"Compound.rotate_around_x()\", \"0.7.0\", \"0.11.0\"\n )\n\n\ndef rotate_around_y(compound, theta):\n \"\"\"Rotate a compound around the y axis.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\n \"rotate_around_y()\", \"Compound.rotate_around_y()\", \"0.7.0\", \"0.11.0\"\n )\n\n\ndef rotate_around_z(compound, theta):\n \"\"\"Rotate a compound around the z axis.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\n \"rotate_around_z()\", \"Compound.rotate_around_z()\", \"0.7.0\", \"0.11.0\"\n )\n\n\ndef spin(compound, theta, around):\n \"\"\"Rotate a compound in place around an arbitrary vector.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound, in radians.\n around : np.ndarray, shape=(3,), dtype=float\n The axis about which to spin the compound.\n \"\"\"\n raise RemovedFuncError(\"spin()\", \"Compound.spin()\", \"0.7.0\", \"0.11.0\")\n\n\ndef _spin(coordinates, theta, around):\n \"\"\"Rotate a set of coordinates in place around an arbitrary vector.\n\n Parameters\n ----------\n coordinates : np.ndarray, shape=(n,3), dtype=float\n The coordinates being spun.\n theta : float\n The angle by which to spin the coordinates, in radians.\n around : np.ndarray, shape=(3,), dtype=float\n The axis about which to spin the coordinates.\n \"\"\"\n around = np.asarray(around).reshape(3)\n if np.array_equal(around, np.zeros(3)):\n raise ValueError(\"Cannot spin around a zero vector\")\n center_pos = np.mean(coordinates, axis=0)\n coordinates -= center_pos\n coordinates = _rotate(coordinates, theta, around)\n coordinates += center_pos\n return coordinates\n\n\ndef spin_x(compound, theta):\n \"\"\"Rotate a compound in place around the x axis.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\"spin_x()\", \"Compound.spin_x()\", \"0.7.0\", \"0.11.0\")\n\n\ndef spin_y(compound, theta):\n \"\"\"Rotate a compound in place around the y axis.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\"spin_y()\", \"Compound.spin_y()\", \"0.7.0\", \"0.11.0\")\n\n\ndef spin_z(compound, 
theta):\n \"\"\"Rotate a compound in place around the z axis.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound being rotated.\n theta : float\n The angle by which to rotate the compound.\n \"\"\"\n raise RemovedFuncError(\"spin_z()\", \"Compound.spin_z()\", \"0.7.0\", \"0.11.0\")\n\n\ndef x_axis_transform(\n compound, new_origin=None, point_on_x_axis=None, point_on_xy_plane=None\n):\n \"\"\"Move a compound such that the x-axis lies on specified points.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound to move.\n new_origin : mb.Compound or list-like of size 3, default=[0.0, 0.0, 0.0]\n Where to place the new origin of the coordinate system.\n point_on_x_axis : mb.Compound or list-like of size 3, default=[1, 0, 0]\n A point on the new x-axis.\n point_on_xy_plane : mb.Compound or list-like of size 3, default=[1, 0, 0]\n A point on the new xy-plane.\n \"\"\"\n import mbuild as mb\n\n if new_origin is None:\n new_origin = np.array([0, 0, 0])\n elif isinstance(new_origin, mb.Compound):\n new_origin = new_origin.pos\n elif isinstance(new_origin, (tuple, list, np.ndarray)):\n new_origin = np.asarray(new_origin)\n else:\n raise TypeError(\n \"x_axis_transform, y_axis_transform, and z_axis_transform only \"\n \"accept mb.Compounds, list-like of length 3 or None for the \"\n f\"new_origin parameter. User passed type: {type(new_origin)}.\"\n )\n if point_on_x_axis is None:\n point_on_x_axis = np.array([1.0, 0.0, 0.0])\n elif isinstance(point_on_x_axis, mb.Compound):\n point_on_x_axis = point_on_x_axis.pos\n elif isinstance(point_on_x_axis, (list, tuple, np.ndarray)):\n point_on_x_axis = np.asarray(point_on_x_axis)\n else:\n raise TypeError(\n \"x_axis_transform, y_axis_transform, and z_axis_transform only \"\n \"accept mb.Compounds, list-like of size 3, or None for the \"\n \"point_on_x_axis parameter. User passed type: \"\n \"{}.\".format(type(point_on_x_axis))\n )\n if point_on_xy_plane is None:\n point_on_xy_plane = np.array([1.0, 1.0, 0.0])\n elif isinstance(point_on_xy_plane, mb.Compound):\n point_on_xy_plane = point_on_xy_plane.pos\n elif isinstance(point_on_xy_plane, (list, tuple, np.ndarray)):\n point_on_xy_plane = np.asarray(point_on_xy_plane)\n else:\n raise TypeError(\n \"x_axis_transform, y_axis_transform, and z_axis_transform only \"\n \"accept mb.Compounds, list-like of size 3, or None for the \"\n \"point_on_xy_plane parameter. 
User passed type: \"\n \"{}.\".format(type(point_on_xy_plane))\n )\n\n atom_positions = compound.xyz_with_ports\n transform = AxisTransform(\n new_origin=new_origin,\n point_on_x_axis=point_on_x_axis,\n point_on_xy_plane=point_on_xy_plane,\n )\n atom_positions = transform.apply_to(atom_positions)\n compound.xyz_with_ports = atom_positions\n\n\ndef y_axis_transform(\n compound, new_origin=None, point_on_y_axis=None, point_on_xy_plane=None\n):\n \"\"\"Move a compound such that the y-axis lies on specified points.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound to move.\n new_origin : mb.Compound or like-like of size 3, default=[0, 0, 0]\n Where to place the new origin of the coordinate system.\n point_on_y_axis : mb.Compound or list-like of size 3, default=[0, 1, 0]\n A point on the new y-axis.\n point_on_xy_plane : mb.Compound or list-like of size 3, default=[0, 1, 0]\n A point on the new xy-plane.\n \"\"\"\n x_axis_transform(\n compound,\n new_origin=new_origin,\n point_on_x_axis=point_on_y_axis,\n point_on_xy_plane=point_on_xy_plane,\n )\n rotate_around_z(compound, np.pi / 2)\n\n\ndef z_axis_transform(\n compound, new_origin=None, point_on_z_axis=None, point_on_zx_plane=None\n):\n \"\"\"Move a compound such that the z-axis lies on specified points.\n\n Parameters\n ----------\n compound : mb.Compound\n The compound to move.\n new_origin : mb.Compound or list-like of size 3, default=[0, 0, 0]\n Where to place the new origin of the coordinate system.\n point_on_z_axis : mb.Compound or list-like of size 3, default=[0, 0, 1]\n A point on the new z-axis.\n point_on_zx_plane : mb.Compound or list-like of size 3, default=[0, 0, 1]\n A point on the new xz-plane.\n \"\"\"\n x_axis_transform(\n compound,\n new_origin=new_origin,\n point_on_x_axis=point_on_z_axis,\n point_on_xy_plane=point_on_zx_plane,\n )\n rotate_around_y(compound, np.pi * 3 / 2)\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.sin", "numpy.dot", "numpy.zeros", "numpy.clip", "numpy.asarray", "numpy.ones", "numpy.mean", "numpy.eye", "numpy.shape", "numpy.vstack", "numpy.transpose", "numpy.cos", "numpy.linalg.svd", "numpy.expand_dims", "numpy.linalg.inv", "numpy.cross" ] ]
CompassMentis/practical_python_in_10_lines
[ "4821d5f813f99abe41bbe4fcac558114333f93eb" ]
[ "read_csv_and_plot/read_and_plot.py" ]
[ "# Data from https://www.kaggle.com/crawford/80-cereals/version/2\nimport pandas, matplotlib\ndata = pandas.read_csv('http://www.compassmentis.com/wp-content/uploads/2019/04/cereal.csv')\ndata = data.set_index('name')\ndata = data.calories.sort_values()[-10:]\nax = data.plot(kind='barh')\nax.set_xlabel('Calories per serving')\nax.set_ylabel('Cereal')\nax.set_title('Top 10 cereals by calories')\nmatplotlib.pyplot.subplots_adjust(left=0.25)\nmatplotlib.pyplot.show()\n" ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.subplots_adjust" ] ]
gaog94/GDAN_QC_CopyNumber
[ "668089f4122bfd1df51954977183bf0615a21f1f" ]
[ "scripts/AnalysisCode.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 22 17:28:54 2018\n\n@author: galengao\n\nThis is the original analysis code as it exists in the environment where it was writen and initially run.\nPortions and modifications of this script constitute all other .py scripts in this directory.\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n### Helper Function to Load in the Data ###\ndef load_data(coh, thresh=False):\n \"\"\"Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs \n for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)\n one level up from this script.\"\"\"\n if thresh:\n hg38 = '../hg38_gistic/'+coh+'/all_thresholded.by_genes.txt'\n hg19 = '../hg19_gistic/'+coh+'/all_thresholded.by_genes.txt'\n hg38drops = ['Cytoband', 'Locus ID']\n else:\n hg38 = '../hg38_gistic/'+coh+'/all_data_by_genes.txt'\n hg19 = '../hg19_gistic/'+coh+'/all_data_by_genes.txt'\n hg38drops = ['Cytoband', 'Gene ID']\n \n df_hg19 = pd.read_table(hg19, index_col=[0]).drop(['Cytoband', 'Locus ID'], axis=1)\n df_hg38 = pd.read_table(hg38, index_col=[0]).drop(hg38drops, axis=1)\n \n same_samps = list(set(df_hg38.columns) & set(df_hg19.columns))\n same_genes = list(set(df_hg38.index) & set(df_hg19.index))\n print(coh, len(same_genes), len(same_samps))\n return df_hg38[same_samps].T[same_genes], df_hg19[same_samps].T[same_genes]\n \n return df_hg38, df_hg19\n\n\n### Raw Copy Number Values Analysis Code ###\ndef raw_value_comparison(coh, plot=False):\n \"\"\"Return the average differences in raw copy number values between the\n gene-level calls in hg19 and hg38 for each gene for a given tumor type \n 'coh.' If plot=True, plot the genes' differences in a histogram.\"\"\"\n \n # load in the data\n df_38, df_19 = load_data(coh, thresh=False)\n\n # compute average sample-by-sample differences for each gene\n df_s = df_38 - df_19\n avg_diff = {g:np.average(df_s[g]) for g in df_s.columns.get_level_values('Gene Symbol')}\n \n # take note of which genes are altered more than our threshold of 4*std\n results = []\n std = np.std([avg_diff[x] for x in avg_diff])\n for g in avg_diff:\n if avg_diff[g] > 4 * std:\n results.append([coh, 'Pos', g, avg_diff[g]])\n elif avg_diff[g] < -4 * std:\n results.append([coh, 'Neg', g, avg_diff[g]])\n \n if plot:\n plt.hist([avg_diff[x] for x in avg_diff], bins=1000)\n plt.title(coh, fontsize=16)\n plt.xlabel('Average CN Difference Between Alignments', fontsize=14)\n plt.ylabel('Genes', fontsize=14)\n sns.despine()\n plt.savefig('./genehists/'+coh+'_genehist.pdf')\n plt.savefig('./genehists/'+coh+'_genehist.png')\n plt.clf()\n \n return results\n\ndef sequential_cohort_test_raw_values(cohs, plot=False):\n \"\"\"Sequentially compare raw gene-level calls for the given tumor types.\"\"\"\n c_results = []\n for coh in cohs: # perform raw value comparison for each cohort\n c_results += raw_value_comparison(coh, plot=plot)\n \n # compile results together\n df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference'])\n gcount = Counter(df_r['Gene']) \n pos_gcount = Counter(df_r[df_r['Direction']=='Pos']['Gene'])\n neg_gcount = Counter(df_r[df_r['Direction']=='Neg']['Gene'])\n df = pd.DataFrame([gcount[x] for x in gcount], index=gcount.keys(), columns=['Count'])\n df['Count_pos'] = [pos_gcount[x] if x in pos_gcount else 0 for x in gcount]\n df['Count_neg'] = [neg_gcount[x] if x in neg_gcount else 0 for x in 
gcount]\n\n if plot: # write output\n plt.plot(np.sort([gcount[x] for x in gcount])[::-1], 'b-')\n plt.xlabel('Gene by Rank', fontsize=16)\n plt.ylabel('Number of Occurences', fontsize=16)\n sns.despine()\n plt.savefig('GeneDevianceDropoff.pdf')\n plt.savefig('GeneDevianceDropoff.png')\n df_r.to_csv('./genehists/LargestDifferences.tsv', sep='\\t', index=False)\n df.to_csv('./genehists/LargestDifferenceGenes_ByCount.tsv', sep='\\t', index=True)\n\n\n### Thresholded Copy Number Values Analysis Code ###\ndef thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'):\n \"\"\"Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either\n hamming (number of discrepancies in each gene) or manhattan (sum of \n 'distances' between each gene so a 1 to -1 change is 2). Returns a vector\n of each gene's metric.\"\"\"\n out = []\n for i, g in enumerate(df_hg38.columns):\n if metric == 'hamming':\n out.append(sum(df_hg19[g] != df_hg38[g])/len(df_hg19))\n elif metric == 'manhattan':\n out.append(sum(abs((df_hg19[g] - df_hg38[g]))))\n return pd.DataFrame(out, index=df_hg38.columns)\n\ndef sequential_cohort_test_thresholded_values(cohs):\n \"\"\"Compare thresholded gene-level calls for input tumor types.\"\"\"\n df_out = pd.DataFrame([])\n for coh in cohs:\n df_hg38, df_hg19 = load_data(coh, thresh=True)\n df_results = thresholded_value_comparison(df_hg38, df_hg19, metric='hamming')\n df_results.columns = [coh]\n df_out = df_out.join(df_results, how='outer')\n \n df_out.to_csv('../readout/DiscordantSampleFractions_perGene_perCohort_thresholdedCalls.tsv', sep='\\t')\n return df_out\n\ndef plot_fractionDisagreements_perCohort(cohs):\n \"\"\"Visualize fraction of samples with disagreements in thresholded copy \n number for each gene. Run sequential_cohort_test_thresholded_values()\n before this function.\"\"\"\n # Read in data written by sequential_cohort_test_thresholded_values\n df = sequential_cohort_test_thresholded_values(cohs)\n df_box = pd.melt(df.reset_index(), id_vars='Gene Symbol').set_index('Gene Symbol')\n df_box.columns = ['Tumor Type', 'Fraction of Samples with Disagreements']\n dft = df.T\n dft['med_degenerates'] = df.median(axis=0)\n boxorder = dft.sort_values('med_degenerates', axis=0).index\n\n # read in copy number burden data (requires aneuploidy RecurrentSCNA calls)\n df_cn = pd.read_table('../../PanCanAneuploidy/bin/PANCAN_armonly_ASandpuritycalls_092817_xcellcalls.txt', index_col=0, usecols=[0,1,2,16])\n coh_medians = [int(np.median(df_cn[df_cn['Type']==x]['RecurrentSCNA'].dropna())) for x in df_cn.Type.unique()]\n df_med = pd.DataFrame(coh_medians, index=df_cn.Type.unique(), columns=['med'])\n\n # plot it out\n pal = sns.color_palette('Blues', max(df_med.med)-min(df_med.med)+1)\n my_pal = {c: pal[df_med.at[c,'med']] for c in df_med.index}\n g = sns.boxplot(x=df_box.columns[0], y=df_box.columns[1], data=df_box, \\\n order=boxorder, fliersize=1, palette=my_pal, linewidth=0.5)\n newxticks = [x+' ('+str(df_med.loc[x]['med'])+')' for x in boxorder]\n g.set_xticklabels(newxticks, rotation=90)\n plt.ylabel('Fraction with Disagreements', fontsize=12)\n sns.despine()\n plt.gcf().set_size_inches((8,3))\n plt.savefig('2_thresholdedCN_boxplot.pdf', bbox_inches='tight')\n plt.savefig('2_thresholdedCN_boxplot.png', bbox_inches='tight')\n\n\n### Significantly Altered Focal Peaks Analysis Code ###\ndef peakgene_overlaps(combos, same_genes, normalize=False):\n \"\"\"Count the number of genes that overlap when examing the hg19 & hg38 \n GISTIC runs' focal peaks.\"\"\"\n venn_numbers, 
gsu, gsi = [], [], []\n for coh, ad in combos:\n print(coh)\n # put all significant genes in a list\n fnames = ['../hg19_gistic/'+coh+ad+'genes.conf_99.txt', '../hg38_gistic/'+coh+ad+'genes.txt']\n df38 = pd.read_table(fnames[0], index_col=0).drop(['q value','residual q value','wide peak boundaries'])\n df19 = pd.read_table(fnames[1], index_col=0).drop(['q value','residual q value','wide peak boundaries'])\n g_38 = set([x for col in df38.columns for x in df38[col].dropna()]) & same_genes\n g_19 = set([x for col in df19.columns for x in df19[col].dropna()]) & same_genes\n intersect, union = g_38 & g_19, g_38 | g_19\n gsu.append(union)\n gsi.append(intersect)\n if normalize:\n venn_numbers.append([len(g_19-intersect)/len(union),len(intersect)/len(union), len(g_38-intersect)/len(union)])\n else:\n venn_numbers.append([len(g_19-intersect),len(intersect), len(g_38-intersect)])\n\n index = [x[0]+'_'+x[1][1:-1] for x in combos]\n return pd.DataFrame(venn_numbers, index=index, columns=['hg19 only','Intersection','hg38 only'])\n\ndef plot_peakgene_overlaps(combos, same_genes, write=False):\n \"\"\"Visualize the results of peakgene_overlaps function in bargraph form.\"\"\"\n df_out = peakgene_overlaps(combos, same_genes, normalize=False)\n df_d, df_a = df_out[df_out.index.str.split('_').str[-1] == 'del'], \\\n df_out[df_out.index.str.split('_').str[-1] == 'amp']\n for x in zip((df_d, df_a), ('Deletion Peak Memberships', 'Amplification Peak Memberships')):\n x[0].index = x[0].index.str.split('_').str[0]\n x[0].plot.bar(stacked=True, color=['#af8dc3', '#f7f7f7', '#7fbf7b'], linewidth=1, edgecolor='k')\n plt.gca().set_xticklabels(x[0].index, rotation=90)\n plt.title(x[1], fontsize=18)\n plt.gcf().set_size_inches(10,8)\n sns.despine()\n plt.savefig(x[1].split(' ')[0]+'_peakMemberships.pdf', bbox_inches='tight')\n plt.savefig(x[1].split(' ')[0]+'_peakMemberships.png', bbox_inches='tight')\n plt.clf()\n if write:\n df_out.to_csv('VennStats_focalpeaks.tsv', sep='\\t')\n\n\n### Conservation of Significant Copy Number Driver Events Analysis Code ###\ndef documented_driver_differences():\n \"\"\"Scan and analyze manually currated DocumentedDriverDifferences.txt file.\n Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks\n 2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and\n 3) Number of drivers present in hg38 peaks but absent from hg19 peaks.\"\"\"\n # read in table of documented driver differences\n # (this table needs a manual curation to be generated)\n df = pd.read_table('../DocumentedDriverDifferences.txt', index_col=0)\n # process entries to have just yes/no calls (without parens & brackets)\n df['hg19?'] = df['present in hg19?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']')\n df['hg38?'] = df['present in hg38?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']')\n\n # number of documented drivers that match in hg19 & hg38\n matches = sum(df['hg19?'] == df['hg38?'])\n # number of documented drivers that are in hg19 but not hg38 & vice versa\n lostdrivers = len(df[(df['hg19?'] == 'yes') & (df['hg38?'] == 'no')])\n recovereddrivers = len(df[(df['hg19?'] == 'no') & (df['hg38?'] == 'yes')])\n \n # Return in order\n return matches, lostdrivers, recovereddrivers\n\n\n\n# set up the tumor types we want to analyze\ncohs = ['ACC','BLCA','CESC','CHOL','COAD','DLBC','ESCA','GBM', 'HNSC','KICH',\\\n 'KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','PAAD','PCPG',\\\n 'PRAD','READ','SARC','SKCM','STAD','TGCT','THCA','THYM','UCEC','UCS','UVM'] 
\nads = ['/amp_', '/del_']\ncombos = [(c, a) for c in cohs for a in ads]\n\n# grab list of genes present in both hg19 & hg38\ndf_hg38 = pd.read_table('../hg38_gistic/CHOL/all_thresholded.by_genes.txt', index_col=0, usecols=[0,1])\ndf_hg19 = pd.read_table('../hg19_gistic/CHOL/all_thresholded.by_genes.txt', index_col=0, usecols=[0,1])\nsame_genes = set(df_hg38.index) & set(df_hg19.index)\n\n\n# action lines -- run the analysis\nsequential_cohort_test_raw_values(cohs, plot=True)\nplot_fractionDisagreements_perCohort(cohs)\nplot_peakgene_overlaps(combos, same_genes, write=True)\nprint(documented_driver_differences())\n" ]
[ [ "pandas.read_table", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.title", "matplotlib.pyplot.xlabel", "numpy.std", "matplotlib.pyplot.hist", "matplotlib.pyplot.gcf", "matplotlib.pyplot.ylabel", "numpy.sort", "numpy.average", "matplotlib.pyplot.clf", "matplotlib.pyplot.gca" ] ]
shanky97/tensorflow
[ "0f2192d6439bf6826d71f2ca46dbe44d585883af" ]
[ "tensorflow/python/ops/math_ops.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic arithmetic operators.\n\nSee the [python/math_ops](python/math_ops) guide.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gen_sparse_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_math_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Aliases for some automatically-generated names.\nlinspace = gen_math_ops.lin_space\nnextafter = gen_math_ops.next_after\n\narg_max = deprecation.deprecated(None, \"Use `tf.math.argmax` instead\")(arg_max) # pylint: disable=used-before-assignment\narg_min = deprecation.deprecated(None, \"Use `tf.math.argmin` instead\")(arg_min) # pylint: disable=used-before-assignment\ntf_export(v1=[\"arg_max\"])(arg_max)\ntf_export(v1=[\"arg_min\"])(arg_min)\n\n# This is set by resource_variable_ops.py. 
It is included in this way since\n# there is a circular dependency between math_ops and resource_variable_ops\n_resource_variable_type = None\n\n\ndef _set_doc(doc):\n\n def _decorator(func):\n func.__doc__ = doc\n return func\n\n return _decorator\n\n\n# pylint: disable=redefined-builtin\n@tf_export(v1=[\"math.argmax\", \"argmax\"])\n@deprecation.deprecated_args(None, \"Use the `axis` argument instead\",\n \"dimension\")\n@_set_doc(\n gen_math_ops.arg_max.__doc__.replace(\"dimensions\", \"axes\").replace(\n \"dimension\", \"axis\"))\ndef argmax(input,\n axis=None,\n name=None,\n dimension=None,\n output_type=dtypes.int64):\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"dimension\", dimension)\n return argmax_v2(input, axis, output_type, name)\n\n\n@tf_export(\"math.argmax\", \"argmax\", v1=[])\ndef argmax_v2(input,\n axis=None,\n output_type=dtypes.int64,\n name=None):\n \"\"\"Returns the index with the largest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,\n `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n \"\"\"\n if axis is None:\n axis = 0\n return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)\n\n\n@tf_export(v1=[\"math.argmin\", \"argmin\"])\n@deprecation.deprecated_args(None, \"Use the `axis` argument instead\",\n \"dimension\")\n@_set_doc(\n gen_math_ops.arg_min.__doc__.replace(\"dimensions\", \"axes\").replace(\n \"dimension\", \"axis\"))\ndef argmin(input,\n axis=None,\n name=None,\n dimension=None,\n output_type=dtypes.int64):\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"dimension\", dimension)\n return argmin_v2(input, axis, output_type, name)\n\n\n@tf_export(\"math.argmin\", \"argmin\", v1=[])\ndef argmin_v2(input,\n axis=None,\n output_type=dtypes.int64,\n name=None):\n \"\"\"Returns the index with the smallest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,\n `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. 
For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n \"\"\"\n if axis is None:\n axis = 0\n return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)\n\n\n# pylint: enable=redefined-builtin\n\n\n# pylint: disable=anomalous-backslash-in-string,protected-access\n# pylint: disable=g-docstring-has-escape\n@tf_export(\"math.abs\", \"abs\")\n@dispatch.add_dispatch_support\ndef abs(x, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Computes the absolute value of a tensor.\n\n Given a tensor `x` of complex numbers, this operation returns a tensor of type\n `float32` or `float64` that is the absolute value of each element in `x`. All\n elements in `x` must be complex numbers of the form \\\\(a + bj\\\\). The\n absolute value is computed as \\\\( \\sqrt{a^2 + b^2}\\\\). For example:\n ```python\n x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n tf.abs(x) # [5.25594902, 6.60492229]\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n `int32`, `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` the same size and type as `x` with absolute\n values.\n Note, for `complex64` or `complex128` input, the returned `Tensor` will be\n of type `float32` or `float64`, respectively.\n \"\"\"\n with ops.name_scope(name, \"Abs\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_complex:\n return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)\n return gen_math_ops._abs(x, name=name)\n# pylint: enable=g-docstring-has-escape\n\n\n# pylint: disable=redefined-builtin\ndef _bucketize(input, boundaries, name=None):\n return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)\n\n\n# pylint: enable=redefined-builtin\n\n\nclass DivideDelegateWithName(object):\n \"\"\"Use Python2/Python3 division delegation to implement divide for tensors.\"\"\"\n\n def __init__(self, x, name):\n \"\"\"Construct DivideDelegateWithName.\n\n Args:\n x: Tensor to use as left operand in operator overloads\n name: The name that is preferred for the op created.\n \"\"\"\n self.x = x\n self.name = name\n\n def __truediv__(self, y):\n return _truediv_python3(self.x, y, self.name)\n\n def __floordiv__(self, y):\n return floordiv(self.x, y, self.name)\n\n def __div__(self, y):\n return _div_python2(self.x, y, self.name)\n\n\n@tf_export(\"math.divide\", \"divide\")\n@dispatch.add_dispatch_support\ndef divide(x, y, name=None):\n \"\"\"Computes Python style division of `x` by `y`.\"\"\"\n\n if name is not None:\n # Cannot use tensors operator overload, because it has no way to track\n # override names. 
Use a dummy class to track the runtime division behavior\n return DivideDelegateWithName(x, name) / y\n else:\n return x / y\n\n\n@tf_export(\"math.multiply\", \"multiply\")\n@dispatch.add_dispatch_support\ndef multiply(x, y, name=None):\n return gen_math_ops.mul(x, y, name)\n\n\nmultiply.__doc__ = gen_math_ops.mul.__doc__.replace(\"Multiply\", \"`tf.multiply`\")\n\n\n# TODO(aselle): put deprecation in after another round of global code changes\n@deprecation.deprecated(\n \"2016-12-30\",\n \"`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`\")\ndef _mul(x, y, name=None):\n return gen_math_ops.mul(x, y, name)\n\n\n_mul.__doc__ = (\n gen_math_ops.mul.__doc__ + (\"\" if _mul.__doc__ is None else _mul.__doc__))\n\n\n@tf_export(\"math.subtract\", \"subtract\")\n@dispatch.add_dispatch_support\ndef subtract(x, y, name=None):\n return gen_math_ops.sub(x, y, name)\n\n\nsubtract.__doc__ = gen_math_ops.sub.__doc__.replace(\"`Sub`\", \"`tf.subtract`\")\n\n\n# TODO(aselle): put deprecation in after another round of global code changes\n@deprecation.deprecated(\n \"2016-12-30\",\n \"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`\")\ndef _sub(x, y, name=None):\n return gen_math_ops.sub(x, y, name)\n\n\n_sub.__doc__ = (\n gen_math_ops.sub.__doc__ + (\"\" if _sub.__doc__ is None else _sub.__doc__))\n\n\nnegative = gen_math_ops.neg\n\n\n# pylint: disable=g-docstring-has-escape\n@deprecation.deprecated(\n \"2016-12-30\",\n \"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`\")\ndef _neg(x, name=None):\n \"\"\"Computes numerical negative value element-wise.\n\n I.e., \\\\(y = -x\\\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n \"\"\"\n return negative(x, name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\n@tf_export(v1=[\"math.scalar_mul\", \"scalar_mul\"])\ndef scalar_mul(scalar, x, name=None):\n \"\"\"Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\n Intended for use in gradient code which might deal with `IndexedSlices`\n objects, which are easy to multiply by a scalar but more expensive to\n multiply with arbitrary tensors.\n\n Args:\n scalar: A 0-D scalar `Tensor`. 
Must have known shape.\n x: A `Tensor` or `IndexedSlices` to be scaled.\n name: A name for the operation (optional).\n\n Returns:\n `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\n Raises:\n ValueError: if scalar is not a 0-D `scalar`.\n \"\"\"\n scalar = ops.convert_to_tensor(\n scalar, dtype=x.dtype.base_dtype, name=\"scalar\")\n shape = scalar.get_shape()\n if shape.ndims == 0:\n if isinstance(x, ops.IndexedSlices):\n return ops.IndexedSlices(gen_math_ops.mul(scalar, x.values, name),\n x.indices, x.dense_shape)\n else:\n return gen_math_ops.mul(scalar, x, name)\n else:\n raise ValueError(\"Only scalar multiply works, got shape %s\" % shape)\n\n\n@tf_export(\"math.scalar_mul\", \"scalar_mul\", v1=[])\n@_set_doc(scalar_mul.__doc__)\ndef scalar_mul_v2(scalar, x, name=None):\n with ops.name_scope(name, \"scalar_mul\", [x]) as name:\n return scalar_mul(scalar, x, name)\n\n\n@tf_export(\"math.pow\", \"pow\")\n@dispatch.add_dispatch_support\ndef pow(x, y, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n corresponding elements in `x` and `y`. For example:\n\n ```python\n x = tf.constant([[2, 2], [3, 3]])\n y = tf.constant([[8, 16], [2, 3]])\n tf.pow(x, y) # [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"Pow\", [x]) as name:\n return gen_math_ops._pow(x, y, name=name)\n\n\n# pylint: disable=redefined-builtin,redefined-outer-name\n@tf_export(\"dtypes.complex\", \"complex\")\n@dispatch.add_dispatch_support\ndef complex(real, imag, name=None):\n r\"\"\"Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must have the same shape.\n\n For example:\n\n ```python\n real = tf.constant([2.25, 3.25])\n imag = tf.constant([4.75, 5.75])\n tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor`. Must be one of the following types: `float32`,\n `float64`.\n imag: A `Tensor`. 
Must have the same type as `real`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64` or `complex128`.\n \"\"\"\n real = ops.convert_to_tensor(real, name=\"real\")\n imag = ops.convert_to_tensor(imag, name=\"imag\")\n with ops.name_scope(name, \"Complex\", [real, imag]) as name:\n input_types = (real.dtype, imag.dtype)\n if input_types == (dtypes.float64, dtypes.float64):\n Tout = dtypes.complex128\n elif input_types == (dtypes.float32, dtypes.float32):\n Tout = dtypes.complex64\n else:\n raise TypeError(\"real and imag have incorrect types: \"\n \"{} {}\".format(real.dtype.name, imag.dtype.name))\n return gen_math_ops._complex(real, imag, Tout=Tout, name=name)\n\n\n@tf_export(\"math.real\", v1=[\"math.real\", \"real\"])\n@deprecation.deprecated_endpoints(\"real\")\n@dispatch.add_dispatch_support\ndef real(input, name=None):\n r\"\"\"Returns the real part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the real part of each element in `input` considered as a complex number.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.real(x) # [-2.25, 3.25]\n ```\n\n If `input` is already real, it is returned unchanged.\n\n Args:\n input: A `Tensor`. Must have numeric type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Real\", [input]) as name:\n if input.dtype.is_complex:\n real_dtype = input.dtype.real_dtype\n return gen_math_ops.real(input, Tout=real_dtype, name=name)\n else:\n return input\n\n\n@tf_export(\"math.imag\", v1=[\"math.imag\", \"imag\"])\n@deprecation.deprecated_endpoints(\"imag\")\n@dispatch.add_dispatch_support\ndef imag(input, name=None):\n r\"\"\"Returns the imaginary part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the imaginary part of each element in `input` considered as a complex\n number. If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.imag(x) # [4.75, 5.75]\n ```\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Imag\", [input]) as name:\n if input.dtype.is_complex:\n return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)\n\n\n@tf_export(\"math.angle\", v1=[\"math.angle\", \"angle\"])\n@deprecation.deprecated_endpoints(\"angle\")\n@dispatch.add_dispatch_support\ndef angle(input, name=None):\n r\"\"\"Returns the element-wise argument of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the argument of each element in `input` considered as a complex number.\n\n The elements in `input` are considered to be complex numbers of the form\n \\\\(a + bj\\\\), where *a* is the real part and *b* is the imaginary part.\n If `input` is real then *b* is zero by definition.\n\n The argument returned by this function is of the form \\\\(atan2(b, a)\\\\).\n If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```\n # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\n tf.angle(input) ==> [2.0132, 1.056]\n ```\n\n Args:\n input: A `Tensor`. 
Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Angle\", [input]) as name:\n if input.dtype.is_complex:\n return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)\n\n\n# pylint: enable=redefined-outer-name,redefined-builtin\n\n\n@tf_export(\"math.round\", \"round\")\n@dispatch.add_dispatch_support\ndef round(x, name=None): # pylint: disable=redefined-builtin\n \"\"\"Rounds the values of a tensor to the nearest integer, element-wise.\n\n Rounds half to even. Also known as bankers rounding. If you want to round\n according to the current system rounding mode use tf::cint.\n For example:\n\n ```python\n x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])\n tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n \"\"\"\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_integer:\n return x\n else:\n return gen_math_ops.round(x, name=name)\n\n\n@tf_export(\"dtypes.cast\", \"cast\")\n@dispatch.add_dispatch_support\ndef cast(x, dtype, name=None):\n \"\"\"Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.\n\n For example:\n\n ```python\n x = tf.constant([1.8, 2.2], dtype=tf.float32)\n tf.cast(x, tf.int32) # [1, 2], dtype=tf.int32\n ```\n\n The operation supports data types (for `x` and `dtype`) of\n `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,\n `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.\n In case of casting from complex types (`complex64`, `complex128`) to real\n types, only the real part of `x` is returned. In case of casting from real\n types to complex types (`complex64`, `complex128`), the imaginary part of the\n returned value is set to `0`. The handling of complex types here matches the\n behavior of numpy.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could\n be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,\n `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,\n `bfloat16`.\n dtype: The destination type. The list of supported dtypes is the same as\n `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and\n same type as `dtype`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n \"\"\"\n base_type = dtypes.as_dtype(dtype).base_dtype\n if isinstance(x,\n (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:\n return x\n with ops.name_scope(name, \"Cast\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n values_cast = cast(x.values, base_type, name=name)\n x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)\n elif isinstance(x, ops.IndexedSlices):\n values_cast = cast(x.values, base_type, name=name)\n x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)\n else:\n # TODO(josh11b): If x is not already a Tensor, we could return\n # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that\n # allows some conversions that cast() can't do, e.g. 
casting numbers to\n # strings.\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.base_dtype != base_type:\n x = gen_math_ops.cast(x, base_type, name=name)\n if x.dtype.is_complex and base_type.is_floating:\n logging.warn(\"Casting complex to real discards imaginary part.\")\n return x\n\n\n@tf_export(\"dtypes.saturate_cast\", \"saturate_cast\")\n@dispatch.add_dispatch_support\ndef saturate_cast(value, dtype, name=None):\n \"\"\"Performs a safe saturating cast of `value` to `dtype`.\n\n This function casts the input to `dtype` without applying any scaling. If\n there is a danger that values would over or underflow in the cast, this op\n applies the appropriate clamping before the cast.\n\n Args:\n value: A `Tensor`.\n dtype: The desired output `DType`.\n name: A name for the operation (optional).\n\n Returns:\n `value` safely cast to `dtype`.\n \"\"\"\n # When casting to a type with smaller representable range, clamp.\n # Note that this covers casting to unsigned types as well.\n with ops.name_scope(name, \"saturate_cast\", [value]) as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n dtype = dtypes.as_dtype(dtype).base_dtype\n if value.dtype.min < dtype.min:\n value = gen_math_ops.maximum(value,\n ops.convert_to_tensor(\n dtype.min, dtype=value.dtype,\n name=\"min\"))\n if value.dtype.max > dtype.max:\n value = gen_math_ops.minimum(value,\n ops.convert_to_tensor(\n dtype.max, dtype=value.dtype,\n name=\"max\"))\n return cast(value, dtype, name=name)\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_float\"])\ndef to_float(x, name=\"ToFloat\"):\n \"\"\"Casts a tensor to type `float32`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `float32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float32`.\n \"\"\"\n return cast(x, dtypes.float32, name=name)\n\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_double\"])\ndef to_double(x, name=\"ToDouble\"):\n \"\"\"Casts a tensor to type `float64`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `float64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float64`.\n \"\"\"\n return cast(x, dtypes.float64, name=name)\n\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_int32\"])\ndef to_int32(x, name=\"ToInt32\"):\n \"\"\"Casts a tensor to type `int32`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `int32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int32`.\n \"\"\"\n return cast(x, dtypes.int32, name=name)\n\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_int64\"])\ndef to_int64(x, name=\"ToInt64\"):\n \"\"\"Casts a tensor to type `int64`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `int64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int64`.\n \"\"\"\n return cast(x, 
dtypes.int64, name=name)\n\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_bfloat16\"])\ndef to_bfloat16(x, name=\"ToBFloat16\"):\n \"\"\"Casts a tensor to type `bfloat16`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `bfloat16`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `bfloat16`.\n \"\"\"\n return cast(x, dtypes.bfloat16, name=name)\n\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_complex64\"])\ndef to_complex64(x, name=\"ToComplex64\"):\n \"\"\"Casts a tensor to type `complex64`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `complex64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `complex64`.\n \"\"\"\n return cast(x, dtypes.complex64, name=name)\n\n\n@deprecation.deprecated(date=None, instructions=\"Use tf.cast instead.\")\n@tf_export(v1=[\"to_complex128\"])\ndef to_complex128(x, name=\"ToComplex128\"):\n \"\"\"Casts a tensor to type `complex128`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `complex128`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `complex128`.\n \"\"\"\n return cast(x, dtypes.complex128, name=name)\n\n\nops.Tensor._override_operator(\"__neg__\", gen_math_ops.neg)\nops.Tensor._override_operator(\"__abs__\", abs)\n# __invert__ corresponds to the ~ operator. Here we follow the numpy convention\n# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean\n# tensors and will throw a TypeError if used on nonboolean arrays\nops.Tensor._override_operator(\"__invert__\", gen_math_ops.logical_not)\n\n\ndef _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):\n \"\"\"Register operators with different tensor and scalar versions.\n\n If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,\n sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.\n\n Args:\n func: the operator\n op_name: name of the operator being overridden\n clazz_object: class to override for. 
Either `Tensor` or `SparseTensor`.\n \"\"\"\n\n def binary_op_wrapper(x, y):\n with ops.name_scope(None, op_name, [x, y]) as name:\n if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):\n return func(x, y, name=name)\n elif not isinstance(y, sparse_tensor.SparseTensor):\n try:\n y = ops.convert_to_tensor_v2(y, dtype_hint=x.dtype.base_dtype,\n name=\"y\")\n except TypeError:\n # If the RHS is not a tensor, it might be a tensor aware object\n # that can implement the operator with knowledge of itself\n # and the tensor.\n if hasattr(type(y), \"__r%s__\" % op_name):\n return NotImplemented\n else:\n raise\n return func(x, y, name=name)\n\n def binary_op_wrapper_sparse(sp_x, y):\n with ops.name_scope(None, op_name, [sp_x, y]) as name:\n y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name=\"y\")\n return sparse_tensor.SparseTensor(sp_x.indices,\n func(\n sp_x.indices,\n sp_x.values,\n sp_x.dense_shape,\n y,\n name=name), sp_x.dense_shape)\n\n def r_binary_op_wrapper(y, x):\n with ops.name_scope(None, op_name, [x, y]) as name:\n x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name=\"x\")\n return func(x, y, name=name)\n\n # Propagate func.__doc__ to the wrappers\n try:\n doc = func.__doc__\n except AttributeError:\n doc = None\n binary_op_wrapper.__doc__ = doc\n r_binary_op_wrapper.__doc__ = doc\n binary_op_wrapper_sparse.__doc__ = doc\n\n if clazz_object is ops.Tensor:\n clazz_object._override_operator(\"__%s__\" % op_name, binary_op_wrapper)\n del binary_op_wrapper\n clazz_object._override_operator(\"__r%s__\" % op_name, r_binary_op_wrapper)\n del r_binary_op_wrapper\n else:\n clazz_object._override_operator(\"__%s__\" % op_name,\n binary_op_wrapper_sparse)\n del binary_op_wrapper_sparse\n\n\n# Conversion table for __truediv__. None entries mean no conversion required.\n_TRUEDIV_TABLE = {\n dtypes.uint8: dtypes.float32,\n dtypes.int8: dtypes.float32,\n dtypes.uint16: dtypes.float32,\n dtypes.int16: dtypes.float32,\n dtypes.int32: dtypes.float64,\n dtypes.int64: dtypes.float64,\n dtypes.bfloat16: None,\n dtypes.float16: None,\n dtypes.float32: None,\n dtypes.float64: None,\n dtypes.complex64: None,\n dtypes.complex128: None,\n}\n\n\n# NOTE: the support of \"sparse (true)div dense\" is currently not baked in into\n# \"tf.(true_)div()\". 
Until such an API decision is made, the supported usage is\n# to explicitly use the \"/\" operator to invoke either truediv or div.\ndef _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):\n \"\"\"Internal helper function for 'sp_t / dense_t'.\"\"\"\n with ops.name_scope(name, \"truediv\",\n [sp_indices, sp_values, sp_shape, y]) as name:\n sp_values = ops.convert_to_tensor(sp_values, name=\"sp_values\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = sp_values.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n sp_values = cast(sp_values, dtype)\n y = cast(y, dtype)\n return gen_sparse_ops.sparse_dense_cwise_div(\n sp_indices, sp_values, sp_shape, y, name=name)\n\n\ndef _truediv_python3(x, y, name=None):\n with ops.name_scope(name, \"truediv\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n x = cast(x, dtype)\n y = cast(y, dtype)\n return gen_math_ops.real_div(x, y, name=name)\n\n\ndef _div_python2(x, y, name=None):\n \"\"\"Divide two values using Python 2 semantics. Used for Tensor.__div__.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n Returns:\n `x / y` returns the quotient of x and y.\n \"\"\"\n\n with ops.name_scope(name, \"div\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\", dtype=x.dtype.base_dtype)\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n if x_dtype.is_floating or x_dtype.is_complex:\n return gen_math_ops.real_div(x, y, name=name)\n else:\n return gen_math_ops.floor_div(x, y, name=name)\n\n\n@tf_export(\"math.truediv\", \"truediv\")\n@dispatch.add_dispatch_support\ndef truediv(x, y, name=None):\n \"\"\"Divides x / y elementwise (using Python 3 division operator semantics).\n\n NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n division operator semantics.\n\n This function forces Python 3 division operator semantics where all integer\n arguments are cast to floating types first. This op is generated by normal\n `x / y` division in Python 3 and in Python 2.7 with\n `from __future__ import division`. If you want integer division that rounds\n down, use `x // y` or `tf.math.floordiv`.\n\n `x` and `y` must have the same numeric type. If the inputs are floating\n point, the output will have the same type. 
If the inputs are integral, the\n inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n and `int64` (matching the behavior of Numpy).\n\n Args:\n x: `Tensor` numerator of numeric type.\n y: `Tensor` denominator of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` evaluated in floating point.\n\n Raises:\n TypeError: If `x` and `y` have different dtypes.\n \"\"\"\n return _truediv_python3(x, y, name)\n\n\n@deprecation.deprecated(\n date=None,\n instructions=\"Deprecated in favor of operator or tf.math.divide.\")\n@tf_export(v1=[\"div\"])\ndef div(x, y, name=None):\n \"\"\"Divides x / y elementwise (using Python 2 division operator semantics).\n\n NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n division operator semantics.\n\n This function divides `x` and `y`, forcing Python 2.7 semantics. That is,\n if one of `x` or `y` is a float, then the result will be a float.\n Otherwise, the output will be an integer type. Flooring semantics are used\n for integer division.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n Returns:\n `x / y` returns the quotient of x and y.\n \"\"\"\n return _div_python2(x, y, name)\n\n\n@tf_export(\"div_no_nan\")\n@dispatch.add_dispatch_support\ndef div_no_nan(x, y, name=None):\n \"\"\"Computes an unsafe divide which returns 0 if the y is zero.\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`.\n y: A `Tensor` whose dtype is compatible with `x`.\n name: A name for the operation (optional).\n Returns:\n The element-wise value of the x divided by y.\n \"\"\"\n\n with ops.name_scope(name, \"div_no_nan\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\", dtype=x.dtype.base_dtype)\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n return gen_math_ops.div_no_nan(x, y, name=name)\n\n\n# TODO(aselle): This should be removed\nmod = gen_math_ops.floor_mod\n\n\n# TODO(aselle): Deprecate this once all internal functionality uses\n# tf.truncatediv\n@tf_export(\"math.floordiv\", v1=[\"math.floordiv\", \"floordiv\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"floordiv\")\ndef floordiv(x, y, name=None):\n \"\"\"Divides `x / y` elementwise, rounding toward the most negative integer.\n\n The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for\n floating point arguments so that the result is always an integer (though\n possibly an integer represented as floating point). 
This op is generated by\n `x // y` floor division in Python 3 and in Python 2.7 with\n `from __future__ import division`.\n\n `x` and `y` must have the same type, and the result will have the same type\n as well.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` rounded down.\n\n Raises:\n TypeError: If the inputs are complex.\n \"\"\"\n with ops.name_scope(name, \"floordiv\", [x, y]) as name:\n return gen_math_ops.floor_div(x, y, name=name)\n\n\nrealdiv = gen_math_ops.real_div\ntruncatediv = gen_math_ops.truncate_div\n# TODO(aselle): Rename this to floordiv when we can.\nfloor_div = gen_math_ops.floor_div\ntruncatemod = gen_math_ops.truncate_mod\nfloormod = gen_math_ops.floor_mod\n\n\ndef _mul_dispatch(x, y, name=None):\n \"\"\"Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\"\"\"\n is_tensor_y = isinstance(y, ops.Tensor)\n if is_tensor_y:\n return gen_math_ops.mul(x, y, name=name)\n else:\n assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.\n new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,\n y.dense_shape, x, name)\n return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)\n\n\n# NOTE(aselle): When integer division is added for sparse_dense_cwise,\n# div, truediv, and floordiv should be delegated appropriately for\n# Python sematnics, analogous to dense cwise tensor operations.\n_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, \"div\",\n sparse_tensor.SparseTensor)\n_OverrideBinaryOperatorHelper(_sparse_dense_truediv, \"truediv\",\n sparse_tensor.SparseTensor)\n_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, \"mul\",\n sparse_tensor.SparseTensor)\n\n_OverrideBinaryOperatorHelper(gen_math_ops.add, \"add\")\n_OverrideBinaryOperatorHelper(gen_math_ops.sub, \"sub\")\n_OverrideBinaryOperatorHelper(_mul_dispatch, \"mul\")\n_OverrideBinaryOperatorHelper(_div_python2, \"div\")\n_OverrideBinaryOperatorHelper(_truediv_python3, \"truediv\")\n_OverrideBinaryOperatorHelper(floordiv, \"floordiv\")\n_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, \"mod\")\n_OverrideBinaryOperatorHelper(pow, \"pow\")\n\n\n@tf_export(\"math.logical_xor\", v1=[\"math.logical_xor\", \"logical_xor\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"logical_xor\")\ndef logical_xor(x, y, name=\"LogicalXor\"):\n \"\"\"x ^ y = (x | y) & ~(x & y).\"\"\"\n # TODO(alemi) Make this a cwise op if people end up relying on it.\n return gen_math_ops.logical_and(\n gen_math_ops.logical_or(x, y),\n gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),\n name=name)\n\n\n_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, \"and\")\n_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, \"or\")\n_OverrideBinaryOperatorHelper(logical_xor, \"xor\")\n\nops.Tensor._override_operator(\"__lt__\", gen_math_ops.less)\nops.Tensor._override_operator(\"__le__\", gen_math_ops.less_equal)\nops.Tensor._override_operator(\"__gt__\", gen_math_ops.greater)\nops.Tensor._override_operator(\"__ge__\", gen_math_ops.greater_equal)\n\n\n@tf_export(\"range\")\ndef range(start, limit=None, delta=1, dtype=None, name=\"range\"): # pylint: disable=redefined-builtin\n \"\"\"Creates a sequence of numbers.\n\n Creates a sequence of numbers that begins at `start` and extends by\n increments of `delta` up to but not including `limit`.\n\n The dtype of the resulting tensor is inferred from the inputs unless\n 
it is provided explicitly.\n\n Like the Python builtin `range`, `start` defaults to 0, so that\n `range(n) = range(0, n)`.\n\n For example:\n\n ```python\n start = 3\n limit = 18\n delta = 3\n tf.range(start, limit, delta) # [3, 6, 9, 12, 15]\n\n start = 3\n limit = 1\n delta = -0.5\n tf.range(start, limit, delta) # [3, 2.5, 2, 1.5]\n\n limit = 5\n tf.range(limit) # [0, 1, 2, 3, 4]\n ```\n\n Args:\n start: A 0-D `Tensor` (scalar). Acts as first entry in the range if\n `limit` is not None; otherwise, acts as range limit and first entry\n defaults to 0.\n limit: A 0-D `Tensor` (scalar). Upper limit of sequence,\n exclusive. If None, defaults to the value of `start` while the first\n entry of the range defaults to 0.\n delta: A 0-D `Tensor` (scalar). Number that increments\n `start`. Defaults to 1.\n dtype: The type of the elements of the resulting tensor.\n name: A name for the operation. Defaults to \"range\".\n\n Returns:\n An 1-D `Tensor` of type `dtype`.\n\n @compatibility(numpy)\n Equivalent to np.arange\n @end_compatibility\n \"\"\"\n if limit is None:\n start, limit = 0, start\n\n with ops.name_scope(name, \"Range\", [start, limit, delta]) as name:\n start = ops.convert_to_tensor(start, dtype=dtype, name=\"start\")\n limit = ops.convert_to_tensor(limit, dtype=dtype, name=\"limit\")\n delta = ops.convert_to_tensor(delta, dtype=dtype, name=\"delta\")\n\n # infer dtype if not explicitly provided\n if dtype is None:\n dtype_hierarchy = [\n dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64\n ]\n assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])\n inferred_dtype = max(\n [arg.dtype for arg in [start, limit, delta]],\n key=dtype_hierarchy.index)\n\n start = cast(start, inferred_dtype)\n limit = cast(limit, inferred_dtype)\n delta = cast(delta, inferred_dtype)\n\n return gen_math_ops._range(start, limit, delta, name=name)\n\n\n# Reduction operations\ndef _ReductionDims(x, axis, reduction_indices=None): # pylint: disable=invalid-name\n \"\"\"Returns range(0, rank(x)) if reduction_indices is None.\"\"\"\n # TODO(aselle): Remove this after deprecation\n if reduction_indices is not None:\n if axis is not None:\n raise ValueError(\"Can't specify both axis' and 'reduction_indices'.\")\n axis = reduction_indices\n if axis is not None:\n return axis\n else:\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n rank = common_shapes.rank(x)\n if rank is not None:\n return constant_op.constant(np.arange(rank), dtype=dtypes.int32)\n if (isinstance(x, sparse_tensor.SparseTensor) and\n x.dense_shape.shape.is_fully_defined()):\n rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D.\n return constant_op.constant(np.arange(rank), dtype=dtypes.int32)\n\n # Otherwise, we rely on Range and Rank to do the right thing at run-time.\n return range(0, array_ops.rank(x))\n\n\ndef _may_reduce_to_scalar(keepdims, axis, output):\n \"\"\"Set a reduction's output shape to be a scalar if we are certain.\"\"\"\n if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and (\n axis is None):\n output.set_shape(())\n return output\n\n\n@tf_export(v1=[\"math.reduce_sum\", \"reduce_sum\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_sum_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless 
`keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1, 1, 1], [1, 1, 1]])\n tf.reduce_sum(x) # 6\n tf.reduce_sum(x, 0) # [2, 2, 2]\n tf.reduce_sum(x, 1) # [3, 3]\n tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]\n tf.reduce_sum(x, [0, 1]) # 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to\n int64 while tensorflow returns the same dtype as the input.\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_sum(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_sum\", \"reduce_sum\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_sum(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1, 1, 1], [1, 1, 1]])\n tf.reduce_sum(x) # 6\n tf.reduce_sum(x, 0) # [2, 2, 2]\n tf.reduce_sum(x, 1) # [3, 3]\n tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]\n tf.reduce_sum(x, [0, 1]) # 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to\n int64 while tensorflow returns the same dtype as the input.\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._sum(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.count_nonzero\", \"count_nonzero\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\n@deprecation.deprecated_args(\n None, \"reduction_indices is deprecated, use axis instead\", \"axis\")\ndef count_nonzero(input_tensor,\n axis=None,\n keepdims=None,\n dtype=dtypes.int64,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes number of nonzero elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n **NOTE** Floating point comparison to zero is done by exact floating point\n equality check. Small values are **not** rounded to zero for purposes of\n the nonzero check.\n\n For example:\n\n ```python\n x = tf.constant([[0, 1, 0], [1, 1, 0]])\n tf.count_nonzero(x) # 3\n tf.count_nonzero(x, 0) # [1, 2, 0]\n tf.count_nonzero(x, 1) # [1, 2]\n tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]\n tf.count_nonzero(x, [0, 1]) # 3\n ```\n\n **NOTE** Strings are compared against zero-length empty string `\"\"`. Any\n string with a size greater than zero is already considered as nonzero.\n\n For example:\n ```python\n x = tf.constant([\"\", \"a\", \" \", \"b\", \"\"])\n tf.count_nonzero(x) # 3, with \"a\", \" \", and \"b\" as nonzero strings.\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should be of numeric type, `bool`,\n or `string`.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n dtype: The output dtype; defaults to `tf.int64`.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor (number of nonzero values).\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis,\n \"reduction_indices\", reduction_indices\n )\n\n return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)\n\n\n@tf_export(\"math.count_nonzero\", v1=[])\ndef count_nonzero_v2(input, # pylint: disable=redefined-builtin\n axis=None,\n keepdims=None,\n dtype=dtypes.int64,\n name=None):\n \"\"\"Computes number of nonzero elements across dimensions of a tensor.\n\n Reduces `input` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. 
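For instance, a brief illustrative sketch of that rank behaviour:\n\n  ```python\n  x = tf.constant([[0, 1, 0], [1, 1, 0]])\n  tf.math.count_nonzero(x, 1).shape  # (2,): the reduced axis is dropped\n  tf.math.count_nonzero(x, 1, keepdims=True).shape  # (2, 1): it is kept with length 1\n  ```\n\n  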
If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n **NOTE** Floating point comparison to zero is done by exact floating point\n equality check. Small values are **not** rounded to zero for purposes of\n the nonzero check.\n\n For example:\n\n ```python\n x = tf.constant([[0, 1, 0], [1, 1, 0]])\n tf.count_nonzero(x) # 3\n tf.count_nonzero(x, 0) # [1, 2, 0]\n tf.count_nonzero(x, 1) # [1, 2]\n tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]\n tf.count_nonzero(x, [0, 1]) # 3\n ```\n\n **NOTE** Strings are compared against zero-length empty string `\"\"`. Any\n string with a size greater than zero is already considered as nonzero.\n\n For example:\n ```python\n x = tf.constant([\"\", \"a\", \" \", \"b\", \"\"])\n tf.count_nonzero(x) # 3, with \"a\", \" \", and \"b\" as nonzero strings.\n ```\n\n Args:\n input: The tensor to reduce. Should be of numeric type, `bool`,\n or `string`.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. Must be in the range\n `[-rank(input), rank(input))`.\n keepdims: If true, retains reduced dimensions with length 1.\n dtype: The output dtype; defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor (number of nonzero values).\n \"\"\"\n if keepdims is None:\n keepdims = False\n with ops.name_scope(name, \"count_nonzero\", [input]):\n input = ops.convert_to_tensor(input, name=\"input\")\n # A scalar of 'zero' is enough as `not_equal` will broadcast.\n zero = array_ops.zeros([], dtype=input.dtype)\n return cast(\n reduce_sum(\n # int64 reduction happens on GPU\n cast(gen_math_ops.not_equal(input, zero), dtypes.int64),\n axis=axis,\n keepdims=keepdims),\n dtype=dtype)\n\n\n@tf_export(v1=[\"math.reduce_mean\", \"reduce_mean\"])\ndef reduce_mean_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 1.], [2., 2.]])\n tf.reduce_mean(x) # 1.5\n tf.reduce_mean(x, 0) # [1.5, 1.5]\n tf.reduce_mean(x, 1) # [1., 2.]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.mean\n\n Please note that `np.mean` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. 
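(As a brief aside, a sketch of the usual workaround in TensorFlow: cast before reducing, e.g. `tf.reduce_mean(tf.cast(x, tf.float32))` evaluates to 0.5 for the integer `x` used in the example just below.) 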
On the other\n hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,\n for example:\n\n ```python\n x = tf.constant([1, 0, 1, 0])\n tf.reduce_mean(x) # 0\n y = tf.constant([1., 0., 1., 0.])\n tf.reduce_mean(y) # 0.5\n ```\n\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_mean(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_mean\", \"reduce_mean\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_mean(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 1.], [2., 2.]])\n tf.reduce_mean(x) # 1.5\n tf.reduce_mean(x, 0) # [1.5, 1.5]\n tf.reduce_mean(x, 1) # [1., 2.]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.mean\n\n Please note that `np.mean` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,\n for example:\n\n ```python\n x = tf.constant([1, 0, 1, 0])\n tf.reduce_mean(x) # 0\n y = tf.constant([1., 0., 1., 0.])\n tf.reduce_mean(y) # 0.5\n ```\n\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops.mean(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(\"math.reduce_variance\")\ndef reduce_variance(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the variance of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 2.], [3., 4.]])\n tf.reduce_variance(x) # 1.25\n tf.reduce_variance(x, 0) # [1., 1.]\n tf.reduce_variance(x, 1) # [0.25, 0.25]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name scope for the associated operations (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.var\n\n Please note that `np.var` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_variance` has an aggressive type inference from\n `input_tensor`,\n @end_compatibility\n \"\"\"\n name = name if name else \"reduce_variance\"\n with ops.name_scope(name):\n means = reduce_mean(input_tensor, axis=axis, keepdims=True)\n squared_deviations = gen_math_ops.square(input_tensor - means)\n return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)\n\n\n@tf_export(\"math.reduce_std\")\ndef reduce_std(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the standard deviation of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 2.], [3., 4.]])\n tf.reduce_std(x) # 1.1180339887498949\n tf.reduce_std(x, 0) # [1., 1.]\n tf.reduce_std(x, 1) # [0.5, 0.5]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name scope for the associated operations (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.std\n\n Please note that `np.std` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`,\n @end_compatibility\n \"\"\"\n name = name if name else \"reduce_std\"\n with ops.name_scope(name):\n variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)\n return gen_math_ops.sqrt(variance)\n\n\n@tf_export(\"math.reduce_prod\", \"reduce_prod\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.prod\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops.prod(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_prod\", \"reduce_prod\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_prod_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.prod\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_prod(input_tensor, axis, keepdims, name)\n\n\n@tf_export(v1=[\"math.reduce_min\", \"reduce_min\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_min_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.min\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_min(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_min\", \"reduce_min\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_min(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.min\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._min(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_max\", \"reduce_max\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_max_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default),\n reduces all dimensions. 
Must be in the range\n `[-rank(input_tensor), rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.max\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_max(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_max\", \"reduce_max\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.max\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._max(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_all\", \"reduce_all\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_all_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_all(x) # False\n tf.reduce_all(x, 0) # [False, False]\n tf.reduce_all(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.all\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_all(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"reduce_all\", \"math.reduce_all\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_all(x) # False\n tf.reduce_all(x, 0) # [False, False]\n tf.reduce_all(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.all\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._all(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_any\", \"reduce_any\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_any_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_any(x) # True\n tf.reduce_any(x, 0) # [True, True]\n tf.reduce_any(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.any\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_any(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_any\", \"reduce_any\", v1=[])\n@dispatch.add_dispatch_support\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_any(x) # True\n tf.reduce_any(x, 0) # [True, True]\n tf.reduce_any(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.any\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._any(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_logsumexp\", \"reduce_logsumexp\"])\n@deprecation.deprecated_args(\n None, \"keep_dims is deprecated, use keepdims instead\", \"keep_dims\")\ndef reduce_logsumexp_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes log(sum(exp(elements across dimensions of a tensor))).\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(exp(input))). It avoids\n overflows caused by taking the exp of large inputs and underflows caused by\n taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0., 0.], [0., 0., 0.]])\n tf.reduce_logsumexp(x) # log(6)\n tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]\n tf.reduce_logsumexp(x, 1) # [log(3), log(3)]\n tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]\n tf.reduce_logsumexp(x, [0, 1]) # log(6)\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\n \"axis\", axis, \"reduction_indices\", reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_logsumexp(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_logsumexp\", \"reduce_logsumexp\", v1=[])\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes log(sum(exp(elements across dimensions of a tensor))).\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(exp(input))). It avoids\n overflows caused by taking the exp of large inputs and underflows caused by\n taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0., 0.], [0., 0., 0.]])\n tf.reduce_logsumexp(x) # log(6)\n tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]\n tf.reduce_logsumexp(x, 1) # [log(3), log(3)]\n tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]\n tf.reduce_logsumexp(x, [0, 1]) # log(6)\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n input_tensor = ops.convert_to_tensor(input_tensor)\n with ops.name_scope(name, \"ReduceLogSumExp\", [input_tensor]) as name:\n raw_max = reduce_max(\n input_tensor,\n axis=axis,\n keepdims=True)\n my_max = array_ops.stop_gradient(\n array_ops.where(\n gen_math_ops.is_finite(raw_max), raw_max,\n array_ops.zeros_like(raw_max)))\n result = gen_math_ops.log(\n reduce_sum(\n gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),\n axis,\n keepdims=keepdims))\n if not keepdims:\n my_max = array_ops.reshape(my_max, array_ops.shape(result))\n result = gen_math_ops.add(result, my_max)\n return _may_reduce_to_scalar(keepdims, axis, result)\n\n\n@tf_export(\"linalg.trace\", v1=[\"linalg.trace\", \"trace\"])\n@deprecation.deprecated_endpoints(\"trace\")\ndef trace(x, name=None):\n \"\"\"Compute the trace of a tensor `x`.\n\n `trace(x)` returns the sum along the main diagonal of each inner-most matrix\n in x. 
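Equivalently, as a brief sketch that mirrors the implementation further below, the trace of a single matrix is simply the sum of its diagonal:\n\n  ```python\n  x = tf.constant([[1, 2], [3, 4]])\n  tf.reduce_sum(tf.linalg.diag_part(x))  # 5, the same value as tf.linalg.trace(x)\n  ```\n\n  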
If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output\n  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where\n\n  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`\n\n  For example:\n\n  ```python\n  x = tf.constant([[1, 2], [3, 4]])\n  tf.linalg.trace(x)  # 5\n\n  x = tf.constant([[1, 2, 3],\n                   [4, 5, 6],\n                   [7, 8, 9]])\n  tf.linalg.trace(x)  # 15\n\n  x = tf.constant([[[1, 2, 3],\n                    [4, 5, 6],\n                    [7, 8, 9]],\n                   [[-1, -2, -3],\n                    [-4, -5, -6],\n                    [-7, -8, -9]]])\n  tf.linalg.trace(x)  # [15, -15]\n  ```\n\n  Args:\n    x: tensor.\n    name: A name for the operation (optional).\n\n  Returns:\n    The trace of the input tensor.\n  \"\"\"\n  with ops.name_scope(name, \"Trace\", [x]) as name:\n    x = ops.convert_to_tensor(x, name=\"x\")\n    return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)\n\n\n@tf_export(\"linalg.matmul\", \"matmul\")\ndef matmul(a,\n           b,\n           transpose_a=False,\n           transpose_b=False,\n           adjoint_a=False,\n           adjoint_b=False,\n           a_is_sparse=False,\n           b_is_sparse=False,\n           name=None):\n  \"\"\"Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n\n  The inputs must, following any transpositions, be tensors of rank >= 2\n  where the inner 2 dimensions specify valid matrix multiplication arguments,\n  and any further outer dimensions match.\n\n  Both matrices must be of the same type. The supported types are:\n  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n\n  Either matrix can be transposed or adjointed (conjugated and transposed) on\n  the fly by setting one of the corresponding flags to `True`. These are `False`\n  by default.\n\n  If one or both of the matrices contain a lot of zeros, a more efficient\n  multiplication algorithm can be used by setting the corresponding\n  `a_is_sparse` or `b_is_sparse` flag to `True`. 
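These flags are hints about the data and do not change the mathematical result; a brief sketch, assuming a mostly-zero `a`:\n\n  ```python\n  a = tf.constant([[0., 0., 1.], [0., 2., 0.]])\n  b = tf.constant([[1., 2.], [3., 4.], [5., 6.]])\n  tf.matmul(a, b, a_is_sparse=True)  # same values as tf.matmul(a, b): [[5., 6.], [6., 8.]]\n  ```\n\n  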
These are `False` by default.\n This optimization is only available for plain matrices (rank-2 tensors) with\n datatypes `bfloat16` or `float32`.\n\n For example:\n\n ```python\n # 2-D tensor `a`\n # [[1, 2, 3],\n # [4, 5, 6]]\n a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n\n # 2-D tensor `b`\n # [[ 7, 8],\n # [ 9, 10],\n # [11, 12]]\n b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n\n # `a` * `b`\n # [[ 58, 64],\n # [139, 154]]\n c = tf.matmul(a, b)\n\n\n # 3-D tensor `a`\n # [[[ 1, 2, 3],\n # [ 4, 5, 6]],\n # [[ 7, 8, 9],\n # [10, 11, 12]]]\n a = tf.constant(np.arange(1, 13, dtype=np.int32),\n shape=[2, 2, 3])\n\n # 3-D tensor `b`\n # [[[13, 14],\n # [15, 16],\n # [17, 18]],\n # [[19, 20],\n # [21, 22],\n # [23, 24]]]\n b = tf.constant(np.arange(13, 25, dtype=np.int32),\n shape=[2, 3, 2])\n\n # `a` * `b`\n # [[[ 94, 100],\n # [229, 244]],\n # [[508, 532],\n # [697, 730]]]\n c = tf.matmul(a, b)\n\n # Since python >= 3.5 the @ operator is supported (see PEP 465).\n # In TensorFlow, it simply calls the `tf.matmul()` function, so the\n # following lines are equivalent:\n d = a @ b @ [[10.], [11.]]\n d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])\n ```\n\n Args:\n a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,\n `complex128` and rank > 1.\n b: `Tensor` with same type and rank as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n adjoint_b: If `True`, `b` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a` and `b` where each inner-most matrix is\n the product of the corresponding matrices in `a` and `b`, e.g. if all\n transpose or adjoint attributes are `False`:\n\n `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),\n for all indices i, j.\n\n Note: This is matrix product, not element-wise product.\n\n\n Raises:\n ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b\n are both set to True.\n \"\"\"\n with ops.name_scope(name, \"MatMul\", [a, b]) as name:\n if transpose_a and adjoint_a:\n raise ValueError(\"Only one of transpose_a and adjoint_a can be True.\")\n if transpose_b and adjoint_b:\n raise ValueError(\"Only one of transpose_b and adjoint_b can be True.\")\n\n if context.executing_eagerly():\n if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):\n a = ops.convert_to_tensor(a, name=\"a\")\n if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):\n b = ops.convert_to_tensor(b, name=\"b\")\n else:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n\n # TODO(apassos) remove _shape_tuple here when it is not needed.\n a_shape = a._shape_tuple() # pylint: disable=protected-access\n b_shape = b._shape_tuple() # pylint: disable=protected-access\n if (not a_is_sparse and\n not b_is_sparse) and ((a_shape is None or len(a_shape) > 2) and\n (b_shape is None or len(b_shape) > 2)):\n # BatchMatmul does not support transpose, so we conjugate the matrix and\n # use adjoint instead. 
Conj() is a noop for real matrices.\n if transpose_a:\n a = conj(a)\n adjoint_a = True\n if transpose_b:\n b = conj(b)\n adjoint_b = True\n return gen_math_ops.batch_mat_mul(\n a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)\n\n # Neither matmul nor sparse_matmul support adjoint, so we conjugate\n # the matrix and use transpose instead. Conj() is a noop for real\n # matrices.\n if adjoint_a:\n a = conj(a)\n transpose_a = True\n if adjoint_b:\n b = conj(b)\n transpose_b = True\n\n use_sparse_matmul = False\n if a_is_sparse or b_is_sparse:\n sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]\n use_sparse_matmul = (\n a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)\n if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and\n a.dtype != b.dtype):\n # matmul currently doesn't handle mixed-precision inputs.\n use_sparse_matmul = True\n if use_sparse_matmul:\n ret = sparse_matmul(\n a,\n b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse,\n name=name)\n # sparse_matmul always returns float32, even with\n # bfloat16 inputs. This prevents us from configuring bfloat16 training.\n # casting to bfloat16 also matches non-sparse matmul behavior better.\n if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:\n ret = cast(ret, dtypes.bfloat16)\n return ret\n else:\n return gen_math_ops.mat_mul(\n a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)\n\n\n@tf_export(\"linalg.matvec\")\ndef matvec(a,\n b,\n transpose_a=False,\n adjoint_a=False,\n a_is_sparse=False,\n b_is_sparse=False,\n name=None):\n \"\"\"Multiplies matrix `a` by vector `b`, producing `a` * `b`.\n\n The matrix `a` must, following any transpositions, be a tensor of rank >= 2,\n and we must have `shape(b) = shape(a)[:-2] + [shape(a)[-1]]`.\n\n Both `a` and `b` must be of the same type. The supported types are:\n `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n\n Matrix `a` can be transposed or adjointed (conjugated and transposed) on\n the fly by setting one of the corresponding flag to `True`. These are `False`\n by default.\n\n If one or both of the inputs contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. 
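Functionally, a brief sketch that mirrors the implementation just after this docstring: `matvec` behaves like a matmul against a column vector followed by a squeeze:\n\n  ```python\n  a = tf.constant([[1., 2.], [3., 4.]])\n  b = tf.constant([5., 6.])\n  tf.linalg.matvec(a, b)  # [17., 39.]\n  tf.squeeze(tf.matmul(a, tf.expand_dims(b, -1)), axis=-1)  # the same values\n  ```\n\n  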
These are `False` by default.\n This optimization is only available for plain matrices/vectors (rank-2/1\n tensors) with datatypes `bfloat16` or `float32`.\n\n For example:\n\n ```python\n # 2-D tensor `a`\n # [[1, 2, 3],\n # [4, 5, 6]]\n a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n\n # 1-D tensor `b`\n # [7, 9, 11]\n b = tf.constant([7, 9, 11], shape=[3])\n\n # `a` * `b`\n # [ 58, 64]\n c = tf.matvec(a, b)\n\n\n # 3-D tensor `a`\n # [[[ 1, 2, 3],\n # [ 4, 5, 6]],\n # [[ 7, 8, 9],\n # [10, 11, 12]]]\n a = tf.constant(np.arange(1, 13, dtype=np.int32),\n shape=[2, 2, 3])\n\n # 2-D tensor `b`\n # [[13, 14, 15],\n # [16, 17, 18]]\n b = tf.constant(np.arange(13, 19, dtype=np.int32),\n shape=[2, 3])\n\n # `a` * `b`\n # [[ 86, 212],\n # [410, 563]]\n c = tf.matvec(a, b)\n ```\n\n Args:\n a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,\n `complex128` and rank > 1.\n b: `Tensor` with same type and rank = `rank(a) - 1`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a` and `b` where each inner-most vector is\n the product of the corresponding matrices in `a` and vectors in `b`, e.g. if\n all transpose or adjoint attributes are `False`:\n\n `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.\n\n Note: This is matrix-vector product, not element-wise product.\n\n\n Raises:\n ValueError: If transpose_a and adjoint_a are both set to True.\n \"\"\"\n with ops.name_scope(name, \"MatVec\", [a, b]) as name:\n output = matmul(\n a,\n array_ops.expand_dims(b, axis=-1),\n transpose_a=transpose_a,\n adjoint_a=adjoint_a,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse)\n return array_ops.squeeze(output, axis=-1)\n\n\n_OverrideBinaryOperatorHelper(matmul, \"matmul\")\n\nsparse_matmul = deprecation.deprecated(None, \"Use `tf.linalg.matmul` instead\")(\n gen_math_ops.sparse_mat_mul)\ntf_export(v1=[\"sparse_matmul\"])(sparse_matmul)\n\n\n@ops.RegisterStatistics(\"MatMul\", \"flops\")\ndef _calc_mat_mul_flops(graph, node):\n \"\"\"Calculates the compute resources needed for MatMul.\"\"\"\n transpose_a = node.attr[\"transpose_a\"].b\n a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n a_shape.assert_is_fully_defined()\n if transpose_a:\n k = int(a_shape[0])\n else:\n k = int(a_shape[1])\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats(\"flops\", (k * output_count * 2))\n\n\ndef _as_indexed_slices(x, optimize=True):\n \"\"\"Convert 'x' to IndexedSlices.\n\n Convert a dense Tensor to a block-sparse IndexedSlices.\n\n Args:\n x: Either a Tensor object, or an IndexedSlices object.\n optimize: if true, attempt to optimize the conversion of 'x'.\n\n Returns:\n An IndexedSlices object.\n\n Raises:\n TypeError: If 'x' is not a Tensor or an IndexedSlices object.\n \"\"\"\n # TODO(touts): op_scope\n if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\"Not a Tensor or IndexedSlices: %s\" % type(x))\n if isinstance(x, ops.IndexedSlices):\n return x\n x_shape = array_ops.shape_internal(x, optimize=optimize)\n return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)\n\n\ndef 
_as_indexed_slices_list(inputs, optimize=True):\n \"\"\"Convert all elements of 'inputs' to IndexedSlices.\n\n Additionally, homogenize the types of all the indices to\n either int32 or int64.\n\n Args:\n inputs: List containing either Tensor or IndexedSlices objects.\n optimize: if true, attempt to optimize the conversion of each input.\n\n Returns:\n A list of IndexedSlices objects.\n\n Raises:\n TypeError: If 'inputs' is not a list or a tuple.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(\"Expected a list or tuple, not a %s\" % type(inputs))\n outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]\n with_int32_index = [\n o.indices for o in outputs if o.indices.dtype == dtypes.int32\n ]\n if not with_int32_index or len(with_int32_index) == len(outputs):\n return outputs\n casted_outputs = []\n for o in outputs:\n if o.indices.dtype == dtypes.int32:\n casted_outputs.append(\n ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),\n o.dense_shape))\n else:\n casted_outputs.append(o)\n return casted_outputs\n\n\n@tf_export(\"math.add_n\", \"add_n\")\n@dispatch.add_dispatch_support\ndef add_n(inputs, name=None):\n \"\"\"Adds all input tensors element-wise.\n\n Converts `IndexedSlices` objects into dense tensors prior to adding.\n\n Args:\n inputs: A list of `Tensor` or `IndexedSlices` objects, each with same shape\n and type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n \"\"\"\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise ValueError(\"inputs must be a list of at least one \"\n \"Tensor/IndexedSlices with the same dtype and shape\")\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):\n raise ValueError(\"inputs must be a list of at least one \"\n \"Tensor/IndexedSlices with the same dtype and shape\")\n\n if len(inputs) == 1:\n if isinstance(inputs[0], ops.IndexedSlices):\n values = ops.convert_to_tensor(inputs[0])\n else:\n values = inputs[0]\n if name:\n return array_ops.identity(values, name=name)\n return values\n return gen_math_ops.add_n(inputs, name=name)\n\n\n@tf_export(\"math.accumulate_n\", v1=[\"math.accumulate_n\", \"accumulate_n\"])\n@deprecation.deprecated_endpoints(\"accumulate_n\")\ndef accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):\n \"\"\"Returns the element-wise sum of a list of tensors.\n\n Optionally, pass `shape` and `tensor_dtype` for shape and type checking,\n otherwise, these are inferred.\n\n `tf.math.accumulate_n` performs the same operation as `tf.add_n`, but does not\n wait for all of its inputs to be ready before beginning to sum. 
This can\n save memory if inputs are ready at different times, since minimum temporary\n storage is proportional to the output size rather than the inputs size.\n\n `accumulate_n` is differentiable (but wasn't previous to TensorFlow 1.7).\n\n For example:\n\n ```python\n a = tf.constant([[1, 2], [3, 4]])\n b = tf.constant([[5, 0], [0, 6]])\n tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]\n\n # Explicitly pass shape and type\n tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)\n # [[7, 4],\n # [6, 14]]\n ```\n\n Args:\n inputs: A list of `Tensor` objects, each with same shape and type.\n shape: Shape of elements of `inputs`.\n tensor_dtype: The type of `inputs`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n \"\"\"\n\n def _input_error():\n return ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise _input_error()\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, ops.Tensor) for x in inputs):\n raise _input_error()\n if not all(x.dtype == inputs[0].dtype for x in inputs):\n raise _input_error()\n if shape is not None:\n shape = tensor_shape.as_shape(shape)\n else:\n shape = tensor_shape.unknown_shape()\n for input_tensor in inputs:\n if isinstance(input_tensor, ops.Tensor):\n shape = shape.merge_with(input_tensor.get_shape())\n\n # tensor_dtype is for safety only; operator's output type computed in C++\n if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:\n raise TypeError(\"tensor_dtype is {}, but input is of type {}\".format(\n tensor_dtype, inputs[0].dtype))\n\n if len(inputs) == 1 and name is None:\n return inputs[0]\n elif len(inputs) == 1 and name is not None:\n return array_ops.identity(inputs[0], name=name)\n elif context.executing_eagerly():\n # TemporaryVariable not currently supported in eager mode; fall back\n # onto AddN for now.\n # TODO(frreiss) remove this once the lifetime of eager variables gets\n # addressed\n return add_n(inputs, name=name)\n else:\n return gen_math_ops.accumulate_nv2(inputs, name=name, shape=shape) # pylint: disable=protected-access\n\n\n@ops.RegisterGradient(\"AccumulateNV2\")\ndef _accumulate_n_grad(op, grad):\n \"\"\"Same as gradient for AddN. Copies the gradient to all inputs.\"\"\"\n # Not broadcasting.\n return [grad] * len(op.inputs)\n\n\n@tf_export(\"math.sigmoid\", \"nn.sigmoid\", \"sigmoid\")\ndef sigmoid(x, name=None):\n \"\"\"Computes sigmoid of `x` element-wise.\n\n Specifically, `y = 1 / (1 + exp(-x))`.\n\n Args:\n x: A Tensor with type `float16`, `float32`, `float64`, `complex64`,\n or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n\n @compatibility(scipy)\n Equivalent to scipy.special.expit\n @end_compatibility\n \"\"\"\n with ops.name_scope(name, \"Sigmoid\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.sigmoid(x, name=name)\n\n\n@tf_export(\"math.log_sigmoid\", v1=[\"math.log_sigmoid\", \"log_sigmoid\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"log_sigmoid\")\ndef log_sigmoid(x, name=None):\n \"\"\"Computes log sigmoid of `x` element-wise.\n\n Specifically, `y = log(1 / (1 + exp(-x)))`. 
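A quick numeric sketch (values rounded to four decimals):\n\n  ```python\n  x = tf.constant([-1., 0., 1.])\n  tf.math.log_sigmoid(x)  # approximately [-1.3133, -0.6931, -0.3133]\n  ```\n\n  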
For numerical stability,\n we use `y = -tf.nn.softplus(-x)`.\n\n Args:\n x: A Tensor with type `float32` or `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"LogSigmoid\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)\n\n\n@tf_export(\"math.bincount\", v1=[])\ndef bincount(arr,\n weights=None,\n minlength=None,\n maxlength=None,\n dtype=dtypes.int32,\n name=None):\n \"\"\"Counts the number of occurrences of each value in an integer array.\n\n If `minlength` and `maxlength` are not given, returns a vector with length\n `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.\n If `weights` are non-None, then index `i` of the output stores the sum of the\n value in `weights` at each index where the corresponding value in `arr` is\n `i`.\n\n Args:\n arr: An int32 tensor of non-negative values.\n weights: If non-None, must be the same shape as arr. For each value in\n `arr`, the bin will be incremented by the corresponding weight instead of\n 1.\n minlength: If given, ensures the output has length at least `minlength`,\n padding with zeros at the end if necessary.\n maxlength: If given, skips values in `arr` that are equal or greater than\n `maxlength`, ensuring that the output has length at most `maxlength`.\n dtype: If `weights` is None, determines the type of the output bins.\n name: A name scope for the associated operations (optional).\n\n Returns:\n A vector with the same dtype as `weights` or the given `dtype`. The bin\n values.\n \"\"\"\n name = \"bincount\" if name is None else name\n with ops.name_scope(name):\n arr = ops.convert_to_tensor(arr, name=\"arr\", dtype=dtypes.int32)\n array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0\n output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)\n if minlength is not None:\n minlength = ops.convert_to_tensor(\n minlength, name=\"minlength\", dtype=dtypes.int32)\n output_size = gen_math_ops.maximum(minlength, output_size)\n if maxlength is not None:\n maxlength = ops.convert_to_tensor(\n maxlength, name=\"maxlength\", dtype=dtypes.int32)\n output_size = gen_math_ops.minimum(maxlength, output_size)\n if weights is not None:\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)\n weights = constant_op.constant([], dtype)\n return gen_math_ops.bincount(arr, output_size, weights)\n\n\n@tf_export(v1=[\"math.bincount\", \"bincount\"])\n@deprecation.deprecated_endpoints(\"bincount\")\ndef bincount_v1(arr,\n weights=None,\n minlength=None,\n maxlength=None,\n dtype=dtypes.int32):\n \"\"\"Counts the number of occurrences of each value in an integer array.\n\n If `minlength` and `maxlength` are not given, returns a vector with length\n `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.\n If `weights` are non-None, then index `i` of the output stores the sum of the\n value in `weights` at each index where the corresponding value in `arr` is\n `i`.\n\n Args:\n arr: An int32 tensor of non-negative values.\n weights: If non-None, must be the same shape as arr. 
For each value in\n `arr`, the bin will be incremented by the corresponding weight instead of\n 1.\n minlength: If given, ensures the output has length at least `minlength`,\n padding with zeros at the end if necessary.\n maxlength: If given, skips values in `arr` that are equal or greater than\n `maxlength`, ensuring that the output has length at most `maxlength`.\n dtype: If `weights` is None, determines the type of the output bins.\n\n Returns:\n A vector with the same dtype as `weights` or the given `dtype`. The bin\n values.\n \"\"\"\n return bincount(arr, weights, minlength, maxlength, dtype)\n\n\n@tf_export(\"math.cumsum\", \"cumsum\")\ndef cumsum(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative sum of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumsum, which means that the first\n element of the input is identical to the first element of the output:\n\n ```python\n tf.cumsum([a, b, c]) # [a, a + b, a + b + c]\n ```\n\n By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed\n instead:\n\n ```python\n tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b]\n ```\n\n By setting the `reverse` kwarg to `True`, the cumsum is performed in the\n opposite direction:\n\n ```python\n tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]\n ```\n\n This is more efficient than using separate `tf.reverse` ops.\n\n The `reverse` and `exclusive` kwargs can also be combined:\n\n ```python\n tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumsum.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Cumsum\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumsum(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\n@tf_export(\"math.cumprod\", v1=[\"math.cumprod\", \"cumprod\"])\n@deprecation.deprecated_endpoints(\"cumprod\")\ndef cumprod(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative product of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumprod, which means that the\n first element of the input is identical to the first element of the output:\n\n ```python\n tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]\n ```\n\n By setting the `exclusive` kwarg to `True`, an exclusive cumprod is\n performed\n instead:\n\n ```python\n tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]\n ```\n\n By setting the `reverse` kwarg to `True`, the cumprod is performed in the\n opposite direction:\n\n ```python\n tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]\n ```\n\n This is more efficient than using separate `tf.reverse` ops.\n The `reverse` and `exclusive` kwargs can also be combined:\n\n ```python\n tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]\n ```\n\n Args:\n x: A `Tensor`. 
Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumprod.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Cumprod\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumprod(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\n@tf_export(\"math.conj\", v1=[\"math.conj\", \"conj\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"conj\")\ndef conj(x, name=None):\n r\"\"\"Returns the complex conjugate of a complex number.\n\n Given a tensor `input` of complex numbers, this operation returns a tensor of\n complex numbers that are the complex conjugate of each element in `input`. The\n complex numbers in `input` must be of the form \\\\(a + bj\\\\), where *a* is the\n real part and *b* is the imaginary part.\n\n The complex conjugate returned by this operation is of the form \\\\(a - bj\\\\).\n\n For example:\n\n # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\n tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]\n\n If `x` is real, it is returned unchanged.\n\n Args:\n x: `Tensor` to conjugate. Must have numeric or variant type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that is the conjugate of `x` (with the same type).\n\n Raises:\n TypeError: If `x` is not a numeric tensor.\n \"\"\"\n if isinstance(x, ops.Tensor):\n dt = x.dtype\n if dt.is_floating or dt.is_integer:\n return x\n with ops.name_scope(name, \"Conj\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_complex or x.dtype == dtypes.variant:\n return gen_math_ops.conj(x, name=name)\n elif x.dtype.is_floating or x.dtype.is_integer:\n return x\n else:\n raise TypeError(\n \"Expected numeric or variant tensor, got dtype %r\" % x.dtype)\n\n\ndef _BroadcastShape(op):\n \"\"\"Common shape function for binary operators that broadcast their inputs.\"\"\"\n return [\n common_shapes.broadcast_shape(op.inputs[0].get_shape(),\n op.inputs[1].get_shape())\n ]\n\n\ndef reduced_shape(input_shape, axes):\n \"\"\"Helper function for reduction ops.\n\n Args:\n input_shape: 1-D Tensor, the shape of the Tensor being reduced.\n axes: 1-D Tensor, the reduction axes.\n Returns:\n A 1-D Tensor, the output shape as if keepdims were set to True.\n \"\"\"\n # Example:\n # cast needed for SparseTensor reductions\n if context.executing_eagerly():\n input_shape = input_shape.numpy()\n axes = axes.numpy()\n input_shape[axes] = 1\n return input_shape\n\n input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7]\n axes = cast(axes, dtypes.int32) # [1, 2]\n\n input_rank = array_ops.size(input_shape) # 4\n axes = (axes + input_rank) % input_rank\n axes_shape = array_ops.shape(axes) # [2]\n return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]\n [\n range(input_rank), # [0, 1, 2, 3]\n axes\n ], # [1, 2]\n [\n input_shape, # [2, 3, 5, 7]\n array_ops.fill(axes_shape, 1)\n ]) # [1, 1]\n\n\ndef _unsorted_segment_N(data, segment_ids, num_segments):\n \"\"\" Helper function for unsorted_segment_mean/_sqrtN. 
Computes the number\n of segment entries with 0-entries set to 1 to allow division by N.\n \"\"\"\n # bincount doesn't support negative indices so we use unsorted_segment_sum\n segment_ids_shape = array_ops.shape_internal(segment_ids)\n ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)\n N = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)\n # add dimensions for all non-reduced axes\n ndims_output = data.shape.ndims - segment_ids.shape.ndims\n broadcast_shape = [num_segments] + [1] * ndims_output\n N = array_ops.reshape(N, broadcast_shape)\n return gen_math_ops.maximum(N, 1)\n\n\n@tf_export(\n \"math.unsorted_segment_mean\",\n v1=[\"math.unsorted_segment_mean\", \"unsorted_segment_mean\"])\n@deprecation.deprecated_endpoints(\"unsorted_segment_mean\")\n@dispatch.add_dispatch_support\ndef unsorted_segment_mean(data, segment_ids, num_segments, name=None):\n r\"\"\"Computes the mean along segments of a tensor.\n\n Read [the section on\n segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)\n for an explanation of segments.\n\n This operator is similar to the unsorted segment sum operator found\n [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\n Instead of computing the sum over segments, it computes the mean of all\n entries belonging to a segment such that:\n\n \\\\(output_i = 1/N_i \\sum_{j...} data[j...]\\\\) where the sum is over tuples\n `j...` such that `segment_ids[j...] == i` with \\\\N_i\\\\ being the number of\n occurrences of id \\\\i\\\\.\n\n If there is no entry for a given segment ID `i`, it outputs 0.\n\n If the given segment ID `i` is negative, the value is dropped and will not\n be added to the sum of the segment.\n\n Args:\n data: A `Tensor` with floating point or complex dtype.\n segment_ids: An integer tensor whose shape is a prefix of `data.shape`.\n num_segments: An integer scalar `Tensor`. The number of distinct\n segment IDs.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`\n dimensions, which are replaced with a single dimension which has size\n `num_segments`.\n \"\"\"\n with ops.name_scope(name, \"UnsortedSegmentMean\"):\n data = ops.convert_to_tensor(data)\n segment_ids = ops.convert_to_tensor(segment_ids)\n N = _unsorted_segment_N(data, segment_ids, num_segments)\n summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)\n return summed / N\n\n\n@tf_export(\n \"math.unsorted_segment_sqrt_n\",\n v1=[\"math.unsorted_segment_sqrt_n\", \"unsorted_segment_sqrt_n\"])\n@deprecation.deprecated_endpoints(\"unsorted_segment_sqrt_n\")\n@dispatch.add_dispatch_support\ndef unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):\n r\"\"\"Computes the sum along segments of a tensor divided by the sqrt(N).\n\n Read [the section on\n segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)\n for an explanation of segments.\n\n This operator is similar to the unsorted segment sum operator found\n [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\n Additionally to computing the sum over segments, it divides the results by\n sqrt(N).\n\n \\\\(output_i = 1/sqrt(N_i) \\sum_{j...} data[j...]\\\\) where the sum is over\n tuples `j...` such that `segment_ids[j...] 
== i` with \\\\N_i\\\\ being the\n number of occurrences of id \\\\i\\\\.\n\n If there is no entry for a given segment ID `i`, it outputs 0.\n\n Note that this op only supports floating point and complex dtypes,\n due to tf.sqrt only supporting these types.\n\n If the given segment ID `i` is negative, the value is dropped and will not\n be added to the sum of the segment.\n\n Args:\n data: A `Tensor` with floating point or complex dtype.\n segment_ids: An integer tensor whose shape is a prefix of `data.shape`.\n num_segments: An integer scalar `Tensor`. The number of distinct\n segment IDs.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`\n dimensions, which are replaced with a single dimension which has size\n `num_segments`.\n \"\"\"\n with ops.name_scope(name, \"UnsortedSegmentSqrtN\"):\n data = ops.convert_to_tensor(data)\n segment_ids = ops.convert_to_tensor(segment_ids)\n N = _unsorted_segment_N(data, segment_ids, num_segments)\n summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)\n return summed / gen_math_ops.sqrt(N)\n\n\n@tf_export(v1=[\"sparse.segment_sum\", \"sparse_segment_sum\"])\n@deprecation.deprecated_endpoints(\"sparse_segment_sum\")\ndef sparse_segment_sum(data, indices, segment_ids, name=None,\n num_segments=None):\n r\"\"\"Computes the sum along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)\n for an explanation of segments.\n\n Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first\n dimension, selecting a subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n For example:\n\n ```python\n c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n # Select two rows, one segment.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n # => [[0 0 0 0]]\n\n # Select two rows, two segment.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n # => [[ 1 2 3 4]\n # [-1 -2 -3 -4]]\n\n # With missing segment ids.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),\n num_segments=4)\n # => [[ 1 2 3 4]\n # [ 0 0 0 0]\n # [-1 -2 -3 -4]\n # [ 0 0 0 0]]\n\n # Select all rows, two segments.\n tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n # => [[0 0 0 0]\n # [5 6 7 8]]\n\n # Which is equivalent to:\n tf.segment_sum(c, tf.constant([0, 0, 1]))\n ```\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.\n Values should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_sum_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_sum(\n data=data, indices=indices, segment_ids=segment_ids, name=name)\n\n\n@tf_export(\"sparse.segment_sum\", v1=[])\ndef sparse_segment_sum_v2(data,\n indices,\n segment_ids,\n num_segments=None,\n name=None):\n return sparse_segment_mean(\n data, indices, segment_ids, name=name, num_segments=num_segments)\n\n\n@tf_export(v1=[\"sparse.segment_mean\", \"sparse_segment_mean\"])\n@deprecation.deprecated_endpoints(\"sparse_segment_mean\")\ndef sparse_segment_mean(data,\n indices,\n segment_ids,\n name=None,\n num_segments=None):\n r\"\"\"Computes the mean along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)\n for an explanation of segments.\n\n Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first\n dimension, selecting a subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.\n Values should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_mean_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_mean(\n data=data, indices=indices, segment_ids=segment_ids, name=name)\n\n\n@tf_export(\"sparse.segment_mean\", v1=[])\ndef sparse_segment_mean_v2(data,\n indices,\n segment_ids,\n num_segments=None,\n name=None):\n r\"\"\"Computes the mean along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)\n for an explanation of segments.\n\n Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first\n dimension, selecting a subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n return sparse_segment_mean(\n data, indices, segment_ids, name=name, num_segments=num_segments)\n\n\n@tf_export(v1=[\"sparse.segment_sqrt_n\", \"sparse_segment_sqrt_n\"])\n@deprecation.deprecated_endpoints(\"sparse_segment_sqrt_n\")\ndef sparse_segment_sqrt_n(data,\n indices,\n segment_ids,\n name=None,\n num_segments=None):\n r\"\"\"Computes the sum along sparse segments of a tensor divided by the sqrt(N).\n\n `N` is the size of the segment being reduced.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.\n Values should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_sqrt_n(\n data=data, indices=indices, segment_ids=segment_ids, name=name)\n\n\n@tf_export(\"sparse.segment_sqrt_n\", v1=[])\ndef sparse_segment_sqrt_n_v2(data,\n indices,\n segment_ids,\n num_segments=None,\n name=None):\n r\"\"\"Computes the sum along sparse segments of a tensor divided by the sqrt(N).\n\n `N` is the size of the segment being reduced.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n return sparse_segment_sqrt_n(\n data, indices, segment_ids, name=name, num_segments=num_segments)\n\n\n@tf_export(\"tensordot\", \"linalg.tensordot\")\ndef tensordot(a, b, axes, name=None):\n r\"\"\"Tensor contraction of a and b along specified axes.\n\n Tensordot (also known as tensor contraction) sums the product of elements\n from `a` and `b` over the indices specified by `a_axes` and `b_axes`.\n The lists `a_axes` and `b_axes` specify those pairs of axes along which to\n contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension\n as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. 
The lists\n `a_axes` and `b_axes` must have identical length and consist of unique\n integers that specify valid axes for each of the tensors.\n\n This operation corresponds to `numpy.tensordot(a, b, axes)`.\n\n Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`\n is equivalent to matrix multiplication.\n\n Example 2: When `a` and `b` are matrices (order 2), the case\n `axes = [[1], [0]]` is equivalent to matrix multiplication.\n\n Example 3: Suppose that \\\\(a_{ijk}\\\\) and \\\\(b_{lmn}\\\\) represent two\n tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor\n \\\\(c_{jklm}\\\\) whose entry\n corresponding to the indices \\\\((j,k,l,m)\\\\) is given by:\n\n \\\\( c_{jklm} = \\sum_i a_{ijk} b_{lmi} \\\\).\n\n In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.\n\n Args:\n a: `Tensor` of type `float32` or `float64`.\n b: `Tensor` with the same type as `a`.\n axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].\n If axes is a scalar, sum over the last N axes of a and the first N axes of\n b in order. If axes is a list or `Tensor` the first and second row contain\n the set of unique integers specifying axes along which the contraction is\n computed, for `a` and `b`, respectively. The number of axes for `a` and\n `b` must be equal.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `a`.\n\n Raises:\n ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.\n IndexError: If the values in axes exceed the rank of the corresponding\n tensor.\n \"\"\"\n\n def _tensordot_reshape(a, axes, flipped=False):\n \"\"\"Helper method to perform transpose and reshape for contraction op.\n\n This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`\n using `array_ops.transpose` and `array_ops.reshape`. The method takes a\n tensor and performs the correct transpose and reshape operation for a given\n set of indices. It returns the reshaped tensor as well as a list of indices\n necessary to reshape the tensor again after matrix multiplication.\n\n Args:\n a: `Tensor`.\n axes: List or `int32` `Tensor` of unique indices specifying valid axes of\n `a`.\n flipped: An optional `bool`. Defaults to `False`. 
If `True`, the method\n assumes that `a` is the second argument in the contraction operation.\n\n Returns:\n A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is\n the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is\n either a list of integers or an `int32` `Tensor`, depending on whether\n the shape of a is fully specified, and free_dims_static is either a list\n of integers and None values, or None, representing the inferred\n static shape of the free dimensions\n \"\"\"\n if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in xrange(len(shape_a)) if i not in axes]\n free_dims = [shape_a[i] for i in free]\n prod_free = int(np.prod([shape_a[i] for i in free]))\n prod_axes = int(np.prod([shape_a[i] for i in axes]))\n perm = list(axes) + free if flipped else free + list(axes)\n new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]\n reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)\n return reshaped_a, free_dims, free_dims\n else:\n if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in xrange(len(shape_a)) if i not in axes]\n axes_dims = [shape_a[i] for i in axes]\n free_dims = [shape_a[i] for i in free]\n free_dims_static = free_dims\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name=\"axes\")\n free = ops.convert_to_tensor(free, dtype=dtypes.int32, name=\"free\")\n shape_a = array_ops.shape(a)\n else:\n free_dims_static = None\n shape_a = array_ops.shape(a)\n rank_a = array_ops.rank(a)\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name=\"axes\")\n axes = array_ops.where(axes >= 0, axes, axes + rank_a)\n free, _ = array_ops.setdiff1d(range(rank_a), axes)\n free_dims = array_ops.gather(shape_a, free)\n axes_dims = array_ops.gather(shape_a, axes)\n prod_free_dims = reduce_prod(free_dims)\n prod_axes_dims = reduce_prod(axes_dims)\n if flipped:\n perm = array_ops.concat([axes, free], 0)\n new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])\n else:\n perm = array_ops.concat([free, axes], 0)\n new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])\n reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)\n return reshaped_a, free_dims, free_dims_static\n\n def _tensordot_axes(a, axes):\n \"\"\"Generates two sets of contraction axes for the two tensor arguments.\"\"\"\n a_shape = a.get_shape()\n if isinstance(axes, compat.integral_types):\n if axes < 0:\n raise ValueError(\"'axes' must be at least 0.\")\n if a_shape.ndims is not None:\n if axes > a_shape.ndims:\n raise ValueError(\"'axes' must not be larger than the number of \"\n \"dimensions of tensor %s.\" % a)\n return (list(xrange(a_shape.ndims - axes, a_shape.ndims)),\n list(xrange(axes)))\n else:\n rank = array_ops.rank(a)\n return (range(rank - axes, rank, dtype=dtypes.int32),\n range(axes, dtype=dtypes.int32))\n elif isinstance(axes, (list, tuple)):\n if len(axes) != 2:\n raise ValueError(\"'axes' must be an integer or have length 2.\")\n a_axes = axes[0]\n b_axes = axes[1]\n if isinstance(a_axes, compat.integral_types) and \\\n isinstance(b_axes, compat.integral_types):\n a_axes = [a_axes]\n b_axes = [b_axes]\n if len(a_axes) != len(b_axes):\n raise ValueError(\n \"Different number of contraction axes 'a' and 'b', %s != %s.\" %\n 
(len(a_axes), len(b_axes)))\n return a_axes, b_axes\n else:\n axes = ops.convert_to_tensor(axes, name=\"axes\", dtype=dtypes.int32)\n return axes[0], axes[1]\n\n with ops.name_scope(name, \"Tensordot\", [a, b, axes]) as name:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n a_axes, b_axes = _tensordot_axes(a, axes)\n a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)\n b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(\n b, b_axes, True)\n ab_matmul = matmul(a_reshape, b_reshape)\n if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):\n return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)\n else:\n a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)\n b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)\n product = array_ops.reshape(\n ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)\n if a_free_dims_static is not None and b_free_dims_static is not None:\n product.set_shape(a_free_dims_static + b_free_dims_static)\n return product\n\n\n@tf_export(\"math.polyval\")\ndef polyval(coeffs, x, name=None):\n r\"\"\"Computes the elementwise value of a polynomial.\n\n If `x` is a tensor and `coeffs` is a list n + 1 tensors, this function returns\n the value of the n-th order polynomial\n\n p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)\n\n evaluated using Horner's method, i.e.\n\n p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] +\n x * coeffs[0]))\n\n Args:\n coeffs: A list of `Tensor` representing the coefficients of the polynomial.\n x: A `Tensor` representing the variable of the polynomial.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as the expression p(x) with usual broadcasting rules\n for element-wise addition and multiplication applied.\n\n @compatibility(numpy)\n Equivalent to numpy.polyval.\n @end_compatibility\n \"\"\"\n\n with ops.name_scope(name, \"polyval\", nest.flatten(coeffs) + [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if len(coeffs) < 1:\n return array_ops.zeros_like(x, name=name)\n coeffs = [\n ops.convert_to_tensor(coeff, name=(\"coeff_%d\" % index))\n for index, coeff in enumerate(coeffs)\n ]\n p = coeffs[0]\n for c in coeffs[1:]:\n p = c + p * x\n return p\n" ]
[ [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.gen_math_ops.batch_mat_mul", "tensorflow.python.ops.gen_math_ops.cumsum", "tensorflow.python.ops.gen_math_ops._abs", "tensorflow.python.ops.gen_math_ops.angle", "numpy.prod", "tensorflow.python.ops.gen_math_ops.sub", "tensorflow.python.ops.gen_math_ops.sparse_segment_sum", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices", "tensorflow.python.ops.array_ops.matrix_diag_part", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.gen_math_ops.sparse_segment_sqrt_n_with_num_segments", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.common_shapes.rank", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.gen_math_ops._pow", "tensorflow.python.ops.gen_math_ops.round", "tensorflow.python.ops.gen_math_ops.complex_abs", "tensorflow.python.ops.gen_math_ops.minimum", "tensorflow.python.ops.array_ops.expand_dims", "numpy.arange", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.ops.gen_math_ops.arg_max", "tensorflow.python.framework.ops.Tensor._override_operator", "tensorflow.python.ops.gen_math_ops.sub.__doc__.replace", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.gen_math_ops.unsorted_segment_sum", "tensorflow.python.ops.gen_math_ops.mul", "tensorflow.python.ops.gen_math_ops.sparse_segment_sum_with_num_segments", "tensorflow.python.ops.gen_math_ops.bincount", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.gen_math_ops.not_equal", "tensorflow.python.ops.gen_math_ops.square", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.gen_math_ops.sqrt", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.framework.ops.OpStats", "tensorflow.python.ops.gen_nn_ops.softplus", "tensorflow.python.ops.gen_math_ops.bucketize", "tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_div", "tensorflow.python.ops.gen_math_ops.sparse_segment_sqrt_n", "tensorflow.python.ops.gen_math_ops.sparse_segment_mean_with_num_segments", "tensorflow.python.ops.gen_math_ops.arg_min", "tensorflow.python.ops.array_ops.fill", "tensorflow.python.ops.gen_math_ops.div_no_nan", "tensorflow.python.ops.gen_math_ops.floor_div", "tensorflow.python.ops.gen_math_ops.real", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.gen_math_ops.real_div", "tensorflow.python.ops.gen_math_ops.add", "tensorflow.python.ops.gen_math_ops.add_n", "tensorflow.python.ops.gen_math_ops.cast", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.ops.gen_math_ops.arg_min.__doc__.replace", "tensorflow.python.ops.gen_math_ops.imag", "tensorflow.python.ops.gen_math_ops.sparse_segment_mean", "tensorflow.python.ops.gen_math_ops.accumulate_nv2", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.graph_util.tensor_shape_from_node_def_name", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.gen_math_ops._range", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.ops.gen_math_ops.is_finite", 
"tensorflow.python.ops.array_ops.shape_internal", "tensorflow.python.ops.gen_math_ops.cumprod", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.gen_math_ops.conj", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.ops.gen_math_ops.mul.__doc__.replace", "tensorflow.python.ops.gen_math_ops.sigmoid", "tensorflow.python.ops.gen_math_ops._complex", "tensorflow.python.framework.common_shapes.has_fully_defined_shape", "tensorflow.python.framework.ops.convert_to_tensor_v2", "tensorflow.python.ops.gen_math_ops.maximum", "tensorflow.python.ops.gen_math_ops.logical_or", "tensorflow.python.ops.gen_math_ops.mat_mul", "tensorflow.python.util.deprecation.deprecated_argument_lookup", "tensorflow.python.ops.gen_math_ops.logical_and", "tensorflow.python.framework.ops.RegisterStatistics", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.util.deprecation.deprecated_endpoints", "tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_mul", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_math_ops.arg_max.__doc__.replace" ] ]
anathnathphy67/geopandas
[ "07a8f4cc7ef3eac0a2f2d272feb95a0887077072" ]
[ "geopandas/tools/sjoin.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\nfrom geopandas import _compat as compat\nfrom geopandas.array import _check_crs, _crs_mismatch_warn\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersects'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if not _check_crs(left_df, right_df):\n _crs_mismatch_warn(left_df, right_df, stacklevel=3)\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe. If we are joining to an empty dataframe,\n # don't bother generating the index.\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex if len(left_df) > 0 else None\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex if len(right_df) > 0 else None\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n try:\n left_index_name = left_df.index.name\n left_df.index = left_df.index.rename(index_left)\n except TypeError:\n index_left = [\n \"index_%s\" % lsuffix + str(pos)\n for pos, ix in enumerate(left_df.index.names)\n ]\n left_index_name = left_df.index.names\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n\n right_df = right_df.copy(deep=True)\n try:\n right_index_name = right_df.index.name\n right_df.index = right_df.index.rename(index_right)\n except TypeError:\n index_right = [\n \"index_%s\" % rsuffix + str(pos)\n for pos, ix in enumerate(right_df.index.names)\n ]\n right_index_name = right_df.index.names\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index. If tree_idx does not exist, it is due to either a\n # failure to generate the index (e.g., if the column is empty), or the\n # other dataframe is empty so it wasn't necessary to generate it.\n if tree_idx_right and tree_idx:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n elif not tree_idx_right and tree_idx:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n if compat.USE_PYGEOS:\n import pygeos\n\n predicate_d = {\n \"intersects\": pygeos.intersects,\n \"contains\": pygeos.contains,\n \"within\": pygeos.contains,\n }\n check_predicates = predicate_d[op]\n else:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n if compat.USE_PYGEOS:\n res = check_predicates(\n left_df.geometry[l_idx].values.data,\n right_df[right_df.geometry.name][r_idx].values.data,\n )\n else:\n res = check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n )\n\n result = pd.DataFrame(np.column_stack([l_idx, r_idx, res]))\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = 
result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True)\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n if isinstance(index_left, list):\n joined.index.names = left_index_name\n else:\n joined.index.name = left_index_name\n\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True, how=\"left\")\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n if isinstance(index_left, list):\n joined.index.names = left_index_name\n else:\n joined.index.name = left_index_name\n\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n if isinstance(index_right, list):\n joined.index.names = right_index_name\n else:\n joined.index.name = right_index_name\n\n return joined\n" ]
[ [ "numpy.concatenate", "numpy.empty", "numpy.vectorize", "pandas.DataFrame", "numpy.column_stack" ] ]
BPMJG/annotated-F-pointnet
[ "c14f1aea38ab22b4a17e0b0f0bfc371c488a641a" ]
[ "models/frustum_pointnets_v1.py" ]
[ "''' Frsutum PointNets v1 Model.\n'''\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport tensorflow as tf\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'utils'))\nimport tf_util\nfrom model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT\nfrom model_util import point_cloud_masking, get_center_regression_net\nfrom model_util import placeholder_inputs, parse_output_to_tensors, get_loss\n\ndef get_instance_seg_v1_net(point_cloud, one_hot_vec,\n is_training, bn_decay, end_points):\n ''' 3D instance segmentation PointNet v1 network.\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n is_training: TF boolean scalar\n bn_decay: TF float scalar\n end_points: dict\n Output:\n logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object\n end_points: dict\n '''\n\n batch_size = point_cloud.get_shape()[0].value\n num_point = point_cloud.get_shape()[1].value\n\n net = tf.expand_dims(point_cloud, 2)\n # (32, 2048, 1, 4) 论文第一格\n\n net = tf_util.conv2d(net, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv1', bn_decay=bn_decay)\n # (32, 2048, 1, 64)\n net = tf_util.conv2d(net, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv2', bn_decay=bn_decay)\n point_feat = tf_util.conv2d(net, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv3', bn_decay=bn_decay)\n # (32, 2048, 1, 64) 论文第二格,拼接到第五格\n net = tf_util.conv2d(point_feat, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv4', bn_decay=bn_decay)\n # (32, 2048, 1, 128)\n net = tf_util.conv2d(net, 1024, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv5', bn_decay=bn_decay)\n # (32, 2048, 1, 1024) 论文第三格\n global_feat = tf_util.max_pool2d(net, [num_point,1],\n padding='VALID', scope='maxpool')\n # (32, 1, 1, 1024) 论文第四格,拼接到第五格\n\n global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)\n # (32, 1, 1, 1027)\n global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])\n # (32, 2048, 1, 1027) tf.tile()复制扩展数据\n concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])\n # (32, 2048, 1, 1091) 论文第五格 2048*(1024+64+3)\n\n net = tf_util.conv2d(concat_feat, 512, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv6', bn_decay=bn_decay)\n # (32, 2048, 1, 512)\n net = tf_util.conv2d(net, 256, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv7', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv8', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv9', bn_decay=bn_decay)\n # (32, 2048, 1, 128)\n net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)\n\n logits = tf_util.conv2d(net, 2, [1,1],\n padding='VALID', stride=[1,1], activation_fn=None,\n scope='conv10')\n # (32, 2048, 1, 2)\n logits = tf.squeeze(logits, [2]) # BxNxC\n # (32, 2048, 2)论文第六格\n # to 182\n return logits, 
end_points\n \n\ndef get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,\n is_training, bn_decay, end_points):\n ''' 3D Box Estimation PointNet v1 network.\n Input:\n object_point_cloud: TF tensor in shape (B,M,C)\n point clouds in object coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n Output:\n output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)\n including box centers, heading bin class scores and residuals,\n and size cluster scores and residuals\n ''' \n num_point = object_point_cloud.get_shape()[1].value\n net = tf.expand_dims(object_point_cloud, 2)\n # (32, 512,1, 3) 第一格\n net = tf_util.conv2d(net, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv-reg1', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv-reg2', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv-reg3', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 512, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv-reg4', bn_decay=bn_decay)\n # (32, 512, 1, 512) 第二格\n net = tf_util.max_pool2d(net, [num_point,1],\n padding='VALID', scope='maxpool2')\n # (32, 1, 1, 512)\n net = tf.squeeze(net, axis=[1,2])\n # (32, 512)\n net = tf.concat([net, one_hot_vec], axis=1)\n # (32, 512+3) 第三格\n net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,\n is_training=is_training, bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,\n is_training=is_training, bn_decay=bn_decay)\n # (32, 256)\n\n # The first 3 numbers: box center coordinates (cx,cy,cz),\n # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals\n # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals\n output = tf_util.fully_connected(net,\n 3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')\n return output, end_points\n # (3+4*8+2*12)\n # (3+4NS+2NH) 第四格\n # to 202\n\n\ndef get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):\n ''' Frustum PointNets model. 
The model predict 3D object masks and\n amodel bounding boxes for objects in frustum point clouds.\n\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n is_training: TF boolean scalar\n bn_decay: TF float scalar\n Output:\n end_points: dict (map from name strings to TF tensors)\n '''\n end_points = {}\n \n # 3D Instance Segmentation PointNet\n # logits: TF tensor in shape(B, N, 2), scores for bkg / clutter and object\n # end_points: dict\n # to 18\n logits, end_points = get_instance_seg_v1_net(\\\n point_cloud, one_hot_vec,\n is_training, bn_decay, end_points)\n # (32, 2048, 2), {}\n end_points['mask_logits'] = logits\n\n # Masking\n # select masked points and translate to masked points' centroid\n object_point_cloud_xyz, mask_xyz_mean, end_points = \\\n point_cloud_masking(point_cloud, logits, end_points)\n # to model_util.py 217\n # (32, 512, 3) (32, 3) end_points['mask'] = mask\n\n\n # T-Net and coordinate translation\n # to model_util.py 295\n center_delta, end_points = get_center_regression_net(\\\n object_point_cloud_xyz, one_hot_vec,\n is_training, bn_decay, end_points)\n # (32,3) end_points['mask'] = mask\n stage1_center = center_delta + mask_xyz_mean # Bx3\n # (32,3)\n end_points['stage1_center'] = stage1_center\n # Get object point cloud in object coordinate\n object_point_cloud_xyz_new = \\\n object_point_cloud_xyz - tf.expand_dims(center_delta, 1)\n # (32, 512, 3) - (32, 1, 3)\n # in object coordinate\n\n # Amodel Box Estimation PointNet\n # to 105\n output, end_points = get_3d_box_estimation_v1_net(\\\n object_point_cloud_xyz_new, one_hot_vec,\n is_training, bn_decay, end_points)\n\n # Parse output to 3D box parameters\n end_points = parse_output_to_tensors(output, end_points)\n end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3\n # (32, 3)\n\n return end_points\n\nif __name__=='__main__':\n with tf.Graph().as_default():\n inputs = tf.zeros((32,1024,4))\n outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True))\n for key in outputs:\n print((key, outputs[key]))\n loss = get_loss(tf.zeros((32,1024),dtype=tf.int32),\n tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32),\n tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32),\n tf.zeros((32,3)), outputs)\n print(loss)\n" ]
[ [ "tensorflow.zeros", "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.Graph", "tensorflow.ones", "tensorflow.constant", "tensorflow.squeeze", "tensorflow.tile" ] ]
treuds/statsmodels
[ "6b4aa33563ab639d168525dde0ef86c8e5c83d68" ]
[ "statsmodels/othermod/betareg.py" ]
[ "# -*- coding: utf-8 -*-\n\nu\"\"\"\nBeta regression for modeling rates and proportions.\n\nReferences\n----------\nGrün, Bettina, Ioannis Kosmidis, and Achim Zeileis. Extended beta regression\nin R: Shaken, stirred, mixed, and partitioned. No. 2011-22. Working Papers in\nEconomics and Statistics, 2011.\n\nSmithson, Michael, and Jay Verkuilen. \"A better lemon squeezer?\nMaximum-likelihood regression with beta-distributed dependent variables.\"\nPsychological methods 11.1 (2006): 54.\n\"\"\"\n\nimport numpy as np\nfrom scipy.special import gammaln as lgamma\nimport patsy\n\nimport statsmodels.base.wrapper as wrap\nimport statsmodels.regression.linear_model as lm\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.base.model import (\n GenericLikelihoodModel, GenericLikelihoodModelResults, _LLRMixin)\nfrom statsmodels.genmod import families\n\n\n_init_example = \"\"\"\n\n Beta regression with default of logit-link for exog and log-link\n for precision.\n\n >>> mod = BetaModel(endog, exog)\n >>> rslt = mod.fit()\n >>> print(rslt.summary())\n\n We can also specify a formula and a specific structure and use the\n identity-link for precision.\n\n >>> from sm.families.links import identity\n >>> Z = patsy.dmatrix('~ temp', dat, return_type='dataframe')\n >>> mod = BetaModel.from_formula('iyield ~ C(batch, Treatment(10)) + temp',\n ... dat, exog_precision=Z,\n ... link_precision=identity())\n\n In the case of proportion-data, we may think that the precision depends on\n the number of measurements. E.g for sequence data, on the number of\n sequence reads covering a site:\n\n >>> Z = patsy.dmatrix('~ coverage', df)\n >>> formula = 'methylation ~ disease + age + gender + coverage'\n >>> mod = BetaModel.from_formula(formula, df, Z)\n >>> rslt = mod.fit()\n\n\"\"\"\n\n\nclass BetaModel(GenericLikelihoodModel):\n __doc__ = \"\"\"Beta Regression.\n\n The Model is parameterized by mean and precision. Both can depend on\n explanatory variables through link functions.\n\n Parameters\n ----------\n endog : array_like\n 1d array of endogenous response variable.\n exog : array_like\n A nobs x k array where `nobs` is the number of observations and `k`\n is the number of regressors. An intercept is not included by default\n and should be added by the user (models specified using a formula\n include an intercept by default). See `statsmodels.tools.add_constant`.\n exog_precision : array_like\n 2d array of variables for the precision.\n link : link\n Any link in sm.families.links for mean, should have range in\n interval [0, 1]. Default is logit-link.\n link_precision : link\n Any link in sm.families.links for precision, should have\n range in positive line. 
Default is log-link.\n **kwds : extra keywords\n Keyword options that will be handled by super classes.\n Not all general keywords will be supported in this class.\n\n Notes\n -----\n Status: experimental, new in 0.13.\n Core results are verified, but api can change and some extra results\n specific to Beta regression are missing.\n\n Examples\n --------\n {example}\n\n See Also\n --------\n :ref:`links`\n\n \"\"\".format(example=_init_example)\n\n def __init__(self, endog, exog, exog_precision=None,\n link=families.links.Logit(),\n link_precision=families.links.Log(), **kwds):\n\n etmp = np.array(endog)\n assert np.all((0 < etmp) & (etmp < 1))\n if exog_precision is None:\n extra_names = ['precision']\n exog_precision = np.ones((len(endog), 1), dtype='f')\n else:\n extra_names = ['precision-%s' % zc for zc in\n (exog_precision.columns\n if hasattr(exog_precision, 'columns')\n else range(1, exog_precision.shape[1] + 1))]\n\n kwds['extra_params_names'] = extra_names\n\n super(BetaModel, self).__init__(endog, exog,\n exog_precision=exog_precision,\n **kwds)\n self.link = link\n self.link_precision = link_precision\n # not needed, handled by super:\n # self.exog_precision = exog_precision\n # inherited df do not account for precision params\n self.nobs = self.endog.shape[0]\n self.df_model = self.nparams - 1\n self.df_resid = self.nobs - self.nparams\n assert len(self.exog_precision) == len(self.endog)\n self.hess_type = \"oim\"\n if 'exog_precision' not in self._init_keys:\n self._init_keys.extend(['exog_precision'])\n self._init_keys.extend(['link', 'link_precision'])\n self._null_drop_keys = ['exog_precision']\n self.results_class = BetaResults\n self.results_class_wrapper = BetaResultsWrapper\n\n @classmethod\n def from_formula(cls, formula, data, exog_precision_formula=None,\n *args, **kwargs):\n if exog_precision_formula is not None:\n if 'subset' in kwargs:\n d = data.ix[kwargs['subset']]\n Z = patsy.dmatrix(exog_precision_formula, d)\n else:\n Z = patsy.dmatrix(exog_precision_formula, data)\n kwargs['exog_precision'] = Z\n\n return super(BetaModel, cls).from_formula(formula, data, *args,\n **kwargs)\n\n def _get_exogs(self):\n return (self.exog, self.exog_precision)\n\n def predict(self, params, exog=None, exog_precision=None, which=\"mean\"):\n \"\"\"Predict values for mean or precision\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for precision parameter.\n which : str\n\n - \"mean\" : mean, conditional expectation E(endog | exog)\n - \"precision\" : predicted precision\n - \"linear\" : linear predictor for the mean function\n - \"linear-precision\" : linear predictor for the precision parameter\n\n Returns\n -------\n ndarray, predicted values\n \"\"\"\n # compatibility with old names and misspelling\n if which == \"linpred\":\n which = \"linear\"\n if which in [\"linpred_precision\", \"linear_precision\"]:\n which = \"linear-precision\"\n\n k_mean = self.exog.shape[1]\n if which in [\"mean\", \"linear\"]:\n if exog is None:\n exog = self.exog\n params_mean = params[:k_mean]\n # Zparams = params[k_mean:]\n linpred = np.dot(exog, params_mean)\n if which == \"mean\":\n mu = self.link.inverse(linpred)\n res = mu\n else:\n res = linpred\n\n elif which in [\"precision\", \"linear-precision\"]:\n if exog_precision is None:\n exog_precision = self.exog_precision\n params_prec = params[k_mean:]\n linpred_prec = np.dot(exog_precision, 
params_prec)\n\n if which == \"precision\":\n phi = self.link_precision.inverse(linpred_prec)\n res = phi\n else:\n res = linpred_prec\n\n elif which == \"var\":\n res = self._predict_var(\n params,\n exog=exog,\n exog_precision=exog_precision\n )\n\n else:\n raise ValueError('which = %s is not available' % which)\n\n return res\n\n def _predict_precision(self, params, exog_precision=None):\n \"\"\"Predict values for precision function for given exog_precision.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog_precision : array_like\n Array of predictor variables for precision.\n\n Returns\n -------\n Predicted precision.\n \"\"\"\n if exog_precision is None:\n exog_precision = self.exog_precision\n\n k_mean = self.exog.shape[1]\n params_precision = params[k_mean:]\n linpred_prec = np.dot(exog_precision, params_precision)\n phi = self.link_precision.inverse(linpred_prec)\n\n return phi\n\n def _predict_var(self, params, exog=None, exog_precision=None):\n \"\"\"predict values for conditional variance V(endog | exog)\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for precision.\n\n Returns\n -------\n Predicted conditional variance.\n \"\"\"\n mean = self.predict(params, exog=exog)\n precision = self._predict_precision(params,\n exog_precision=exog_precision)\n\n var_endog = mean * (1 - mean) / (1 + precision)\n return var_endog\n\n def loglikeobs(self, params):\n \"\"\"\n Loglikelihood for observations of the Beta regressionmodel.\n\n Parameters\n ----------\n params : ndarray\n The parameters of the model, coefficients for linear predictors\n of the mean and of the precision function.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`.\n \"\"\"\n return self._llobs(self.endog, self.exog, self.exog_precision, params)\n\n def _llobs(self, endog, exog, exog_precision, params):\n \"\"\"\n Loglikelihood for observations with data arguments.\n\n Parameters\n ----------\n endog : ndarray\n 1d array of endogenous variable.\n exog : ndarray\n 2d array of explanatory variables.\n exog_precision : ndarray\n 2d array of explanatory variables for precision.\n params : ndarray\n The parameters of the model, coefficients for linear predictors\n of the mean and of the precision function.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`.\n \"\"\"\n y, X, Z = endog, exog, exog_precision\n nz = Z.shape[1]\n\n params_mean = params[:-nz]\n params_prec = params[-nz:]\n linpred = np.dot(X, params_mean)\n linpred_prec = np.dot(Z, params_prec)\n\n mu = self.link.inverse(linpred)\n phi = self.link_precision.inverse(linpred_prec)\n\n eps_lb = 1e-200\n alpha = np.clip(mu * phi, eps_lb, np.inf)\n beta = np.clip((1 - mu) * phi, eps_lb, np.inf)\n\n ll = (lgamma(phi) - lgamma(alpha)\n - lgamma(beta)\n + (mu * phi - 1) * np.log(y)\n + (((1 - mu) * phi) - 1) * np.log(1 - y))\n\n return ll\n\n def score(self, params):\n \"\"\"\n Returns the score vector of the log-likelihood.\n\n http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score : ndarray\n First derivative of loglikelihood function.\n \"\"\"\n sf1, sf2 = self.score_factor(params)\n\n d1 = np.dot(sf1, self.exog)\n d2 = np.dot(sf2, 
self.exog_precision)\n return np.concatenate((d1, d2))\n\n def _score_check(self, params):\n \"\"\"Inherited score with finite differences\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score based on numerical derivatives\n \"\"\"\n return super(BetaModel, self).score(params)\n\n def score_factor(self, params, endog=None):\n \"\"\"Derivative of loglikelihood function w.r.t. linear predictors.\n\n This needs to be multiplied with the exog to obtain the score_obs.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score_factor : ndarray, 2-D\n A 2d weight vector used in the calculation of the score_obs.\n\n Notes\n -----\n The score_obs can be obtained from score_factor ``sf`` using\n\n - d1 = sf[:, :1] * exog\n - d2 = sf[:, 1:2] * exog_precision\n\n \"\"\"\n from scipy import special\n digamma = special.psi\n\n y = self.endog if endog is None else endog\n X, Z = self.exog, self.exog_precision\n nz = Z.shape[1]\n Xparams = params[:-nz]\n Zparams = params[-nz:]\n\n # NO LINKS\n mu = self.link.inverse(np.dot(X, Xparams))\n phi = self.link_precision.inverse(np.dot(Z, Zparams))\n\n eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf\n alpha = np.clip(mu * phi, eps_lb, np.inf)\n beta = np.clip((1 - mu) * phi, eps_lb, np.inf)\n\n ystar = np.log(y / (1. - y))\n dig_beta = digamma(beta)\n mustar = digamma(alpha) - dig_beta\n yt = np.log(1 - y)\n mut = dig_beta - digamma(phi)\n\n t = 1. / self.link.deriv(mu)\n h = 1. / self.link_precision.deriv(phi)\n #\n sf1 = phi * t * (ystar - mustar)\n sf2 = h * (mu * (ystar - mustar) + yt - mut)\n\n return (sf1, sf2)\n\n def score_hessian_factor(self, params, return_hessian=False,\n observed=True):\n \"\"\"Derivatives of loglikelihood function w.r.t. linear predictors.\n\n This calculates score and hessian factors at the same time, because\n there is a large overlap in calculations.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n return_hessian : bool\n If False, then only score_factors are returned\n If True, the both score and hessian factors are returned\n observed : bool\n If True, then the observed Hessian is returned (default).\n If False, then the expected information matrix is returned.\n\n Returns\n -------\n score_factor : ndarray, 2-D\n A 2d weight vector used in the calculation of the score_obs.\n (-jbb, -jbg, -jgg) : tuple\n A tuple with 3 hessian factors, corresponding to the upper\n triangle of the Hessian matrix.\n TODO: check why there are minus\n \"\"\"\n from scipy import special\n digamma = special.psi\n\n y, X, Z = self.endog, self.exog, self.exog_precision\n nz = Z.shape[1]\n Xparams = params[:-nz]\n Zparams = params[-nz:]\n\n # NO LINKS\n mu = self.link.inverse(np.dot(X, Xparams))\n phi = self.link_precision.inverse(np.dot(Z, Zparams))\n\n # We need to prevent mu = 0 and (1-mu) = 0 in digamma call\n eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf\n alpha = np.clip(mu * phi, eps_lb, np.inf)\n beta = np.clip((1 - mu) * phi, eps_lb, np.inf)\n\n ystar = np.log(y / (1. - y))\n dig_beta = digamma(beta)\n mustar = digamma(alpha) - dig_beta\n yt = np.log(1 - y)\n mut = dig_beta - digamma(phi)\n\n t = 1. / self.link.deriv(mu)\n h = 1. 
/ self.link_precision.deriv(phi)\n\n ymu_star = (ystar - mustar)\n sf1 = phi * t * ymu_star\n sf2 = h * (mu * ymu_star + yt - mut)\n\n if return_hessian:\n trigamma = lambda x: special.polygamma(1, x) # noqa\n trig_beta = trigamma(beta)\n var_star = trigamma(alpha) + trig_beta\n var_t = trig_beta - trigamma(phi)\n\n c = - trig_beta\n s = self.link.deriv2(mu)\n q = self.link_precision.deriv2(phi)\n\n jbb = (phi * t) * var_star\n if observed:\n jbb += s * t**2 * ymu_star\n\n jbb *= t * phi\n\n jbg = phi * t * h * (mu * var_star + c)\n if observed:\n jbg -= ymu_star * t * h\n\n jgg = h**2 * (mu**2 * var_star + 2 * mu * c + var_t)\n if observed:\n jgg += (mu * ymu_star + yt - mut) * q * h**3 # **3 ?\n\n return (sf1, sf2), (-jbb, -jbg, -jgg)\n else:\n return (sf1, sf2)\n\n def score_obs(self, params):\n \"\"\"\n Score, first derivative of the loglikelihood for each observation.\n\n Parameters\n ----------\n params : ndarray\n Parameter at which score is evaluated.\n\n Returns\n -------\n score_obs : ndarray, 2d\n The first derivative of the loglikelihood function evaluated at\n params for each observation.\n \"\"\"\n sf1, sf2 = self.score_factor(params)\n\n # elementwise product for each row (observation)\n d1 = sf1[:, None] * self.exog\n d2 = sf2[:, None] * self.exog_precision\n return np.column_stack((d1, d2))\n\n def hessian(self, params, observed=None):\n \"\"\"Hessian, second derivative of loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n Parameter at which Hessian is evaluated.\n observed : bool\n If True, then the observed Hessian is returned (default).\n If False, then the expected information matrix is returned.\n\n Returns\n -------\n hessian : ndarray\n Hessian, i.e. observed information, or expected information matrix.\n \"\"\"\n if self.hess_type == \"eim\":\n observed = False\n else:\n observed = True\n _, hf = self.score_hessian_factor(params, return_hessian=True,\n observed=observed)\n\n hf11, hf12, hf22 = hf\n\n # elementwise product for each row (observation)\n d11 = (self.exog.T * hf11).dot(self.exog)\n d12 = (self.exog.T * hf12).dot(self.exog_precision)\n d22 = (self.exog_precision.T * hf22).dot(self.exog_precision)\n return np.block([[d11, d12], [d12.T, d22]])\n\n def hessian_factor(self, params, observed=True):\n \"\"\"Derivatives of loglikelihood function w.r.t. linear predictors.\n \"\"\"\n _, hf = self.score_hessian_factor(params, return_hessian=True,\n observed=observed)\n return hf\n\n def _start_params(self, niter=2, return_intermediate=False):\n \"\"\"find starting values\n\n Parameters\n ----------\n niter : int\n Number of iterations of WLS approximation\n return_intermediate : bool\n If False (default), then only the preliminary parameter estimate\n will be returned.\n If True, then also the two results instances of the WLS estimate\n for mean parameters and for the precision parameters will be\n returned.\n\n Returns\n -------\n sp : ndarray\n start parameters for the optimization\n res_m2 : results instance (optional)\n Results instance for the WLS regression of the mean function.\n res_p2 : results instance (optional)\n Results instance for the WLS regression of the precision function.\n\n Notes\n -----\n This calculates a few iteration of weighted least squares. 
This is not\n a full scoring algorithm.\n \"\"\"\n # WLS of the mean equation uses the implied weights (inverse variance),\n # WLS for the precision equations uses weights that only take\n # account of the link transformation of the precision endog.\n from statsmodels.regression.linear_model import OLS, WLS\n res_m = OLS(self.link(self.endog), self.exog).fit()\n fitted = self.link.inverse(res_m.fittedvalues)\n resid = self.endog - fitted\n\n prec_i = fitted * (1 - fitted) / np.maximum(np.abs(resid), 1e-2)**2 - 1\n res_p = OLS(self.link_precision(prec_i), self.exog_precision).fit()\n prec_fitted = self.link_precision.inverse(res_p.fittedvalues)\n # sp = np.concatenate((res_m.params, res_p.params))\n\n for _ in range(niter):\n y_var_inv = (1 + prec_fitted) / (fitted * (1 - fitted))\n # y_var = fitted * (1 - fitted) / (1 + prec_fitted)\n\n ylink_var_inv = y_var_inv / self.link.deriv(fitted)**2\n res_m2 = WLS(self.link(self.endog), self.exog,\n weights=ylink_var_inv).fit()\n fitted = self.link.inverse(res_m2.fittedvalues)\n resid2 = self.endog - fitted\n\n prec_i2 = (fitted * (1 - fitted) /\n np.maximum(np.abs(resid2), 1e-2)**2 - 1)\n w_p = 1. / self.link_precision.deriv(prec_fitted)**2\n res_p2 = WLS(self.link_precision(prec_i2), self.exog_precision,\n weights=w_p).fit()\n prec_fitted = self.link_precision.inverse(res_p2.fittedvalues)\n sp2 = np.concatenate((res_m2.params, res_p2.params))\n\n if return_intermediate:\n return sp2, res_m2, res_p2\n\n return sp2\n\n def fit(self, start_params=None, maxiter=1000, disp=False,\n method='bfgs', **kwds):\n \"\"\"\n Fit the model by maximum likelihood.\n\n Parameters\n ----------\n start_params : array-like\n A vector of starting values for the regression\n coefficients. If None, a default is chosen.\n maxiter : integer\n The maximum number of iterations\n disp : bool\n Show convergence stats.\n method : str\n The optimization method to use.\n kwds :\n Keyword arguments for the optimizer.\n\n Returns\n -------\n BetaResults instance.\n \"\"\"\n\n if start_params is None:\n start_params = self._start_params()\n# # http://www.ime.usp.br/~sferrari/beta.pdf suggests starting phi\n# # on page 8\n\n if \"cov_type\" in kwds:\n # this is a workaround because we cannot tell super to use eim\n if kwds[\"cov_type\"].lower() == \"eim\":\n self.hess_type = \"eim\"\n del kwds[\"cov_type\"]\n else:\n self.hess_type = \"oim\"\n\n res = super(BetaModel, self).fit(start_params=start_params,\n maxiter=maxiter, method=method,\n disp=disp, **kwds)\n if not isinstance(res, BetaResultsWrapper):\n # currently GenericLikelihoodModel doe not add wrapper\n res = BetaResultsWrapper(res)\n return res\n\n def _deriv_mean_dparams(self, params):\n \"\"\"\n Derivative of the expected endog with respect to the parameters.\n\n not verified yet\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n \"\"\"\n link = self.link\n lin_pred = self.predict(params, which=\"linear\")\n idl = link.inverse_deriv(lin_pred)\n dmat = self.exog * idl[:, None]\n return np.column_stack((dmat, np.zeros(self.exog_precision.shape)))\n\n def _deriv_score_obs_dendog(self, params):\n \"\"\"derivative of score_obs w.r.t. 
endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog.\n \"\"\"\n from statsmodels.tools.numdiff import _approx_fprime_cs_scalar\n\n def f(y):\n if y.ndim == 2 and y.shape[1] == 1:\n y = y[:, 0]\n sf = self.score_factor(params, endog=y)\n return np.column_stack(sf)\n\n dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)\n # deriv is 2d vector\n d1 = dsf[:, :1] * self.exog\n d2 = dsf[:, 1:2] * self.exog_precision\n\n return np.column_stack((d1, d2))\n\n\n # code duplication with results class\n def get_distribution_params(self, params, exog=None, exog_precision=None):\n \"\"\"\n Return distribution parameters converted from model prediction.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for mean.\n\n Returns\n -------\n (alpha, beta) : tuple of ndarrays\n Parameters for the scipy distribution to evaluate predictive\n distribution.\n \"\"\"\n mean = self.predict(params, exog=exog)\n precision = self.predict(params, exog_precision=exog_precision,\n which=\"precision\")\n return precision * mean, precision * (1 - mean)\n\n def get_distribution(self, params, exog=None, exog_precision=None):\n \"\"\"\n Return a instance of the predictive distribution.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for mean.\n\n Returns\n -------\n Instance of a scipy frozen distribution based on estimated\n parameters.\n\n See Also\n --------\n predict\n\n Notes\n -----\n This function delegates to the predict method to handle exog and\n exog_precision, which in turn makes any required transformations.\n\n Due to the behavior of ``scipy.stats.distributions objects``, the\n returned random number generator must be called with ``gen.rvs(n)``\n where ``n`` is the number of observations in the data set used\n to fit the model. 
If any other value is used for ``n``, misleading\n results will be produced.\n \"\"\"\n from scipy import stats\n args = self.get_distribution_params(params, exog=exog,\n exog_precision=exog_precision)\n distr = stats.beta(*args)\n return distr\n\n\nclass BetaResults(GenericLikelihoodModelResults, _LLRMixin):\n \"\"\"Results class for Beta regression\n\n This class inherits from GenericLikelihoodModelResults and not all\n inherited methods might be appropriate in this case.\n \"\"\"\n\n # GenericLikeihoodmodel doesn't define fittedvalues, residuals and similar\n @cache_readonly\n def fittedvalues(self):\n \"\"\"In-sample predicted mean, conditional expectation.\"\"\"\n return self.model.predict(self.params)\n\n @cache_readonly\n def fitted_precision(self):\n \"\"\"In-sample predicted precision\"\"\"\n return self.model.predict(self.params, which=\"precision\")\n\n @cache_readonly\n def resid(self):\n \"\"\"Response residual\"\"\"\n return self.model.endog - self.fittedvalues\n\n @cache_readonly\n def resid_pearson(self):\n \"\"\"Pearson standardize residual\"\"\"\n std = np.sqrt(self.model.predict(self.params, which=\"var\"))\n return self.resid / std\n\n @cache_readonly\n def prsquared(self):\n \"\"\"Cox-Snell Likelihood-Ratio pseudo-R-squared.\n\n 1 - exp((llnull - .llf) * (2 / nobs))\n \"\"\"\n return self.pseudo_rsquared(kind=\"lr\")\n\n def get_distribution_params(self, exog=None, exog_precision=None,\n transform=True):\n \"\"\"\n Return distribution parameters converted from model prediction.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n exog : array_like\n Array of predictor variables for mean.\n transform : bool\n If transform is True and formulas have been used, then predictor\n ``exog`` is passed through the formula processing. Default is True.\n\n Returns\n -------\n (alpha, beta) : tuple of ndarrays\n Parameters for the scipy distribution to evaluate predictive\n distribution.\n \"\"\"\n mean = self.predict(exog=exog, transform=transform)\n precision = self.predict(exog_precision=exog_precision,\n which=\"precision\", transform=transform)\n return precision * mean, precision * (1 - mean)\n\n def get_distribution(self, exog=None, exog_precision=None, transform=True):\n \"\"\"\n Return a instance of the predictive distribution.\n\n Parameters\n ----------\n exog : array_like\n Array of predictor variables for mean.\n exog_precision : array_like\n Array of predictor variables for mean.\n transform : bool\n If transform is True and formulas have been used, then predictor\n ``exog`` is passed through the formula processing. Default is True.\n\n Returns\n -------\n Instance of a scipy frozen distribution based on estimated\n parameters.\n\n See Also\n --------\n predict\n\n Notes\n -----\n This function delegates to the predict method to handle exog and\n exog_precision, which in turn makes any required transformations.\n\n Due to the behavior of ``scipy.stats.distributions objects``, the\n returned random number generator must be called with ``gen.rvs(n)``\n where ``n`` is the number of observations in the data set used\n to fit the model. 
If any other value is used for ``n``, misleading\n results will be produced.\n \"\"\"\n from scipy import stats\n args = self.get_distribution_params(exog=exog,\n exog_precision=exog_precision,\n transform=transform)\n args = (np.asarray(arg) for arg in args)\n distr = stats.beta(*args)\n return distr\n\n def bootstrap(self, *args, **kwargs):\n raise NotImplementedError\n\n\nclass BetaResultsWrapper(lm.RegressionResultsWrapper):\n pass\n\n\nwrap.populate_wrapper(BetaResultsWrapper,\n BetaResults)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.dot", "scipy.special.gammaln", "numpy.asarray", "numpy.log", "scipy.special.polygamma", "numpy.zeros", "numpy.block", "numpy.abs", "numpy.clip", "numpy.all", "numpy.column_stack", "scipy.stats.beta" ] ]
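The `_llobs` method in the entry above evaluates the Beta log-density under the mean/precision parameterization (alpha = mu*phi, beta = (1-mu)*phi). A minimal sketch, not part of the dataset entry, that checks this formula against scipy for made-up values of mu, phi and y:

import numpy as np
from scipy import stats
from scipy.special import gammaln as lgamma   # the entry's lgamma is scipy.special.gammaln (see its API list)

# Made-up mean, precision and observation; y must lie strictly in (0, 1).
mu, phi, y = 0.3, 5.0, 0.42
alpha, beta = mu * phi, (1 - mu) * phi

# The single-observation log-likelihood expression used in _llobs.
ll_manual = (lgamma(phi) - lgamma(alpha) - lgamma(beta)
             + (mu * phi - 1) * np.log(y)
             + ((1 - mu) * phi - 1) * np.log(1 - y))

# It should agree with scipy's Beta log-density with shape parameters (alpha, beta).
assert np.isclose(ll_manual, stats.beta(alpha, beta).logpdf(y))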
kguarian/Classification-Algorithms
[ "e9847760041f712c827778142d581529530ef93e" ]
[ "knn/main.py" ]
[ "import pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport plotly.figure_factory as ff\nimport numpy as np\n\nfrom plotly.subplots import make_subplots\nfrom tqdm import tqdm\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.decomposition import PCA\n\nfrom sklearn.feature_selection import VarianceThreshold\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\ntrain_df = pd.read_csv(\"../data/titanic/train.csv\")\ntrain = train_df.copy()\nfamily_column = train['SibSp'] + train['Parch']\ntrain['Family'] = family_column\ntrain = train[['Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Family', 'Embarked', 'Fare']]\n\n# Account for missingness\ntrain['Age'] = train['Age'].interpolate()\ntrain['Fare'] = train['Fare'].interpolate()\n\ntrain.head(5)" ]
[ [ "pandas.read_csv" ] ]
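The snippet above stops after interpolating Age and Fare. A hypothetical continuation (not from the repo) of the step it appears to be building toward with the classes it already imports: encode the categorical columns and fit a KNeighborsClassifier. The Embarked fill value and k=5 are arbitrary choices, and `train` is the DataFrame built in the entry above.

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

train_enc = train.drop(columns=['Name']).copy()            # 'Name' is not useful as a raw feature
train_enc['Sex'] = train_enc['Sex'].map({'male': 0, 'female': 1})
train_enc['Embarked'] = train_enc['Embarked'].fillna('S').map({'S': 0, 'C': 1, 'Q': 2})

X = train_enc.drop(columns=['Survived'])
y = train_enc['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

scaler = StandardScaler().fit(X_train)                     # KNN is distance-based, so scale the features
knn = KNeighborsClassifier(n_neighbors=5).fit(scaler.transform(X_train), y_train)
print(accuracy_score(y_test, knn.predict(scaler.transform(X_test))))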
DFrolova/human-rl
[ "c54ec02a48aba53a6e90d64570ebb7f62dfdea8e" ]
[ "universe-starter-agent/run.py" ]
[ "import os\nimport go_vncdriver\nimport tensorflow as tf\nimport argparse\nimport json\nimport envs\nfrom model import policies\nimport checkpoint_utils\n\nparser = argparse.ArgumentParser(description=\"Run commands\")\nparser.add_argument('logdir', type=str, help=\"Log directory path\")\n\nargs = parser.parse_args()\n\nwith open(args.logdir + \"/hparams.json\") as f:\n hparams = json.load(f)\n\nenv = envs.create_env(**hparams)\nobs = env.reset()\n\npolicyType = policies[hparams['policy']]\npolicy = policyType(env.observation_space.shape, env.action_space.n, **hparams)\nfeatures = policy.get_initial_features()\n\nsess = tf.Session()\n\n#import ipdb; ipdb.set_trace()\n\ncheckpoint_utils.init_from_checkpoint(args.logdir + '/train', {'global/':'/'})\n#saver = tf.train.Saver(sharded=True)\n#saver.restore(sess, os.path.join(args.logdir, 'train/model.ckpt-0'))\nsess.run(tf.global_variables_initializer())\n\nwith sess.as_default():\n \n while True:\n env.render()\n \n fetched = policy.act(obs, *features)\n action, value_, features = fetched[0], fetched[1], fetched[2:]\n\n obs, reward, done, info = env.step(action.argmax())\n \n if done:\n obs = env.reset()\n" ]
[ [ "tensorflow.Session", "tensorflow.global_variables_initializer" ] ]
nicolewang97/AICAPI_YW3760
[ "ce49c375acf35e86ca0878b2df56a562b3a3ad05" ]
[ "src/aicapi_yw3760/aicapi_yw3760.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os\nimport json\nimport requests\nfrom dotenv import load_dotenv\nfrom PIL import Image\nfrom io import BytesIO\nfrom IPython.core.display import display, HTML\n\ndef art_search(art):\n '''\n Function to retrieve the information about collections in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Dataframe: df\n includes the related info about the searched artworks.\n\n Example:\n -------------\n >>>art_search('monet')\n 0\t16568\tWater Lilies\tClaude Monet\\nFrench, 1840-1926\tFrance\t1906\t1906\tOil on canvas\t[Painting and Sculpture of Europe, Essentials]\n 1\t16571\tArrival of the Normandy Train, Gare Saint-Lazare\tClaude Monet\\nFrench, 1840-1926\tFrance\t1877\t1877\tOil on canvas\t[Painting and Sculpture of Europe]\n '''\n params_search = {'q': art} \n r = requests.get(\"https://api.artic.edu/api/v1/artworks/search?fields=id,title,date_start,date_end,artist_display,place_of_origin,medium_display,category_titles\", params = params_search) \n \n try:\n status = r.status_code\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'Other error occurred: {err}')\n else:\n print('no error (successfully made request)')\n r1 = json.dumps(r.json(), indent = 2)\n artsearch = json.loads(r1)\n artworks = pd.DataFrame(artsearch['data'])\n artworks_info = artworks[['id','title','artist_display','place_of_origin','date_start','date_end','medium_display','category_titles']]\n \n return artworks_info\n \ndef tour_search(tour):\n '''\n Function to retrieve the information about tour in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Dataframe: df\n includes the related info about the searched tour.\n\n Example:\n -------------\n >>>tour_search('monet')\n 0\t4714\tMonet and Chicago\thttp://aic-mobile-tours.artic.edu/sites/defaul...\t<p>Monet and Chicago presents the city’s uniqu...\t<p>Monet and Chicago is the first exhibition t...\t[Cliff Walk at Pourville, Caricature of a Man ...\t[Claude Monet, Claude Monet, Claude Monet, Cla...\n 1\t4520\tManet and Modern Beauty\thttp://aic-mobile-tours.artic.edu/sites/defaul...\t<p>Dive deep into the life and mind of one the...\t<p>Manet is undoubtedly one of the most fascin...\t[]\t[]\n\n '''\n params_search_tour = {'q': tour} \n rt = requests.get(\"https://api.artic.edu/api/v1/tours/search?fields=id,image,title,description,intro,artwork_titles,artist_titles\", params = params_search_tour)\n try:\n status = rt.status_code\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'Other error occurred: {err}')\n else:\n print('no error (successfully made request)')\n rt1 = json.dumps(rt.json(), indent = 2)\n toursearch = json.loads(rt1)\n ntour = pd.DataFrame(toursearch['data'])\n tour_info = ntour[['id','title','image','description','intro','artwork_titles','artist_titles']]\n \n return tour_info\n \ndef pic_search(pic, artist):\n '''\n Function to retrieve the images of artworks collected in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the 
artist\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Image: jpg\n The image of the searched atwork\n Error Message:\n Error messsage if the search is invalid\n\n Example:\n -------------\n >>>pic_search('Water Lillies', 'Claude Monet')\n\n '''\n params_search_pic = {'q': pic} \n rp = requests.get(\"https://api.artic.edu/api/v1/artworks/search?fields=id,title,artist_display,image_id\", params = params_search_pic)\n \n linkhead = 'https://www.artic.edu/iiif/2/'\n linktail = '/full/843,/0/default.jpg'\n \n try:\n status = rp.status_code\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'Other error occurred: {err}')\n else:\n print('no error (successfully made request)')\n rp1 = json.dumps(rp.json(), indent = 2)\n picsearch = json.loads(rp1)\n npic = pd.DataFrame(picsearch['data'])\n pic_info = npic[['id','title','artist_display','image_id']] \n \n df_len = len(pic_info)\n for i in range(df_len):\n if pic_info.iloc[i]['title'] == pic and (artist in pic_info.iloc[i]['artist_display']): # match title and artist with user input\n get_image_id = pic_info.iloc[i]['image_id']\n image_link = linkhead + get_image_id + linktail\n response = requests.get(image_link)\n img = Image.open(BytesIO(response.content))\n return img\n \n print(\"Invalid Search! Please find related information below :)\")\n return pic_info\n \ndef product_search(product_art, product_category):\n '''\n Function to retrieve the information about products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFrame: a dataframe include related info about the products and images of the products\n\n Example:\n -------------\n >>>product_search('Rainy Day', 'Mug')\n >>>0\t245410\tGustave Caillebotte Paris Street; Rainy Day Mug\t\t$9.95...\n\n '''\n params_search_product = {'q': product_art} \n rpro = requests.get(\"https://api.artic.edu/api/v1/products?search\", params = params_search_product)\n\n try:\n status = rpro.status_code\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'Other error occurred: {err}')\n else:\n print('no error (successfully made request)')\n rpro1 = json.dumps(rpro.json(), indent = 2)\n productsearch = json.loads(rpro1)\n nproduct = pd.DataFrame(productsearch['data'])\n df_len1 = len(nproduct)\n for i in range(df_len1):\n if product_art in nproduct.iloc[i]['title'] and (product_category in nproduct.iloc[i]['description']): # match title and artist with user input\n product_info = nproduct[['id','title','image_url','price_display','description']]\n \n def path_to_image_html(path):\n return '<img src=\"'+ path + '\" width=\"60\" >'\n image_cols = ['image_url']\n\n format_dict={}\n for image_cols in image_cols:\n format_dict[image_cols] = path_to_image_html\n html = display(HTML(product_info.to_html(escape = False,formatters = format_dict)))\n return html\n else:\n return\"Invalid Search! 
Please try other artworks or categories:)\"\n \ndef product_show(product_art_show):\n '''\n Function to retrieve the information about top10 products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n Type in any random word\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFrame: a dataframe include related info about the top 10 products and images of the products\n\n Example:\n -------------\n >>>product_search('')\n >>>0\t250620\tThe Age of French Impressionism—Softcover\t\t$30...\n\n '''\n params_show_product = {'q': product_art_show} \n rproshow = requests.get(\"https://api.artic.edu/api/v1/products?limit=10\", params = params_show_product)\n\n try:\n status = rproshow.status_code\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'Other error occurred: {err}')\n else:\n print('no error (successfully made request)')\n rproshow1 = json.dumps(rproshow.json(), indent = 2)\n productshow = json.loads(rproshow1)\n nproductshow = pd.DataFrame(productshow['data'])\n product_show_info = nproductshow[['id','title','image_url','price_display','description']]\n \n def path_to_image_html(path):\n return '<img src=\"'+ path + '\" width=\"60\" >'\n image_cols1 = ['image_url']\n\n format_dict={}\n for image_cols1 in image_cols1:\n format_dict[image_cols1] = path_to_image_html\n html1 = display(HTML(product_show_info.to_html(escape = False,formatters = format_dict)))\n return html1" ]
[ [ "pandas.DataFrame" ] ]
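All of the helpers in the entry above wrap the public Art Institute of Chicago API. A minimal request against the artworks search endpoint they use, assuming the API behaves as the entry's own docstrings describe (network access required; the query string is arbitrary):

import requests
import pandas as pd

resp = requests.get(
    "https://api.artic.edu/api/v1/artworks/search",
    params={"q": "monet", "fields": "id,title,artist_display,date_start,date_end"},
)
resp.raise_for_status()

# The payload carries the matching records under the "data" key, as in art_search().
df = pd.DataFrame(resp.json()["data"])
print(df[["id", "title", "artist_display"]].head())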
alecone/ROS_project
[ "5de01433d177fde5cac4423f05fd554e3c00794e" ]
[ "g2opy/python/examples/ba_anchored_inverse_depth_demo.py" ]
[ "# https://github.com/RainerKuemmerle/g2o/blob/master/g2o/examples/ba_anchored_inverse_depth/ba_anchored_inverse_depth_demo.cpp\n\nimport numpy as np\nimport g2o \n\nfrom collections import defaultdict\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--noise', dest='pixel_noise', type=float, default=1., \n help='noise in image pixel space (default: 1.0)')\nparser.add_argument('--outlier', dest='outlier_ratio', type=float, default=0., \n help='probability of spuroius observation (default: 0.0)')\nparser.add_argument('--robust', dest='robust_kernel', action='store_true', help='use robust kernel')\nparser.add_argument('--no-schur', dest='schur_trick', action='store_false', help='not use Schur-complement trick')\nparser.add_argument('--seed', type=int, default=0, help='random seed')\nargs = parser.parse_args()\n\n\n\ndef invert_depth(x):\n assert len(x) == 3 and x[2] != 0\n return np.array([x[0], x[1], 1]) / x[2]\n\n\ndef main():\n optimizer = g2o.SparseOptimizer()\n if args.schur_trick:\n solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())\n else:\n solver = g2o.BlockSolverX(g2o.LinearSolverEigenX()) # slower\n solver = g2o.OptimizationAlgorithmLevenberg(solver)\n optimizer.set_algorithm(solver)\n\n true_points = np.hstack([\n np.random.random((500, 1)) * 3 - 1.5,\n np.random.random((500, 1)) - 0.5,\n np.random.random((500, 1)) + 3])\n\n \n focal_length = 1000.\n principal_point = (320, 240)\n cam = g2o.CameraParameters(focal_length, principal_point, 0)\n cam.set_id(0)\n\n optimizer.add_parameter(cam)\n\n true_poses = []\n num_pose = 15\n for i in range(num_pose):\n # pose here means transform points from world coordinates to camera coordinates\n pose = g2o.SE3Quat(np.identity(3), [i*0.04-1, 0, 0])\n true_poses.append(pose)\n\n v_se3 = g2o.VertexSE3Expmap()\n v_se3.set_id(i)\n v_se3.set_estimate(pose)\n if i < 2:\n v_se3.set_fixed(True)\n optimizer.add_vertex(v_se3)\n\n\n point_id = num_pose\n inliers = dict()\n sse = defaultdict(float)\n\n for i, point in enumerate(true_points):\n visible = []\n for j, pose in enumerate(true_poses):\n z = cam.cam_map(pose * point)\n if 0 <= z[0] < 640 and 0 <= z[1] < 480:\n visible.append((j, z))\n if len(visible) < 2:\n continue\n\n v_p = g2o.VertexSBAPointXYZ()\n v_p.set_id(point_id)\n v_p.set_marginalized(args.schur_trick)\n\n anchor = visible[0][0]\n point2 = true_poses[anchor] * (point + np.random.randn(3))\n if point2[2] == 0:\n continue\n v_p.set_estimate(invert_depth(point2))\n optimizer.add_vertex(v_p)\n\n inlier = True\n for j, z in visible:\n if np.random.random() < args.outlier_ratio:\n inlier = False\n z = np.random.random(2) * [640, 480]\n z += np.random.randn(2) * args.pixel_noise\n\n edge = g2o.EdgeProjectPSI2UV()\n edge.resize(3)\n edge.set_vertex(0, v_p)\n edge.set_vertex(1, optimizer.vertex(j))\n edge.set_vertex(2, optimizer.vertex(anchor))\n edge.set_measurement(z)\n edge.set_information(np.identity(2))\n if args.robust_kernel:\n edge.set_robust_kernel(g2o.RobustKernelHuber())\n\n edge.set_parameter_id(0, 0)\n optimizer.add_edge(edge)\n\n if inlier:\n inliers[point_id] = (i, anchor)\n error = (true_poses[anchor].inverse() * invert_depth(v_p.estimate()) - \n true_points[i])\n sse[0] += np.sum(error**2)\n point_id += 1\n\n print('Performing full BA:')\n optimizer.initialize_optimization()\n optimizer.set_verbose(True)\n optimizer.optimize(10)\n\n\n for i in inliers:\n v_p = optimizer.vertex(i)\n v_anchor = optimizer.vertex(inliers[i][1])\n error = (v_anchor.estimate().inverse() * 
invert_depth(v_p.estimate()) - \n true_points[inliers[i][0]])\n sse[1] += np.sum(error**2)\n\n\n print('\\nRMSE (inliers only):')\n print('before optimization:', np.sqrt(sse[0] / len(inliers)))\n print('after optimization:', np.sqrt(sse[1] / len(inliers)))\n\n\n\nif __name__ == '__main__':\n if args.seed > 0:\n np.random.seed(args.seed)\n\n main()" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.sum", "numpy.random.randn", "numpy.identity", "numpy.random.random" ] ]
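A tiny self-contained check of the `invert_depth` helper from the entry above: it maps (x, y, z) to (x/z, y/z, 1/z), so applying it twice recovers the original point (for z != 0).

import numpy as np

def invert_depth(x):                      # copied verbatim from the entry above
    assert len(x) == 3 and x[2] != 0
    return np.array([x[0], x[1], 1]) / x[2]

p = np.array([0.4, -0.2, 3.0])            # arbitrary 3D point with nonzero depth
assert np.allclose(invert_depth(invert_depth(p)), p)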
BIT-ENGD/OpenPrompt
[ "f4f0159943afab0c0ce158628092bd28404d5017" ]
[ "openprompt/prompts/one2one_verbalizer.py" ]
[ "import json\nfrom transformers.tokenization_utils import PreTrainedTokenizer\nfrom yacs.config import CfgNode\nfrom openprompt.data_utils import InputFeatures\nimport re\nfrom openprompt import Verbalizer\nfrom typing import *\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom openprompt.utils.logging import logger\n\n\n\nclass One2oneVerbalizer(Verbalizer):\n r\"\"\"\n The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.\n This class restrict the use of label words to one words per label. For a verbalzer with less constraints,\n please use Basic ManualVerbalizer.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.\n classes (:obj:`classes`): The classes (or labels) of the current task.\n num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)\n multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.\n post_log_softmax (:obj:`bool`, optional): Whether to apply log softmax post processing on label_logits. Default to True.\n \"\"\"\n def __init__(self,\n tokenizer: PreTrainedTokenizer,\n num_classes: Optional[int] = None,\n classes: Optional[List] = None,\n label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,\n prefix: Optional[str] = \" \",\n multi_token_handler: Optional[str] = \"first\",\n post_log_softmax: Optional[bool] = True,\n ):\n super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)\n self.prefix = prefix\n self.multi_token_handler = multi_token_handler\n self.label_words = label_words\n self.post_log_softmax = post_log_softmax\n\n def on_label_words_set(self):\n super().on_label_words_set()\n self.label_words = self.add_prefix(self.label_words, self.prefix)\n self.generate_parameters()\n\n @staticmethod\n def add_prefix(label_words, prefix):\n r\"\"\"Add prefix to label words. For example, if a label words is in the middle of a template,\n the prefix should be ``' '``.\n\n Args:\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer.\n\n Returns:\n :obj:`Sequence[str]`: New label words with prefix.\n \"\"\"\n new_label_words = []\n if isinstance(label_words[0], list):\n assert max([len(w) for w in label_words]) == 1, \"Providing multiple label words, you should use other verbalizers instead.\"\n label_words = [w[0] for w in label_words]\n\n for word in label_words:\n if word.startswith(\"<!>\"):\n new_label_words.append(word.split(\"<!>\")[1])\n else:\n new_label_words.append(prefix + word)\n\n return new_label_words\n\n def generate_parameters(self) -> List:\n r\"\"\"In basic manual template, the parameters are generated from label words directly.\n In this implementation, the label_words should not be tokenized into more than one token.\n \"\"\"\n words_ids = []\n for word in self.label_words:\n word_ids = self.tokenizer.encode(word, add_special_tokens=False)\n if len(word_ids) > 1:\n logger.warning(\"Word {} is split into multiple tokens: {}. 
\\\n If this is not what you expect, try using another word for this verbalizer\" \\\n .format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))\n words_ids.append(word_ids)\n\n\n max_len = max([len(ids) for ids in words_ids])\n words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]\n words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]\n\n words_ids_tensor = torch.tensor(words_ids)\n words_ids_mask = torch.tensor(words_ids_mask)\n self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)\n self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)\n\n def project(self,\n logits: torch.Tensor,\n **kwargs,\n ) -> torch.Tensor:\n r\"\"\"\n Project the labels, the return value is the normalized (sum to 1) probs of label words.\n\n Args:\n logits (:obj:`torch.Tensor`): The orginal logits of label words.\n\n Returns:\n :obj:`torch.Tensor`: The normalized logits of label words\n \"\"\"\n label_words_logits = logits[:, self.label_words_ids]\n label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)\n return label_words_logits\n\n def process_logits(self, logits: torch.Tensor, **kwargs):\n r\"\"\"A whole framework to process the original logits over the vocabulary, which contains four steps:\n\n (1) Project the logits into logits of label words\n\n if self.post_log_softmax is True:\n\n (2) Normalize over all label words\n\n (3) Calibrate (optional)\n\n Args:\n logits (:obj:`torch.Tensor`): The orginal logits.\n\n Returns:\n (:obj:`torch.Tensor`): The final processed logits over the label words set.\n \"\"\"\n # project\n label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)\n\n if self.post_log_softmax:\n # normalize\n label_words_probs = self.normalize(label_words_logits)\n\n # calibrate\n if hasattr(self, \"_calibrate_logits\") and self._calibrate_logits is not None:\n label_words_probs = self.calibrate(label_words_probs=label_words_probs)\n\n # convert to logits\n label_words_logits = torch.log(label_words_probs+1e-15)\n\n return label_words_logits\n\n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given logits regarding the entire vocabulary, return the probs over the label words set.\n\n Args:\n logits (:obj:`Tensor`): The logits over the entire vocabulary.\n\n Returns:\n :obj:`Tensor`: The logits over the label words set.\n\n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n\n def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:\n r\"\"\"\n\n Args:\n label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]\n\n Returns:\n :obj:`torch.Tensor`: The calibrated probability of label words.\n \"\"\"\n shape = label_words_probs.shape\n assert self._calibrate_logits.dim() == 1, \"self._calibrate_logits are not 1-d tensor\"\n calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))\n assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \\\n and calibrate_label_words_probs.shape[0]==1, \"shape not match\"\n label_words_probs /= (calibrate_label_words_probs+1e-15)\n # normalize # TODO Test the performance\n norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of 
detaching()\n label_words_probs /= norm\n return label_words_probs\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.log", "torch.tensor", "torch.nn.Parameter" ] ]
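Stripped-down sketch (not the OpenPrompt API) of what `project()` plus `normalize()` amount to in the one-token-per-label case: pick each label word's column out of the mask-position vocabulary logits, then softmax over the classes. The vocabulary size, token ids and logits below are made up; multi-token handling and calibration are omitted.

import torch
import torch.nn.functional as F

batch_size, vocab_size = 2, 10
label_words_ids = torch.tensor([3, 7])             # one (hypothetical) token id per class
logits = torch.randn(batch_size, vocab_size)       # stand-in for PLM logits at the <mask> position

label_words_logits = logits[:, label_words_ids]    # (batch_size, num_classes)
label_words_probs = F.softmax(label_words_logits, dim=-1)
print(label_words_probs)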
Ashymad/praca.inz
[ "dbb1fba10e421c3610bb66a06b7601d2ca4366c6" ]
[ "tests/python/tests/conv/test.py" ]
[ "# pass test\nimport numpy as np\n\ndef prepare_input(input_size):\n return [np.random.rand(input_size), np.random.rand(input_size)]\n\ndef test_function(input_data):\n return np.convolve(input_data[0], input_data[1])\n" ]
[ [ "numpy.random.rand", "numpy.convolve" ] ]
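A hedged usage sketch for the harness above (the two helpers are repeated so it runs standalone): numpy's full convolution of two length-n inputs has length 2n-1.

import numpy as np

def prepare_input(input_size):
    return [np.random.rand(input_size), np.random.rand(input_size)]

def test_function(input_data):
    return np.convolve(input_data[0], input_data[1])

data = prepare_input(8)
out = test_function(data)
assert out.shape == (2 * 8 - 1,)           # full convolution: n + m - 1 samples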
byanofsky/tensorboard
[ "42368f5be4611eac2b4206a00a322f79083b1aa8" ]
[ "tensorboard/data/experimental/experiment_from_dev_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorboard.uploader.exporter.\"\"\"\n\n\nfrom unittest import mock\n\nimport numpy as np\nimport pandas\n\nfrom tensorboard import test as tb_test\nfrom tensorboard.data.experimental import experiment_from_dev\nfrom tensorboard.uploader import test_util\nfrom tensorboard.uploader.proto import export_service_pb2\nfrom tensorboard.util import grpc_util\n\n\nclass ExperimentFromDevTest(tb_test.TestCase):\n def test_get_scalars_works(self):\n mock_api_client = mock.Mock()\n\n def stream_experiment_data(request, **kwargs):\n self.assertEqual(request.experiment_id, \"789\")\n self.assertEqual(kwargs[\"metadata\"], grpc_util.version_metadata())\n for run in (\"train\", \"test\"):\n for tag in (\"accuracy\", \"loss\"):\n response = export_service_pb2.StreamExperimentDataResponse()\n response.run_name = run\n response.tag_name = tag\n display_name = \"%s:%s\" % (request.experiment_id, tag)\n response.tag_metadata.CopyFrom(\n test_util.scalar_metadata(display_name)\n )\n for step in range(10):\n response.points.steps.append(step)\n if tag == \"loss\":\n if run == \"train\":\n value = 1.0 / (step + 1)\n seconds = step\n else:\n value = -1.0 / (step + 1)\n seconds = 600 + step\n else: # \"accuracy\"\n if run == \"train\":\n value = 1.0 / (10 - step)\n seconds = step * 2\n else:\n value = -1.0 / (10 - step)\n seconds = 600 + step * 2\n response.points.values.append(value)\n response.points.wall_times.add(seconds=seconds, nanos=0)\n yield response\n\n mock_api_client.StreamExperimentData = mock.Mock(\n wraps=stream_experiment_data\n )\n\n with mock.patch.object(\n experiment_from_dev,\n \"get_api_client\",\n lambda api_endpoint: mock_api_client,\n ):\n experiment = experiment_from_dev.ExperimentFromDev(\"789\")\n for pivot in (False, True):\n for include_wall_time in (False, True):\n with self.subTest(\n \"pivot=%s; include_wall_time=%s\"\n % (pivot, include_wall_time)\n ):\n dataframe = experiment.get_scalars(\n pivot=pivot, include_wall_time=include_wall_time\n )\n\n if pivot:\n run_key = (\n (\"run\", \"\") if include_wall_time else \"run\"\n )\n step_key = (\n (\"step\", \"\") if include_wall_time else \"step\"\n )\n accuracy_value_key = (\n (\"value\", \"accuracy\")\n if include_wall_time\n else \"accuracy\"\n )\n loss_value_key = (\n (\"value\", \"loss\")\n if include_wall_time\n else \"loss\"\n )\n data = {\n run_key: [\"test\"] * 10 + [\"train\"] * 10,\n step_key: np.concatenate(\n [np.arange(0, 10), np.arange(0, 10)]\n ),\n accuracy_value_key: np.concatenate(\n [\n -1.0 / (10.0 - np.arange(0, 10)),\n 1.0 / (10.0 - np.arange(0, 10)),\n ],\n ),\n loss_value_key: np.concatenate(\n [\n -1.0 / (1.0 + np.arange(0, 10)),\n 1.0 / (1.0 + np.arange(0, 10)),\n ],\n ),\n }\n if include_wall_time:\n data[\n (\"wall_time\", \"accuracy\")\n ] = np.concatenate(\n [\n 600.0 + 2.0 * np.arange(0, 10),\n 
2.0 * np.arange(0, 10),\n ]\n )\n data[(\"wall_time\", \"loss\")] = np.concatenate(\n [\n 600.0 + np.arange(0, 10),\n 1.0 * np.arange(0, 10),\n ]\n )\n expected = pandas.DataFrame(data)\n else: # No pivot_table.\n data = {\n \"run\": [\"train\"] * 20 + [\"test\"] * 20,\n \"tag\": ([\"accuracy\"] * 10 + [\"loss\"] * 10) * 2,\n \"step\": list(np.arange(0, 10)) * 4,\n \"value\": np.concatenate(\n [\n 1.0 / (10.0 - np.arange(0, 10)),\n 1.0 / (1.0 + np.arange(0, 10)),\n -1.0 / (10.0 - np.arange(0, 10)),\n -1.0 / (1.0 + np.arange(0, 10)),\n ]\n ),\n }\n if include_wall_time:\n data[\"wall_time\"] = np.concatenate(\n [\n 2.0 * np.arange(0, 10),\n 1.0 * np.arange(0, 10),\n 600.0 + 2.0 * np.arange(0, 10),\n 600.0 + np.arange(0, 10),\n ]\n )\n expected = pandas.DataFrame(data)\n\n pandas.testing.assert_frame_equal(\n dataframe,\n expected,\n check_names=True,\n )\n\n def test_get_scalars_with_pivot_table_with_missing_value(self):\n mock_api_client = mock.Mock()\n\n def stream_experiment_data(request, **kwargs):\n self.assertEqual(request.experiment_id, \"789\")\n self.assertEqual(kwargs[\"metadata\"], grpc_util.version_metadata())\n response = export_service_pb2.StreamExperimentDataResponse()\n response.run_name = \"train\"\n response.tag_name = \"batch_loss\"\n response.points.steps.append(0)\n response.points.values.append(0.5)\n response.points.wall_times.add(seconds=0, nanos=0)\n response.points.steps.append(1)\n response.points.values.append(0.25)\n response.points.wall_times.add(seconds=1, nanos=0)\n yield response\n response = export_service_pb2.StreamExperimentDataResponse()\n response.run_name = \"train\"\n response.tag_name = \"epoch_loss\"\n response.points.steps.append(0)\n response.points.values.append(0.375)\n response.points.wall_times.add(seconds=2, nanos=0)\n yield response\n\n mock_api_client.StreamExperimentData = mock.Mock(\n wraps=stream_experiment_data\n )\n\n with mock.patch.object(\n experiment_from_dev,\n \"get_api_client\",\n lambda api_endpoint: mock_api_client,\n ):\n experiment = experiment_from_dev.ExperimentFromDev(\"789\")\n with self.assertRaisesRegexp(\n ValueError,\n r\"contains missing value\\(s\\).*different sets of \"\n r\"steps.*pivot=False\",\n ):\n experiment.get_scalars(pivot=True)\n\n def test_get_scalars_with_actual_inf_and_nan(self):\n \"\"\"Test for get_scalars() call that involve inf and nan in user data.\"\"\"\n mock_api_client = mock.Mock()\n\n def stream_experiment_data(request, **kwargs):\n self.assertEqual(request.experiment_id, \"789\")\n self.assertEqual(kwargs[\"metadata\"], grpc_util.version_metadata())\n response = export_service_pb2.StreamExperimentDataResponse()\n response.run_name = \"train\"\n response.tag_name = \"batch_loss\"\n response.points.steps.append(0)\n response.points.values.append(np.nan)\n response.points.wall_times.add(seconds=0, nanos=0)\n response.points.steps.append(1)\n response.points.values.append(np.inf)\n response.points.wall_times.add(seconds=10, nanos=0)\n yield response\n\n mock_api_client.StreamExperimentData = mock.Mock(\n wraps=stream_experiment_data\n )\n\n with mock.patch.object(\n experiment_from_dev,\n \"get_api_client\",\n lambda api_endpoint: mock_api_client,\n ):\n experiment = experiment_from_dev.ExperimentFromDev(\"789\")\n dataframe = experiment.get_scalars(pivot=True)\n\n expected = pandas.DataFrame(\n {\n \"run\": [\"train\"] * 2,\n \"step\": [0, 1],\n \"batch_loss\": [np.nan, np.inf],\n }\n )\n pandas.testing.assert_frame_equal(dataframe, expected, check_names=True)\n\n\nif __name__ == 
\"__main__\":\n tb_test.main()\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal", "numpy.arange" ] ]
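Illustration (not from the repo) of the two layouts the test compares: `get_scalars()` returns one row per (run, tag, step), and `pivot=True` reshapes that into one column per tag, roughly what a pandas pivot_table does.

import pandas as pd

long_df = pd.DataFrame({
    "run": ["train"] * 4,
    "tag": ["accuracy", "accuracy", "loss", "loss"],
    "step": [0, 1, 0, 1],
    "value": [0.5, 0.6, 1.0, 0.5],
})

wide_df = (long_df
           .pivot_table(index=["run", "step"], columns="tag", values="value")
           .reset_index())
print(wide_df)                             # columns: run, step, accuracy, loss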
tlangfor/ProPCA
[ "e94c9729f5ff9e1c4b70864fd9cb3dc85e4aebe1" ]
[ "misc/selection/galinsky.py" ]
[ "#!/bin/env python3\n\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom pandas_plink import read_plink\nimport argparse\n\nif __name__=='__main__':\n\n\tparser = argparse.ArgumentParser(description=\"Calculate chi-squared selection statistics based on principal components from Galinsky et al. 2016\")\n\tparser.add_argument(\"outfile\",help=\"path to output file name\")\n\tparser.add_argument(\"plink\",help=\"path to PLINK prefix\")\n\tparser.add_argument(\"eval\",help=\"path to eigenvalue file\")\n\tparser.add_argument(\"proj\",help=\"path to projections file\")\n\tparser.add_argument(\"-v\",\"--verbose\",help=\"verbose mode (default: TRUE)\",action=\"store_false\")\n\tparser.add_argument(\"-m\",\"--missing\",help=\"missing mode (default: FALSE)\",action=\"store_true\")\n\tparser.add_argument(\"-c\",\"--chunk\",help=\"chunk size (default: 64)\",type=int,default=64)\n\n\targs = parser.parse_args()\n\t\n\toutfile = args.outfile\n\tfilename = args.plink\n\teigenvec_file = args.proj\n\teigenvals_file = args.eval\t\n\tverbose = args.verbose\n\tchunk_size = args.chunk\n\tmissing = args.missing\n\n\tevecs = np.loadtxt(eigenvec_file,dtype=np.float64)\n\tevals = np.loadtxt(eigenvals_file,dtype=np.float64,delimiter='\\n')\n\n\tevec_scalar = np.nansum(evecs,axis=0)[np.newaxis,:]\n\n\toutput=open(outfile,\"wb\")\n\n\t(bim, _, G) = read_plink(filename)\n\tsnps = bim['snp']\n\tdel(bim)\n\n\tncols = evecs.shape[0]\t\n\n\tfor counter in range(int(np.ceil(G.shape[0]/chunk_size))):\n\t\tif verbose:\n\t\t\tprint(\"Reading {}\".format((counter+1)*chunk_size))\n\t\t\n\t\tlabels = snps[counter*chunk_size:(counter+1)*chunk_size]\n\n\t\tgenos = G[counter*chunk_size:(counter+1)*chunk_size,:].compute()\n\t\t\n\t\tp = np.nanmean(genos,axis=1)/2\t\t\n\n\t\tif missing:\n\t\t\tgenos = np.nan_to_num(genos)\n\n\t\tscores = np.dot(genos,evecs)\n\n\t\tscores = scores - 2*np.dot(p[:,np.newaxis],evec_scalar)\n\n\t\tscores = scores / np.sqrt(2*p*(1-p))[:,np.newaxis]\n\n\t\tstatistic = (1/evals) * (scores**2)\n\n\t\tstatistic = np.insert(statistic.astype(str),0,labels,axis=1)\n\t\t\n\t\tnp.savetxt(output,statistic,delimiter=\"\\t\",fmt=\"%s\")\t\n\n\toutput.close()\n\texit(0)\n" ]
[ [ "numpy.ceil", "numpy.dot", "numpy.savetxt", "numpy.nan_to_num", "numpy.nansum", "numpy.nanmean", "numpy.loadtxt", "numpy.sqrt" ] ]
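Toy sketch with made-up data of the per-SNP quantity computed in the loop above: center genotypes by 2p, scale by sqrt(2p(1-p)), project onto an eigenvector, and divide the squared score by the eigenvalue (what the entry's docstring calls a chi-squared selection statistic). The sizes, eigenvector and eigenvalue below are arbitrary stand-ins.

import numpy as np

rng = np.random.default_rng(0)
genos = rng.integers(0, 3, size=(5, 100)).astype(float)   # 5 SNPs x 100 samples, genotypes in {0, 1, 2}
evec = rng.normal(size=(100, 1))                           # stand-in projection vector
eigval = 2.5                                               # matching eigenvalue (made up)

p = np.nanmean(genos, axis=1) / 2
scores = (genos - 2 * p[:, None]) @ evec / np.sqrt(2 * p * (1 - p))[:, None]
statistic = (1 / eigval) * scores**2
print(statistic.ravel())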
Circumstellar/MichaelJordan
[ "23c2442e83310866ce7bb57cff13e106845cd839" ]
[ "deredden.py" ]
[ "#==============================================================================\n# DEREDDEN.py Sean Andrews's deredden.pro ported to python3\n#\n# A simple function to provide the de-reddening factor in either magnitudes\n# (with keyword /mags set) or flux density at a range of input wavelengths,\n# given a visual extinction (Av).\n#\n# made composite extinction curves for different Av\n#\t\t\tregimes: at higher Av, use McClure 2009 model, but at\n#\t\t\tlower Av can use the Rv = 3.1 (DISM) Mathis 1990 model.\n# the McClure 2009 model switches at Ak = 1\n#==============================================================================\n\nfrom astropy.io import ascii\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\n\ndata = ascii.read(\"ext_curves.dat\")\nawl = data['wl'] #wavelength grid for extinction [microns]\nA1 = data['A1'] #Mathis Law\nA2 = data['A2'] # Valid 0.3 < Ak < 1\nA3 = data['A3'] # Valid 1 < Ak < 7\n\n#what is Av_me? An arbitrary cutoff assumed by Sean?\n# Alambda = Av * (1/7.75 * interpol(A2,awl,wl))\n\ndef deredden(wl, Av, thres=None, mags=True):\n '''Takes in wavelength array in microns. Valid between .1200 um and 1e4 microns.'''\n #- thresholds for different extinction curve regimes\n if thres is not None:\n Av_lo = thresh\n else:\n Av_lo = 0.0\n\n Av_me = 2.325 #McClure 2009 threshold: AK = 0.3\n\n if (Av_lo >= Av_me):\n Av_lo = 0.0\n\n Av_hi = 7.75 #McClure 2009 threshold: AK = 1.0\n\n if (Av >= Av_hi):\n AA = A3\n AvAk = 7.75\n\n if (Av >= Av_me) and (Av < Av_hi):\n AA = A2\n AvAk = 7.75\n\n if (Av >= Av_lo) and (Av < Av_me):\n AA = A2\n AvAk = 7.75\n\n if (Av < Av_lo):\n AA = A1\n AvAk = 9.03\n\n AK_AV = 1. / AvAk\n\n #interpolate extinction curve onto input wavelength grid\n Alambda_func = interp1d(awl, Av * AK_AV * AA)\n Alambda = Alambda_func(wl)\n\n # - return the extinction at input wavelengths\n #at this point, Alambda is in magnitudes\n\n if mags:\n return Alambda\n else:\n # to convert to flux, raise 10^(0.4 * Alambda)\n return 10. ** (0.4 * Alambda)\n\ndef av_point(wl):\n '''call this, get grid. multiply grid by Av to get redenning at that wavelength.'''\n # Using A2\n AK_AV = 1 / 7.75\n Alambda_func = interp1d(awl, AK_AV * A2, kind='linear')\n return Alambda_func(wl)\n\ndef create_red_grid(wl):\n avs = av_points(wl)\n np.save('red_grid.npy',avs)\n\n\ndef plot_curve():\n '''To test implementation'''\n fig = plt.figure()\n ax = fig.add_subplot(111)\n wl = np.linspace(.13, 10, num=300)\n ax.plot(wl, deredden(wl, .2, mags=False), label=\"0.2 mags\")\n ax.plot(wl, deredden(wl, 1.0, mags=False), label=\"1.0 mags\")\n ax.plot(wl, deredden(wl, 2.0, mags=False), label=\"2.0 mags\")\n avs = av_points(wl)\n ax.plot(wl, 10**(0.4 * avs), \"k:\", label=\"fiducial\")\n ax.legend(loc=\"upper right\")\n ax.set_xlabel(r\"$\\lambda\\quad[\\AA]$\")\n ax.set_ylabel(r\"$A_\\lambda$\")\n plt.savefig(\"redenning_curves.png\")\n\n\ndef main():\n plot_curve()\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "scipy.interpolate.interp1d", "matplotlib.pyplot.savefig", "numpy.save", "matplotlib.pyplot.figure", "numpy.linspace" ] ]
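Quick numerical check of the magnitude/flux relation used in `deredden()`: extinction of A_lambda magnitudes dims the observed flux by 10**(-0.4*A_lambda), so the de-reddening factor returned with mags=False is its reciprocal.

import numpy as np

A_lambda = 1.0                             # 1 mag of extinction (arbitrary)
dimming = 10 ** (-0.4 * A_lambda)          # fraction of the intrinsic flux that gets through
correction = 10 ** (0.4 * A_lambda)        # factor deredden(..., mags=False) would return
assert np.isclose(dimming * correction, 1.0)
print(correction)                          # ~2.512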
budakn/INDRA
[ "393958b2ca7bc1ca5d054885c0634f434ff7496e" ]
[ "indra/tools/reading/submit_reading_pipeline.py" ]
[ "from __future__ import absolute_import, print_function, unicode_literals\n\nimport pickle\nfrom builtins import dict, str\n\nimport os\nimport re\nimport boto3\nimport logging\nimport botocore.session\nfrom time import sleep\nimport matplotlib as mpl\nfrom numpy import median, arange, array\n\nfrom indra.tools.reading.util.reporter import Reporter\nfrom indra.util.get_version import get_git_info\n\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime, timedelta\nfrom indra.literature import elsevier_client as ec\nfrom indra.literature.elsevier_client import _ensure_api_keys\nfrom indra.util.aws import get_job_log, tag_instance, get_batch_command\nfrom indra.util.nested_dict import NestedDict\n\nbucket_name = 'bigmech'\n\nlogger = logging.getLogger('aws_reading')\n\n\nclass BatchReadingError(Exception):\n pass\n\n\ndef wait_for_complete(queue_name, job_list=None, job_name_prefix=None,\n poll_interval=10, idle_log_timeout=None,\n kill_on_log_timeout=False, stash_log_method=None,\n tag_instances=False, result_record=None):\n \"\"\"Return when all jobs in the given list finished.\n\n If not job list is given, return when all jobs in queue finished.\n\n Parameters\n ----------\n queue_name : str\n The name of the queue to wait for completion.\n job_list : Optional[list(dict)]\n A list of jobID-s in a dict, as returned by the submit function.\n Example: [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}, ...]\n If not given, this function will return if all jobs completed.\n job_name_prefix : Optional[str]\n A prefix for the name of the jobs to wait for. This is useful if the\n explicit job list is not available but filtering is needed.\n poll_interval : Optional[int]\n The time delay between API calls to check the job statuses.\n idle_log_timeout : Optional[int] or None\n If not None, then track the logs of the active jobs, and if new output\n is not produced after `idle_log_timeout` seconds, a warning is printed.\n If `kill_on_log_timeout` is set to True, the job will also be\n terminated.\n kill_on_log_timeout : Optional[bool]\n If True, and if `idle_log_timeout` is set, jobs will be terminated\n after timeout. This has no effect if `idle_log_timeout` is None.\n Default is False.\n stash_log_method : Optional[str]\n Select a method to store the job logs, either 's3' or 'local'. If no\n method is specified, the logs will not be loaded off of AWS. If 's3' is\n specified, then `job_name_prefix` must also be given, as this will\n indicate where on s3 to store the logs.\n tag_instances : bool\n Default is False. If True, apply tags to the instances. 
This is toady\n typically done by each job, so in most cases this should not be needed.\n result_record : dict\n A dict which will be modified in place to record the results of the job.\n \"\"\"\n if stash_log_method == 's3' and job_name_prefix is None:\n raise Exception('A job_name_prefix is required to post logs on s3.')\n\n start_time = datetime.now()\n if job_list is None:\n job_id_list = []\n else:\n job_id_list = [job['jobId'] for job in job_list]\n\n def get_jobs_by_status(status, job_id_filter=None, job_name_prefix=None):\n res = batch_client.list_jobs(jobQueue=queue_name,\n jobStatus=status, maxResults=10000)\n jobs = res['jobSummaryList']\n if job_name_prefix:\n jobs = [job for job in jobs if\n job['jobName'].startswith(job_name_prefix)]\n if job_id_filter:\n jobs = [job_def for job_def in jobs\n if job_def['jobId'] in job_id_filter]\n return jobs\n\n job_log_dict = {}\n\n def check_logs(job_defs):\n \"\"\"Updates teh job_log_dict.\"\"\"\n stalled_jobs = set()\n\n # Check the status of all the jobs we're tracking.\n for job_def in job_defs:\n try:\n # Get the logs for this job.\n log_lines = get_job_log(job_def, write_file=False)\n\n # Get the job id.\n jid = job_def['jobId']\n now = datetime.now()\n if jid not in job_log_dict.keys():\n # If the job is new...\n logger.info(\"Adding job %s to the log tracker at %s.\"\n % (jid, now))\n job_log_dict[jid] = {'log': log_lines,\n 'last change time': now}\n elif len(job_log_dict[jid]['log']) == len(log_lines):\n # If the job log hasn't changed, announce as such, and check\n # to see if it has been the same for longer than stall time.\n check_dt = now - job_log_dict[jid]['last change time']\n logger.warning(('Job \\'%s\\' has not produced output for '\n '%d seconds.')\n % (job_def['jobName'], check_dt.seconds))\n if check_dt.seconds > idle_log_timeout:\n logger.warning(\"Job \\'%s\\' has stalled.\"\n % job_def['jobName'])\n stalled_jobs.add(jid)\n else:\n # If the job is known, and the logs have changed, update the\n # \"last change time\".\n old_log = job_log_dict[jid]['log']\n old_log += log_lines[len(old_log):]\n job_log_dict[jid]['last change time'] = now\n except Exception as e:\n # Sometimes due to sync et al. 
issues, a part of this will fail.\n # Such things are usually transitory issues so we keep trying.\n logger.error(\"Failed to check log for: %s\" % str(job_def))\n logger.exception(e)\n\n # Pass up the set of job id's for stalled jobs.\n return stalled_jobs\n\n # Don't start watching jobs added after this command was initialized.\n observed_job_def_dict = {}\n def get_dict_of_job_tuples(job_defs):\n return {jdef['jobId']: [(k, jdef[k]) for k in ['jobName', 'jobId']]\n for jdef in job_defs}\n\n batch_client = boto3.client('batch')\n if tag_instances:\n ecs_cluster_name = get_ecs_cluster_for_queue(queue_name, batch_client)\n\n terminate_msg = 'Job log has stalled for at least %f minutes.'\n terminated_jobs = set()\n stashed_id_set = set()\n while True:\n pre_run = []\n for status in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING'):\n pre_run += get_jobs_by_status(status, job_id_list, job_name_prefix)\n running = get_jobs_by_status('RUNNING', job_id_list, job_name_prefix)\n failed = get_jobs_by_status('FAILED', job_id_list, job_name_prefix)\n done = get_jobs_by_status('SUCCEEDED', job_id_list, job_name_prefix)\n\n observed_job_def_dict.update(get_dict_of_job_tuples(pre_run + running))\n\n logger.info('(%d s)=(pre: %d, running: %d, failed: %d, done: %d)' %\n ((datetime.now() - start_time).seconds, len(pre_run),\n len(running), len(failed), len(done)))\n\n # Check the logs for new output, and possibly terminate some jobs.\n stalled_jobs = check_logs(running)\n if idle_log_timeout is not None:\n if kill_on_log_timeout:\n # Keep track of terminated jobs so we don't send a terminate\n # message twice.\n for jid in stalled_jobs - terminated_jobs:\n batch_client.terminate_job(\n jobId=jid,\n reason=terminate_msg % (idle_log_timeout/60.0)\n )\n logger.info('Terminating %s.' % jid)\n terminated_jobs.add(jid)\n\n if job_id_list:\n if (len(failed) + len(done)) == len(job_id_list):\n ret = 0\n break\n else:\n if (len(failed) + len(done) > 0) and \\\n (len(pre_run) + len(running) == 0):\n ret = 0\n break\n\n if tag_instances:\n tag_instances_on_cluster(ecs_cluster_name)\n\n # Stash the logs of things that have finished so far. 
Note that jobs\n # terminated in this round will not be picked up until the next round.\n if stash_log_method:\n stash_logs(observed_job_def_dict, done, failed, queue_name,\n stash_log_method, job_name_prefix,\n start_time.strftime('%Y%m%d_%H%M%S'),\n ids_stashed=stashed_id_set)\n sleep(poll_interval)\n\n # Pick up any stragglers\n if stash_log_method:\n stash_logs(observed_job_def_dict, done, failed, queue_name,\n stash_log_method, job_name_prefix,\n start_time.strftime('%Y%m%d_%H%M%S'),\n ids_stashed=stashed_id_set)\n\n result_record['terminated'] = terminated_jobs\n result_record['failed'] = failed\n result_record['succeeded'] = done\n\n return ret\n\n\ndef _get_job_ids_to_stash(job_def_list, stashed_id_set):\n return [job_def['jobId'] for job_def in job_def_list\n if job_def['jobId'] not in stashed_id_set]\n\n\ndef stash_logs(job_defs, success_jobs, failure_jobs, queue_name, method='local',\n job_name_prefix=None, tag='stash', ids_stashed=None):\n if ids_stashed is None:\n ids_stashed = set()\n\n success_ids = _get_job_ids_to_stash(success_jobs, ids_stashed)\n failure_ids = _get_job_ids_to_stash(failure_jobs, ids_stashed)\n if method == 's3':\n s3_client = boto3.client('s3')\n\n def stash_log(log_str, name_base):\n name = '%s_%s.log' % (name_base, tag)\n s3_client.put_object(\n Bucket=bucket_name,\n Key='reading_results/%s/logs/%s/%s' % (\n job_name_prefix,\n queue_name,\n name),\n Body=log_str\n )\n elif method == 'local':\n if job_name_prefix is None:\n job_name_prefix = 'batch_%s' % tag\n dirname = '%s_job_logs' % job_name_prefix\n os.mkdir(dirname)\n\n def stash_log(log_str, name_base):\n with open(os.path.join(dirname, name_base + '.log'), 'w') as f:\n f.write(log_str)\n else:\n raise ValueError('Invalid method: %s' % method)\n\n for jobId, job_def_tpl in job_defs.items():\n if jobId not in success_ids and jobId not in failure_ids:\n continue # Logs aren't done and ready to be loaded.\n try:\n job_def = dict(job_def_tpl)\n lines = get_job_log(job_def, write_file=False)\n if lines is None:\n logger.warning(\"No logs found for %s.\" % job_def['jobName'])\n continue\n log_str = ''.join(lines)\n base_name = job_def['jobName']\n if job_def['jobId'] in success_ids:\n base_name += '/SUCCESS'\n elif job_def['jobId'] in failure_ids:\n base_name += '/FAILED'\n else:\n logger.error(\"Job cannot be logged unless completed.\")\n continue\n logger.info('Stashing ' + base_name)\n stash_log(log_str, base_name)\n except Exception as e:\n logger.error(\"Failed to save logs for: %s\" % str(job_def_tpl))\n logger.exception(e)\n ids_stashed |= {jid for jids in [success_ids, failure_ids] for jid in jids}\n return\n\n\ndef get_ecs_cluster_for_queue(queue_name, batch_client=None):\n \"\"\"Get the name of the ecs cluster using the batch client.\"\"\"\n if batch_client is None:\n batch_client = boto3.client('batch')\n\n queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])\n if len(queue_resp['jobQueues']) == 1:\n queue = queue_resp['jobQueues'][0]\n else:\n raise BatchReadingError('Error finding queue with name %s.'\n % queue_name)\n\n compute_env_names = queue['computeEnvironmentOrder']\n if len(compute_env_names) == 1:\n compute_env_name = compute_env_names[0]['computeEnvironment']\n else:\n raise BatchReadingError('Error finding the compute environment name '\n 'for %s.' 
% queue_name)\n\n compute_envs = batch_client.describe_compute_environments(\n computeEnvironments=[compute_env_name]\n )['computeEnvironments']\n if len(compute_envs) == 1:\n compute_env = compute_envs[0]\n else:\n raise BatchReadingError(\"Error getting compute environment %s for %s. \"\n \"Got %d enviornments instead of 1.\"\n % (compute_env_name, queue_name,\n len(compute_envs)))\n\n ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn'])\n return ecs_cluster_name\n\n\ndef tag_instances_on_cluster(cluster_name, project='cwc'):\n \"\"\"Adds project tag to untagged instances in a given cluster.\n\n Parameters\n ----------\n cluster_name : str\n The name of the AWS ECS cluster in which running instances\n should be tagged.\n project : str\n The name of the project to tag instances with.\n \"\"\"\n # Get the relevent instance ids from the ecs cluster\n ecs = boto3.client('ecs')\n task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']\n if not task_arns:\n return\n tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']\n container_instances = ecs.describe_container_instances(\n cluster=cluster_name,\n containerInstances=[task['containerInstanceArn'] for task in tasks]\n )['containerInstances']\n ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]\n\n # Instantiate each instance to tag as a resource and create project tag\n for instance_id in ec2_instance_ids:\n tag_instance(instance_id, project=project)\n return\n\n\n@_ensure_api_keys('remote batch reading', [])\ndef get_elsevier_api_keys():\n return [\n {'name': ec.API_KEY_ENV_NAME,\n 'value': ec.ELSEVIER_KEYS.get('X-ELS-APIKey', '')},\n {'name': ec.INST_KEY_ENV_NAME,\n 'value': ec.ELSEVIER_KEYS.get('X-ELS-Insttoken', '')},\n ]\n\n\ndef get_environment():\n # Get AWS credentials\n # http://stackoverflow.com/questions/36287720/boto3-get-credentials-dynamically\n session = botocore.session.get_session()\n access_key = session.get_credentials().access_key\n secret_key = session.get_credentials().secret_key\n\n # Get the Elsevier keys from the Elsevier client\n environment_vars = [\n {'name': 'AWS_ACCESS_KEY_ID',\n 'value': access_key},\n {'name': 'AWS_SECRET_ACCESS_KEY',\n 'value': secret_key}\n ]\n environment_vars += get_elsevier_api_keys()\n\n # Only include values that are not empty.\n return [var_dict for var_dict in environment_vars\n if var_dict['value'] and var_dict['name']]\n\n\nclass Submitter(object):\n _s3_input_name = NotImplemented\n _purpose = NotImplemented\n _job_queue = NotImplemented\n _job_def = NotImplemented\n\n def __init__(self, basename, readers, project_name=None, **options):\n self.basename = basename\n if 'all' in readers:\n self.readers = ['reach', 'sparser']\n else:\n self.readers = readers\n self.project_name = project_name\n self.job_list = None\n self.options=options\n self.ids_per_job = None\n return\n\n def set_options(self, **kwargs):\n \"\"\"Set the options of reading job.\"\"\"\n # This should be more specifically implemented in a child class.\n self.options = kwargs\n return\n\n def _make_command(self, start_ix, end_ix):\n job_name = '%s_%d_%d' % (self.basename, start_ix, end_ix)\n cmd = self._get_base(job_name, start_ix, end_ix) + ['-r'] + self.readers\n cmd += self._get_extensions()\n for arg in cmd:\n if not isinstance(arg, str):\n logger.warning(\"Argument of command is not a string: %s\"\n % repr(arg))\n return job_name, cmd\n\n def _get_base(self, job_name, start_ix, end_ix):\n raise NotImplementedError\n\n def _get_extensions(self):\n return 
[]\n\n def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,\n num_tries=2):\n # stash this for later.\n self.ids_per_job = ids_per_job\n\n # Upload the pmid_list to Amazon S3\n id_list_key = 'reading_results/%s/%s' % (self.basename,\n self._s3_input_name)\n s3_client = boto3.client('s3')\n s3_client.upload_file(input_fname, bucket_name, id_list_key)\n\n # If no end index is specified, read all the PMIDs\n if end_ix is None:\n with open(input_fname, 'rt') as f:\n lines = f.readlines()\n end_ix = len(lines)\n\n if start_ix is None:\n start_ix = 0\n\n # Get environment variables\n environment_vars = get_environment()\n\n # Iterate over the list of PMIDs and submit the job in chunks\n batch_client = boto3.client('batch', region_name='us-east-1')\n job_list = []\n for job_start_ix in range(start_ix, end_ix, ids_per_job):\n job_end_ix = job_start_ix + ids_per_job\n if job_end_ix > end_ix:\n job_end_ix = end_ix\n job_name, cmd = self._make_command(job_start_ix, job_end_ix)\n command_list = get_batch_command(cmd, purpose=self._purpose,\n project=self.project_name)\n logger.info('Command list: %s' % str(command_list))\n job_info = batch_client.submit_job(\n jobName=job_name,\n jobQueue=self._job_queue,\n jobDefinition=self._job_def,\n containerOverrides={\n 'environment': environment_vars,\n 'command': command_list},\n retryStrategy={'attempts': num_tries}\n )\n logger.info(\"submitted...\")\n job_list.append({'jobId': job_info['jobId']})\n self.job_list = job_list\n return job_list\n\n def watch_and_wait(self, poll_interval=10, idle_log_timeout=None,\n kill_on_timeout=False, stash_log_method=None,\n tag_instances=False, **kwargs):\n \"\"\"This provides shortcut access to the wait_for_complete_function.\"\"\"\n return wait_for_complete(self._job_queue, job_list=self.job_list,\n job_name_prefix=self.basename,\n poll_interval=poll_interval,\n idle_log_timeout=idle_log_timeout,\n kill_on_log_timeout=kill_on_timeout,\n stash_log_method=stash_log_method,\n tag_instances=tag_instances, **kwargs)\n\n\nclass PmidSubmitter(Submitter):\n _s3_input_name = 'pmids'\n _purpose = 'pmid_reading'\n _job_queue = 'run_reach_queue'\n _job_def = 'run_reach_jobdef'\n\n def _get_base(self, job_name, start_ix, end_ix):\n base = ['python', '-m', 'indra.tools.reading.pmid_reading.read_pmids_aws',\n self.basename, '/tmp', '16', str(start_ix), str(end_ix)]\n return base\n\n def _get_extensions(self):\n extensions = []\n for opt_key in ['force_read', 'force_fulltext']:\n if self.options.get(opt_key, False):\n extensions.append('--' + opt_key)\n return extensions\n\n def set_options(self, force_read=False, force_fulltext=False):\n \"\"\"Set the options for this run.\"\"\"\n self.options['force_read'] = force_read\n self.options['force_fulltext'] = force_fulltext\n return\n\n def submit_combine(self):\n job_ids = self.job_list\n if job_ids is not None and len(job_ids) > 20:\n print(\"WARNING: boto3 cannot support waiting for more than 20 jobs.\")\n print(\"Please wait for the reading to finish, then run again with the\")\n print(\"`combine` option.\")\n return\n\n # Get environment variables\n environment_vars = get_environment()\n\n job_name = '%s_combine_reading_results' % self.basename\n command_list = get_batch_command(\n ['python', '-m', 'indra.tools.reading.assemble_reading_stmts_aws',\n self.basename, '-r'] + self.readers,\n purpose='pmid_reading',\n project=self.project_name\n )\n logger.info('Command list: %s' % str(command_list))\n kwargs = {'jobName': job_name, 'jobQueue': self._job_queue,\n 
'jobDefinition': self._job_def,\n 'containerOverrides': {'environment': environment_vars,\n 'command': command_list,\n 'memory': 60000, 'vcpus': 1}}\n if job_ids:\n kwargs['dependsOn'] = job_ids\n batch_client = boto3.client('batch')\n batch_client.submit_job(**kwargs)\n logger.info(\"submitted...\")\n return\n\n\ndef submit_reading(basename, pmid_list_filename, readers, start_ix=None,\n end_ix=None, pmids_per_job=3000, num_tries=2,\n force_read=False, force_fulltext=False, project_name=None):\n \"\"\"Submit an old-style pmid-centered no-database s3 only reading job.\n\n This function is provided for the sake of backward compatibility. It is\n preferred that you use the object-oriented PmidSubmitter and the\n submit_reading job going forward.\n \"\"\"\n sub = PmidSubmitter(basename, readers, project_name)\n sub.set_options(force_read, force_fulltext)\n sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job,\n num_tries)\n return sub.job_list\n\n\ndef submit_combine(basename, readers, job_ids=None, project_name=None):\n \"\"\"Submit a batch job to combine the outputs of a reading job.\n\n This function is provided for backwards compatibility. You should use the\n PmidSubmitter and submit_combine methods.\n \"\"\"\n sub = PmidSubmitter(basename, readers, project_name)\n sub.job_list = job_ids\n sub.submit_combine()\n return sub\n\n\nclass DbReadingSubmitter(Submitter):\n _s3_input_name = 'id_list'\n _purpose = 'db_reading'\n _job_queue = 'run_db_reading_queue'\n _job_def = 'run_db_reading_jobdef'\n\n def __init__(self, *args, **kwargs):\n super(DbReadingSubmitter, self).__init__(*args, **kwargs)\n self.time_tag = datetime.now().strftime('%Y%m%d_%H%M')\n self.reporter = Reporter(self.basename + '_summary_%s' % self.time_tag)\n self.reporter.sections = {'Plots': [], 'Totals': [], 'Git': []}\n self.reporter.set_section_order(['Git', 'Totals', 'Plots'])\n self.run_record = {}\n return\n\n def _get_base(self, job_name, start_ix, end_ix):\n read_mode = 'all' if self.options.get('force_read', False) else 'unread'\n stmt_mode = 'none' if self.options.get('no_stmts', False) else 'all'\n\n job_name = '%s_%d_%d' % (self.basename, start_ix, end_ix)\n base = ['python', '-m', 'indra.tools.reading.db_reading.read_db_aws',\n self.basename]\n base += [job_name]\n base += ['/tmp', read_mode, stmt_mode, '32', str(start_ix), str(end_ix)]\n return base\n\n def _get_extensions(self):\n extensions = []\n if self.options.get('force_fulltext', False):\n extensions.append('--force_fulltext')\n if self.options.get('prioritize', False):\n extensions.append('--read_best_fulltext')\n max_reach_input_len = self.options.get('max_reach_input_len')\n max_reach_space_ratio = self.options.get('max_reach_space_ratio')\n if max_reach_input_len is not None:\n extensions += ['--max_reach_input_len', max_reach_input_len]\n if max_reach_space_ratio is not None:\n extensions += ['--max_reach_space_ratio', max_reach_space_ratio]\n return extensions\n\n def set_options(self, force_read=False, no_stmts=False,\n force_fulltext=False, prioritize=False,\n max_reach_input_len=None, max_reach_space_ratio=None):\n self.options['force_fulltext'] = force_fulltext\n self.options['prioritize'] = prioritize\n self.options['max_reach_input_len'] = max_reach_input_len\n self.options['max_reach_space_ratio'] = max_reach_space_ratio\n return\n\n def watch_and_wait(self, *args, **kwargs):\n kwargs['result_record'] = self.run_record\n super(DbReadingSubmitter, self).watch_and_wait(*args, **kwargs)\n self.produce_report()\n\n 
@staticmethod\n def _parse_time(time_str):\n \"\"\"Create a timedelta or datetime object from default string reprs.\"\"\"\n try:\n # This is kinda terrible, but it is the easiest way to distinguish\n # them.\n if '-' in time_str:\n time_fmt = '%Y-%m-%d %H:%M:%S'\n if '.' in time_str:\n pre_dec, post_dec = time_str.split('.')\n dt = datetime.strptime(pre_dec, time_fmt)\n dt.replace(microsecond=int(post_dec))\n else:\n dt = datetime.strftime(time_str, time_fmt)\n return dt\n else:\n if 'day' in time_str:\n m = re.match(('(?P<days>[-\\d]+) day[s]*, '\n '(?P<hours>\\d+):(?P<minutes>\\d+):'\n '(?P<seconds>\\d[\\.\\d+]*)'),\n time_str)\n else:\n m = re.match(('(?P<hours>\\d+):(?P<minutes>\\d+):'\n '(?P<seconds>\\d[\\.\\d+]*)'),\n time_str)\n return timedelta(**{key: float(val)\n for key, val in m.groupdict().items()})\n except Exception as e:\n logger.error('Failed to parse \\\"%s\\\".' % time_str)\n raise e\n\n def _get_results_file_tree(self, s3, s3_prefix):\n relevant_files = s3.list_objects(Bucket=bucket_name, Prefix=s3_prefix)\n file_tree = NestedDict()\n file_keys = [entry['Key'] for entry in relevant_files['Contents']]\n pref_path = s3_prefix.split('/')[:-1] # avoid the trailing empty str.\n for key in file_keys:\n full_path = key.split('/')\n relevant_path = full_path[len(pref_path):]\n curr = file_tree\n for step in relevant_path:\n curr = curr[step]\n curr['key'] = key\n return file_tree\n\n def _get_txt_file_dict(self, file_bytes):\n line_list = file_bytes.decode('utf-8').splitlines()\n sc = ': '\n file_info = {}\n for line in line_list:\n segments = line.split(sc)\n file_info[segments[0].strip()] = sc.join(segments[1:]).strip()\n return file_info\n\n def _handle_git_info(self, ref, git_info, file_bytes):\n this_info = self._get_txt_file_dict(file_bytes)\n if git_info and this_info != git_info:\n logger.warning(\"Disagreement in git info in %s: \"\n \"%s vs. %s.\"\n % (ref, git_info, this_info))\n elif not git_info:\n git_info.update(this_info)\n return\n\n def _report_git_info(self, batch_git_info):\n self.reporter.add_text('Batch Git Info', section='Git', style='h1')\n for key, val in batch_git_info.items():\n label = key.replace('_', ' ').capitalize()\n self.reporter.add_text('%s: %s' % (label, val), section='Git')\n self.reporter.add_text('Launching System\\'s Git Info', section='Git',\n style='h1')\n git_info_dict = get_git_info()\n for key, val in git_info_dict.items():\n label = key.replace('_', ' ').capitalize()\n self.reporter.add_text('%s: %s' % (label, val), section='Git')\n return\n\n def _handle_timing(self, ref, timing_info, file_bytes):\n this_info = self._get_txt_file_dict(file_bytes)\n for stage, data in this_info.items():\n if stage not in timing_info.keys():\n logger.info(\"Adding timing stage: %s\" % stage)\n timing_info[stage] = {}\n stage_info = timing_info[stage]\n timing_pairs = re.findall(r'(\\w+):\\s+([ 0-9:.\\-]+)', data)\n if len(timing_pairs) is not 3:\n logger.warning(\"Not all timings present for %s \"\n \"in %s.\" % (stage, ref))\n for label, time_str in timing_pairs:\n if label not in stage_info.keys():\n stage_info[label] = {}\n # e.g. timing_info['reading']['start']['job_name'] = <datetime>\n stage_info[label][ref] = self._parse_time(time_str)\n return\n\n def _report_timing(self, timing_info):\n # Pivot the timing info.\n idx_patt = re.compile('%s_(\\d+)_(\\d+)' % self.basename)\n job_segs = NestedDict()\n plot_set = set()\n for stage, stage_d in timing_info.items():\n # e.g. 
reading, statement production...\n for metric, metric_d in stage_d.items():\n # e.g. start, end, ...\n for job_name, t in metric_d.items():\n # e.g. job_basename_startIx_endIx\n job_segs[job_name][stage][metric] = t\n m = idx_patt.match(job_name)\n if m is None:\n logger.error(\"Unexpectedly formatted name: %s.\"\n % job_name)\n continue\n key = tuple([int(n) for n in m.groups()] + [job_name])\n plot_set.add(key)\n plot_list = list(plot_set)\n plot_list.sort()\n\n # Use this for getting the minimum and maximum.\n all_times = [dt for job in job_segs.values() for stage in job.values()\n for metric, dt in stage.items() if metric != 'duration']\n all_start = min(all_times)\n all_end = max(all_times)\n\n def get_time_tuple(stage_data):\n start_seconds = (stage_data['start'] - all_start).total_seconds()\n return start_seconds, stage_data['duration'].total_seconds()\n\n # Make the broken barh plots.\n w = 6.5\n h = 9\n fig = plt.figure(figsize=(w, h))\n gs = plt.GridSpec(2, 1, height_ratios=[10, 1])\n ax0 = plt.subplot(gs[0])\n ytick_pairs = []\n stages = ['reading', 'statement production', 'stats']\n t = arange((all_end - all_start).total_seconds())\n counts = dict.fromkeys(['jobs'] + stages)\n for k in counts.keys():\n counts[k] = array([0 for _ in t])\n for i, job_tpl in enumerate(plot_list):\n s_ix, e_ix, job_name = job_tpl\n job_d = job_segs[job_name]\n xs = [get_time_tuple(job_d[stg]) for stg in stages]\n ys = (s_ix, (e_ix - s_ix)*0.9)\n ytick_pairs.append(((s_ix + e_ix)/2, '%s_%s' % (s_ix, e_ix)))\n logger.debug(\"Making plot for: %s\" % str((job_name, xs, ys)))\n ax0.broken_barh(xs, ys, facecolors=('red', 'green', 'blue'))\n\n for n, stg in enumerate(stages):\n cs = counts[stg]\n start = xs[n][0]\n dur = xs[n][1]\n cs[(t>start) & (t<(start + dur))] += 1\n cs = counts['jobs']\n cs[(t>xs[0][0]) & (t<(xs[-1][0] + xs[-1][1]))] += 1\n\n # Format the plot\n ax0.tick_params(top='off', left='off', right='off', bottom='off',\n labelleft='on', labelbottom='off')\n for spine in ax0.spines.values():\n spine.set_visible(False)\n total_time = (all_end - all_start).total_seconds()\n ax0.set_xlim(0, total_time)\n ax0.set_ylabel(self.basename + '_ ...')\n print(ytick_pairs)\n yticks, ylabels = zip(*ytick_pairs)\n print(yticks)\n if not self.ids_per_job:\n print([yticks[i+1] - yticks[i]\n for i in range(len(yticks) - 1)])\n # Infer if we don't have it.\n spacing = median([yticks[i+1] - yticks[i]\n for i in range(len(yticks) - 1)])\n spacing = max(1, spacing)\n else:\n spacing = self.ids_per_job\n print(spacing)\n print(yticks[0], yticks[-1])\n ytick_range = list(arange(yticks[0], yticks[-1] + spacing, spacing))\n ylabel_filled = []\n for ytick in ytick_range:\n if ytick in yticks:\n ylabel_filled.append(ylabels[yticks.index(ytick)])\n else:\n ylabel_filled.append('FAILED')\n ax0.set_ylim(0, max(ytick_range) + spacing)\n ax0.set_yticks(ytick_range)\n ax0.set_yticklabels(ylabel_filled)\n\n # Plot the lower axis.\n legend_list = []\n color_map = {'jobs': 'k', 'reading': 'r', 'statement production': 'g',\n 'stats': 'b'}\n ax1 = plt.subplot(gs[1], sharex=ax0)\n for k, cs in counts.items():\n legend_list.append(k)\n ax1.plot(t, cs, color=color_map[k])\n for lbl, spine in ax1.spines.items():\n spine.set_visible(False)\n max_n = max(counts['jobs'])\n ax1.set_ylim(0, max_n + 1)\n ax1.set_xlim(0, total_time)\n yticks = list(range(0, max_n-max_n//5, max(1, max_n//5)))\n ax1.set_yticks(yticks + [max_n])\n ax1.set_yticklabels([str(n) for n in yticks] + ['max=%d' % max_n])\n ax1.set_ylabel('N_jobs')\n 
ax1.set_xlabel('Time since beginning [seconds]')\n\n # Make the figue borders more sensible.\n fig.tight_layout()\n img_path = 'time_figure.png'\n fig.savefig(img_path)\n self.reporter.add_image(img_path, width=w, height=h, section='Plots')\n return\n\n def _handle_sum_data(self, job_ref, summary_info, file_bytes):\n one_sum_data_dict = pickle.loads(file_bytes)\n for k, v in one_sum_data_dict.items():\n if k not in summary_info.keys():\n summary_info[k] = {}\n summary_info[k][job_ref] = v\n return\n\n def _report_sum_data(self, summary_info):\n # Two kind of things to handle:\n for k, job_dict in summary_info.items():\n if isinstance(list(job_dict.values())[0], dict):\n continue\n\n # Overall totals\n self.reporter.add_text('total %s: %d' % (k, sum(job_dict.values())),\n section='Totals')\n\n # Hists of totals.\n if len(job_dict) <= 1:\n continue\n\n w = 6.5\n h = 4\n fig = plt.figure(figsize=(w, h))\n plt.hist(list(job_dict.values()), align='left')\n plt.xlabel(k)\n plt.ylabel('Number of Jobs')\n fig.tight_layout()\n fname = k + '_hist.png'\n fig.savefig(fname)\n self.reporter.add_image(fname, width=w, height=h, section='Plots')\n return\n\n def _handle_hist_data(self, job_ref, hist_dict, file_bytes):\n a_hist_data_dict = pickle.loads(file_bytes)\n for k, v in a_hist_data_dict.items():\n if k not in hist_dict.keys():\n hist_dict[k] = {}\n hist_dict[k][job_ref] = v\n return\n\n def _report_hist_data(self, hist_dict):\n for k, data_dict in hist_dict.items():\n w = 6.5\n if k == ('stmts', 'readers'):\n h = 6\n fig = plt.figure(figsize=(w, h))\n data = {}\n for job_datum in data_dict.values():\n for rdr, num in job_datum['data'].items():\n if rdr not in data.keys():\n data[rdr] = [num]\n else:\n data[rdr].append(num)\n N = len(data)\n key_list = list(data.keys())\n xtick_locs = arange(N)\n n = (N+1)*100 + 11\n ax0 = plt.subplot(n)\n ax0.bar(xtick_locs, [sum(data[k]) for k in key_list],\n align='center')\n ax0.set_xticks(xtick_locs, key_list)\n ax0.set_xlabel('readers')\n ax0.set_ylabel('stmts')\n ax0.set_title('Reader production')\n rdr_ax_list = []\n for rdr, stmt_counts in data.items():\n n += 1\n if not rdr_ax_list:\n ax = plt.subplot(n)\n else:\n ax = plt.subplot(n, sharex=rdr_ax_list[0])\n ax.set_title(rdr)\n ax.hist(stmt_counts, align='left')\n ax.set_ylabel('jobs')\n rdr_ax_list.append(ax)\n if rdr_ax_list:\n ax.set_xlabel('stmts')\n else: # TODO: Handle other summary plots.\n continue\n figname = '_'.join(k) + '.png'\n fig.savefig(figname)\n self.reporter.add_image(figname, width=w, height=h, section='Plots')\n\n return\n\n def produce_report(self):\n \"\"\"Produce a report of the batch jobs.\"\"\"\n s3_prefix = 'reading_results/%s/logs/%s/' % (self.basename,\n self._job_queue)\n logger.info(\"Producing batch report for %s, from prefix %s.\"\n % (self.basename, s3_prefix))\n s3 = boto3.client('s3')\n file_tree = self._get_results_file_tree(s3, s3_prefix)\n logger.info(\"Found %d relevant files.\" % len(file_tree))\n stat_files = {\n 'git_info.txt': (self._handle_git_info, self._report_git_info),\n 'timing.txt': (self._handle_timing, self._report_timing),\n 'raw_tuples.pkl': (None, None),\n 'hist_data.pkl': (self._handle_hist_data, self._report_hist_data),\n 'sum_data.pkl': (self._handle_sum_data, self._report_sum_data)\n }\n stat_aggs = {}\n for stat_file, (handle_stats, report_stats) in stat_files.items():\n logger.info(\"Aggregating %s...\" % stat_file)\n # Prep the data storage.\n my_agg = {}\n\n # Get a list of the relevant files (one per job).\n file_paths = 
file_tree.get_paths(stat_file)\n logger.info(\"Found %d files for %s.\" % (len(file_paths), stat_file))\n\n # Aggregate the data from all the jobs for each file type.\n for sub_path, file_entry in file_paths:\n s3_key = file_entry['key']\n ref = sub_path[0]\n file = s3.get_object(Bucket=bucket_name, Key=s3_key)\n file_bytes = file['Body'].read()\n if handle_stats is not None:\n handle_stats(ref, my_agg, file_bytes)\n\n if report_stats is not None and len(my_agg):\n report_stats(my_agg)\n\n stat_aggs[stat_file] = my_agg\n\n for end_type, jobs in self.run_record.items():\n self.reporter.add_text('Jobs %s: %d' % (end_type, len(jobs)),\n section='Totals')\n\n s3_prefix = 'reading_results/%s/' % self.basename\n fname = self.reporter.make_report()\n with open(fname, 'rb') as f:\n s3.put_object(Bucket=bucket_name,\n Key= s3_prefix + fname,\n Body=f.read())\n s3.put_object(Bucket=bucket_name,\n Key=s3_prefix + 'stat_aggregates_%s.pkl' % self.time_tag,\n Body=pickle.dumps(stat_aggs))\n return file_tree, stat_aggs\n\n\ndef submit_db_reading(basename, id_list_filename, readers, start_ix=None,\n end_ix=None, pmids_per_job=3000, num_tries=2,\n force_read=False, force_fulltext=False,\n read_all_fulltext=False, project_name=None,\n max_reach_input_len=None, max_reach_space_ratio=None,\n no_stmts=False):\n \"\"\"Submit batch reading jobs that uses the database for content and results.\n\n This function is provided for backwards compatibility, use DbReadingSubmitter\n and its submit_reading method instead.\n \"\"\"\n sub = DbReadingSubmitter(basename, readers, project_name)\n sub.set_options(force_read, no_stmts, force_fulltext, read_all_fulltext,\n max_reach_input_len, max_reach_space_ratio)\n sub.submit_reading(id_list_filename, start_ix, end_ix, pmids_per_job,\n num_tries)\n return sub\n\n\nif __name__ == '__main__':\n import argparse\n\n # Create the top-level parser\n parser = argparse.ArgumentParser(\n 'submit_reading_pipeline_aws.py',\n description=('Run reading with either the db or remote resources. For '\n 'more specific help, select one of the Methods with the '\n '`-h` option.'),\n epilog=('Note that `python wait_for_complete.py ...` should be run as '\n 'soon as this command completes successfully. For more '\n 'details use `python wait_for_complete.py -h`.')\n )\n subparsers = parser.add_subparsers(title='Method')\n subparsers.required = True\n subparsers.dest = 'method'\n\n # Create parser class for first layer of options\n grandparent_reading_parser = argparse.ArgumentParser(\n description='Run machine reading using AWS Batch.',\n add_help=False\n )\n\n # Create parent parser classes for second layer of options\n parent_submit_parser = argparse.ArgumentParser(add_help=False)\n parent_submit_parser.add_argument(\n 'basename',\n help='Defines job names and S3 keys'\n )\n parent_submit_parser.add_argument(\n '-r', '--readers',\n dest='readers',\n choices=['sparser', 'reach', 'all'],\n default=['all'],\n nargs='+',\n help='Choose which reader(s) to use.'\n )\n parent_submit_parser.add_argument(\n '--project',\n help=('Set the project name. Default is DEFAULT_AWS_PROJECT in the '\n 'config.')\n )\n parent_read_parser = argparse.ArgumentParser(add_help=False)\n parent_read_parser.add_argument(\n 'input_file',\n help=('Path to file containing input ids of content to read. For the '\n 'no-db options, this is simply a file with each line being a '\n 'pmid. 
For the with-db options, this is a file where each line '\n 'is of the form \\'<id type>:<id>\\', for example \\'pmid:12345\\'')\n )\n parent_read_parser.add_argument(\n '--start_ix',\n type=int,\n help='Start index of ids to read.'\n )\n parent_read_parser.add_argument(\n '--end_ix',\n type=int,\n help='End index of ids to read. If `None`, read content from all ids.'\n )\n parent_read_parser.add_argument(\n '--force_read',\n action='store_true',\n help='Read papers even if previously read by current REACH.'\n )\n parent_read_parser.add_argument(\n '--force_fulltext',\n action='store_true',\n help='Get full text content even if content already on S3.'\n )\n parent_read_parser.add_argument(\n '--ids_per_job',\n default=3000,\n type=int,\n help='Number of PMIDs to read for each AWS Batch job.'\n )\n ''' Not currently supported.\n parent_read_parser.add_argument(\n '--num_tries',\n default=2,\n type=int,\n help='Maximum number of times to try running job.'\n )\n '''\n parent_db_parser = argparse.ArgumentParser(add_help=False)\n '''Not currently supported\n parent_db_parser.add_argument(\n '--no_upload',\n action='store_true',\n help='Don\\'t upload results to the database.'\n )\n '''\n parent_db_parser.add_argument(\n '--read_best_fulltext',\n action='store_true',\n help='Read only the best fulltext for input ids.'\n )\n parent_db_parser.add_argument(\n '--no_statements',\n action='store_true',\n help='Choose to not produce any Statements; only readings will be done.'\n )\n parent_db_parser.add_argument(\n '--max_reach_space_ratio',\n type=float,\n help='Set the maximum ratio of spaces to non-spaces for REACH input.',\n default=None\n )\n parent_db_parser.add_argument(\n '--max_reach_input_len',\n type=int,\n help='Set the maximum length of content that REACH will read.',\n default=None\n )\n\n # Make non_db_parser and get subparsers\n non_db_parser = subparsers.add_parser(\n 'no-db',\n parents=[grandparent_reading_parser],\n description=('Run reading by collecting content, and save as pickles. '\n 'This option requires that ids are given as a list of '\n 'pmids, one line per pmid.'),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n non_db_subparsers = non_db_parser.add_subparsers(\n title='Job Type',\n help='Type of jobs to submit.'\n )\n non_db_subparsers.required = True\n non_db_subparsers.dest = 'job_type'\n\n # Create subparsers for the no-db option.\n read_parser = non_db_subparsers.add_parser(\n 'read',\n parents=[parent_read_parser, parent_submit_parser],\n help='Run REACH and cache INDRA Statements on S3.',\n description='Run REACH and cache INDRA Statements on S3.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n combine_parser = non_db_subparsers.add_parser(\n 'combine',\n parents=[parent_submit_parser],\n help='Combine INDRA Statement subsets into a single file.',\n description='Combine INDRA Statement subsets into a single file.'\n )\n full_parser = non_db_subparsers.add_parser(\n 'full',\n parents=[parent_read_parser, parent_submit_parser],\n help='Run REACH and combine INDRA Statements when done.',\n description='Run REACH and combine INDRA Statements when done.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n # Make db parser and get subparsers.\n db_parser = subparsers.add_parser(\n 'with-db',\n parents=[grandparent_reading_parser, parent_submit_parser,\n parent_read_parser, parent_db_parser],\n description=('Run reading with content on the db and submit results. 
'\n 'In this option, ids in \\'input_file\\' are given in the '\n 'format \\'<id type>:<id>\\'. Unlike no-db, there is no '\n 'need to combine pickles, and therefore no need to '\n 'specify your task further.'),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n args = parser.parse_args()\n\n job_ids = None\n if args.method == 'no-db':\n sub = PmidSubmitter(args.basename, args.readers, args.project)\n sub.set_options(args.force_read, args.force_fulltext)\n if args.job_type in ['read', 'full']:\n sub.submit_reading(args.input_file, args.start_ix, args.end_ix,\n args.ids_per_job)\n if args.job_type in ['combine', 'full']:\n sub.submit_combine()\n elif args.method == 'with-db':\n sub = DbReadingSubmitter(args.basename, args.readers, args.project)\n sub.set_options(args.force_read, args.no_statements,\n args.force_fulltext, args.prioritize,\n args.max_reach_input_len, args.max_reach_space_ratio)\n sub.submit_reading(args.input_file, args.start_ix, args.end_ix,\n args.ids_per_job)\n" ]
[ [ "matplotlib.use", "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.GridSpec", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot" ] ]
yamamototakas/fxtrading
[ "955d247b832de7180b8893edaad0b50df515809f" ]
[ "agents/tensorflow_iris.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n# Data sets\nIRIS_TRAINING = os.path.join(os.path.dirname(__file__), \"iris_training.csv\")\nIRIS_TEST = os.path.join(os.path.dirname(__file__), \"iris_test.csv\")\n\n\ndef main(unused_argv):\n # Load datasets.\n training_set = tf.contrib.learn.datasets.base.load_csv_with_header(\n filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)\n test_set = tf.contrib.learn.datasets.base.load_csv_with_header(\n filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)\n\n # Specify that all features have real-value data\n feature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=4)]\n\n validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(\n test_set.data,\n test_set.target,\n every_n_steps=50)\n\n # Build 3 layer DNN with 10, 20, 10 units respectively.\n # classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,\n # hidden_units=[10, 20, 10],\n # n_classes=3,\n # model_dir=\"/tmp/iris_model\")\n classifier = tf.contrib.learn.DNNClassifier(\n feature_columns=feature_columns,\n hidden_units=[10],\n n_classes=3,\n model_dir=\"/tmp/iris_model\",\n config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))\n\n # Fit model.\n # classifier.fit(x=training_set.data,\n # y=training_set.target,\n # steps=2000)\n classifier.fit(x=training_set.data,\n y=training_set.target,\n steps=2000,\n monitors=[validation_monitor])\n\n # Evaluate accuracy.\n accuracy_score = classifier.evaluate(x=test_set.data,\n y=test_set.target)[\"accuracy\"]\n print('Accuracy: {0:f}'.format(accuracy_score))\n\n # Classify two new flower samples.\n new_samples = np.array(\n [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)\n y = list(classifier.predict(new_samples, as_iterable=True))\n print('Predictions: {}'.format(str(y)))\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.logging.set_verbosity", "numpy.array", "tensorflow.contrib.learn.datasets.base.load_csv_with_header", "tensorflow.contrib.layers.real_valued_column", "tensorflow.contrib.learn.RunConfig", "tensorflow.contrib.learn.monitors.ValidationMonitor", "tensorflow.app.run" ] ]
huamichaelchen/streamlit
[ "02041f6726d3e98b8f872365bd8129c9f4cb31f4" ]
[ "lib/streamlit/DeltaGenerator.py" ]
[ "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Allows us to create and absorb changes (aka Deltas) to elements.\"\"\"\n\nimport functools\nimport json\nimport random\nimport textwrap\nimport numbers\nimport re\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import time\n\nfrom streamlit import caching\nfrom streamlit import config\nfrom streamlit import cursor\nfrom streamlit import type_util\nfrom streamlit.ReportThread import get_report_ctx\nfrom streamlit.errors import DuplicateWidgetID\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.errors import NoSessionContext\nfrom streamlit.file_util import get_encoded_file_data\nfrom streamlit.js_number import JSNumber\nfrom streamlit.js_number import JSNumberBoundsException\nfrom streamlit.proto import Alert_pb2\nfrom streamlit.proto import Balloons_pb2\nfrom streamlit.proto import BlockPath_pb2\nfrom streamlit.proto import ForwardMsg_pb2\nfrom streamlit.proto.NumberInput_pb2 import NumberInput\nfrom streamlit.proto.TextInput_pb2 import TextInput\nfrom streamlit.logger import get_logger\nfrom streamlit.type_util import is_type\n\nLOGGER = get_logger(__name__)\n\n# Save the type built-in for when we override the name \"type\".\n_type = type\n\nMAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB\n\n# List of Streamlit commands that perform a Pandas \"melt\" operation on\n# input dataframes.\nDELTAS_TYPES_THAT_MELT_DATAFRAMES = (\"line_chart\", \"area_chart\", \"bar_chart\")\n\n\ndef _wraps_with_cleaned_sig(wrapped, num_args_to_remove):\n \"\"\"Simplify the function signature by removing arguments from it.\n\n Removes the first N arguments from function signature (where N is\n num_args_to_remove). This is useful since function signatures are visible\n in our user-facing docs, and many methods in DeltaGenerator have arguments\n that users have no access to.\n\n Note that \"self\" is ignored by default. So to remove both \"self\" and the\n next argument you'd pass num_args_to_remove=1.\n \"\"\"\n # By passing (None, ...), we're removing (arg1, ...) from *args\n args_to_remove = (None,) * num_args_to_remove\n fake_wrapped = functools.partial(wrapped, *args_to_remove)\n fake_wrapped.__doc__ = wrapped.__doc__\n fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined]\n fake_wrapped.__module__ = wrapped.__module__\n\n return functools.wraps(fake_wrapped)\n\n\ndef _with_element(method):\n \"\"\"Wrap function and pass a NewElement proto to be filled.\n\n This is a function decorator.\n\n Converts a method of the with arguments (self, element, ...) into a method\n with arguments (self, ...). 
Thus, the instantiation of the element proto\n object and creation of the element are handled automatically.\n\n Parameters\n ----------\n method : callable\n A DeltaGenerator method with arguments (self, element, ...)\n\n Returns\n -------\n callable\n A new DeltaGenerator method with arguments (self, ...)\n\n \"\"\"\n\n @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig.\n def wrapped_method(dg, *args, **kwargs):\n # Warn if we're called from within an @st.cache function\n caching.maybe_show_cached_st_function_warning(dg, method.__name__)\n\n delta_type = method.__name__\n last_index = None\n\n if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0:\n data = args[0]\n if type_util.is_dataframe_compatible(data):\n data = type_util.convert_anything_to_df(data)\n\n if data.index.size > 0:\n last_index = data.index[-1]\n else:\n last_index = None\n\n def marshall_element(element):\n return method(dg, element, *args, **kwargs)\n\n return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index)\n\n return wrapped_method\n\n\ndef _build_duplicate_widget_message(widget_type, user_key=None):\n if user_key is not None:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with\n `key='{user_key}'`.\n\n To fix this, please make sure that the `key` argument is unique for\n each `st.{widget_type}` you create.\n \"\"\"\n )\n else:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with the\n same generated key.\n\n (When a widget is created, it's assigned an internal key based on\n its structure. Multiple widgets with an identical structure will\n result in the same internal key, which causes this error.)\n\n To fix this, please pass a unique `key` argument to\n `st.{widget_type}`.\n \"\"\"\n )\n\n return message.strip(\"\\n\").format(widget_type=widget_type, user_key=user_key)\n\n\ndef _set_widget_id(widget_type, element, user_key=None):\n \"\"\"Set the widget id.\n\n Parameters\n ----------\n widget_type : str\n The type of the widget as stored in proto.\n element : proto\n The proto of the element\n user_key : str\n Optional user-specified key to use for the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n\n \"\"\"\n element_hash = hash(element.SerializeToString())\n if user_key is not None:\n widget_id = \"%s-%s\" % (user_key, element_hash)\n else:\n widget_id = \"%s\" % element_hash\n\n ctx = get_report_ctx()\n if ctx is not None:\n added = ctx.widget_ids_this_run.add(widget_id)\n if not added:\n raise DuplicateWidgetID(\n _build_duplicate_widget_message(widget_type, user_key)\n )\n el = getattr(element, widget_type)\n el.id = widget_id\n\n\ndef _get_widget_ui_value(widget_type, element, user_key=None):\n \"\"\"Get the widget ui_value from the report context.\n NOTE: This function should be called after the proto has been filled.\n\n Parameters\n ----------\n widget_type : str\n The type of the widget as stored in proto.\n element : proto\n The proto of the element\n user_key : str\n Optional user-specified string to use as the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n\n Returns\n -------\n ui_value : any\n The value of the widget set by the client or\n the default value passed. 
If the report context\n doesn't exist, None will be returned.\n\n \"\"\"\n _set_widget_id(widget_type, element, user_key)\n el = getattr(element, widget_type)\n ctx = get_report_ctx()\n ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None\n return ui_value\n\n\ndef _get_pandas_index_attr(data, attr):\n return getattr(data.index, attr, None)\n\n\nclass NoValue(object):\n \"\"\"Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget()\n call to return None. This is needed because `_enqueue_new_element_delta`\n replaces `None` with a `DeltaGenerator` (for use in non-widget elements).\n \"\"\"\n\n pass\n\n\nclass DeltaGenerator(object):\n \"\"\"Creator of Delta protobuf messages.\n\n Parameters\n ----------\n container: BlockPath_pb2.BlockPath or None\n The root container for this DeltaGenerator. If None, this is a null\n DeltaGenerator which doesn't print to the app at all (useful for\n testing).\n\n cursor: cursor.AbstractCursor or None\n \"\"\"\n\n # The pydoc below is for user consumption, so it doesn't talk about\n # DeltaGenerator constructor parameters (which users should never use). For\n # those, see above.\n def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None):\n \"\"\"Inserts or updates elements in Streamlit apps.\n\n As a user, you should never initialize this object by hand. Instead,\n DeltaGenerator objects are initialized for you in two places:\n\n 1) When you call `dg = st.foo()` for some method \"foo\", sometimes `dg`\n is a DeltaGenerator object. You can call methods on the `dg` object to\n update the element `foo` that appears in the Streamlit app.\n\n 2) This is an internal detail, but `st.sidebar` itself is a\n DeltaGenerator. That's why you can call `st.sidebar.foo()` to place\n an element `foo` inside the sidebar.\n\n \"\"\"\n self._container = container\n\n # This is either:\n # - None: if this is the running DeltaGenerator for a top-level\n # container.\n # - RunningCursor: if this is the running DeltaGenerator for a\n # non-top-level container (created with dg._block())\n # - LockedCursor: if this is a locked DeltaGenerator returned by some\n # other DeltaGenerator method. E.g. the dg returned in dg =\n # st.text(\"foo\").\n #\n # You should never use this! Instead use self._cursor, which is a\n # computed property that fetches the right cursor.\n #\n self._provided_cursor = cursor\n\n def __getattr__(self, name):\n import streamlit as st\n\n streamlit_methods = [\n method_name for method_name in dir(st) if callable(getattr(st, method_name))\n ]\n\n def wrapper(*args, **kwargs):\n if name in streamlit_methods:\n if self._container == BlockPath_pb2.BlockPath.SIDEBAR:\n message = (\n \"Method `%(name)s()` does not exist for \"\n \"`st.sidebar`. Did you mean `st.%(name)s()`?\" % {\"name\": name}\n )\n else:\n message = (\n \"Method `%(name)s()` does not exist for \"\n \"`DeltaGenerator` objects. 
Did you mean \"\n \"`st.%(name)s()`?\" % {\"name\": name}\n )\n else:\n message = \"`%(name)s()` is not a valid Streamlit command.\" % {\n \"name\": name\n }\n\n raise StreamlitAPIException(message)\n\n return wrapper\n\n @property\n def _cursor(self):\n if self._provided_cursor is None:\n return cursor.get_container_cursor(self._container)\n else:\n return self._provided_cursor\n\n def _get_coordinates(self):\n \"\"\"Returns the element's 4-component location as string like \"M.(1,2).3\".\n\n This function uniquely identifies the element's position in the front-end,\n which allows (among other potential uses) the MediaFileManager to maintain\n session-specific maps of MediaFile objects placed with their \"coordinates\".\n\n This way, users can (say) use st.image with a stream of different images,\n and Streamlit will expire the older images and replace them in place.\n \"\"\"\n container = self._container # Proto index of container (e.g. MAIN=1)\n\n if self._cursor:\n path = (\n self._cursor.path\n ) # [uint, uint] - \"breadcrumbs\" w/ ancestor positions\n index = self._cursor.index # index - element's own position\n else:\n # Case in which we have started up in headless mode.\n path = \"(,)\"\n index = \"\"\n\n return \"{}.{}.{}\".format(container, path, index)\n\n def _enqueue_new_element_delta(\n self,\n marshall_element,\n delta_type,\n last_index=None,\n element_width=None,\n element_height=None,\n ):\n \"\"\"Create NewElement delta, fill it, and enqueue it.\n\n Parameters\n ----------\n marshall_element : callable\n Function which sets the fields for a NewElement protobuf.\n element_width : int or None\n Desired width for the element\n element_height : int or None\n Desired height for the element\n\n Returns\n -------\n DeltaGenerator\n A DeltaGenerator that can be used to modify the newly-created\n element.\n\n \"\"\"\n rv = None\n\n # Always call marshall_element() so users can run their script without\n # Streamlit.\n msg = ForwardMsg_pb2.ForwardMsg()\n rv = marshall_element(msg.delta.new_element)\n\n msg_was_enqueued = False\n\n # Only enqueue message if there's a container.\n\n if self._container and self._cursor:\n msg.metadata.parent_block.container = self._container\n msg.metadata.parent_block.path[:] = self._cursor.path\n msg.metadata.delta_id = self._cursor.index\n\n if element_width is not None:\n msg.metadata.element_dimension_spec.width = element_width\n if element_height is not None:\n msg.metadata.element_dimension_spec.height = element_height\n\n _enqueue_message(msg)\n msg_was_enqueued = True\n\n if msg_was_enqueued:\n # Get a DeltaGenerator that is locked to the current element\n # position.\n output_dg = DeltaGenerator(\n container=self._container,\n cursor=self._cursor.get_locked_cursor(\n delta_type=delta_type, last_index=last_index\n ),\n )\n else:\n # If the message was not enqueued, just return self since it's a\n # no-op from the point of view of the app.\n output_dg = self\n\n return _value_or_dg(rv, output_dg)\n\n def _block(self):\n if self._container is None or self._cursor is None:\n return self\n\n msg = ForwardMsg_pb2.ForwardMsg()\n msg.delta.new_block = True\n msg.metadata.parent_block.container = self._container\n msg.metadata.parent_block.path[:] = self._cursor.path\n msg.metadata.delta_id = self._cursor.index\n\n # Normally we'd return a new DeltaGenerator that uses the locked cursor\n # below. 
But in this case we want to return a DeltaGenerator that uses\n # a brand new cursor for this new block we're creating.\n block_cursor = cursor.RunningCursor(\n path=self._cursor.path + (self._cursor.index,)\n )\n block_dg = DeltaGenerator(container=self._container, cursor=block_cursor)\n\n # Must be called to increment this cursor's index.\n self._cursor.get_locked_cursor(None)\n\n _enqueue_message(msg)\n\n return block_dg\n\n @_with_element\n def balloons(self, element):\n \"\"\"Draw celebratory balloons.\n\n Example\n -------\n >>> st.balloons()\n\n ...then watch your app and get ready for a celebration!\n\n \"\"\"\n element.balloons.type = Balloons_pb2.Balloons.DEFAULT\n element.balloons.execution_id = random.randrange(0xFFFFFFFF)\n\n @_with_element\n def text(self, element, body):\n \"\"\"Write fixed-width and preformatted text.\n\n Parameters\n ----------\n body : str\n The string to display.\n\n Example\n -------\n >>> st.text('This is some text.')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1\n height: 50px\n\n \"\"\"\n\n element.text.body = _clean_text(body)\n\n @_with_element\n def markdown(self, element, body, unsafe_allow_html=False):\n \"\"\"Display string formatted as Markdown.\n\n Parameters\n ----------\n body : str\n The string to display as Github-flavored Markdown. Syntax\n information can be found at: https://github.github.com/gfm.\n\n This also supports:\n\n * Emoji shortcodes, such as `:+1:` and `:sunglasses:`.\n For a list of all supported codes,\n see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json.\n\n * LaTeX expressions, by just wrapping them in \"$\" or \"$$\" (the \"$$\"\n must be on their own lines). Supported LaTeX functions are listed\n at https://katex.org/docs/supported.html.\n\n unsafe_allow_html : bool\n By default, any HTML tags found in the body will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, we *strongly advise against it*. It is hard to write\n secure HTML, so by using this argument you may be compromising your\n users' security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n *Also note that `unsafe_allow_html` is a temporary measure and may\n be removed from Streamlit at any time.*\n\n If you decide to turn on HTML anyway, we ask you to please tell us\n your exact use case here:\n\n https://discuss.streamlit.io/t/96\n\n This will help us come up with safe APIs that allow you to do what\n you want.\n\n Example\n -------\n >>> st.markdown('Streamlit is **_really_ cool**.')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS\n height: 50px\n\n \"\"\"\n element.markdown.body = _clean_text(body)\n element.markdown.allow_html = unsafe_allow_html\n\n @_with_element\n def latex(self, element, body):\n # This docstring needs to be \"raw\" because of the backslashes in the\n # example below.\n r\"\"\"Display mathematical expressions formatted as LaTeX.\n\n Supported LaTeX functions are listed at\n https://katex.org/docs/supported.html.\n\n Parameters\n ----------\n body : str or SymPy expression\n The string or SymPy expression to display as LaTeX. If str, it's\n a good idea to use raw Python strings since LaTeX uses backslashes\n a lot.\n\n\n Example\n -------\n >>> st.latex(r'''\n ... a + ar + a r^2 + a r^3 + \\cdots + a r^{n-1} =\n ... \\sum_{k=0}^{n-1} ar^k =\n ... a \\left(\\frac{1-r^{n}}{1-r}\\right)\n ... 
''')\n\n .. output::\n https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4\n height: 75px\n\n \"\"\"\n if type_util.is_sympy_expession(body):\n import sympy\n\n body = sympy.latex(body)\n\n element.markdown.body = \"$$\\n%s\\n$$\" % _clean_text(body)\n\n @_with_element\n def code(self, element, body, language=\"python\"):\n \"\"\"Display a code block with optional syntax highlighting.\n\n (This is a convenience wrapper around `st.markdown()`)\n\n Parameters\n ----------\n body : str\n The string to display as code.\n\n language : str\n The language that the code is written in, for syntax highlighting.\n If omitted, the code will be unstyled.\n\n Example\n -------\n >>> code = '''def hello():\n ... print(\"Hello, Streamlit!\")'''\n >>> st.code(code, language='python')\n\n .. output::\n https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2\n height: 100px\n\n \"\"\"\n markdown = \"```%(language)s\\n%(body)s\\n```\" % {\n \"language\": language or \"\",\n \"body\": body,\n }\n element.markdown.body = _clean_text(markdown)\n\n @_with_element\n def json(self, element, body):\n \"\"\"Display object or string as a pretty-printed JSON string.\n\n Parameters\n ----------\n body : Object or str\n The object to print as JSON. All referenced objects should be\n serializable to JSON as well. If object is a string, we assume it\n contains serialized JSON.\n\n Example\n -------\n >>> st.json({\n ... 'foo': 'bar',\n ... 'baz': 'boz',\n ... 'stuff': [\n ... 'stuff 1',\n ... 'stuff 2',\n ... 'stuff 3',\n ... 'stuff 5',\n ... ],\n ... })\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS\n height: 280px\n\n \"\"\"\n import streamlit as st\n\n if not isinstance(body, str):\n try:\n body = json.dumps(body, default=lambda o: str(type(o)))\n except TypeError as err:\n st.warning(\n \"Warning: this data structure was not fully serializable as \"\n \"JSON due to one or more unexpected keys. (Error was: %s)\" % err\n )\n body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o)))\n\n element.json.body = body\n\n @_with_element\n def title(self, element, body):\n \"\"\"Display text in title formatting.\n\n Each document should have a single `st.title()`, although this is not\n enforced.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n Example\n -------\n >>> st.title('This is a title')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n height: 100px\n\n \"\"\"\n element.markdown.body = \"# %s\" % _clean_text(body)\n\n @_with_element\n def header(self, element, body):\n \"\"\"Display text in header formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n Example\n -------\n >>> st.header('This is a header')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj\n height: 100px\n\n \"\"\"\n element.markdown.body = \"## %s\" % _clean_text(body)\n\n @_with_element\n def subheader(self, element, body):\n \"\"\"Display text in subheader formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n Example\n -------\n >>> st.subheader('This is a subheader')\n\n .. 
output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ\n height: 100px\n\n \"\"\"\n element.markdown.body = \"### %s\" % _clean_text(body)\n\n @_with_element\n def error(self, element, body):\n \"\"\"Display error message.\n\n Parameters\n ----------\n body : str\n The error text to display.\n\n Example\n -------\n >>> st.error('This is an error')\n\n \"\"\"\n element.alert.body = _clean_text(body)\n element.alert.format = Alert_pb2.Alert.ERROR\n\n @_with_element\n def warning(self, element, body):\n \"\"\"Display warning message.\n\n Parameters\n ----------\n body : str\n The warning text to display.\n\n Example\n -------\n >>> st.warning('This is a warning')\n\n \"\"\"\n element.alert.body = _clean_text(body)\n element.alert.format = Alert_pb2.Alert.WARNING\n\n @_with_element\n def info(self, element, body):\n \"\"\"Display an informational message.\n\n Parameters\n ----------\n body : str\n The info text to display.\n\n Example\n -------\n >>> st.info('This is a purely informational message')\n\n \"\"\"\n element.alert.body = _clean_text(body)\n element.alert.format = Alert_pb2.Alert.INFO\n\n @_with_element\n def success(self, element, body):\n \"\"\"Display a success message.\n\n Parameters\n ----------\n body : str\n The success text to display.\n\n Example\n -------\n >>> st.success('This is a success message!')\n\n \"\"\"\n element.alert.body = _clean_text(body)\n element.alert.format = Alert_pb2.Alert.SUCCESS\n\n @_with_element\n def help(self, element, obj):\n \"\"\"Display object's doc string, nicely formatted.\n\n Displays the doc string for this object.\n\n Parameters\n ----------\n obj : Object\n The object whose docstring should be displayed.\n\n Example\n -------\n\n Don't remember how to initialize a dataframe? Try this:\n\n >>> st.help(pandas.DataFrame)\n\n Want to quickly check what datatype is output by a certain function?\n Try:\n\n >>> x = my_poorly_documented_function()\n >>> st.help(x)\n\n \"\"\"\n import streamlit.elements.doc_string as doc_string\n\n doc_string.marshall(element, obj)\n\n @_with_element\n def exception(self, element, exception):\n \"\"\"Display an exception.\n\n Parameters\n ----------\n exception : Exception\n The exception to display.\n\n Example\n -------\n >>> e = RuntimeError('This is an exception of type RuntimeError')\n >>> st.exception(e)\n\n \"\"\"\n import streamlit.elements.exception_proto as exception_proto\n\n exception_proto.marshall(element.exception, exception)\n\n def dataframe(self, data=None, width=None, height=None):\n \"\"\"Display a dataframe as an interactive table.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to display.\n\n If 'data' is a pandas.Styler, it will be used to style its\n underyling DataFrame. Streamlit supports custom cell\n values and colors. (It does not support some of the more exotic\n pandas styling features, like bar charts, hovering, and captions.)\n Styler support is experimental!\n width : int or None\n Desired width of the UI element expressed in pixels. If None, a\n default width based on the page width is used.\n height : int or None\n Desired height of the UI element expressed in pixels. If None, a\n default height is used.\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... np.random.randn(50, 20),\n ... columns=('col %d' % i for i in range(20)))\n ...\n >>> st.dataframe(df) # Same as st.write(df)\n\n .. 
output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ\n height: 330px\n\n >>> st.dataframe(df, 200, 100)\n\n You can also pass a Pandas Styler object to change the style of\n the rendered DataFrame:\n\n >>> df = pd.DataFrame(\n ... np.random.randn(10, 20),\n ... columns=('col %d' % i for i in range(20)))\n ...\n >>> st.dataframe(df.style.highlight_max(axis=0))\n\n .. output::\n https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby\n height: 285px\n\n \"\"\"\n import streamlit.elements.data_frame_proto as data_frame_proto\n\n def set_data_frame(delta):\n data_frame_proto.marshall_data_frame(data, delta.data_frame)\n\n return self._enqueue_new_element_delta(\n set_data_frame, \"dataframe\", element_width=width, element_height=height\n )\n\n @_with_element\n def line_chart(\n self, element, data=None, width=0, height=0, use_container_width=True\n ):\n \"\"\"Display a line chart.\n\n This is just syntax-sugar around st.altair_chart. The main difference\n is this command uses the data's own column and indices to figure out\n the chart's spec. As a result this is easier to use for many \"just plot\n this\" scenarios, while being less customizable.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict\n or None\n Data to be plotted.\n\n width : int\n The chart width in pixels. If 0, selects the width automatically.\n\n height : int\n The chart width in pixels. If 0, selects the height automatically.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the width argument.\n\n Example\n -------\n >>> chart_data = pd.DataFrame(\n ... np.random.randn(20, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> st.line_chart(chart_data)\n\n .. output::\n https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8\n height: 220px\n\n \"\"\"\n\n import streamlit.elements.altair as altair\n\n chart = altair.generate_chart(\"line\", data, width, height)\n altair.marshall(element.vega_lite_chart, chart, use_container_width)\n\n @_with_element\n def area_chart(\n self, element, data=None, width=0, height=0, use_container_width=True\n ):\n \"\"\"Display a area chart.\n\n This is just syntax-sugar around st.altair_chart. The main difference\n is this command uses the data's own column and indices to figure out\n the chart's spec. As a result this is easier to use for many \"just plot\n this\" scenarios, while being less customizable.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict\n Data to be plotted.\n\n width : int\n The chart width in pixels. If 0, selects the width automatically.\n\n height : int\n The chart width in pixels. If 0, selects the height automatically.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the width argument.\n\n Example\n -------\n >>> chart_data = pd.DataFrame(\n ... np.random.randn(20, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> st.area_chart(chart_data)\n\n .. 
output::\n https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt\n height: 220px\n\n \"\"\"\n import streamlit.elements.altair as altair\n\n chart = altair.generate_chart(\"area\", data, width, height)\n altair.marshall(element.vega_lite_chart, chart, use_container_width)\n\n @_with_element\n def bar_chart(\n self, element, data=None, width=0, height=0, use_container_width=True\n ):\n \"\"\"Display a bar chart.\n\n This is just syntax-sugar around st.altair_chart. The main difference\n is this command uses the data's own column and indices to figure out\n the chart's spec. As a result this is easier to use for many \"just plot\n this\" scenarios, while being less customizable.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict\n Data to be plotted.\n\n width : int\n The chart width in pixels. If 0, selects the width automatically.\n\n height : int\n The chart width in pixels. If 0, selects the height automatically.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the width argument.\n\n Example\n -------\n >>> chart_data = pd.DataFrame(\n ... np.random.randn(50, 3),\n ... columns=[\"a\", \"b\", \"c\"])\n ...\n >>> st.bar_chart(chart_data)\n\n .. output::\n https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk\n height: 220px\n\n \"\"\"\n import streamlit.elements.altair as altair\n\n chart = altair.generate_chart(\"bar\", data, width, height)\n altair.marshall(element.vega_lite_chart, chart, use_container_width)\n\n @_with_element\n def vega_lite_chart(\n self,\n element,\n data=None,\n spec=None,\n width=0,\n use_container_width=False,\n **kwargs,\n ):\n \"\"\"Display a chart using the Vega-Lite library.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n Either the data to be plotted or a Vega-Lite spec containing the\n data (which more closely follows the Vega-Lite API).\n\n spec : dict or None\n The Vega-Lite spec for the chart. If the spec was already passed in\n the previous argument, this must be set to None. See\n https://vega.github.io/vega-lite/docs/ for more info.\n\n width : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the Vega-Lite\n spec. Please refer to the Vega-Lite documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over Vega-Lite's native `width` value.\n\n **kwargs : any\n Same as spec, but as keywords.\n\n Example\n -------\n\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n >>>\n >>> st.vega_lite_chart(df, {\n ... 'mark': {'type': 'circle', 'tooltip': True},\n ... 'encoding': {\n ... 'x': {'field': 'a', 'type': 'quantitative'},\n ... 'y': {'field': 'b', 'type': 'quantitative'},\n ... 'size': {'field': 'c', 'type': 'quantitative'},\n ... 'color': {'field': 'c', 'type': 'quantitative'},\n ... },\n ... })\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n Examples of Vega-Lite usage without Streamlit can be found at\n https://vega.github.io/vega-lite/examples/. 
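(As a quick sketch of the `**kwargs` form described above, and not additional official API, the top-level spec keys can also be passed as keywords; this reuses the `df` defined earlier.)\n\n >>> st.vega_lite_chart(\n ... df,\n ... mark={'type': 'circle', 'tooltip': True},\n ... encoding={\n ... 'x': {'field': 'a', 'type': 'quantitative'},\n ... 'y': {'field': 'b', 'type': 'quantitative'},\n ... },\n ... )\n\n 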
Most of those can be easily\n translated to the syntax shown above.\n\n \"\"\"\n import streamlit.elements.vega_lite as vega_lite\n\n if width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html\"\n )\n\n vega_lite.marshall(\n element.vega_lite_chart,\n data,\n spec,\n use_container_width=use_container_width,\n **kwargs,\n )\n\n @_with_element\n def altair_chart(self, element, altair_chart, width=0, use_container_width=False):\n \"\"\"Display a chart using the Altair library.\n\n Parameters\n ----------\n altair_chart : altair.vegalite.v2.api.Chart\n The Altair chart object to display.\n\n width : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the Altair\n spec. Please refer to the Altair documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over Altair's native `width` value.\n\n Example\n -------\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import altair as alt\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> c = alt.Chart(df).mark_circle().encode(\n ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n >>>\n >>> st.altair_chart(c, use_container_width=True)\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n Examples of Altair charts can be found at\n https://altair-viz.github.io/gallery/.\n\n \"\"\"\n import streamlit.elements.altair as altair\n\n if width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html\"\n )\n\n altair.marshall(\n element.vega_lite_chart,\n altair_chart,\n use_container_width=use_container_width,\n )\n\n @_with_element\n def graphviz_chart(\n self, element, figure_or_dot, width=0, height=0, use_container_width=False\n ):\n \"\"\"Display a graph using the dagre-d3 library.\n\n Parameters\n ----------\n figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str\n The Graphlib graph object or dot string to display\n\n width : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the Graphviz\n spec. Please refer to the Graphviz documentation for details.\n\n height : number\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the height directly in the Graphviz\n spec. Please refer to the Graphviz documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. 
This takes\n precedence over the figure's native `width` value.\n\n Example\n -------\n\n >>> import streamlit as st\n >>> import graphviz as graphviz\n >>>\n >>> # Create a graphlib graph object\n >>> graph = graphviz.Digraph()\n >>> graph.edge('run', 'intr')\n >>> graph.edge('intr', 'runbl')\n >>> graph.edge('runbl', 'run')\n >>> graph.edge('run', 'kernel')\n >>> graph.edge('kernel', 'zombie')\n >>> graph.edge('kernel', 'sleep')\n >>> graph.edge('kernel', 'runmem')\n >>> graph.edge('sleep', 'swap')\n >>> graph.edge('swap', 'runswap')\n >>> graph.edge('runswap', 'new')\n >>> graph.edge('runswap', 'runmem')\n >>> graph.edge('new', 'runmem')\n >>> graph.edge('sleep', 'runmem')\n >>>\n >>> st.graphviz_chart(graph)\n\n Or you can render the chart from the graph using GraphViz's Dot\n language:\n\n >>> st.graphviz_chart('''\n digraph {\n run -> intr\n intr -> runbl\n runbl -> run\n run -> kernel\n kernel -> zombie\n kernel -> sleep\n kernel -> runmem\n sleep -> swap\n swap -> runswap\n runswap -> new\n runswap -> runmem\n new -> runmem\n sleep -> runmem\n }\n ''')\n\n .. output::\n https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL\n height: 400px\n\n \"\"\"\n import streamlit.elements.graphviz_chart as graphviz_chart\n\n if width != 0 and height != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04\"\n )\n elif width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n elif height != 0:\n import streamlit as st\n\n st.warning(\n \"The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04\"\n )\n\n graphviz_chart.marshall(\n element.graphviz_chart, figure_or_dot, use_container_width\n )\n\n @_with_element\n def plotly_chart(\n self,\n element,\n figure_or_data,\n width=0,\n height=0,\n use_container_width=False,\n sharing=\"streamlit\",\n **kwargs,\n ):\n \"\"\"Display an interactive Plotly chart.\n\n Plotly is a charting library for Python. The arguments to this function\n closely follow the ones for Plotly's `plot()` function. You can find\n more about Plotly at https://plot.ly/python.\n\n Parameters\n ----------\n figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,\n dict/list of plotly.graph_objs.Figure/Data, or\n matplotlib.figure.Figure\n\n See https://plot.ly/python/ for examples of graph descriptions.\n\n If a Matplotlib Figure, converts it to a Plotly figure and displays\n it.\n\n width : int\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the width directly in the figure.\n Please refer to the Plotly documentation for details.\n\n height : int\n Deprecated. If != 0 (default), will show an alert.\n From now on you should set the height directly in the figure.\n Please refer to the Plotly documentation for details.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the figure's native `width` value.\n\n sharing : {'streamlit', 'private', 'secret', 'public'}\n Use 'streamlit' to insert the plot and all its dependencies\n directly in the Streamlit app, which means it works offline too.\n This is the default.\n Use any other sharing mode to send the app to Plotly's servers,\n and embed the result into the Streamlit app. See\n https://plot.ly/python/privacy/ for more. 
Note that these sharing\n modes require a Plotly account.\n\n **kwargs\n Any argument accepted by Plotly's `plot()` function.\n\n\n To show Plotly charts in Streamlit, just call `st.plotly_chart`\n wherever you would call Plotly's `py.plot` or `py.iplot`.\n\n Example\n -------\n\n The example below comes straight from the examples at\n https://plot.ly/python:\n\n >>> import streamlit as st\n >>> import plotly.figure_factory as ff\n >>> import numpy as np\n >>>\n >>> # Add histogram data\n >>> x1 = np.random.randn(200) - 2\n >>> x2 = np.random.randn(200)\n >>> x3 = np.random.randn(200) + 2\n >>>\n >>> # Group data together\n >>> hist_data = [x1, x2, x3]\n >>>\n >>> group_labels = ['Group 1', 'Group 2', 'Group 3']\n >>>\n >>> # Create distplot with custom bin_size\n >>> fig = ff.create_distplot(\n ... hist_data, group_labels, bin_size=[.1, .25, .5])\n >>>\n >>> # Plot!\n >>> st.plotly_chart(fig, use_container_width=True)\n\n .. output::\n https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ\n height: 400px\n\n \"\"\"\n # NOTE: \"figure_or_data\" is the name used in Plotly's .plot() method\n # for their main parameter. I don't like the name, but it's best to\n # keep it in sync with what Plotly calls it.\n import streamlit.elements.plotly_chart as plotly_chart\n\n if width != 0 and height != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/\"\n )\n elif width != 0:\n import streamlit as st\n\n st.warning(\n \"The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/\"\n )\n elif height != 0:\n import streamlit as st\n\n st.warning(\n \"The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/\"\n )\n\n plotly_chart.marshall(\n element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs\n )\n\n @_with_element\n def pyplot(self, element, fig=None, clear_figure=None, **kwargs):\n \"\"\"Display a matplotlib.pyplot figure.\n\n Parameters\n ----------\n fig : Matplotlib Figure\n The figure to plot. When this argument isn't specified, which is\n the usual case, this function will render the global plot.\n\n clear_figure : bool\n If True, the figure will be cleared after being rendered.\n If False, the figure will not be cleared after being rendered.\n If left unspecified, we pick a default based on the value of `fig`.\n\n * If `fig` is set, defaults to `False`.\n\n * If `fig` is not set, defaults to `True`. This simulates Jupyter's\n approach to matplotlib rendering.\n\n **kwargs : any\n Arguments to pass to Matplotlib's savefig function.\n\n Example\n -------\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>>\n >>> arr = np.random.normal(1, 1, size=100)\n >>> plt.hist(arr, bins=20)\n >>>\n >>> st.pyplot()\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB\n height: 530px\n\n Notes\n -----\n Matplotlib support several different types of \"backends\". 
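One quick way to see which backend is currently active (a small\n illustrative check using Matplotlib's own API, not part of Streamlit) is:\n\n >>> import matplotlib\n >>> matplotlib.get_backend()\n\n 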
If you're\n getting an error using Matplotlib with Streamlit, try setting your\n backend to \"TkAgg\"::\n\n echo \"backend: TkAgg\" >> ~/.matplotlib/matplotlibrc\n\n For more information, see https://matplotlib.org/faq/usage_faq.html.\n\n \"\"\"\n import streamlit.elements.pyplot as pyplot\n\n pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)\n\n @_with_element\n def bokeh_chart(self, element, figure, use_container_width=False):\n \"\"\"Display an interactive Bokeh chart.\n\n Bokeh is a charting library for Python. The arguments to this function\n closely follow the ones for Bokeh's `show` function. You can find\n more about Bokeh at https://bokeh.pydata.org.\n\n Parameters\n ----------\n figure : bokeh.plotting.figure.Figure\n A Bokeh figure to plot.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over Bokeh's native `width` value.\n\n To show Bokeh charts in Streamlit, just call `st.bokeh_chart`\n wherever you would call Bokeh's `show`.\n\n Example\n -------\n >>> import streamlit as st\n >>> from bokeh.plotting import figure\n >>>\n >>> x = [1, 2, 3, 4, 5]\n >>> y = [6, 7, 2, 4, 5]\n >>>\n >>> p = figure(\n ... title='simple line example',\n ... x_axis_label='x',\n ... y_axis_label='y')\n ...\n >>> p.line(x, y, legend='Trend', line_width=2)\n >>>\n >>> st.bokeh_chart(p, use_container_width=True)\n\n .. output::\n https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp\n height: 600px\n\n \"\"\"\n import streamlit.elements.bokeh_chart as bokeh_chart\n\n bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width)\n\n @_with_element\n def image(\n self,\n element,\n image,\n caption=None,\n width=None,\n use_column_width=False,\n clamp=False,\n channels=\"RGB\",\n format=\"JPEG\",\n ):\n \"\"\"Display an image or list of images.\n\n Parameters\n ----------\n image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str]\n Monochrome image of shape (w,h) or (w,h,1)\n OR a color image of shape (w,h,3)\n OR an RGBA image of shape (w,h,4)\n OR a URL to fetch the image from\n OR a list of one of the above, to display multiple images.\n caption : str or list of str\n Image caption. If displaying multiple images, caption should be a\n list of captions (one for each image).\n width : int or None\n Image width. None means use the image width.\n use_column_width : bool\n If True, set the image width to the column width. This takes\n precedence over the `width` parameter.\n clamp : bool\n Clamp image pixel values to a valid range ([0-255] per channel).\n This is only meaningful for byte array images; the parameter is\n ignored for image URLs. If this is not set, and an image has an\n out-of-range value, an error will be thrown.\n channels : 'RGB' or 'BGR'\n If image is an nd.array, this parameter denotes the format used to\n represent color information. Defaults to 'RGB', meaning\n `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and\n `image[:, :, 2]` is blue. For images coming from libraries like\n OpenCV you should set this to 'BGR', instead.\n format : 'JPEG' or 'PNG'\n This parameter specifies the image format to use when transferring\n the image data. Defaults to 'JPEG'.\n\n Example\n -------\n >>> from PIL import Image\n >>> image = Image.open('sunrise.jpg')\n >>>\n >>> st.image(image, caption='Sunrise by the mountains',\n ... use_column_width=True)\n\n .. 
output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY\n height: 630px\n\n \"\"\"\n from .elements import image_proto\n\n if use_column_width:\n width = -2\n elif width is None:\n width = -1\n elif width <= 0:\n raise StreamlitAPIException(\"Image width must be positive.\")\n\n image_proto.marshall_images(\n self._get_coordinates(),\n image,\n caption,\n width,\n element.imgs,\n clamp,\n channels,\n format,\n )\n\n @_with_element\n def audio(self, element, data, format=\"audio/wav\", start_time=0):\n \"\"\"Display an audio player.\n\n Parameters\n ----------\n data : str, bytes, BytesIO, numpy.ndarray, or file opened with\n io.open().\n Raw audio data, filename, or a URL pointing to the file to load.\n Numpy arrays and raw data formats must include all necessary file\n headers to match specified file format.\n start_time: int\n The time from which this element should start playing.\n format : str\n The mime type for the audio file. Defaults to 'audio/wav'.\n See https://tools.ietf.org/html/rfc4281 for more info.\n\n Example\n -------\n >>> audio_file = open('myaudio.ogg', 'rb')\n >>> audio_bytes = audio_file.read()\n >>>\n >>> st.audio(audio_bytes, format='audio/ogg')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb\n height: 400px\n\n \"\"\"\n from .elements import media_proto\n\n media_proto.marshall_audio(\n self._get_coordinates(), element.audio, data, format, start_time\n )\n\n @_with_element\n def video(self, element, data, format=\"video/mp4\", start_time=0):\n \"\"\"Display a video player.\n\n Parameters\n ----------\n data : str, bytes, BytesIO, numpy.ndarray, or file opened with\n io.open().\n Raw video data, filename, or URL pointing to a video to load.\n Includes support for YouTube URLs.\n Numpy arrays and raw data formats must include all necessary file\n headers to match specified file format.\n format : str\n The mime type for the video file. Defaults to 'video/mp4'.\n See https://tools.ietf.org/html/rfc4281 for more info.\n start_time: int\n The time from which this element should start playing.\n\n Example\n -------\n >>> video_file = open('myvideo.mp4', 'rb')\n >>> video_bytes = video_file.read()\n >>>\n >>> st.video(video_bytes)\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv\n height: 600px\n\n .. note::\n Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is\n not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit.\n See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this\n `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_\n for more information.\n\n \"\"\"\n from .elements import media_proto\n\n media_proto.marshall_video(\n self._get_coordinates(), element.video, data, format, start_time\n )\n\n @_with_element\n def button(self, element, label, key=None):\n \"\"\"Display a button widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this button is for.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. 
Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n bool\n If the button was clicked on the last run of the app.\n\n Example\n -------\n >>> if st.button('Say hello'):\n ... st.write('Why hello there')\n ... else:\n ... st.write('Goodbye')\n\n \"\"\"\n element.button.label = label\n element.button.default = False\n\n ui_value = _get_widget_ui_value(\"button\", element, user_key=key)\n current_value = ui_value if ui_value is not None else False\n return current_value\n\n @_with_element\n def checkbox(self, element, label, value=False, key=None):\n \"\"\"Display a checkbox widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this checkbox is for.\n value : bool\n Preselect the checkbox when it first renders. This will be\n cast to bool internally.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n bool\n Whether or not the checkbox is checked.\n\n Example\n -------\n >>> agree = st.checkbox('I agree')\n >>>\n >>> if agree:\n ... st.write('Great!')\n\n \"\"\"\n element.checkbox.label = label\n element.checkbox.default = bool(value)\n\n ui_value = _get_widget_ui_value(\"checkbox\", element, user_key=key)\n current_value = ui_value if ui_value is not None else value\n return bool(current_value)\n\n @_with_element\n def multiselect(\n self, element, label, options, default=None, format_func=str, key=None\n ):\n \"\"\"Display a multiselect widget.\n The multiselect widget starts as empty.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this select widget is for.\n options : list, tuple, numpy.ndarray, or pandas.Series\n Labels for the select options. This will be cast to str internally\n by default.\n default: [str] or None\n List of default values.\n format_func : function\n Function to modify the display of selectbox options. It receives\n the raw option as an argument and should output the label to be\n shown for that option. This has no impact on the return value of\n the selectbox.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n [str]\n A list with the selected options\n\n Example\n -------\n >>> options = st.multiselect(\n ... 'What are your favorite colors',\n ... ['Green', 'Yellow', 'Red', 'Blue'],\n ... ['Yellow', 'Red'])\n >>>\n >>> st.write('You selected:', options)\n\n .. note::\n User experience can be degraded for large lists of `options` (100+), as this widget\n is not designed to handle arbitrary text search efficiently. 
See this\n `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_\n on the Streamlit community forum for more information and\n `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue.\n\n \"\"\"\n\n # Perform validation checks and return indices base on the default values.\n def _check_and_convert_to_indices(options, default_values):\n if default_values is None and None not in options:\n return None\n\n if not isinstance(default_values, list):\n # This if is done before others because calling if not x (done\n # right below) when x is of type pd.Series() or np.array() throws a\n # ValueError exception.\n if is_type(default_values, \"numpy.ndarray\") or is_type(\n default_values, \"pandas.core.series.Series\"\n ):\n default_values = list(default_values)\n elif not default_values:\n default_values = [default_values]\n else:\n default_values = list(default_values)\n\n for value in default_values:\n if value not in options:\n raise StreamlitAPIException(\n \"Every Multiselect default value must exist in options\"\n )\n\n return [options.index(value) for value in default_values]\n\n indices = _check_and_convert_to_indices(options, default)\n element.multiselect.label = label\n default_value = [] if indices is None else indices\n element.multiselect.default[:] = default_value\n element.multiselect.options[:] = [\n str(format_func(option)) for option in options\n ]\n\n ui_value = _get_widget_ui_value(\"multiselect\", element, user_key=key)\n current_value = ui_value.value if ui_value is not None else default_value\n return [options[i] for i in current_value]\n\n @_with_element\n def radio(self, element, label, options, index=0, format_func=str, key=None):\n \"\"\"Display a radio button widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this radio group is for.\n options : list, tuple, numpy.ndarray, or pandas.Series\n Labels for the radio options. This will be cast to str internally\n by default.\n index : int\n The index of the preselected option on first render.\n format_func : function\n Function to modify the display of radio options. It receives\n the raw option as an argument and should output the label to be\n shown for that option. This has no impact on the return value of\n the radio.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n any\n The selected option.\n\n Example\n -------\n >>> genre = st.radio(\n ... \"What\\'s your favorite movie genre\",\n ... ('Comedy', 'Drama', 'Documentary'))\n >>>\n >>> if genre == 'Comedy':\n ... st.write('You selected comedy.')\n ... else:\n ... 
st.write(\"You didn\\'t select comedy.\")\n\n \"\"\"\n if not isinstance(index, int):\n raise StreamlitAPIException(\n \"Radio Value has invalid type: %s\" % type(index).__name__\n )\n\n if len(options) > 0 and not 0 <= index < len(options):\n raise StreamlitAPIException(\n \"Radio index must be between 0 and length of options\"\n )\n\n element.radio.label = label\n element.radio.default = index\n element.radio.options[:] = [str(format_func(option)) for option in options]\n\n ui_value = _get_widget_ui_value(\"radio\", element, user_key=key)\n current_value = ui_value if ui_value is not None else index\n\n return (\n options[current_value]\n if len(options) > 0 and options[current_value] is not None\n else NoValue\n )\n\n @_with_element\n def selectbox(self, element, label, options, index=0, format_func=str, key=None):\n \"\"\"Display a select widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this select widget is for.\n options : list, tuple, numpy.ndarray, or pandas.Series\n Labels for the select options. This will be cast to str internally\n by default.\n index : int\n The index of the preselected option on first render.\n format_func : function\n Function to modify the display of the labels. It receives the option\n as an argument and its output will be cast to str.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n any\n The selected option\n\n Example\n -------\n >>> option = st.selectbox(\n ... 'How would you like to be contacted?',\n ... ('Email', 'Home phone', 'Mobile phone'))\n >>>\n >>> st.write('You selected:', option)\n\n \"\"\"\n if not isinstance(index, int):\n raise StreamlitAPIException(\n \"Selectbox Value has invalid type: %s\" % type(index).__name__\n )\n\n if len(options) > 0 and not 0 <= index < len(options):\n raise StreamlitAPIException(\n \"Selectbox index must be between 0 and length of options\"\n )\n\n element.selectbox.label = label\n element.selectbox.default = index\n element.selectbox.options[:] = [str(format_func(option)) for option in options]\n\n ui_value = _get_widget_ui_value(\"selectbox\", element, user_key=key)\n current_value = ui_value if ui_value is not None else index\n\n return (\n options[current_value]\n if len(options) > 0 and options[current_value] is not None\n else NoValue\n )\n\n @_with_element\n def slider(\n self,\n element,\n label,\n min_value=None,\n max_value=None,\n value=None,\n step=None,\n format=None,\n key=None,\n ):\n \"\"\"Display a slider widget.\n\n This also allows you to render a range slider by passing a two-element tuple or list as the `value`.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this slider is for.\n min_value : int/float or None\n The minimum permitted value.\n Defaults to 0 if the value is an int, 0.0 otherwise.\n max_value : int/float or None\n The maximum permitted value.\n Defaults 100 if the value is an int, 1.0 otherwise.\n value : int/float or a tuple/list of int/float or None\n The value of the slider when it first renders. If a tuple/list\n of two values is passed here, then a range slider with those lower\n and upper bounds is rendered. 
For example, if set to `(1, 10)` the\n slider will have a selectable range between 1 and 10.\n Defaults to min_value.\n step : int/float or None\n The stepping interval.\n Defaults to 1 if the value is an int, 0.01 otherwise.\n format : str or None\n A printf-style format string controlling how the interface should\n display numbers. This does not impact the return value.\n Valid formatters: %d %e %f %g %i\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n int/float or tuple of int/float\n The current value of the slider widget. The return type will match\n the data type of the value parameter.\n\n Examples\n --------\n >>> age = st.slider('How old are you?', 0, 130, 25)\n >>> st.write(\"I'm \", age, 'years old')\n\n And here's an example of a range slider:\n\n >>> values = st.slider(\n ... 'Select a range of values',\n ... 0.0, 100.0, (25.0, 75.0))\n >>> st.write('Values:', values)\n\n \"\"\"\n\n # Set value default.\n if value is None:\n value = min_value if min_value is not None else 0\n\n # Ensure that the value is either a single value or a range of values.\n single_value = isinstance(value, (int, float))\n range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)\n if not single_value and not range_value:\n raise StreamlitAPIException(\n \"Slider value should either be an int/float or a list/tuple of \"\n \"0 to 2 ints/floats\"\n )\n\n # Ensure that the value is either an int/float or a list/tuple of ints/floats.\n if single_value:\n int_value = isinstance(value, int)\n float_value = isinstance(value, float)\n else:\n int_value = all(map(lambda v: isinstance(v, int), value))\n float_value = all(map(lambda v: isinstance(v, float), value))\n\n if not int_value and not float_value:\n raise StreamlitAPIException(\n \"Slider tuple/list components must be of the same type.\"\n )\n\n # Set corresponding defaults.\n if min_value is None:\n min_value = 0 if int_value else 0.0\n if max_value is None:\n max_value = 100 if int_value else 1.0\n if step is None:\n step = 1 if int_value else 0.01\n\n # Ensure that all arguments are of the same type.\n args = [min_value, max_value, step]\n int_args = all(map(lambda a: isinstance(a, int), args))\n float_args = all(map(lambda a: isinstance(a, float), args))\n if not int_args and not float_args:\n raise StreamlitAPIException(\n \"Slider value arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n }\n )\n\n # Ensure that the value matches arguments' types.\n all_ints = int_value and int_args\n all_floats = float_value and float_args\n if not all_ints and not all_floats:\n raise StreamlitAPIException(\n \"Both value and arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n }\n )\n\n # Ensure that min <= value <= max.\n if single_value:\n if not min_value <= value <= max_value:\n raise StreamlitAPIException(\n \"The default `value` of %(value)s \"\n \"must lie 
between the `min_value` of %(min)s \"\n \"and the `max_value` of %(max)s, inclusively.\"\n % {\"value\": value, \"min\": min_value, \"max\": max_value}\n )\n elif len(value) == 2:\n start, end = value\n if not min_value <= start <= end <= max_value:\n raise StreamlitAPIException(\n \"The value and/or arguments are out of range.\"\n )\n else:\n value = [min_value, max_value]\n\n # Bounds checks. JSNumber produces human-readable exceptions that\n # we simply re-package as StreamlitAPIExceptions.\n # (We check `min_value` and `max_value` here; `value` and `step` are\n # already known to be in the [min_value, max_value] range.)\n try:\n if all_ints:\n JSNumber.validate_int_bounds(min_value, \"`min_value`\")\n JSNumber.validate_int_bounds(max_value, \"`max_value`\")\n else:\n JSNumber.validate_float_bounds(min_value, \"`min_value`\")\n JSNumber.validate_float_bounds(max_value, \"`max_value`\")\n except JSNumberBoundsException as e:\n raise StreamlitAPIException(str(e))\n\n # Set format default.\n if format is None:\n if all_ints:\n format = \"%d\"\n else:\n format = \"%0.2f\"\n\n # It would be great if we could guess the number of decimal places from\n # the `step` argument, but this would only be meaningful if step were a\n # decimal. As a possible improvement we could make this function accept\n # decimals and/or use some heuristics for floats.\n\n element.slider.label = label\n element.slider.format = format\n element.slider.default[:] = [value] if single_value else value\n element.slider.min = min_value\n element.slider.max = max_value\n element.slider.step = step\n\n ui_value = _get_widget_ui_value(\"slider\", element, user_key=key)\n # Convert the current value to the appropriate type.\n current_value = ui_value if ui_value is not None else value\n # Cast ui_value to the same type as the input arguments\n if ui_value is not None:\n current_value = getattr(ui_value, \"value\")\n # Convert float array into int array if the rest of the arguments\n # are ints\n if all_ints:\n current_value = list(map(int, current_value))\n # If there is only one value in the array destructure it into a\n # single variable\n current_value = current_value[0] if single_value else current_value\n return current_value if single_value else tuple(current_value)\n\n @_with_element\n def file_uploader(\n self, element, label, type=None, encoding=\"auto\", key=None,\n ):\n \"\"\"Display a file uploader widget.\n\n By default, uploaded files are limited to 200MB. You can configure\n this using the `server.maxUploadSize` config option.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this file uploader is for.\n type : str or list of str or None\n Array of allowed extensions. ['png', 'jpg']\n By default, all extensions are allowed.\n encoding : str or None\n The encoding to use when opening textual files (i.e. non-binary).\n For example: 'utf-8'. If set to 'auto', will try to guess the\n encoding. If None, will assume the file is binary.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n BytesIO or StringIO or or list of BytesIO/StringIO or None\n If no file has been uploaded, returns None. 
Otherwise, returns\n the data for the uploaded file(s):\n - If the file is in a well-known textual format (or if the encoding\n parameter is set), the file data is a StringIO.\n - Otherwise the file data is BytesIO.\n - If multiple_files is True, a list of file data will be returned.\n\n Note that BytesIO/StringIO are \"file-like\", which means you can\n pass them anywhere where a file is expected!\n\n Examples\n --------\n >>> uploaded_file = st.file_uploader(\"Choose a CSV file\", type=\"csv\")\n >>> if uploaded_file is not None:\n ... data = pd.read_csv(uploaded_file)\n ... st.write(data)\n\n \"\"\"\n # Don't release this just yet. (When ready to release, turn test back\n # on at file_uploader_test.py)\n accept_multiple_files = False\n\n if isinstance(type, str):\n type = [type]\n\n element.file_uploader.label = label\n element.file_uploader.type[:] = type if type is not None else []\n element.file_uploader.max_upload_size_mb = config.get_option(\n \"server.maxUploadSize\"\n )\n element.file_uploader.multiple_files = accept_multiple_files\n _set_widget_id(\"file_uploader\", element, user_key=key)\n\n files = None\n ctx = get_report_ctx()\n if ctx is not None:\n files = ctx.uploaded_file_mgr.get_files(\n session_id=ctx.session_id, widget_id=element.file_uploader.id\n )\n\n if files is None:\n return NoValue\n\n file_datas = [get_encoded_file_data(file.data, encoding) for file in files]\n return file_datas if accept_multiple_files else file_datas[0]\n\n @_with_element\n def beta_color_picker(self, element, label, value=None, key=None):\n \"\"\"Display a color picker widget.\n\n Note: This is a beta feature. See\n https://docs.streamlit.io/en/latest/pre_release_features.html for more\n information.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : str or None\n The hex value of this widget when it first renders. If None,\n defaults to black.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n str\n The selected color as a hex string.\n\n Example\n -------\n >>> color = st.beta_color_picker('Pick A Color', '#00f900')\n >>> st.write('The current color is', color)\n\n \"\"\"\n # set value default\n if value is None:\n value = \"#000000\"\n\n # make sure the value is a string\n if not isinstance(value, str):\n raise StreamlitAPIException(\n \"\"\"\n Color Picker Value has invalid type: %s. Expects a hex string\n like '#00FFAA' or '#000'.\n \"\"\"\n % type(value).__name__\n )\n\n # validate the value and expects a hex string\n match = re.match(r\"^#(?:[0-9a-fA-F]{3}){1,2}$\", value)\n\n if not match:\n raise StreamlitAPIException(\n \"\"\"\n '%s' is not a valid hex code for colors. Valid ones are like\n '#00FFAA' or '#000'.\n \"\"\"\n % value\n )\n\n element.color_picker.label = label\n element.color_picker.default = str(value)\n\n ui_value = _get_widget_ui_value(\"color_picker\", element, user_key=key)\n current_value = ui_value if ui_value is not None else value\n\n return str(current_value)\n\n @_with_element\n def text_input(\n self, element, label, value=\"\", max_chars=None, key=None, type=\"default\"\n ):\n \"\"\"Display a single-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. 
This will be\n cast to str internally.\n max_chars : int or None\n Max number of characters allowed in text input.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n type : str\n The type of the text input. This can be either \"default\" (for\n a regular text input), or \"password\" (for a text input that\n masks the user's typed value). Defaults to \"default\".\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> title = st.text_input('Movie title', 'Life of Brian')\n >>> st.write('The current movie title is', title)\n\n \"\"\"\n element.text_input.label = label\n element.text_input.default = str(value)\n\n if max_chars is not None:\n element.text_input.max_chars = max_chars\n\n if type == \"default\":\n element.text_input.type = TextInput.DEFAULT\n elif type == \"password\":\n element.text_input.type = TextInput.PASSWORD\n else:\n raise StreamlitAPIException(\n \"'%s' is not a valid text_input type. Valid types are 'default' and 'password'.\"\n % type\n )\n\n ui_value = _get_widget_ui_value(\"text_input\", element, user_key=key)\n current_value = ui_value if ui_value is not None else value\n return str(current_value)\n\n @_with_element\n def text_area(\n self, element, label, value=\"\", height=None, max_chars=None, key=None\n ):\n \"\"\"Display a multi-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n height : int or None\n Desired height of the UI element expressed in pixels. If None, a\n default height is used.\n max_chars : int or None\n Maximum number of characters allowed in text area.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> txt = st.text_area('Text to analyze', '''\n ... It was the best of times, it was the worst of times, it was\n ... the age of wisdom, it was the age of foolishness, it was\n ... the epoch of belief, it was the epoch of incredulity, it\n ... was the season of Light, it was the season of Darkness, it\n ... was the spring of hope, it was the winter of despair, (...)\n ... ''')\n >>> st.write('Sentiment:', run_sentiment_analysis(txt))\n\n \"\"\"\n element.text_area.label = label\n element.text_area.default = str(value)\n\n if height is not None:\n element.text_area.height = height\n\n if max_chars is not None:\n element.text_area.max_chars = max_chars\n\n ui_value = _get_widget_ui_value(\"text_area\", element, user_key=key)\n current_value = ui_value if ui_value is not None else value\n return str(current_value)\n\n @_with_element\n def time_input(self, element, label, value=None, key=None):\n \"\"\"Display a time input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this time input is for.\n value : datetime.time/datetime.datetime\n The value of this widget when it first renders. This will be\n cast to str internally. 
Defaults to the current time.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n datetime.time\n The current value of the time input widget.\n\n Example\n -------\n >>> t = st.time_input('Set an alarm for', datetime.time(8, 45))\n >>> st.write('Alarm is set for', t)\n\n \"\"\"\n # Set value default.\n if value is None:\n value = datetime.now().time()\n\n # Ensure that the value is either datetime/time\n if not isinstance(value, datetime) and not isinstance(value, time):\n raise StreamlitAPIException(\n \"The type of the value should be either datetime or time.\"\n )\n\n # Convert datetime to time\n if isinstance(value, datetime):\n value = value.time()\n\n element.time_input.label = label\n element.time_input.default = time.strftime(value, \"%H:%M\")\n\n ui_value = _get_widget_ui_value(\"time_input\", element, user_key=key)\n current_value = (\n datetime.strptime(ui_value, \"%H:%M\").time()\n if ui_value is not None\n else value\n )\n return current_value\n\n @_with_element\n def date_input(\n self,\n element,\n label,\n value=None,\n min_value=datetime.min,\n max_value=None,\n key=None,\n ):\n \"\"\"Display a date input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this date input is for.\n value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None\n The value of this widget when it first renders. If a list/tuple with\n 0 to 2 date/datetime values is provided, the datepicker will allow\n users to provide a range. Defaults to today as a single-date picker.\n min_value : datetime.date or datetime.datetime\n The minimum selectable date. Defaults to datetime.min.\n max_value : datetime.date or datetime.datetime\n The maximum selectable date. Defaults to today+10y.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n datetime.date\n The current value of the date input widget.\n\n Example\n -------\n >>> d = st.date_input(\n ... \"When\\'s your birthday\",\n ... 
datetime.date(2019, 7, 6))\n >>> st.write('Your birthday is:', d)\n\n \"\"\"\n # Set value default.\n if value is None:\n value = datetime.now().date()\n\n single_value = isinstance(value, (date, datetime))\n range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)\n if not single_value and not range_value:\n raise StreamlitAPIException(\n \"DateInput value should either be an date/datetime or a list/tuple of \"\n \"0 - 2 date/datetime values\"\n )\n\n if single_value:\n value = [value]\n\n element.date_input.is_range = range_value\n\n value = [v.date() if isinstance(v, datetime) else v for v in value]\n\n element.date_input.label = label\n element.date_input.default[:] = [date.strftime(v, \"%Y/%m/%d\") for v in value]\n\n if isinstance(min_value, datetime):\n min_value = min_value.date()\n\n element.date_input.min = date.strftime(min_value, \"%Y/%m/%d\")\n\n if max_value is None:\n today = date.today()\n max_value = date(today.year + 10, today.month, today.day)\n\n if isinstance(max_value, datetime):\n max_value = max_value.date()\n\n element.date_input.max = date.strftime(max_value, \"%Y/%m/%d\")\n\n ui_value = _get_widget_ui_value(\"date_input\", element, user_key=key)\n\n if ui_value is not None:\n value = getattr(ui_value, \"data\")\n value = [datetime.strptime(v, \"%Y/%m/%d\").date() for v in value]\n\n if single_value:\n return value[0]\n else:\n return tuple(value)\n\n @_with_element\n def number_input(\n self,\n element,\n label,\n min_value=None,\n max_value=None,\n value=NoValue(),\n step=None,\n format=None,\n key=None,\n ):\n \"\"\"Display a numeric input widget.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this input is for.\n min_value : int or float or None\n The minimum permitted value.\n If None, there will be no minimum.\n max_value : int or float or None\n The maximum permitted value.\n If None, there will be no maximum.\n value : int or float or None\n The value of this widget when it first renders.\n Defaults to min_value, or 0.0 if min_value is None\n step : int or float or None\n The stepping interval.\n Defaults to 1 if the value is an int, 0.01 otherwise.\n If the value is not specified, the format parameter will be used.\n format : str or None\n A printf-style format string controlling how the interface should\n display numbers. Output must be purely numeric. This does not impact\n the return value. Valid formatters: %d %e %f %g %i\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n int or float\n The current value of the numeric input widget. 
The return type\n will match the data type of the value parameter.\n\n Example\n -------\n >>> number = st.number_input('Insert a number')\n >>> st.write('The current number is ', number)\n \"\"\"\n\n if isinstance(value, NoValue):\n if min_value:\n value = min_value\n else:\n value = 0.0 # We set a float as default\n\n int_value = isinstance(value, numbers.Integral)\n float_value = isinstance(value, float)\n\n if value is None:\n raise StreamlitAPIException(\n \"Default value for number_input should be an int or a float.\"\n )\n else:\n if format is None:\n format = \"%d\" if int_value else \"%0.2f\"\n\n if format in [\"%d\", \"%u\", \"%i\"] and float_value:\n # Warn user to check if displaying float as int was really intended.\n import streamlit as st\n\n st.warning(\n \"Warning: NumberInput value below is float, but format {} displays as integer.\".format(\n format\n )\n )\n\n if step is None:\n step = 1 if int_value else 0.01\n\n try:\n float(format % 2)\n except (TypeError, ValueError):\n raise StreamlitAPIException(\n \"Format string for st.number_input contains invalid characters: %s\"\n % format\n )\n\n # Ensure that all arguments are of the same type.\n args = [min_value, max_value, step]\n\n int_args = all(\n map(\n lambda a: (\n isinstance(a, numbers.Integral) or isinstance(a, type(None))\n ),\n args,\n )\n )\n float_args = all(\n map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)\n )\n\n if not int_args and not float_args:\n raise StreamlitAPIException(\n \"All arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n }\n )\n\n # Ensure that the value matches arguments' types.\n all_ints = int_value and int_args\n all_floats = float_value and float_args\n\n if not all_ints and not all_floats:\n raise StreamlitAPIException(\n \"All numerical arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n \"\\n`step` has %(step_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n \"step_type\": type(step).__name__,\n }\n )\n\n if (min_value and min_value > value) or (max_value and max_value < value):\n raise StreamlitAPIException(\n \"The default `value` of %(value)s \"\n \"must lie between the `min_value` of %(min)s \"\n \"and the `max_value` of %(max)s, inclusively.\"\n % {\"value\": value, \"min\": min_value, \"max\": max_value}\n )\n\n # Bounds checks. 
JSNumber produces human-readable exceptions that\n # we simply re-package as StreamlitAPIExceptions.\n try:\n if all_ints:\n if min_value is not None:\n JSNumber.validate_int_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_int_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_int_bounds(step, \"`step`\")\n JSNumber.validate_int_bounds(value, \"`value`\")\n else:\n if min_value is not None:\n JSNumber.validate_float_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_float_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_float_bounds(step, \"`step`\")\n JSNumber.validate_float_bounds(value, \"`value`\")\n except JSNumberBoundsException as e:\n raise StreamlitAPIException(str(e))\n\n number_input = element.number_input\n number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT\n number_input.label = label\n number_input.default = value\n\n if min_value is not None:\n number_input.min = min_value\n number_input.has_min = True\n\n if max_value is not None:\n number_input.max = max_value\n number_input.has_max = True\n\n if step is not None:\n number_input.step = step\n\n if format is not None:\n number_input.format = format\n\n ui_value = _get_widget_ui_value(\"number_input\", element, user_key=key)\n\n return ui_value if ui_value is not None else value\n\n @_with_element\n def progress(self, element, value):\n \"\"\"Display a progress bar.\n\n Parameters\n ----------\n value : int or float\n 0 <= value <= 100 for int\n\n 0.0 <= value <= 1.0 for float\n\n Example\n -------\n Here is an example of a progress bar increasing over time:\n\n >>> import time\n >>>\n >>> my_bar = st.progress(0)\n >>>\n >>> for percent_complete in range(100):\n ... time.sleep(0.1)\n ... my_bar.progress(percent_complete + 1)\n\n \"\"\"\n\n # TODO: standardize numerical type checking across st.* functions.\n\n if isinstance(value, float):\n if 0.0 <= value <= 1.0:\n element.progress.value = int(value * 100)\n else:\n raise StreamlitAPIException(\n \"Progress Value has invalid value [0.0, 1.0]: %f\" % value\n )\n\n elif isinstance(value, int):\n if 0 <= value <= 100:\n element.progress.value = value\n else:\n raise StreamlitAPIException(\n \"Progress Value has invalid value [0, 100]: %d\" % value\n )\n else:\n raise StreamlitAPIException(\n \"Progress Value has invalid type: %s\" % type(value).__name__\n )\n\n @_with_element\n def empty(self, element):\n \"\"\"Add a placeholder to the app.\n\n The placeholder can be filled any time by calling methods on the return\n value.\n\n Example\n -------\n >>> my_placeholder = st.empty()\n >>>\n >>> # Now replace the placeholder with some text:\n >>> my_placeholder.text(\"Hello world!\")\n >>>\n >>> # And replace the text with an image:\n >>> my_placeholder.image(my_image_bytes)\n\n \"\"\"\n # The protobuf needs something to be set\n element.empty.unused = True\n\n @_with_element\n def map(self, element, data=None, zoom=None, use_container_width=True):\n \"\"\"Display a map with points on it.\n\n This is a wrapper around st.pydeck_chart to quickly create scatterplot\n charts on top of a map, with auto-centering and auto-zoom.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! 
(for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to be plotted. Must have columns called 'lat', 'lon',\n 'latitude', or 'longitude'.\n zoom : int\n Zoom level as specified in\n https://wiki.openstreetmap.org/wiki/Zoom_levels\n\n Example\n -------\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.map(df)\n\n .. output::\n https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH\n height: 600px\n\n \"\"\"\n import streamlit.elements.map as streamlit_map\n\n element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)\n element.deck_gl_json_chart.use_container_width = use_container_width\n\n @_with_element\n def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):\n \"\"\"Draw a map chart using the Deck.GL library.\n\n This API closely follows Deck.GL's JavaScript API\n (https://deck.gl/#/documentation), with a few small adaptations and\n some syntax sugar.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n\n spec : dict\n Keys in this dict can be:\n\n - Anything accepted by Deck.GL's top level element, such as\n \"viewport\", \"height\", \"width\".\n\n - \"layers\": a list of dicts containing information to build a new\n Deck.GL layer in the map. Each layer accepts the following keys:\n\n - \"data\" : DataFrame\n The data for the current layer.\n\n - \"type\" : str\n One of the Deck.GL layer types that are currently supported\n by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,\n PointCloudLayer, ScatterplotLayer, ScreenGridLayer,\n TextLayer.\n\n - Plus anything accepted by that layer type. The exact keys that\n are accepted depend on the \"type\" field, above. For example, for\n ScatterplotLayer you can set fields like \"opacity\", \"filled\",\n \"stroked\", and so on.\n\n In addition, Deck.GL\"s documentation for ScatterplotLayer\n shows you can use a \"getRadius\" field to individually set\n the radius of each circle in the plot. 
So here you would\n set \"getRadius\": \"my_column\" where \"my_column\" is the name\n of the column containing the radius data.\n\n For things like \"getPosition\", which expect an array rather\n than a scalar value, we provide alternates that make the\n API simpler to use with dataframes:\n\n - Instead of \"getPosition\" : use \"getLatitude\" and\n \"getLongitude\".\n - Instead of \"getSourcePosition\" : use \"getLatitude\" and\n \"getLongitude\".\n - Instead of \"getTargetPosition\" : use \"getTargetLatitude\"\n and \"getTargetLongitude\".\n - Instead of \"getColor\" : use \"getColorR\", \"getColorG\",\n \"getColorB\", and (optionally) \"getColorA\", for red,\n green, blue and alpha.\n - Instead of \"getSourceColor\" : use the same as above.\n - Instead of \"getTargetColor\" : use \"getTargetColorR\", etc.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the figure's native `width` value.\n\n **kwargs : any\n Same as spec, but as keywords. Keys are \"unflattened\" at the\n underscore characters. For example, foo_bar_baz=123 becomes\n foo={'bar': {'bar': 123}}.\n\n Example\n -------\n >>> st.deck_gl_chart(\n ... viewport={\n ... 'latitude': 37.76,\n ... 'longitude': -122.4,\n ... 'zoom': 11,\n ... 'pitch': 50,\n ... },\n ... layers=[{\n ... 'type': 'HexagonLayer',\n ... 'data': df,\n ... 'radius': 200,\n ... 'elevationScale': 4,\n ... 'elevationRange': [0, 1000],\n ... 'pickable': True,\n ... 'extruded': True,\n ... }, {\n ... 'type': 'ScatterplotLayer',\n ... 'data': df,\n ... }])\n ...\n\n .. output::\n https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i\n height: 530px\n\n \"\"\"\n\n suppress_deprecation_warning = config.get_option(\n \"global.suppressDeprecationWarnings\"\n )\n if not suppress_deprecation_warning:\n import streamlit as st\n\n st.warning(\n \"\"\"\n The `deck_gl_chart` widget is deprecated and will be removed on\n 2020-05-01. To render a map, you should use `st.pydeck_chart` widget.\n \"\"\"\n )\n\n import streamlit.elements.deck_gl as deck_gl\n\n deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)\n\n @_with_element\n def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):\n \"\"\"Draw a chart using the PyDeck library.\n\n This supports 3D maps, point clouds, and more! More info about PyDeck\n at https://deckgl.readthedocs.io/en/latest/.\n\n These docs are also quite useful:\n\n - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs\n - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n spec: pydeck.Deck or None\n Object specifying the PyDeck chart to draw.\n\n Example\n -------\n Here's a chart using a HexagonLayer and a ScatterplotLayer on top of\n the light map style:\n\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.pydeck_chart(pdk.Deck(\n ... map_style='mapbox://styles/mapbox/light-v9',\n ... initial_view_state=pdk.ViewState(\n ... latitude=37.76,\n ... 
longitude=-122.4,\n ... zoom=11,\n ... pitch=50,\n ... ),\n ... layers=[\n ... pdk.Layer(\n ... 'HexagonLayer',\n ... data=df,\n ... get_position='[lon, lat]',\n ... radius=200,\n ... elevation_scale=4,\n ... elevation_range=[0, 1000],\n ... pickable=True,\n ... extruded=True,\n ... ),\n ... pdk.Layer(\n ... 'ScatterplotLayer',\n ... data=df,\n ... get_position='[lon, lat]',\n ... get_color='[200, 30, 0, 160]',\n ... get_radius=200,\n ... ),\n ... ],\n ... ))\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i\n height: 530px\n\n \"\"\"\n import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\n\n deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)\n\n @_with_element\n def table(self, element, data=None):\n \"\"\"Display a static table.\n\n This differs from `st.dataframe` in that the table in this case is\n static: its entire contents are just laid out directly on the page.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The table data.\n\n Example\n -------\n >>> df = pd.DataFrame(\n ... np.random.randn(10, 5),\n ... columns=('col %d' % i for i in range(5)))\n ...\n >>> st.table(df)\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq\n height: 480px\n\n \"\"\"\n import streamlit.elements.data_frame_proto as data_frame_proto\n\n data_frame_proto.marshall_data_frame(data, element.table)\n\n def add_rows(self, data=None, **kwargs):\n \"\"\"Concatenate a dataframe to the bottom of the current one.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n Table to concat. Optional.\n\n **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None\n The named dataset to concat. Optional. You can only pass in 1\n dataset (including the one in the data parameter).\n\n Example\n -------\n >>> df1 = pd.DataFrame(\n ... np.random.randn(50, 20),\n ... columns=('col %d' % i for i in range(20)))\n ...\n >>> my_table = st.table(df1)\n >>>\n >>> df2 = pd.DataFrame(\n ... np.random.randn(50, 20),\n ... columns=('col %d' % i for i in range(20)))\n ...\n >>> my_table.add_rows(df2)\n >>> # Now the table shown in the Streamlit app contains the data for\n >>> # df1 followed by the data for df2.\n\n You can do the same thing with plots. For example, if you want to add\n more data to a line chart:\n\n >>> # Assuming df1 and df2 from the example above still exist...\n >>> my_chart = st.line_chart(df1)\n >>> my_chart.add_rows(df2)\n >>> # Now the chart shown in the Streamlit app contains the data for\n >>> # df1 followed by the data for df2.\n\n And for plots whose datasets are named, you can pass the data with a\n keyword argument where the key is the name:\n\n >>> my_chart = st.vega_lite_chart({\n ... 'mark': 'line',\n ... 'encoding': {'x': 'a', 'y': 'b'},\n ... 'datasets': {\n ... 'some_fancy_name': df1, # <-- named dataset\n ... },\n ... 'data': {'name': 'some_fancy_name'},\n ... 
}),\n >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword\n\n \"\"\"\n if self._container is None or self._cursor is None:\n return self\n\n if not self._cursor.is_locked:\n raise StreamlitAPIException(\"Only existing elements can `add_rows`.\")\n\n # Accept syntax st.add_rows(df).\n if data is not None and len(kwargs) == 0:\n name = \"\"\n # Accept syntax st.add_rows(foo=df).\n elif len(kwargs) == 1:\n name, data = kwargs.popitem()\n # Raise error otherwise.\n else:\n raise StreamlitAPIException(\n \"Wrong number of arguments to add_rows().\"\n \"Command requires exactly one dataset\"\n )\n\n # When doing add_rows on an element that does not already have data\n # (for example, st.line_chart() without any args), call the original\n # st.foo() element with new data instead of doing an add_rows().\n if (\n self._cursor.props[\"delta_type\"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES\n and self._cursor.props[\"last_index\"] is None\n ):\n # IMPORTANT: This assumes delta types and st method names always\n # match!\n st_method_name = self._cursor.props[\"delta_type\"]\n st_method = getattr(self, st_method_name)\n st_method(data, **kwargs)\n return\n\n data, self._cursor.props[\"last_index\"] = _maybe_melt_data_for_add_rows(\n data, self._cursor.props[\"delta_type\"], self._cursor.props[\"last_index\"]\n )\n\n msg = ForwardMsg_pb2.ForwardMsg()\n msg.metadata.parent_block.container = self._container\n msg.metadata.parent_block.path[:] = self._cursor.path\n msg.metadata.delta_id = self._cursor.index\n\n import streamlit.elements.data_frame_proto as data_frame_proto\n\n data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)\n\n if name:\n msg.delta.add_rows.name = name\n msg.delta.add_rows.has_name = True\n\n _enqueue_message(msg)\n\n return self\n\n\ndef _maybe_melt_data_for_add_rows(data, delta_type, last_index):\n import pandas as pd\n import streamlit.elements.data_frame_proto as data_frame_proto\n\n # For some delta types we have to reshape the data structure\n # otherwise the input data and the actual data used\n # by vega_lite will be different and it will throw an error.\n if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES:\n if not isinstance(data, pd.DataFrame):\n data = type_util.convert_anything_to_df(data)\n\n if type(data.index) is pd.RangeIndex:\n old_step = _get_pandas_index_attr(data, \"step\")\n\n # We have to drop the predefined index\n data = data.reset_index(drop=True)\n\n old_stop = _get_pandas_index_attr(data, \"stop\")\n\n if old_step is None or old_stop is None:\n raise StreamlitAPIException(\n \"'RangeIndex' object has no attribute 'step'\"\n )\n\n start = last_index + old_step\n stop = last_index + old_step + old_stop\n\n data.index = pd.RangeIndex(start=start, stop=stop, step=old_step)\n last_index = stop - 1\n\n index_name = data.index.name\n if index_name is None:\n index_name = \"index\"\n\n data = pd.melt(data.reset_index(), id_vars=[index_name])\n\n return data, last_index\n\n\ndef _clean_text(text):\n return textwrap.dedent(str(text)).strip()\n\n\ndef _value_or_dg(value, dg):\n \"\"\"Return either value, or None, or dg.\n\n This is needed because Widgets have meaningful return values. This is\n unlike other elements, which always return None. Then we internally replace\n that None with a DeltaGenerator instance.\n\n However, sometimes a widget may want to return None, and in this case it\n should not be replaced by a DeltaGenerator. 
So we have a special NoValue\n object that gets replaced by None.\n\n \"\"\"\n if value is NoValue:\n return None\n if value is None:\n return dg\n return value\n\n\ndef _enqueue_message(msg):\n \"\"\"Enqueues a ForwardMsg proto to send to the app.\"\"\"\n ctx = get_report_ctx()\n\n if ctx is None:\n raise NoSessionContext()\n\n ctx.enqueue(msg)\n" ]
[ [ "pandas.RangeIndex" ] ]
bnusss/tensorflow_learning
[ "0db541bc1bf1255d9d548eb1e5256fb397be192e" ]
[ "08_midi_generate/test.py" ]
[ "import tensorflow as tf\nimport numpy as np\n \ndef body(x):\n a = tf.random_uniform(shape=[2, 2], dtype=tf.int32, maxval=100)\n b = tf.constant(np.array([[1, 2], [3, 4]]), dtype=tf.int32)\n c = a + b\n return tf.nn.relu(x + c)\n \ndef condition(x):\n return tf.reduce_sum(x) < 100\n \nx = tf.Variable(tf.constant(0, shape=[2, 2]))\n \nwith tf.Session():\n tf.global_variables_initializer().run()\n result = tf.while_loop(condition, body, [x])\n print(result.eval())\n" ]
[ [ "numpy.array", "tensorflow.nn.relu", "tensorflow.random_uniform", "tensorflow.Session", "tensorflow.while_loop", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.global_variables_initializer" ] ]
Daniel1586/Initiative_recommender_system
[ "28d3be65266cba808fea2df5ed4405fae09cb549" ]
[ "tutorials/chapter_05_ProductNN/ProductNN.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom time import time\nimport tensorflow as tf\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import roc_auc_score\n\n\nclass ProductNN(BaseEstimator, TransformerMixin):\n def __init__(self, feature_size, field_size, embedding_size=8,\n deep_layers=None, deep_init_size=50, dropout_deep=None,\n deep_layer_activation=tf.nn.relu, epoch=10, batch_size=256,\n learning_rate=0.001, optimizer=\"adam\", batch_norm=0,\n batch_norm_decay=0.995, verbose=False, random_seed=2016,\n loss_type=\"logloss\", eval_metric=roc_auc_score,\n greater_is_better=True, use_inner=True):\n assert loss_type in [\"logloss\", \"mse\"],\\\n \"loss_type can be either 'logloss' for classification task or 'mse' for regression task\"\n if deep_layers is None:\n deep_layers = [32, 32]\n if dropout_deep is None:\n dropout_deep = [0.5, 0.5, 0.5]\n\n self.feature_size = feature_size\n self.field_size = field_size\n self.embedding_size = embedding_size\n\n self.deep_layers = deep_layers\n self.deep_init_size = deep_init_size\n self.dropout_dep = dropout_deep\n self.deep_layers_activation = deep_layer_activation\n\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.optimizer_type = optimizer\n\n self.batch_norm = batch_norm\n self.batch_norm_decay = batch_norm_decay\n\n self.verbose = verbose\n self.random_seed = random_seed\n self.loss_type = loss_type\n self.greater_is_better = greater_is_better\n self.train_result, self.valid_result = [], []\n\n self.use_inner = use_inner\n self._init_graph()\n\n def _init_graph(self):\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n\n # input data,模型输入\n self.feat_index = tf.placeholder(tf.int32, shape=[None, None],\n name='feat_index')\n self.feat_value = tf.placeholder(tf.float32, shape=[None, None],\n name='feat_value')\n self.label = tf.placeholder(tf.float32, shape=[None, 1], name='label')\n\n self.dropout_keep_deep = tf.placeholder(tf.float32, shape=[None],\n name='dropout_deep_deep')\n self.train_phase = tf.placeholder(tf.bool, name='train_phase')\n\n # weight initializing,权重初始化\n self.weights = self._initialize_weights()\n\n # model\n self.embeddings = tf.nn.embedding_lookup(self.weights['feature_embeddings'], self.feat_index)\n feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])\n self.embeddings = tf.multiply(self.embeddings, feat_value)\n\n # linear signal\n linear_output = []\n for i in range(self.deep_init_size):\n linear_output.append(tf.reshape(tf.reduce_sum(\n tf.multiply(self.embeddings, self.weights['product-linear'][i]),\n axis=[1, 2]), shape=(-1, 1)))\n\n self.lz = tf.concat(linear_output, axis=1)\n\n # quadratic signal\n quadratic_output = []\n if self.use_inner:\n for i in range(self.deep_init_size):\n theta = tf.multiply(\n self.embeddings, tf.reshape(self.weights['product-quadratic-inner'][i], (1, -1, 1)))\n quadratic_output.append(tf.reshape(\n tf.norm(tf.reduce_sum(theta, axis=1), axis=1), shape=(-1, 1)))\n else:\n embedding_sum = tf.reduce_sum(self.embeddings, axis=1)\n p = tf.matmul(tf.expand_dims(embedding_sum, 2), tf.expand_dims(embedding_sum, 1))\n for i in range(self.deep_init_size):\n theta = tf.multiply(p, tf.expand_dims(\n self.weights['product-quadratic-outer'][i], 0))\n quadratic_output.append(tf.reshape(\n tf.reduce_sum(theta, axis=[1, 2]), shape=(-1, 1)))\n\n self.lp = tf.concat(quadratic_output, axis=1)\n self.y_deep = 
tf.nn.relu(tf.add(tf.add(self.lz, self.lp), self.weights['product-bias']))\n self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[0])\n\n # deep part\n for i in range(0, len(self.deep_layers)):\n self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights[\"layer_%d\" % i]), self.weights[\"bias_%d\" % i])\n self.y_deep = self.deep_layers_activation(self.y_deep)\n self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[i+1])\n\n self.out = tf.add(tf.matmul(self.y_deep, self.weights['output']), self.weights['output_bias'])\n\n # loss,代价函数\n if self.loss_type == \"logloss\":\n self.out = tf.nn.sigmoid(self.out)\n self.loss = tf.losses.log_loss(self.label, self.out)\n elif self.loss_type == \"mse\":\n self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))\n\n # optimizer,优化器选择\n if self.optimizer_type == \"adam\":\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,\n epsilon=1e-8).minimize(self.loss)\n elif self.optimizer_type == \"adagrad\":\n self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,\n initial_accumulator_value=1e-8).minimize(self.loss)\n elif self.optimizer_type == \"gd\":\n self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)\n elif self.optimizer_type == \"momentum\":\n self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate,\n momentum=0.95).minimize(self.loss)\n\n # init\n self.saver = tf.train.Saver()\n init = tf.global_variables_initializer()\n self.sess = tf.Session()\n self.sess.run(init)\n\n # number of params\n total_parameters = 0\n for variable in self.weights.values():\n shape = variable.get_shape()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim.value\n total_parameters += variable_parameters\n if self.verbose > 0:\n print(\"#params: %d\" % total_parameters)\n\n def _initialize_weights(self):\n weights = dict()\n\n # Sparse Features->Dense Embeddings weight initializing\n # one-hot编码后输入到Embedding的权重矩阵初始化\n weights['feature_embeddings'] = tf.Variable(tf.random_normal(\n [self.feature_size, self.embedding_size], 0.0, 0.01), name='feature_embeddings')\n weights['feature_bias'] = tf.Variable(tf.random_normal(\n [self.feature_size, 1], 0.0, 1.0), name='feature_bias')\n\n # Product Layers\n if self.use_inner:\n weights['product-quadratic-inner'] = tf.Variable(tf.random_normal(\n [self.deep_init_size, self.field_size], 0.0, 0.01))\n else:\n weights['product-quadratic-outer'] = tf.Variable(tf.random_normal(\n [self.deep_init_size, self.embedding_size, self.embedding_size], 0.0, 0.01))\n\n weights['product-linear'] = tf.Variable(tf.random_normal(\n [self.deep_init_size, self.field_size, self.embedding_size], 0.0, 0.01))\n weights['product-bias'] = tf.Variable(tf.random_normal([self.deep_init_size, ], 0, 0, 1.0))\n\n # Deep layers weight initializing,Xavier初始化\n num_layer = len(self.deep_layers)\n input_size = self.deep_init_size\n glorot = np.sqrt(2.0/(input_size + self.deep_layers[0])) # var(w)=2/(nin+nout)\n\n weights['layer_0'] = tf.Variable(np.random.normal(\n loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)\n weights['bias_0'] = tf.Variable(np.random.normal(\n loc=0, scale=glorot, size=(1, self.deep_layers[0])), dtype=np.float32)\n\n for i in range(1, num_layer):\n glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))\n weights[\"layer_%d\" % i] = tf.Variable(np.random.normal(\n loc=0, scale=glorot, size=(self.deep_layers[i - 1], 
self.deep_layers[i])),\n dtype=np.float32) # layers[i-1] * layers[i]\n weights[\"bias_%d\" % i] = tf.Variable(np.random.normal(\n loc=0, scale=glorot, size=(1, self.deep_layers[i])),\n dtype=np.float32) # 1 * layer[i]\n\n # final concat projection layer\n glorot = np.sqrt(2.0/(input_size + 1))\n weights['output'] = tf.Variable(np.random.normal(\n loc=0, scale=glorot, size=(self.deep_layers[-1], 1)), dtype=np.float32)\n weights['output_bias'] = tf.Variable(tf.constant(0.01), dtype=np.float32)\n\n return weights\n\n # noinspection PyMethodMayBeStatic\n def get_batch(self, xi, xv, y, batch_size, index):\n start = index * batch_size\n end = (index + 1) * batch_size\n end = end if end < len(y) else len(y)\n return xi[start:end], xv[start:end], [[y_] for y_ in y[start:end]]\n\n # noinspection PyMethodMayBeStatic\n def shuffle_in_unison_scary(self, a, b, c):\n rng_state = np.random.get_state()\n np.random.shuffle(a)\n np.random.set_state(rng_state)\n np.random.shuffle(b)\n np.random.set_state(rng_state)\n np.random.shuffle(c)\n\n def predict(self, xi, xv):\n feed_dict = {self.feat_index: xi,\n self.feat_value: xv,\n self.dropout_keep_deep: [1.0] * len(self.dropout_dep),\n self.train_phase: True}\n out = self.sess.run(self.out, feed_dict=feed_dict)\n\n return out\n\n def evaluate(self, xi, xv, y):\n y = [[y_] for y_ in y]\n feed_dict = {self.feat_index: xi,\n self.feat_value: xv,\n self.label: y,\n self.dropout_keep_deep: [1.0] * len(self.dropout_dep),\n self.train_phase: True}\n loss = self.sess.run([self.loss], feed_dict=feed_dict)\n\n return loss\n\n def fit_on_batch(self, xi, xv, y):\n feed_dict = {self.feat_index: xi,\n self.feat_value: xv,\n self.label: y,\n self.dropout_keep_deep: self.dropout_dep,\n self.train_phase: True}\n loss, opt = self.sess.run([self.loss, self.optimizer], feed_dict=feed_dict)\n return loss\n\n def fit(self, xi_train, xv_train, y_train, xi_valid=None, xv_valid=None,\n y_valid=None, early_stopping=False, refit=False):\n \"\"\"\n :param xi_train: [[ind1_1, ind1_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]\n indi_j is the feature index of feature field j of sample i in the training set\n :param xv_train: [[val1_1, val1_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]\n vali_j is the feature value of feature field j of sample i in the training set\n vali_j can be either binary (1/0, for binary/categorical features)\n :param y_train: label of each sample in the training set\n :param xi_valid: list of list of feature indices of each sample in the validation set\n :param xv_valid: list of list of feature values of each sample in the validation set\n :param y_valid: label of each sample in the validation set\n :param early_stopping: perform early stopping or not\n :param refit: refit the model on the train+valid dataset or not\n :return: None\n \"\"\"\n\n has_valid = xv_valid is not None\n for epoch in range(self.epoch):\n t1 = time()\n # shuffle the dataset,打乱dataset顺序\n self.shuffle_in_unison_scary(xi_train, xv_train, y_train)\n total_batch = int(len(y_train) / self.batch_size)\n # get batch data and fit them,获得batch数据并fit\n for i in range(total_batch):\n xi_batch, xv_batch, y_batch = self.get_batch(xi_train, xv_train,\n y_train, self.batch_size, i)\n self.fit_on_batch(xi_batch, xv_batch, y_batch)\n\n # evaluate training and validation dataset,评价train/valid dataset\n train_result = self.evaluate(xi_train, xv_train, y_train)\n self.train_result.append(train_result[0])\n\n if has_valid:\n valid_result = self.evaluate(xi_valid, xv_valid, y_valid)\n 
self.valid_result.append(valid_result[0])\n if self.verbose > 0 and epoch % self.verbose == 0:\n if has_valid:\n print(\"[%d] train-loss=%.4f, valid-loss=%.4f [%.1f s]\"\n % (epoch + 1, train_result[0], valid_result[0], time() - t1))\n else:\n print(\"[%d] train-loss=%.4f [%.1f s]\"\n % (epoch + 1, train_result[0], time() - t1))\n if has_valid and early_stopping and self.training_termination(self.valid_result):\n break\n\n # fit a few more epoch on train+valid until result reaches the best_train_score\n if has_valid and refit:\n if self.greater_is_better:\n best_valid_score = max(self.valid_result)\n else:\n best_valid_score = min(self.valid_result)\n\n best_epoch = self.valid_result.index(best_valid_score)\n best_train_score = self.train_result[best_epoch]\n xi_train = xi_train + xi_valid\n xv_train = xv_train + xv_valid\n y_train = y_train + y_valid\n for epoch in range(100):\n self.shuffle_in_unison_scary(xi_train, xv_train, y_train)\n total_batch = int(len(y_train) / self.batch_size)\n for i in range(total_batch):\n xi_batch, xv_batch, y_batch = self.get_batch(xi_train, xv_train,\n y_train, self.batch_size, i)\n self.fit_on_batch(xi_batch, xv_batch, y_batch)\n # check the model performance\n train_result = self.evaluate(xi_train, xv_train, y_train)\n ckp1 = abs(train_result - best_train_score) < 0.001\n ckp2 = self.greater_is_better and train_result > best_train_score\n ckp3 = (not self.greater_is_better) and train_result < best_train_score\n if ckp1 or ckp2 or ckp3:\n break\n\n def training_termination(self, valid_result):\n if len(valid_result) > 5:\n if self.greater_is_better:\n if valid_result[-1] < valid_result[-2] < valid_result[-3] < valid_result[-4] < valid_result[-5]:\n return True\n else:\n if valid_result[-1] > valid_result[-2] > valid_result[-3] > valid_result[-4] > valid_result[-5]:\n return True\n return False\n" ]
[ [ "tensorflow.matmul", "tensorflow.train.AdagradOptimizer", "tensorflow.reshape", "tensorflow.losses.log_loss", "tensorflow.nn.embedding_lookup", "tensorflow.global_variables_initializer", "tensorflow.random_normal", "tensorflow.train.GradientDescentOptimizer", "tensorflow.set_random_seed", "numpy.random.normal", "tensorflow.concat", "tensorflow.subtract", "tensorflow.train.Saver", "tensorflow.constant", "numpy.sqrt", "tensorflow.nn.sigmoid", "tensorflow.add", "tensorflow.nn.dropout", "tensorflow.train.AdamOptimizer", "tensorflow.expand_dims", "tensorflow.Session", "numpy.random.shuffle", "tensorflow.placeholder", "tensorflow.reduce_sum", "numpy.random.set_state", "tensorflow.multiply", "tensorflow.Graph", "tensorflow.train.MomentumOptimizer", "numpy.random.get_state" ] ]
javierconcha/hypernets_tools
[ "bf15e343841d31262a402c0ad628e0f5e8c30441" ]
[ "hypernets/reader/spectra.py" ]
[ "from hypernets.reader.spectrum import Spectrum\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\n\n\n\ndef show_interactive_plots(spectra):\n\n # Next Button\n axnext = plt.axes([0.81, 0.05, 0.1, 0.075])\n bnext = Button(axnext, 'Next')\n bnext.on_clicked(spectra.next_spectrum)\n\n # Previous Button\n axprev = plt.axes([0.12, 0.05, 0.1, 0.075])\n bprev = Button(axprev, 'Previous')\n bprev.on_clicked(spectra.prev_spectrum)\n\n plt.show()\n\n\nclass Spectra(object):\n def __init__(self, filename, figure=None, axes=None):\n\n self.figure = figure\n self.axes = axes\n\n self.index = 0\n self.spectra_list = list()\n\n # Open the file and create a list of Spectrum\n with open(filename, 'rb') as fd:\n spectra_file = fd.read()\n\n index = 0 \n while index < len(spectra_file):\n current_spectrum = Spectrum(spectra_file[index:], verbose=True)\n self.spectra_list.append(current_spectrum)\n index += current_spectrum.total\n\n print(f\"{len(self.spectra_list)} spectra readed.\")\n\n self.update()\n\n def next_spectrum(self, event):\n self.index = (self.index + 1) % len(self.spectra_list)\n self.update()\n\n def prev_spectrum(self, event):\n self.index = (self.index - 1) % len(self.spectra_list)\n self.update()\n\n def update(self):\n self.current_spectrum = self.spectra_list[self.index]\n # print(self.current_spectrum)\n\n if self.axes is not None:\n self.axes.clear()\n self.axes.plot(range(len(self.current_spectrum.counts)), \n self.current_spectrum.counts)\n\n\n spec_info = Spectrum.read_spectrum_info(self.current_spectrum.spec_type)\n\n self.axes.set_title(f\"Spectrum {self.index+1}/{len(self.spectra_list)}\\n\"\n f\"{spec_info[0]} --> {spec_info[1]}\")\n\n # self.axes.set_xlabel(\"\")\n self.axes.set_ylabel(\"Raw Counts\")\n\n if self.figure is not None:\n self.figure.canvas.draw()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.widgets.Button", "matplotlib.pyplot.axes" ] ]
hellpanderrr/twint
[ "05e5995dcc81e987e1cbb7b92b7cd707d5d73e23" ]
[ "twint/storage/panda.py" ]
[ "import datetime, pandas as pd, warnings\nfrom time import strftime, localtime\nfrom twint.tweet import Tweet_formats\n\nTweets_df = None\nFollow_df = None\nUser_df = None\n\n_object_blocks = {\n \"tweet\": [],\n \"user\": [],\n \"following\": [],\n \"followers\": []\n}\n\nweekdays = {\n \"Monday\": 1,\n \"Tuesday\": 2,\n \"Wednesday\": 3,\n \"Thursday\": 4,\n \"Friday\": 5,\n \"Saturday\": 6,\n \"Sunday\": 7,\n }\n\n_type = \"\"\n\ndef _concat(df, _type):\n if df is None:\n df = pd.DataFrame(_object_blocks[_type])\n else:\n _df = pd.DataFrame(_object_blocks[_type])\n df = pd.concat([df, _df], sort=True)\n return df\n\ndef _autoget(_type):\n global Tweets_df\n global Follow_df\n global User_df\n\n if _type == \"tweet\":\n Tweets_df = _concat(Tweets_df, _type)\n elif _type == \"followers\" or _type == \"following\":\n Follow_df = _concat(Follow_df, _type)\n elif _type == \"user\":\n User_df = _concat(User_df, _type)\n else:\n error(\"[x] Wrong type of object passed\")\n\n\ndef update(object, config):\n global _type\n\n #try:\n # _type = ((object.__class__.__name__ == \"tweet\")*\"tweet\" +\n # (object.__class__.__name__ == \"user\")*\"user\")\n #except AttributeError:\n # _type = config.Following*\"following\" + config.Followers*\"followers\"\n if object.__class__.__name__ == \"tweet\":\n _type = \"tweet\"\n elif object.__class__.__name__ == \"user\":\n _type = \"user\"\n elif object.__class__.__name__ == \"dict\":\n _type = config.Following*\"following\" + config.Followers*\"followers\"\n\n if _type == \"tweet\":\n Tweet = object\n if 'Naoaianeia a?aiy (ceia)' in Tweet.datetime:\n Tweet.datetime = Tweet.datetime[:19]\n try:\n datetime_ms = datetime.datetime.strptime(Tweet.datetime, Tweet_formats['datetime']).timestamp() * 1000\n except:\n datetime_ms = datetime.datetime.strptime(Tweet.datetime, '%Y-%m-%d %H:%M:%S').timestamp() * 1000\n day = weekdays[strftime(\"%A\", localtime(datetime_ms/1000))]\n dt = f\"{object.datestamp} {object.timestamp}\"\n _data = {\n \"id\": str(Tweet.id),\n \"conversation_id\": Tweet.conversation_id,\n \"created_at\": datetime_ms,\n \"date\": dt,\n \"timezone\": Tweet.timezone,\n \"place\": Tweet.place,\n \"tweet\": Tweet.tweet,\n \"language\": Tweet.lang,\n \"hashtags\": Tweet.hashtags,\n \"cashtags\": Tweet.cashtags,\n \"user_id\": Tweet.user_id,\n \"user_id_str\": Tweet.user_id_str,\n \"username\": Tweet.username,\n \"name\": Tweet.name,\n \"day\": day,\n \"hour\": strftime(\"%H\", localtime(datetime_ms/1000)),\n \"link\": Tweet.link,\n \"urls\": Tweet.urls,\n \"photos\": Tweet.photos,\n \"video\": Tweet.video,\n \"thumbnail\": Tweet.thumbnail,\n \"retweet\": Tweet.retweet,\n \"nlikes\": int(Tweet.likes_count),\n \"nreplies\": int(Tweet.replies_count),\n \"nretweets\": int(Tweet.retweets_count),\n \"quote_url\": Tweet.quote_url,\n \"search\": str(config.Search),\n \"near\": Tweet.near,\n \"geo\": Tweet.geo,\n \"source\": Tweet.source,\n \"user_rt_id\": Tweet.user_rt_id,\n \"user_rt\": Tweet.user_rt,\n \"retweet_id\": Tweet.retweet_id,\n \"reply_to\": Tweet.reply_to,\n \"retweet_date\": Tweet.retweet_date,\n \"translate\": Tweet.translate,\n \"trans_src\": Tweet.trans_src,\n \"trans_dest\": Tweet.trans_dest\n }\n _object_blocks[_type].append(_data)\n elif _type == \"user\":\n user = object\n try:\n background_image = user.background_image\n except:\n background_image = \"\"\n _data = {\n \"id\": user.id,\n \"name\": user.name,\n \"username\": user.username,\n \"bio\": user.bio,\n \"url\": user.url,\n \"join_datetime\": user.join_date + \" \" + 
user.join_time,\n \"join_date\": user.join_date,\n \"join_time\": user.join_time,\n \"tweets\": user.tweets,\n \"location\": user.location,\n \"following\": user.following,\n \"followers\": user.followers,\n \"likes\": user.likes,\n \"media\": user.media_count,\n \"private\": user.is_private,\n \"verified\": user.is_verified,\n \"avatar\": user.avatar,\n \"background_image\": background_image,\n }\n _object_blocks[_type].append(_data)\n elif _type == \"followers\" or _type == \"following\":\n _data = {\n config.Following*\"following\" + config.Followers*\"followers\" :\n {config.Username: object[_type]}\n }\n _object_blocks[_type] = _data\n else:\n print(\"Wrong type of object passed!\")\n\n\ndef clean():\n global Tweets_df\n global Follow_df\n global User_df\n _object_blocks[\"tweet\"].clear()\n _object_blocks[\"following\"].clear()\n _object_blocks[\"followers\"].clear()\n _object_blocks[\"user\"].clear()\n Tweets_df = None\n Follow_df = None\n User_df = None\n\ndef save(_filename, _dataframe, **options):\n if options.get(\"dataname\"):\n _dataname = options.get(\"dataname\")\n else:\n _dataname = \"twint\"\n\n if not options.get(\"type\"):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _store = pd.HDFStore(_filename + \".h5\")\n _store[_dataname] = _dataframe\n _store.close()\n elif options.get(\"type\") == \"Pickle\":\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _dataframe.to_pickle(_filename + \".pkl\")\n else:\n print(\"\"\"Please specify: filename, DataFrame, DataFrame name and type\n (HDF5, default, or Pickle)\"\"\")\n\ndef read(_filename, **options):\n if not options.get(\"dataname\"):\n _dataname = \"twint\"\n else:\n _dataname = options.get(\"dataname\")\n\n if not options.get(\"type\"):\n _store = pd.HDFStore(_filename + \".h5\")\n _df = _store[_dataname]\n return _df\n elif options.get(\"type\") == \"Pickle\":\n _df = pd.read_pickle(_filename + \".pkl\")\n return _df\n else:\n print(\"\"\"Please specify: DataFrame, DataFrame name (twint as default),\n filename and type (HDF5, default, or Pickle\"\"\")\n" ]
[ [ "pandas.HDFStore", "pandas.DataFrame", "pandas.read_pickle", "pandas.concat" ] ]
ydiller/BalancedGroupSoftmax
[ "6fecf9fbb8ed1f54540787188e212ab39cd2b501" ]
[ "tools/test_lvis.py" ]
[ "import argparse\nimport os\nimport os.path as osp\nimport shutil\nimport tempfile\nimport json\nimport pdb\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import get_dist_info, load_checkpoint\n\nfrom mmdet.apis import init_dist\nfrom mmdet.core import lvis_eval, results2json, wrap_fp16_model\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.models import build_detector\nfrom mmdet.core import build_assigner\nfrom utils import filter_logits_by_gt\n\nTEMP_DATASET_SIZE = 5000\n\ndef single_gpu_test(model, data_loader, show=False, cfg=None, index=0, img_meta=None):\n model.eval()\n results = []\n logits_list = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n class_instances = pickle.load(open('train_instances_list.p', 'rb'))\n normalized_classes = np.zeros(1231)\n for i, c in enumerate(class_instances):\n if c:\n normalized_classes[i] = 1/np.sqrt(c)\n for i, data in enumerate(data_loader):\n # if i < TEMP_DATASET_SIZE*index:\n # continue\n if i >= TEMP_DATASET_SIZE*(index+1): # temporary condition for testing\n break\n with torch.no_grad():\n bbox_results, det_bboxes, det_labels, scores = model(return_loss=False, rescale=not show, **data, img_id=i, norm_cls=normalized_classes)\n det_bboxes = det_bboxes.detach().cpu()\n det_labels = det_labels.detach().cpu()\n scores = scores.detach().cpu()\n # save original logits:\n # filename = data['img_meta'][0].data[0][0]['filename'].split('/')[-1] # get the file name, e.g: '000000397133.jpg'\n # with open(f'test_logits/logits_per_img/{filename}.p', 'wb') as outfile:\n # pickle.dump(scores, outfile)\n results.append(bbox_results)\n logits_list.append((det_bboxes, det_labels, scores))\n\n if show:\n model.module.show_result(data, bbox_results)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results, logits_list # return also class. 
logits and labels\n\ndef multi_gpu_test(model, data_loader, tmpdir=None):\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n results = collect_results(results, len(dataset), tmpdir)\n\n return results\n\n\ndef collect_results(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN, ),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n tmpdir = tempfile.mkdtemp()\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return ordered_results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--out', help='output result file')\n parser.add_argument(\n '--json_out',\n help='output result file name without extension',\n type=str)\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],\n help='eval types')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument('--tmpdir', help='tmp dir for writing some results')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--tau', type=float, default=0.0)\n parser.add_argument('--data_index', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\ndef reweight_cls(model, tauuu):\n\n if tauuu == 0:\n return model\n\n model_dict = model.state_dict()\n\n def pnorm(weights, tau):\n normB = torch.norm(weights, 2, 1)\n ws = weights.clone()\n\n for i in range(0, weights.shape[0]):\n ws[i] = ws[i] / torch.pow(normB[i], tau)\n\n return ws\n\n reweight_set = ['bbox_head.fc_cls.weight']\n tau = tauuu\n for k in reweight_set:\n weight = model_dict[k] # ([1231, 1024])\n weight = pnorm(weight, tau)\n model_dict[k].copy_(weight)\n print('Reweight param {:<30} with tau={}'.format(k, tau))\n\n return model\n\n\ndef logits_process(logits):\n \"\"\"\n Get the logits 
as a tuple of softmax logits ,bounding boxes and labels.\n Output: to matrices:\n logits_mat in size (dataset, 300, 1231) - top 300 logits for each image.\n bboxes_mat in size (dataset, 300, 4) - top 300 bboxes for each image.\n labels_mat in size (dataset, 300, 1) - corresponding labels. 300 for each image.\n \"\"\"\n # all_bboxes_logits = []\n # for image in logits:\n # image_bboxes_logits = []\n # for i, bbox in enumerate(image[0]):\n # bboxes_logits_dict = dict() # image[0] = tensor including 300 bboxes\n # index = int(bbox[5].item()) # bbox[6] specifies the relevant line in the logits matrix\n # logits_vector = image[1][index]\n # bboxes_logits_dict['bbox'] = bbox[:4]\n # bboxes_logits_dict['score'] = bbox[4]\n # bboxes_logits_dict['logits'] = logits_vector\n # image_bboxes_logits.append(bboxes_logits_dict)\n # all_bboxes_logits.append(image_bboxes_logits)\n\n\n # for idx in range(len(dataset)):\n # img_id = dataset.img_ids[idx]\n\n logits_mat = np.zeros((TEMP_DATASET_SIZE, 300, 1231))\n bboxes_mat = np.zeros((TEMP_DATASET_SIZE, 300, 4))\n labels_mat = np.zeros((TEMP_DATASET_SIZE, 300))\n proposal_num = np.zeros((TEMP_DATASET_SIZE, 300, 1))\n for i, image in enumerate(logits):\n for j, bbox in enumerate(image[0]): # image[0] = tensor including 300 bboxes\n # bboxes_logits_dict = dict()\n index = int(bbox[5].item()) # bbox[5] specifies the relevant line in the logits matrix\n logits_vector = image[2][index] # image[2] includes the scores\n # bbox_arr = np.array(bbox[:4])\n bboxes_mat[i][j][:] = bbox[:4]\n logits_mat[i][j] = np.array(logits_vector)\n # added this to compute proposal numbers\n proposal_num[i][j] = bbox[-1]\n labels_mat[i] = image[1] # image[1] includes the labels\n\n return bboxes_mat, labels_mat, logits_mat, proposal_num\n\n\ndef main():\n args = parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.data_index % 2)\n assert args.out or args.show or args.json_out, \\\n ('Please specify at least one operation (save or show the results) '\n 'with the argument \"--out\" or \"--show\" or \"--json_out\"')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n if args.json_out is not None and args.json_out.endswith('.json'):\n args.json_out = args.json_out[:-5]\n\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test) # original - test | changed to test_with_train_data\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=0, # cfg.data.workers_per_gpu\n dist=distributed,\n shuffle=False)\n\n # save gt boxes and labels for learning nms\n # for i, data in enumerate(data_loader):\n # img_id = dataset.img_infos[i]['id']\n # gt = dataset.get_ann_info(i)\n # gt_boxes = gt['bboxes']\n # gt_labels = gt['labels']\n # filename = f'test_logits/learning_nms_data/{i}/gt_boxes.p' # file name for new directory\n # os.makedirs(os.path.dirname(filename), exist_ok=True)\n # with open(f'test_logits/learning_nms_data/{i}/gt_boxes.p', 'wb') as outfile: # possible to include img_id\n # 
pickle.dump(gt_boxes, outfile)\n # with open(f'test_logits/learning_nms_data/{i}/gt_labels.p', 'wb') as outfile:\n # pickle.dump(gt_boxes, outfile)\n #\n # # filename = dataset.img_infos[i]['filename']\n # # with open(f'test_gt/{filename}.p', 'wb') as outfile:\n # # pickle.dump(gt_labels, outfile)\n\n # save gt instances per class\n # instances_list = np.zeros(1231)\n # for i, data in enumerate(data_loader): # original script in test_lvis_tnorm.py\n # gt = dataset.get_ann_info(i)\n # print(i)\n # for label in gt['labels']:\n # instances_list[label] += 1\n # with open('train_instances_list.p', 'wb') as outfile:\n # pickle.dump(instances_list, outfile)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n # old versions did not save class info in checkpoints, this walkaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n model = reweight_cls(model, args.tau)\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs, logits = single_gpu_test(model, data_loader, args.show, cfg, args.data_index)\n else:\n model = MMDistributedDataParallel(model.cuda())\n outputs = multi_gpu_test(model, data_loader, args.tmpdir)\n\n # save outputs as csv:\n # pd.DataFrame(outputs).to_csv(\"original_outputs_full.csv\")\n # preprocess logits and save them on json file\n # otp = np.asarray(outputs) # temp\n # df = pd.DataFrame(otp)\n # df.to_csv('otp.csv', index=False)\n\n bboxes_mat, labels_mat, logits_mat, proposal_num = logits_process(logits)\n\n # save labels, boxes and logits\n # with open('test_logits/dragon_test_bboxes_mat.p', 'wb') as outfile:\n # pickle.dump(bboxes_mat, outfile)\n # with open('test_logits/dragon_labels_mat.p', 'wb') as outfile:\n # pickle.dump(labels_mat, outfile)\n # with open('logits_mat1.p', 'wb') as outfile:\n # pickle.dump(logits_mat[:1000], outfile)\n # with open('logits_mat2.p', 'wb') as outfile:\n # pickle.dump(logits_mat[1000:2000], outfile)\n # with open('logits_mat3.p', 'wb') as outfile:\n # pickle.dump(logits_mat[2000:3000], outfile)\n # with open('logits_mat4.p', 'wb') as outfile:\n # pickle.dump(logits_mat[3000:4000], outfile)\n # with open('logits_mat5.p', 'wb') as outfile:\n # pickle.dump(logits_mat[4000:], outfile)\n\n # filter detections by iou with gt (for dragon training)\n gt_list = []\n results_per_image = []\n for i, data in enumerate(data_loader): # original script in test_lvis_tnorm.py\n # if i < TEMP_DATASET_SIZE*args.data_index:\n # continue\n if i >= TEMP_DATASET_SIZE: # temporary condition for testing\n break\n print(i)\n img_id = dataset.img_infos[i]['id']\n gt = dataset.get_ann_info(i)\n gt_dict = dict()\n gt_dict['id'] = img_id\n gt_dict['bboxes'] = gt['bboxes']\n gt_dict['labels'] = gt['labels']\n gt_list.append(gt_dict)\n # filter logits according to equivalent ground truth.\n # after filtering, for each image we get a list in length of classes and detections belongs to this class.\n results = filter_logits_by_gt(bboxes_mat[i], logits_mat[i], gt_list[i], proposal_num[i], i)\n results_per_image.append(results)\n with open(f'dragon_bboxes_logits_map24.p', 'wb') as outfile:\n pickle.dump(results_per_image, outfile)\n print('saved')\n\n # evaluation:\n rank, _ = get_dist_info()\n if args.out and 
rank == 0:\n print('\\nwriting results to {}'.format(args.out))\n mmcv.dump(outputs, args.out)\n eval_types = args.eval\n if eval_types:\n print('Starting evaluate {}'.format(' and '.join(eval_types)))\n if eval_types == ['proposal_fast']:\n result_file = args.out\n lvis_eval(result_file, eval_types, dataset.lvis)\n else:\n if not isinstance(outputs[0], dict):\n result_files = results2json(dataset, outputs, args.out, args.data_index)\n lvis_eval(result_files, eval_types, dataset.lvis, max_dets=300)\n else:\n for name in outputs[0]:\n print('\\nEvaluating {}'.format(name))\n outputs_ = [out[name] for out in outputs]\n result_file = args.out + '.{}'.format(name)\n result_files = results2json(dataset, outputs_,\n result_file)\n lvis_eval(result_files, eval_types, dataset.lvis)\n\n\n\n # Save predictions in the COCO json format\n if args.json_out and rank == 0:\n if not isinstance(outputs[0], dict):\n results2json(dataset, outputs, args.json_out)\n else:\n for name in outputs[0]:\n outputs_ = [out[name] for out in outputs]\n result_file = args.json_out + '.{}'.format(name)\n results2json(dataset, outputs_, result_file)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.zeros", "torch.norm", "torch.no_grad", "torch.pow", "torch.full", "numpy.sqrt", "torch.distributed.barrier", "torch.distributed.broadcast" ] ]
asizemore/multilayer_network_examples
[ "9c3fe3cd0320145d7d9695ea8de19dc064b485b5" ]
[ "mx_viz.py" ]
[ "# Functions for visualization\nimport numpy as np\nimport networkx as nx\nimport multinetx as mx\nfrom jinja2 import Environment, FileSystemLoader, Template\nimport json\nfrom networkx.readwrite import json_graph\n\n\ndef write_mx_to_json(filename, mg, nNodes, pos, nLayers, nodes_to_remove = []):\n \n # filename the complete name of the output file (data/slide_x.json)\n # mx the multilayer network as a multinetx object\n # nNodes the number of nodes in the first layer\n # pos a dictionary of node coordinates\n # nLayers the number of layers in the second aspect.\n # nodes_to_remove is a list of nodes that should not exist in each layer. Default = []\n \n\n \n \n # From the sparse adj, make a networkx graph and add node attributes\n G1 = nx.from_numpy_array(mx.adjacency_matrix(mg,weight='weight').todense())\n\n # Remove nodes from G\n G1.remove_nodes_from(nodes_to_remove)\n\n # Recreate the graph G to make the rest work nicely.\n\n G = nx.from_numpy_array(nx.adjacency_matrix(G1).todense())\n\n\n # Create dictionaries pretending like all nodes exist\n scalefact = 20\n L2_classes = np.arange(nLayers)\n L2_array_original = np.array([])\n z_shift = 2\n z_array_original = np.array([])\n x_orig = np.array([])\n y_orig = np.array([])\n L1_orig = np.array([])\n for level in L2_classes:\n L2_array_original = np.concatenate((L2_array_original, np.array([float(level) for i in np.arange(nNodes)])))\n z_array_original = np.concatenate((z_array_original, np.array([float(level*z_shift) for i in np.arange(nNodes)])))\n x_orig = np.concatenate((x_orig, [pos[key][0]+scalefact for key in pos]))\n y_orig = np.concatenate((y_orig, [pos[key][1]+scalefact for key in pos]))\n L1_orig = np.concatenate((L1_orig, [i for i in np.arange(nNodes)]))\n\n # Need to delete nodes from our attribute dictionaries, too\n L2_array = np.delete(L2_array_original, nodes_to_remove, 0)\n z_array = np.delete(z_array_original, nodes_to_remove, 0)\n x_array = np.delete(x_orig, nodes_to_remove, 0)\n y_array = np.delete(y_orig, nodes_to_remove, 0)\n L1_array = np.delete(L1_orig, nodes_to_remove, 0)\n\n ## Each node will get attributes L1=node id, L2=slice number, x position, y position, and name/id\n\n id_dict = {i:(\"id\"+str(i)) for i in np.arange(nNodes*nLayers)}\n x_dict = {}\n y_dict = {}\n L2_dict = {i:l2 for i,l2 in enumerate(L2_array)}\n z_dict = {i:z_val for i,z_val in enumerate(z_array)}\n x_dict = {i:x_val for i,x_val in enumerate(x_array)}\n y_dict = {i:y_val for i,y_val in enumerate(y_array)}\n L1_dict = {i:L1_val for i,L1_val in enumerate(L1_array)}\n\n\n nx.set_node_attributes(G, id_dict, name = \"name\")\n nx.set_node_attributes(G, x_dict, name = \"x\")\n nx.set_node_attributes(G, y_dict, name = \"y\")\n nx.set_node_attributes(G, z_dict, name = \"z\")\n nx.set_node_attributes(G, L1_dict, name= \"L1\")\n nx.set_node_attributes(G, L2_dict, name= \"L2\")\n\n\n G_json = json_graph.node_link_data(G)\n \n # Write for visualization function\n G_json_viz = json.dumps(G_json, indent = 4) \n \n # To save as a .json file\n with open(filename, 'w') as fp:\n json.dump(G_json, fp)\n\n print(f\"done writing mx to {filename}\")\n \n return G_json_viz\n\n\n\n\n\n#Finished defining functions\nprint(\"finished defining functions\")\n\n\ndef visualize(\n mxgraph,\n theme=\"dark\",\n path_html=\"visualization_output.html\",\n title=\"MX viz\",\n save_file=True,\n ):\n \n\n # Find the module absolute path and locate templates\n# module_root = os.path.join(os.path.dirname('./'), \"templates\")\n module_root = \"./\"\n env = 
Environment(loader=FileSystemLoader(module_root))\n\n \n # Read in the D3 save pages code and include in the exported html\n d3_save_svg_path = \"./d3-save-svg-gh-pages/assets/d3-save-svg.min.js\"\n with open(d3_save_svg_path,'r') as f:\n d3_save_svg = f.readlines()\n\n \n if theme==\"dark\":\n \n js_path = './javascript/mx_viz.js'\n with open(js_path, \"r\") as f:\n js_text = f.read()\n \n css_path = './style/style.css'\n with open(css_path, \"r\") as f:\n css_text = f.read()\n \n # Jinja\n template = env.get_template(\"./templates/mx_viz.html\").render(\n title=title,\n js_text=js_text,\n css_text=css_text,\n mxgraph=mxgraph,\n d3_save_svg=d3_save_svg[0]\n )\n \n elif theme == \"light\":\n \n js_path = './javascript/mx_vizlighttheme.js'\n with open(js_path, \"r\") as f:\n js_text = f.read()\n \n css_path = './style/style_lighttheme.css'\n with open(css_path, \"r\") as f:\n css_text = f.read()\n \n # Jinja\n template = env.get_template(\"./templates/mx_viz_lighttheme.html\").render(\n title=title,\n js_text=js_text,\n css_text=css_text,\n mxgraph=mxgraph,\n d3_save_svg=d3_save_svg[0]\n )\n \n\n \n\n if save_file:\n with open(path_html, \"wb\") as outfile:\n print(\"Wrote visualization to: %s\" % (path_html))\n outfile.write(template.encode(\"utf-8\"))\n\n return template\n\ndef visualize_timeseries(\n mxgraph,\n path_html=\"visualization_timeseries_output.html\",\n title=\"MX viz\",\n save_file=True,\n ):\n \n\n # Find the module absolute path and locate templates\n# module_root = os.path.join(os.path.dirname('./'), \"templates\")\n module_root = \"./\"\n env = Environment(loader=FileSystemLoader(module_root))\n \n # Read in the D3 save pages code and include in the exported html\n d3_save_svg_path = \"./d3-save-svg-gh-pages/assets/d3-save-svg.min.js\"\n with open(d3_save_svg_path,'r') as f:\n d3_save_svg = f.readlines()\n\n\n # Find the absolute module path and the static files\n# js_path = os.path.join(os.path.dirname(__file__), \"static\", \"kmapper.js\")\n js_path = './javascript/mx_viz_timeseries.js'\n with open(js_path, \"r\") as f:\n js_text = f.read()\n\n\n css_path = './style/style_timeseries.css'\n with open(css_path, \"r\") as f:\n css_text = f.read()\n\n # Jinja\n template = env.get_template(\"./templates/mx_viz_timeseries.html\").render(\n title=title,\n js_text=js_text,\n css_text=css_text,\n mxgraph=mxgraph,\n d3_save_svg=d3_save_svg[0]\n )\n\n\n\n if save_file:\n with open(path_html, \"wb\") as outfile:\n print(\"Wrote visualization to: %s\" % (path_html))\n outfile.write(template.encode(\"utf-8\"))\n\n return template\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.arange", "numpy.delete" ] ]
rabaniten/qiskit-terra
[ "68da37be74d133faef530c42d1fd11d696a80c12" ]
[ "qiskit/tools/visualization/_matplotlib.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name,anomalous-backslash-in-string,missing-docstring\n\n\"\"\"mpl circuit visualization backend.\"\"\"\n\nimport collections\nimport fractions\nimport itertools\nimport json\nimport logging\nimport math\n\nimport numpy as np\n\ntry:\n from matplotlib import patches\n from matplotlib import pyplot as plt\n\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\nfrom qiskit.tools.visualization import exceptions\nfrom qiskit.tools.visualization import _qcstyle\n\nlogger = logging.getLogger(__name__)\n\nRegister = collections.namedtuple('Register', 'reg index')\n\nWID = 0.65\nHIG = 0.65\nDEFAULT_SCALE = 4.3\nPORDER_GATE = 5\nPORDER_LINE = 2\nPORDER_GRAY = 3\nPORDER_TEXT = 6\nPORDER_SUBP = 4\n\n\nclass Anchor:\n def __init__(self, reg_num, yind, fold):\n self.__yind = yind\n self.__fold = fold\n self.__reg_num = reg_num\n self.__gate_placed = []\n\n def plot_coord(self, index, gate_width):\n h_pos = index % self.__fold + 1\n # check folding\n if self.__fold > 0:\n if h_pos + (gate_width - 1) > self.__fold:\n index += self.__fold - (h_pos - 1)\n x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)\n y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)\n else:\n x_pos = index + 1 + 0.5 * (gate_width - 1)\n y_pos = self.__yind\n\n return x_pos, y_pos\n\n def is_locatable(self, index, gate_width):\n hold = [index + i for i in range(gate_width)]\n for p in hold:\n if p in self.__gate_placed:\n return False\n return True\n\n def set_index(self, index, gate_width):\n h_pos = index % self.__fold + 1\n if h_pos + (gate_width - 1) > self.__fold:\n _index = index + self.__fold - (h_pos - 1)\n else:\n _index = index\n for ii in range(gate_width):\n if _index + ii not in self.__gate_placed:\n self.__gate_placed.append(_index + ii)\n self.__gate_placed.sort()\n\n def get_index(self):\n if self.__gate_placed:\n return self.__gate_placed[-1] + 1\n return 0\n\n\nclass MatplotlibDrawer:\n def __init__(self, qregs, cregs, ops,\n scale=1.0, style=None, plot_barriers=True,\n reverse_bits=False):\n\n if not HAS_MATPLOTLIB:\n raise ImportError('The class MatplotlibDrawer needs matplotlib. 
'\n 'Run \"pip install matplotlib\" before.')\n\n self._ast = None\n self._scale = DEFAULT_SCALE * scale\n self._creg = []\n self._qreg = []\n self._registers(cregs, qregs)\n self._ops = ops\n\n self._qreg_dict = collections.OrderedDict()\n self._creg_dict = collections.OrderedDict()\n self._cond = {\n 'n_lines': 0,\n 'xmax': 0,\n 'ymax': 0,\n }\n\n self._style = _qcstyle.QCStyle()\n self.plot_barriers = plot_barriers\n self.reverse_bits = reverse_bits\n if style:\n if isinstance(style, dict):\n self._style.set_style(style)\n elif isinstance(style, str):\n with open(style, 'r') as infile:\n dic = json.load(infile)\n self._style.set_style(dic)\n\n self.figure = plt.figure()\n self.figure.patch.set_facecolor(color=self._style.bg)\n self.ax = self.figure.add_subplot(111)\n self.ax.axis('off')\n self.ax.set_aspect('equal')\n self.ax.tick_params(labelbottom=False, labeltop=False,\n labelleft=False, labelright=False)\n\n def _registers(self, creg, qreg):\n self._creg = []\n for r in creg:\n self._creg.append(Register(reg=r[0], index=r[1]))\n self._qreg = []\n for r in qreg:\n self._qreg.append(Register(reg=r[0], index=r[1]))\n\n @property\n def ast(self):\n return self._ast\n\n def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):\n xpos, ypos = xy\n\n if wide:\n wid = WID * 2.8\n else:\n wid = WID\n if fc:\n _fc = fc\n elif text:\n _fc = self._style.dispcol[text]\n else:\n _fc = self._style.gc\n\n box = patches.Rectangle(\n xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,\n fc=_fc, ec=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n\n if text:\n disp_text = \"${}$\".format(self._style.disptex[text])\n if subtext:\n self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',\n va='center', fontsize=self._style.fs,\n color=self._style.gt, clip_on=True,\n zorder=PORDER_TEXT)\n self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',\n va='center', fontsize=self._style.sfs,\n color=self._style.sc, clip_on=True,\n zorder=PORDER_TEXT)\n else:\n self.ax.text(xpos, ypos, disp_text, ha='center', va='center',\n fontsize=self._style.fs,\n color=self._style.gt,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _subtext(self, xy, text):\n xpos, ypos = xy\n\n self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',\n fontsize=self._style.sfs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _line(self, xy0, xy1, lc=None, ls=None):\n x0, y0 = xy0\n x1, y1 = xy1\n if lc is None:\n linecolor = self._style.lc\n else:\n linecolor = lc\n if ls is None:\n linestyle = 'solid'\n else:\n linestyle = ls\n if linestyle == 'doublet':\n theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))\n dx = 0.05 * WID * np.cos(theta)\n dy = 0.05 * WID * np.sin(theta)\n self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],\n color=linecolor,\n linewidth=1.0,\n linestyle='solid',\n zorder=PORDER_LINE)\n self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],\n color=linecolor,\n linewidth=1.0,\n linestyle='solid',\n zorder=PORDER_LINE)\n else:\n self.ax.plot([x0, x1], [y0, y1],\n color=linecolor,\n linewidth=1.0,\n linestyle=linestyle,\n zorder=PORDER_LINE)\n\n def _measure(self, qxy, cxy, cid):\n qx, qy = qxy\n cx, cy = cxy\n\n self._gate(qxy, fc=self._style.dispcol['meas'])\n # add measure symbol\n arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,\n height=HIG * 0.7, theta1=0, theta2=180, fill=False,\n ec=self._style.lc, linewidth=1.5,\n zorder=PORDER_GATE)\n self.ax.add_patch(arc)\n self.ax.plot([qx, qx + 0.35 * WID],\n [qy - 0.15 * HIG, qy 
+ 0.20 * HIG],\n color=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)\n # arrow\n self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,\n ls=self._style.cline)\n arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),\n (cx + 0.20 * WID, cy + 0.35 * WID),\n (cx, cy)),\n fc=self._style.cc,\n ec=None)\n self.ax.add_artist(arrowhead)\n # target\n if self._style.bundle:\n self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',\n fontsize=0.8 * self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n\n def _conds(self, xy, istrue=False):\n xpos, ypos = xy\n\n if istrue:\n _fc = self._style.lc\n else:\n _fc = self._style.gc\n\n box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,\n fc=_fc, ec=self._style.lc,\n linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n\n def _ctrl_qubit(self, xy):\n xpos, ypos = xy\n\n box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,\n fc=self._style.lc, ec=self._style.lc,\n linewidth=1.5, zorder=PORDER_GATE)\n self.ax.add_patch(box)\n\n def _tgt_qubit(self, xy):\n xpos, ypos = xy\n\n box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,\n fc=self._style.dispcol['target'],\n ec=self._style.lc, linewidth=1.5,\n zorder=PORDER_GATE)\n self.ax.add_patch(box)\n # add '+' symbol\n self.ax.plot([xpos, xpos], [ypos - 0.35 * HIG, ypos + 0.35 * HIG],\n color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)\n self.ax.plot([xpos - 0.35 * HIG, xpos + 0.35 * HIG], [ypos, ypos],\n color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)\n\n def _swap(self, xy):\n xpos, ypos = xy\n\n self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],\n [ypos - 0.20 * WID, ypos + 0.20 * WID],\n color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)\n self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],\n [ypos + 0.20 * WID, ypos - 0.20 * WID],\n color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)\n\n def _barrier(self, config, anc):\n xys = config['coord']\n group = config['group']\n y_reg = []\n for qreg in self._qreg_dict.values():\n if qreg['group'] in group:\n y_reg.append(qreg['y'])\n x0 = xys[0][0]\n\n box_y0 = min(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) - 0.5\n box_y1 = max(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) + 0.5\n box = patches.Rectangle(xy=(x0 - 0.3 * WID, box_y0),\n width=0.6 * WID, height=box_y1 - box_y0,\n fc=self._style.bc, ec=None, alpha=0.6,\n linewidth=1.5, zorder=PORDER_GRAY)\n self.ax.add_patch(box)\n for xy in xys:\n xpos, ypos = xy\n self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],\n linewidth=1, linestyle=\"dashed\",\n color=self._style.lc,\n zorder=PORDER_TEXT)\n\n def _linefeed_mark(self, xy):\n xpos, ypos = xy\n\n self.ax.plot([xpos - .1, xpos - .1],\n [ypos, ypos - self._cond['n_lines'] + 1],\n color=self._style.lc, zorder=PORDER_LINE)\n self.ax.plot([xpos + .1, xpos + .1],\n [ypos, ypos - self._cond['n_lines'] + 1],\n color=self._style.lc, zorder=PORDER_LINE)\n\n def draw(self, filename=None, verbose=False):\n self._draw_regs()\n self._draw_ops(verbose)\n _xl = - self._style.margin[0]\n _xr = self._cond['xmax'] + self._style.margin[1]\n _yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5\n _yt = self._style.margin[3] + 0.5\n self.ax.set_xlim(_xl, _xr)\n self.ax.set_ylim(_yb, _yt)\n # update figure size\n fig_w = _xr - _xl\n fig_h = _yt - _yb\n if self._style.figwidth < 0.0:\n self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID\n self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * 
fig_h / fig_w)\n if filename:\n self.figure.savefig(filename, dpi=self._style.dpi,\n bbox_inches='tight')\n plt.close(self.figure)\n return self.figure\n\n def _draw_regs(self):\n # quantum register\n for ii, reg in enumerate(self._qreg):\n if len(self._qreg) > 1:\n label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)\n else:\n label = '${}$'.format(reg.reg.name)\n\n pos = -ii\n self._qreg_dict[ii] = {\n 'y': pos,\n 'label': label,\n 'index': reg.index,\n 'group': reg.reg\n }\n self._cond['n_lines'] += 1\n # classical register\n if self._creg:\n n_creg = self._creg.copy()\n n_creg.pop(0)\n idx = 0\n y_off = -len(self._qreg)\n for ii, (reg, nreg) in enumerate(itertools.zip_longest(\n self._creg, n_creg)):\n pos = y_off - idx\n if self._style.bundle:\n label = '${}$'.format(reg.reg.name)\n self._creg_dict[ii] = {\n 'y': pos,\n 'label': label,\n 'index': reg.index,\n 'group': reg.reg\n }\n if not (not nreg or reg.reg != nreg.reg):\n continue\n else:\n label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)\n self._creg_dict[ii] = {\n 'y': pos,\n 'label': label,\n 'index': reg.index,\n 'group': reg.reg\n }\n\n self._cond['n_lines'] += 1\n idx += 1\n\n def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):\n # quantum register\n for qreg in self._qreg_dict.values():\n if n_fold == 0:\n label = qreg['label'] + ' : $\\\\left|0\\\\right\\\\rangle$'\n else:\n label = qreg['label']\n y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)\n self.ax.text(-0.5, y, label, ha='right', va='center',\n fontsize=self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n self._line([0, y], [self._cond['xmax'], y])\n # classical register\n this_creg_dict = {}\n for creg in self._creg_dict.values():\n if n_fold == 0:\n label = creg['label'] + ' : 0 '\n else:\n label = creg['label']\n y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)\n if y not in this_creg_dict.keys():\n this_creg_dict[y] = {'val': 1, 'label': label}\n else:\n this_creg_dict[y]['val'] += 1\n for y, this_creg in this_creg_dict.items():\n # bundle\n if this_creg['val'] > 1:\n self.ax.plot([.6, .7], [y - .1, y + .1],\n color=self._style.cc,\n zorder=PORDER_LINE)\n self.ax.text(0.5, y + .1, str(this_creg['val']), ha='left',\n va='bottom',\n fontsize=0.8 * self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n self.ax.text(-0.5, y, this_creg['label'], ha='right', va='center',\n fontsize=self._style.fs,\n color=self._style.tc,\n clip_on=True,\n zorder=PORDER_TEXT)\n self._line([0, y], [self._cond['xmax'], y], lc=self._style.cc,\n ls=self._style.cline)\n\n # lf line\n if feedline_r:\n self._linefeed_mark((self._style.fold + 1 - 0.1,\n - n_fold * (self._cond['n_lines'] + 1)))\n if feedline_l:\n self._linefeed_mark((0.1,\n - n_fold * (self._cond['n_lines'] + 1)))\n\n def _draw_ops(self, verbose=False):\n _wide_gate = 'u2 u3 cu2 cu3'.split()\n _barriers = {'coord': [], 'group': []}\n next_ops = self._ops.copy()\n if next_ops:\n next_ops.pop(0)\n this_anc = 0\n\n #\n # generate coordinate manager\n #\n q_anchors = {}\n for key, qreg in self._qreg_dict.items():\n q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],\n yind=qreg['y'],\n fold=self._style.fold)\n c_anchors = {}\n for key, creg in self._creg_dict.items():\n c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],\n yind=creg['y'],\n fold=self._style.fold)\n #\n # draw gates\n #\n prev_width = 0\n for layer_no, layer in enumerate(self._ops):\n\n layer_width = 1\n\n for op in layer:\n if op['name'] in _wide_gate:\n layer_width = 2\n\n 
for op in layer:\n\n _iswide = op['name'] in _wide_gate\n # get qreg index\n if 'qargs' in op.keys():\n q_idxs = []\n for qarg in op['qargs']:\n for index, reg in self._qreg_dict.items():\n if (reg['group'] == qarg[0] and\n reg['index'] == qarg[1]):\n q_idxs.append(index)\n break\n else:\n q_idxs = []\n # get creg index\n if 'cargs' in op.keys():\n c_idxs = []\n for carg in op['cargs']:\n for index, reg in self._creg_dict.items():\n if (reg['group'] == carg[0] and\n reg['index'] == carg[1]):\n c_idxs.append(index)\n break\n else:\n c_idxs = []\n\n this_anc = layer_no + prev_width\n\n occupied = q_idxs\n q_list = [ii for ii in range(min(occupied),\n max(occupied) + 1)]\n locs = [q_anchors[jj].is_locatable(\n this_anc, layer_width) for jj in q_list]\n if all(locs):\n for ii in q_list:\n if op['name'] in ['barrier', 'snapshot', 'load', 'save', 'noise'] \\\n and not self.plot_barriers:\n q_anchors[ii].set_index(this_anc - 1, layer_width)\n else:\n q_anchors[ii].set_index(this_anc, layer_width)\n\n # qreg coordinate\n q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width) for ii in q_idxs]\n # creg coordinate\n c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for ii in c_idxs]\n # bottom and top point of qreg\n qreg_b = min(q_xy, key=lambda xy: xy[1])\n qreg_t = max(q_xy, key=lambda xy: xy[1])\n\n if verbose:\n print(op)\n\n if 'op' in op.keys() and hasattr(op['op'], 'param'):\n param = self.param_parse(op['op'].params, self._style.pimode)\n else:\n param = None\n # conditional gate\n if 'condition' in op.keys() and op['condition']:\n c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for\n ii in self._creg_dict]\n mask = 0\n for index, cbit in enumerate(self._creg):\n if cbit.reg == op['condition'][0]:\n mask |= (1 << index)\n val = op['condition'][1]\n # cbit list to consider\n fmt_c = '{{:0{}b}}'.format(len(c_xy))\n cmask = list(fmt_c.format(mask))[::-1]\n # value\n fmt_v = '{{:0{}b}}'.format(cmask.count('1'))\n vlist = list(fmt_v.format(val))[::-1]\n # plot conditionals\n v_ind = 0\n xy_plot = []\n for xy, m in zip(c_xy, cmask):\n if m == '1':\n if xy not in xy_plot:\n if vlist[v_ind] == '1' or self._style.bundle:\n self._conds(xy, istrue=True)\n else:\n self._conds(xy, istrue=False)\n xy_plot.append(xy)\n v_ind += 1\n creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]\n self._subtext(creg_b, hex(val))\n self._line(qreg_t, creg_b, lc=self._style.cc,\n ls=self._style.cline)\n #\n # draw special gates\n #\n if op['name'] == 'measure':\n vv = self._creg_dict[c_idxs[0]]['index']\n self._measure(q_xy[0], c_xy[0], vv)\n elif op['name'] in ['barrier', 'snapshot', 'load', 'save',\n 'noise']:\n _barriers = {'coord': [], 'group': []}\n for index, qbit in enumerate(q_idxs):\n q_group = self._qreg_dict[qbit]['group']\n\n if q_group not in _barriers['group']:\n _barriers['group'].append(q_group)\n _barriers['coord'].append(q_xy[index])\n if self.plot_barriers:\n self._barrier(_barriers, this_anc)\n #\n # draw single qubit gates\n #\n elif len(q_xy) == 1:\n disp = op['name']\n if param:\n self._gate(q_xy[0], wide=_iswide, text=disp,\n subtext='{}'.format(param))\n else:\n self._gate(q_xy[0], wide=_iswide, text=disp)\n #\n # draw multi-qubit gates (n=2)\n #\n elif len(q_xy) == 2:\n # cx\n if op['name'] in ['cx']:\n self._ctrl_qubit(q_xy[0])\n self._tgt_qubit(q_xy[1])\n # cz for latexmode\n elif op['name'] == 'cz':\n if self._style.latexmode:\n self._ctrl_qubit(q_xy[0])\n self._ctrl_qubit(q_xy[1])\n else:\n disp = op['name'].replace('c', '')\n self._ctrl_qubit(q_xy[0])\n self._gate(q_xy[1], 
wide=_iswide, text=disp)\n # control gate\n elif op['name'] in ['cy', 'ch', 'cu3', 'crz']:\n disp = op['name'].replace('c', '')\n self._ctrl_qubit(q_xy[0])\n if param:\n self._gate(q_xy[1], wide=_iswide, text=disp,\n subtext='{}'.format(param))\n else:\n self._gate(q_xy[1], wide=_iswide, text=disp)\n # cu1 for latexmode\n elif op['name'] in ['cu1']:\n disp = op['name'].replace('c', '')\n self._ctrl_qubit(q_xy[0])\n if self._style.latexmode:\n self._ctrl_qubit(q_xy[1])\n self._subtext(qreg_b, param)\n else:\n self._gate(q_xy[1], wide=_iswide, text=disp,\n subtext='{}'.format(param))\n # swap gate\n elif op['name'] == 'swap':\n self._swap(q_xy[0])\n self._swap(q_xy[1])\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t)\n #\n # draw multi-qubit gates (n=3)\n #\n elif len(q_xy) == 3:\n # cswap gate\n if op['name'] == 'cswap':\n self._ctrl_qubit(q_xy[0])\n self._swap(q_xy[1])\n self._swap(q_xy[2])\n # ccx gate\n elif op['name'] == 'ccx':\n self._ctrl_qubit(q_xy[0])\n self._ctrl_qubit(q_xy[1])\n self._tgt_qubit(q_xy[2])\n # add qubit-qubit wiring\n self._line(qreg_b, qreg_t)\n else:\n logger.critical('Invalid gate %s', op)\n raise exceptions.VisualizationError('invalid gate {}'.format(op))\n\n prev_width = layer_width - 1\n #\n # adjust window size and draw horizontal lines\n #\n anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]\n if anchors:\n max_anc = max(anchors)\n else:\n max_anc = 0\n n_fold = max(0, max_anc - 1) // self._style.fold\n # window size\n if max_anc > self._style.fold > 0:\n self._cond['xmax'] = self._style.fold + 1\n self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1\n else:\n self._cond['xmax'] = max_anc + 1\n self._cond['ymax'] = self._cond['n_lines']\n # add horizontal lines\n for ii in range(n_fold + 1):\n feedline_r = (n_fold > 0 and n_fold > ii)\n feedline_l = (ii > 0)\n self._draw_regs_sub(ii, feedline_l, feedline_r)\n # draw gate number\n if self._style.index:\n for ii in range(max_anc):\n if self._style.fold > 0:\n x_coord = ii % self._style.fold + 1\n y_coord = - (ii // self._style.fold) * (self._cond['n_lines'] + 1) + 0.7\n else:\n x_coord = ii + 1\n y_coord = 0.7\n self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',\n va='center', fontsize=self._style.sfs,\n color=self._style.tc, clip_on=True,\n zorder=PORDER_TEXT)\n\n @staticmethod\n def param_parse(v, pimode=False):\n for i, e in enumerate(v):\n if pimode:\n v[i] = MatplotlibDrawer.format_pi(e)\n else:\n v[i] = MatplotlibDrawer.format_numeric(e)\n if v[i].startswith('-'):\n v[i] = '$-$' + v[i][1:]\n param = ', '.join(v)\n return param\n\n @staticmethod\n def format_pi(val):\n fracvals = MatplotlibDrawer.fraction(val)\n buf = ''\n if fracvals:\n nmr, dnm = fracvals.numerator, fracvals.denominator\n if nmr == 1:\n buf += '$\\\\pi$'\n elif nmr == -1:\n buf += '-$\\\\pi$'\n else:\n buf += '{}$\\\\pi$'.format(nmr)\n if dnm > 1:\n buf += '/{}'.format(dnm)\n return buf\n else:\n coef = MatplotlibDrawer.format_numeric(val / np.pi)\n if coef == '0':\n return '0'\n return '{}$\\\\pi$'.format(coef)\n\n @staticmethod\n def format_numeric(val, tol=1e-5):\n abs_val = abs(val)\n if math.isclose(abs_val, 0.0, abs_tol=1e-100):\n return '0'\n if math.isclose(math.fmod(abs_val, 1.0),\n 0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:\n return str(int(val))\n if 0.1 <= abs_val < 100.0:\n return '{:.2f}'.format(val)\n return '{:.1e}'.format(val)\n\n @staticmethod\n def fraction(val, base=np.pi, n=100, tol=1e-5):\n abs_val = abs(val)\n for i in range(1, n):\n for j in range(1, n):\n if 
math.isclose(abs_val, i / j * base, rel_tol=tol):\n if val < 0:\n i *= -1\n return fractions.Fraction(i, j)\n return None\n" ]
[ [ "numpy.sin", "matplotlib.patches.Polygon", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.patches.Circle", "numpy.abs", "numpy.cos", "matplotlib.patches.Rectangle", "matplotlib.patches.Arc" ] ]
lufuhao/jcvi
[ "7b6f7c80d3bd034d492021b6114ac453ac19fd52" ]
[ "jcvi/assembly/kmer.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nDeals with K-mers and K-mer distribution from reads or genome\n\"\"\"\nfrom __future__ import print_function\n\nimport os.path as op\nimport sys\nimport logging\nimport math\nimport numpy as np\n\nfrom collections import defaultdict\n\nfrom jcvi.graphics.base import (\n plt,\n asciiplot,\n set_human_axis,\n savefig,\n markup,\n panel_labels,\n normalize_axes,\n set_ticklabels_arial,\n write_messages,\n)\nfrom jcvi.formats.fasta import Fasta\nfrom jcvi.formats.base import BaseFile, must_open, get_number\nfrom jcvi.utils.cbook import thousands, percentage\nfrom jcvi.assembly.automaton import iter_project\nfrom jcvi.apps.grid import MakeManager\nfrom jcvi.apps.base import OptionParser, ActionDispatcher, sh, need_update, Popen, PIPE\n\n\nKMERYL, KSOAP, KALLPATHS = range(3)\n\n\nclass KmerSpectrum(BaseFile):\n def __init__(self, histfile):\n self.load_data(histfile)\n\n def load_data(self, histfile):\n self.data = []\n self.totalKmers = 0\n self.hist = {}\n kformat = self.guess_format(histfile)\n kformats = (\"Meryl\", \"Soap\", \"AllPaths\")\n logging.debug(\"Guessed format: {0}\".format(kformats[kformat]))\n\n fp = open(histfile)\n for rowno, row in enumerate(fp):\n if row[0] == \"#\":\n continue\n if kformat == KSOAP:\n K = rowno + 1\n counts = int(row.strip())\n else: # meryl histogram\n K, counts = row.split()[:2]\n K, counts = int(K), int(counts)\n\n Kcounts = K * counts\n self.totalKmers += Kcounts\n self.hist[K] = Kcounts\n self.data.append((K, counts))\n\n def guess_format(self, histfile):\n # Guess the format of the Kmer histogram\n fp = open(histfile)\n for row in fp:\n if row.startswith(\"# 1:\"):\n return KALLPATHS\n if len(row.split()) == 1:\n return KSOAP\n return KMERYL\n\n def get_xy(self, vmin=1, vmax=100):\n self.counts = sorted((a, b) for a, b in self.hist.items() if vmin <= a <= vmax)\n return zip(*self.counts)\n\n def analyze(self, ploidy=2, K=23, covmax=1000000):\n \"\"\"\n Analyze Kmer spectrum, calculations derived from\n allpathslg/src/kmers/KmerSpectra.cc\n \"\"\"\n from math import sqrt\n\n data = self.data\n kf_ceil = max(K for (K, c) in data)\n if kf_ceil > covmax:\n exceeds = sum(1 for (K, c) in data if K > covmax)\n logging.debug(\n \"A total of {0} distinct K-mers appear > \"\n \"{1} times. Ignored ...\".format(exceeds, covmax)\n )\n kf_ceil = covmax\n\n nkf = kf_ceil + 1\n a = [0] * nkf\n for kf, c in data:\n if kf > kf_ceil:\n continue\n a[kf] = c\n\n ndk = a # number of distinct kmers\n nk = [k * c for k, c in enumerate(a)] # number of kmers\n cndk = [0] * nkf # cumulative number of distinct kmers\n cnk = [0] * nkf # cumulative number of kmers\n for kf in range(1, nkf):\n cndk[kf] = cndk[kf - 1] + 0.5 * (ndk[kf - 1] + ndk[kf])\n cnk[kf] = cnk[kf - 1] + 0.5 * (nk[kf - 1] + nk[kf])\n\n # Separate kmer spectrum in 5 regions based on the kf\n # 1 ... kf_min1 : bad kmers with low frequency\n # kf_min1 ... kf_min2 : good kmers CN = 1/2 (SNPs)\n # kf_min2 ... kf_min3 : good kmers CN = 1\n # kf_min3 ... kf_hi : good kmers CN > 1 (repetitive)\n # kf_hi ... 
inf : bad kmers with high frequency\n\n # min1: find first minimum\n _kf_min1 = 10\n while _kf_min1 - 1 >= 2 and nk[_kf_min1 - 1] < nk[_kf_min1]:\n _kf_min1 -= 1\n while _kf_min1 <= kf_ceil and nk[_kf_min1 + 1] < nk[_kf_min1]:\n _kf_min1 += 1\n\n # max2: find absolute maximum mx2 above first minimum min1\n _kf_max2 = _kf_min1\n for kf in range(_kf_min1 + 1, int(0.8 * kf_ceil)):\n if nk[kf] > nk[_kf_max2]:\n _kf_max2 = kf\n\n # max2: resetting max2 for cases of very high polymorphism\n if ploidy == 2:\n ndk_half = ndk[_kf_max2 / 2]\n ndk_double = ndk[_kf_max2 * 2]\n if ndk_double > ndk_half:\n _kf_max2 *= 2\n\n # max1: SNPs local maximum max1 as half global maximum max2\n _kf_max1 = _kf_max2 / 2\n\n # min2: SNPs local minimum min2 between max1 and max2\n _kf_min2 = (\n _kf_max1\n * (2 * ndk[_kf_max1] + ndk[_kf_max2])\n / (ndk[_kf_max1] + ndk[_kf_max2])\n )\n\n # min1: refine between min1 and max2/2\n for kf in range(_kf_min1 + 1, _kf_max1):\n if nk[kf] < nk[_kf_min1]:\n _kf_min1 = kf\n\n # min3: not a minimum, really. upper edge of main peak\n _kf_min3 = _kf_max2 * 3 / 2\n\n print(\"kfs:\", _kf_min1, _kf_max1, _kf_min2, _kf_max2, _kf_min3, file=sys.stderr)\n self.min1 = _kf_min1\n self.max1 = _kf_max1\n self.min2 = _kf_min2\n self.max2 = _kf_max2\n self.min3 = _kf_min3\n\n # Define maximum kf above which we neglect data\n _kf_hi = (\n _kf_max2 * sqrt(4 * ndk[2 * _kf_max2] * _kf_max2)\n if 2 * _kf_max2 < len(ndk)\n else _kf_max2 * sqrt(4 * ndk[len(ndk) - 1] * _kf_max2)\n )\n _kf_hi = int(_kf_hi)\n\n if _kf_hi > kf_ceil:\n _kf_hi = kf_ceil\n\n _nk_total = cnk[len(cnk) - 1]\n _nk_bad_low_kf = cnk[_kf_min1]\n _nk_good_uniq = cnk[_kf_min3] - cnk[_kf_min2]\n _nk_bad_high_kf = _nk_total - cnk[_kf_hi]\n _ndk_good_snp = cndk[_kf_min2] - cndk[_kf_min1]\n _ndk_good_uniq = cndk[_kf_min3] - cndk[_kf_min2]\n\n # kmer coverage C_k\n _kf_ave_uniq = _nk_good_uniq * 1.0 / _ndk_good_uniq\n _genome_size = (_nk_total - _nk_bad_low_kf - _nk_bad_high_kf) / _kf_ave_uniq\n _genome_size_unique = _ndk_good_uniq + _ndk_good_snp / 2\n _genome_size_repetitive = _genome_size - _genome_size_unique\n _coverage = _nk_total / _genome_size if _genome_size else 0\n\n # SNP rate estimation, assumes uniform distribution of SNPs over the\n # genome and accounts for the reduction in SNP kmer counts when\n # polymorphism is very high\n if ploidy == 2:\n _d_SNP = (\n 1.0 / (1.0 - (1.0 - 0.5 * _ndk_good_snp / _genome_size) ** (1.0 / K))\n if _ndk_good_snp > 0\n else 1000000\n )\n\n G = int(_genome_size)\n G1 = int(_genome_size_unique)\n GR = int(_genome_size_repetitive)\n coverage = int(_coverage)\n\n m = \"Kmer (K={0}) Spectrum Analysis\\n\".format(K)\n m += \"Genome size estimate = {0}\\n\".format(thousands(G))\n m += \"Genome size estimate CN = 1 = {0} ({1})\\n\".format(\n thousands(G1), percentage(G1, G)\n )\n m += \"Genome size estimate CN > 1 = {0} ({1})\\n\".format(\n thousands(GR), percentage(GR, G)\n )\n m += \"Coverage estimate: {0} x\\n\".format(coverage)\n self.repetitive = \"Repeats: {0} percent\".format(GR * 100 / G)\n\n if ploidy == 2:\n d_SNP = int(_d_SNP)\n self.snprate = \"SNP rate ~= 1/{0}\".format(d_SNP)\n else:\n self.snprate = \"SNP rate not computed (Ploidy = {0})\".format(ploidy)\n m += self.snprate + \"\\n\"\n\n self.genomesize = int(round(self.totalKmers * 1.0 / self.max2))\n\n print(m, file=sys.stderr)\n\n\nclass KMCComplex(object):\n def __init__(self, indices):\n self.indices = indices\n\n def write(self, outfile, filename=\"stdout\", action=\"union\"):\n assert action in (\"union\", \"intersect\")\n op = 
\" + sum \" if action == \"union\" else \" * \"\n fw = must_open(filename, \"w\")\n print(\"INPUT:\", file=fw)\n ss = []\n pad = len(str(len(self.indices)))\n for i, e in enumerate(self.indices):\n s = \"s{0:0{1}d}\".format(i + 1, pad)\n ss.append(s)\n print(\"{} = {}\".format(s, e.rsplit(\".\", 1)[0]), file=fw)\n print(\"OUTPUT:\", file=fw)\n print(\"{} = {}\".format(outfile, op.join(ss)), file=fw)\n fw.close()\n\n\ndef main():\n\n actions = (\n # K-mer counting\n (\"jellyfish\", \"count kmers using `jellyfish`\"),\n (\"meryl\", \"count kmers using `meryl`\"),\n (\"kmc\", \"count kmers using `kmc`\"),\n (\"kmcop\", \"intersect or union kmc indices\"),\n (\"entropy\", \"calculate entropy for kmers from kmc dump\"),\n (\"bed\", \"map kmers on FASTA\"),\n # K-mer histogram\n (\"histogram\", \"plot the histogram based on meryl K-mer distribution\"),\n (\"multihistogram\", \"plot histogram across a set of K-mer sizes\"),\n # These forms a pipeline to count K-mers for given FASTA seq\n (\"dump\", \"convert FASTA sequences to list of K-mers\"),\n (\"bin\", \"serialize counts to bitarrays\"),\n (\"bincount\", \"count K-mers in the bin\"),\n (\"count\", \"run dump - jellyfish - bin - bincount in serial\"),\n (\"logodds\", \"compute log likelihood between two db\"),\n (\"model\", \"model kmer distribution given error rate\"),\n )\n p = ActionDispatcher(actions)\n p.dispatch(globals())\n\n\ndef entropy_score(kmer):\n \"\"\"\n Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics\n https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic\n \"\"\"\n l = len(kmer) - 2\n k = l if l < 64 else 64\n counts = defaultdict(int)\n for i in range(l):\n trinuc = kmer[i : i + 3]\n counts[trinuc] += 1\n\n logk = math.log(k)\n res = 0\n for k, v in counts.items():\n f = v * 1.0 / l\n res += f * math.log(f) / logk\n return res * -100\n\n\ndef entropy(args):\n \"\"\"\n %prog entropy kmc_dump.out\n\n kmc_dump.out contains two columns:\n AAAAAAAAAAAGAAGAAAGAAA 34\n \"\"\"\n p = OptionParser(entropy.__doc__)\n p.add_option(\n \"--threshold\", default=0, type=\"int\", help=\"Complexity needs to be above\"\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (kmc_out,) = args\n fp = open(kmc_out)\n for row in fp:\n kmer, count = row.split()\n score = entropy_score(kmer)\n if score >= opts.threshold:\n print(\" \".join((kmer, count, \"{:.2f}\".format(score))))\n\n\ndef bed(args):\n \"\"\"\n %prog bed fastafile kmer.dump.txt\n\n Map kmers on FASTA.\n \"\"\"\n from jcvi.formats.fasta import rc, parse_fasta\n\n p = OptionParser(bed.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, dumpfile = args\n fp = open(dumpfile)\n KMERS = set()\n for row in fp:\n kmer = row.split()[0]\n kmer_rc = rc(kmer)\n KMERS.add(kmer)\n KMERS.add(kmer_rc)\n\n K = len(kmer)\n logging.debug(\"Imported {} {}-mers\".format(len(KMERS), K))\n\n for name, seq in parse_fasta(fastafile):\n name = name.split()[0]\n for i in range(len(seq) - K):\n if i % 5000000 == 0:\n print(\"{}:{}\".format(name, i), file=sys.stderr)\n kmer = seq[i : i + K]\n if kmer in KMERS:\n print(\"\\t\".join(str(x) for x in (name, i, i + K, kmer)))\n\n\ndef kmcop(args):\n \"\"\"\n %prog kmcop *.kmc_suf\n\n Intersect or union kmc indices.\n \"\"\"\n p = OptionParser(kmcop.__doc__)\n p.add_option(\n \"--action\", choices=(\"union\", \"intersect\"), default=\"union\", 
help=\"Action\"\n )\n p.add_option(\"-o\", default=\"results\", help=\"Output name\")\n opts, args = p.parse_args(args)\n\n if len(args) < 2:\n sys.exit(not p.print_help())\n\n indices = args\n ku = KMCComplex(indices)\n ku.write(opts.o, action=opts.action)\n\n\ndef kmc(args):\n \"\"\"\n %prog kmc folder\n\n Run kmc3 on Illumina reads.\n \"\"\"\n p = OptionParser(kmc.__doc__)\n p.add_option(\"-k\", default=21, type=\"int\", help=\"Kmer size\")\n p.add_option(\n \"--ci\", default=2, type=\"int\", help=\"Exclude kmers with less than ci counts\"\n )\n p.add_option(\"--cs\", default=2, type=\"int\", help=\"Maximal value of a counter\")\n p.add_option(\n \"--cx\", default=None, type=\"int\", help=\"Exclude kmers with more than cx counts\"\n )\n p.add_option(\n \"--single\",\n default=False,\n action=\"store_true\",\n help=\"Input is single-end data, only one FASTQ/FASTA\",\n )\n p.add_option(\n \"--fasta\",\n default=False,\n action=\"store_true\",\n help=\"Input is FASTA instead of FASTQ\",\n )\n p.set_cpus()\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (folder,) = args\n K = opts.k\n n = 1 if opts.single else 2\n pattern = (\n \"*.fa,*.fa.gz,*.fasta,*.fasta.gz\"\n if opts.fasta\n else \"*.fq,*.fq.gz,*.fastq,*.fastq.gz\"\n )\n\n mm = MakeManager()\n for p, pf in iter_project(folder, pattern=pattern, n=n, commonprefix=False):\n pf = pf.split(\"_\")[0] + \".ms{}\".format(K)\n infiles = pf + \".infiles\"\n fw = open(infiles, \"w\")\n print(\"\\n\".join(p), file=fw)\n fw.close()\n\n cmd = \"kmc -k{} -m64 -t{}\".format(K, opts.cpus)\n cmd += \" -ci{} -cs{}\".format(opts.ci, opts.cs)\n if opts.cx:\n cmd += \" -cx{}\".format(opts.cx)\n if opts.fasta:\n cmd += \" -fm\"\n cmd += \" @{} {} .\".format(infiles, pf)\n outfile = pf + \".kmc_suf\"\n mm.add(p, outfile, cmd)\n\n mm.write()\n\n\ndef meryl(args):\n \"\"\"\n %prog meryl folder\n\n Run meryl on Illumina reads.\n \"\"\"\n p = OptionParser(meryl.__doc__)\n p.add_option(\"-k\", default=19, type=\"int\", help=\"Kmer size\")\n p.set_cpus()\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (folder,) = args\n K = opts.k\n cpus = opts.cpus\n mm = MakeManager()\n for p, pf in iter_project(folder):\n cmds = []\n mss = []\n for i, ip in enumerate(p):\n ms = \"{}{}.ms{}\".format(pf, i + 1, K)\n mss.append(ms)\n cmd = \"meryl -B -C -m {} -threads {}\".format(K, cpus)\n cmd += \" -s {} -o {}\".format(ip, ms)\n cmds.append(cmd)\n ams, bms = mss\n pms = \"{}.ms{}\".format(pf, K)\n cmd = \"meryl -M add -s {} -s {} -o {}\".format(ams, bms, pms)\n cmds.append(cmd)\n cmd = \"rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx\".format(ams, ams, bms, bms)\n cmds.append(cmd)\n mm.add(p, pms + \".mcdat\", cmds)\n\n mm.write()\n\n\ndef model(args):\n \"\"\"\n %prog model erate\n\n Model kmer distribution given error rate. 
See derivation in FIONA paper:\n <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>\n \"\"\"\n from scipy.stats import binom, poisson\n\n p = OptionParser(model.__doc__)\n p.add_option(\"-k\", default=23, type=\"int\", help=\"Kmer size\")\n p.add_option(\"--cov\", default=50, type=\"int\", help=\"Expected coverage\")\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (erate,) = args\n erate = float(erate)\n cov = opts.cov\n k = opts.k\n\n xy = []\n # Range include c although it is unclear what it means to have c=0\n for c in range(0, cov * 2 + 1):\n Prob_Yk = 0\n for i in range(k + 1):\n # Probability of having exactly i errors\n pi_i = binom.pmf(i, k, erate)\n # Expected coverage of kmer with exactly i errors\n mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i)\n # Probability of seeing coverage of c\n Prob_Yk_i = poisson.pmf(c, mu_i)\n # Sum i over 0, 1, ... up to k errors\n Prob_Yk += pi_i * Prob_Yk_i\n xy.append((c, Prob_Yk))\n\n x, y = zip(*xy)\n asciiplot(x, y, title=\"Model\")\n\n\ndef logodds(args):\n \"\"\"\n %prog logodds cnt1 cnt2\n\n Compute log likelihood between two db.\n \"\"\"\n from math import log\n from jcvi.formats.base import DictFile\n\n p = OptionParser(logodds.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n cnt1, cnt2 = args\n d = DictFile(cnt2)\n fp = open(cnt1)\n for row in fp:\n scf, c1 = row.split()\n c2 = d[scf]\n c1, c2 = float(c1), float(c2)\n c1 += 1\n c2 += 1\n score = int(100 * (log(c1) - log(c2)))\n print(\"{0}\\t{1}\".format(scf, score))\n\n\ndef get_K(jfdb):\n \"\"\"\n Infer K from jellyfish db.\n \"\"\"\n j = jfdb.rsplit(\"_\", 1)[0].rsplit(\"-\", 1)[-1]\n assert j[0] == \"K\"\n return int(j[1:])\n\n\ndef count(args):\n \"\"\"\n %prog count fastafile jf.db\n\n Run dump - jellyfish - bin - bincount in serial.\n \"\"\"\n from bitarray import bitarray\n\n p = OptionParser(count.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, jfdb = args\n K = get_K(jfdb)\n cmd = \"jellyfish query {0} -C | cut -d' ' -f 2\".format(jfdb)\n t = must_open(\"tmp\", \"w\")\n proc = Popen(cmd, stdin=PIPE, stdout=t)\n t.flush()\n\n f = Fasta(fastafile, lazy=True)\n for name, rec in f.iteritems_ordered():\n kmers = list(make_kmers(rec.seq, K))\n print(\"\\n\".join(kmers), file=proc.stdin)\n proc.stdin.close()\n logging.debug(cmd)\n proc.wait()\n\n a = bitarray()\n binfile = \".\".join((fastafile, jfdb, \"bin\"))\n fw = open(binfile, \"w\")\n t.seek(0)\n for row in t:\n c = row.strip()\n a.append(int(c))\n a.tofile(fw)\n logging.debug(\"Serialize {0} bits to `{1}`.\".format(len(a), binfile))\n fw.close()\n sh(\"rm {0}\".format(t.name))\n\n logging.debug(\n \"Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.\".format(\n K, fastafile, jfdb, binfile\n )\n )\n cntfile = \".\".join((fastafile, jfdb, \"cnt\"))\n bincount([fastafile, binfile, \"-o\", cntfile, \"-K {0}\".format(K)])\n logging.debug(\"Shared K-mer counts written to `{0}`.\".format(cntfile))\n\n\ndef bincount(args):\n \"\"\"\n %prog bincount fastafile binfile\n\n Count K-mers in the bin.\n \"\"\"\n from bitarray import bitarray\n from jcvi.formats.sizes import Sizes\n\n p = OptionParser(bincount.__doc__)\n p.add_option(\"-K\", default=23, type=\"int\", help=\"K-mer size\")\n p.set_outfile()\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, binfile = args\n K = opts.K\n\n fp = 
open(binfile)\n a = bitarray()\n a.fromfile(fp)\n f = Sizes(fastafile)\n tsize = 0\n fw = must_open(opts.outfile, \"w\")\n for name, seqlen in f.iter_sizes():\n ksize = seqlen - K + 1\n b = a[tsize : tsize + ksize]\n bcount = b.count()\n print(\"\\t\".join(str(x) for x in (name, bcount)), file=fw)\n tsize += ksize\n\n\ndef bin(args):\n \"\"\"\n %prog bin filename filename.bin\n\n Serialize counts to bitarrays.\n \"\"\"\n from bitarray import bitarray\n\n p = OptionParser(bin.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n inp, outp = args\n fp = must_open(inp)\n fw = must_open(outp, \"w\")\n a = bitarray()\n for row in fp:\n c = row.split()[-1]\n a.append(int(c))\n a.tofile(fw)\n fw.close()\n\n\ndef make_kmers(seq, K):\n seq = str(seq).upper().replace(\"N\", \"A\")\n seqlen = len(seq)\n for i in range(seqlen - K + 1):\n yield seq[i : i + K]\n\n\ndef dump(args):\n \"\"\"\n %prog dump fastafile\n\n Convert FASTA sequences to list of K-mers.\n \"\"\"\n p = OptionParser(dump.__doc__)\n p.add_option(\"-K\", default=23, type=\"int\", help=\"K-mer size\")\n p.set_outfile()\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n K = opts.K\n fw = must_open(opts.outfile, \"w\")\n f = Fasta(fastafile, lazy=True)\n for name, rec in f.iteritems_ordered():\n kmers = list(make_kmers(rec.seq, K))\n print(\"\\n\".join(kmers), file=fw)\n fw.close()\n\n\ndef jellyfish(args):\n \"\"\"\n %prog jellyfish [*.fastq|*.fasta]\n\n Run jellyfish to dump histogram to be used in kmer.histogram().\n \"\"\"\n from jcvi.apps.base import getfilesize\n from jcvi.utils.cbook import human_size\n\n p = OptionParser(jellyfish.__doc__)\n p.add_option(\"-K\", default=23, type=\"int\", help=\"K-mer size\")\n p.add_option(\n \"--coverage\", default=40, type=\"int\", help=\"Expected sequence coverage\",\n )\n p.add_option(\"--prefix\", default=\"jf\", help=\"Database prefix\")\n p.add_option(\n \"--nohist\", default=False, action=\"store_true\", help=\"Do not print histogram\",\n )\n p.set_home(\"jellyfish\")\n p.set_cpus()\n opts, args = p.parse_args(args)\n\n if len(args) < 1:\n sys.exit(not p.print_help())\n\n fastqfiles = args\n K = opts.K\n coverage = opts.coverage\n\n totalfilesize = sum(getfilesize(x) for x in fastqfiles)\n fq = fastqfiles[0]\n pf = opts.prefix\n gzip = fq.endswith(\".gz\")\n\n hashsize = totalfilesize / coverage\n logging.debug(\n \"Total file size: {0}, hashsize (-s): {1}\".format(\n human_size(totalfilesize, a_kilobyte_is_1024_bytes=True), hashsize\n )\n )\n\n jfpf = \"{0}-K{1}\".format(pf, K)\n jfdb = jfpf\n fastqfiles = \" \".join(fastqfiles)\n\n jfcmd = op.join(opts.jellyfish_home, \"jellyfish\")\n cmd = jfcmd\n cmd += \" count -t {0} -C -o {1}\".format(opts.cpus, jfpf)\n cmd += \" -s {0} -m {1}\".format(hashsize, K)\n if gzip:\n cmd = \"gzip -dc {0} | \".format(fastqfiles) + cmd + \" /dev/fd/0\"\n else:\n cmd += \" \" + fastqfiles\n\n if need_update(fastqfiles, jfdb):\n sh(cmd)\n\n if opts.nohist:\n return\n\n jfhisto = jfpf + \".histogram\"\n cmd = jfcmd + \" histo -t 64 {0} -o {1}\".format(jfdb, jfhisto)\n\n if need_update(jfdb, jfhisto):\n sh(cmd)\n\n\ndef merylhistogram(merylfile):\n \"\"\"\n Run meryl to dump histogram to be used in kmer.histogram(). 
The merylfile\n are the files ending in .mcidx or .mcdat.\n \"\"\"\n pf, sf = op.splitext(merylfile)\n outfile = pf + \".histogram\"\n if need_update(merylfile, outfile):\n cmd = \"meryl -Dh -s {0}\".format(pf)\n sh(cmd, outfile=outfile)\n\n return outfile\n\n\ndef multihistogram(args):\n \"\"\"\n %prog multihistogram *.histogram species\n\n Plot the histogram based on a set of K-mer hisotograms. The method is based\n on Star et al.'s method (Atlantic Cod genome paper).\n \"\"\"\n p = OptionParser(multihistogram.__doc__)\n p.add_option(\"--kmin\", default=15, type=\"int\", help=\"Minimum K-mer size, inclusive\")\n p.add_option(\"--kmax\", default=30, type=\"int\", help=\"Maximum K-mer size, inclusive\")\n p.add_option(\"--vmin\", default=2, type=\"int\", help=\"Minimum value, inclusive\")\n p.add_option(\"--vmax\", default=100, type=\"int\", help=\"Maximum value, inclusive\")\n opts, args, iopts = p.set_image_options(args, figsize=\"10x5\", dpi=300)\n\n if len(args) < 1:\n sys.exit(not p.print_help())\n\n histfiles = args[:-1]\n species = args[-1]\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n A = fig.add_axes([0.08, 0.12, 0.38, 0.76])\n B = fig.add_axes([0.58, 0.12, 0.38, 0.76])\n\n lines = []\n legends = []\n genomesizes = []\n for histfile in histfiles:\n ks = KmerSpectrum(histfile)\n x, y = ks.get_xy(opts.vmin, opts.vmax)\n K = get_number(op.basename(histfile).split(\".\")[0].split(\"-\")[-1])\n if not opts.kmin <= K <= opts.kmax:\n continue\n\n (line,) = A.plot(x, y, \"-\", lw=1)\n lines.append(line)\n legends.append(\"K = {0}\".format(K))\n ks.analyze(K=K)\n genomesizes.append((K, ks.genomesize / 1e6))\n\n leg = A.legend(lines, legends, shadow=True, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n\n title = \"{0} genome K-mer histogram\".format(species)\n A.set_title(markup(title))\n xlabel, ylabel = \"Coverage (X)\", \"Counts\"\n A.set_xlabel(xlabel)\n A.set_ylabel(ylabel)\n set_human_axis(A)\n\n title = \"{0} genome size estimate\".format(species)\n B.set_title(markup(title))\n x, y = zip(*genomesizes)\n B.plot(x, y, \"ko\", mfc=\"w\")\n t = np.linspace(opts.kmin - 0.5, opts.kmax + 0.5, 100)\n p = np.poly1d(np.polyfit(x, y, 2))\n B.plot(t, p(t), \"r:\")\n\n xlabel, ylabel = \"K-mer size\", \"Estimated genome size (Mb)\"\n B.set_xlabel(xlabel)\n B.set_ylabel(ylabel)\n set_ticklabels_arial(B)\n\n labels = ((0.04, 0.96, \"A\"), (0.54, 0.96, \"B\"))\n panel_labels(root, labels)\n\n normalize_axes(root)\n imagename = species + \".multiK.pdf\"\n savefig(imagename, dpi=iopts.dpi, iopts=iopts)\n\n\ndef histogram(args):\n \"\"\"\n %prog histogram meryl.histogram species K\n\n Plot the histogram based on meryl K-mer distribution, species and N are\n only used to annotate the graphic.\n \"\"\"\n p = OptionParser(histogram.__doc__)\n p.add_option(\n \"--vmin\", dest=\"vmin\", default=1, type=\"int\", help=\"minimum value, inclusive\",\n )\n p.add_option(\n \"--vmax\", dest=\"vmax\", default=100, type=\"int\", help=\"maximum value, inclusive\",\n )\n p.add_option(\n \"--pdf\",\n default=False,\n action=\"store_true\",\n help=\"Print PDF instead of ASCII plot\",\n )\n p.add_option(\n \"--coverage\", default=0, type=\"int\", help=\"Kmer coverage [default: auto]\"\n )\n p.add_option(\n \"--nopeaks\",\n default=False,\n action=\"store_true\",\n help=\"Do not annotate K-mer peaks\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n histfile, species, N = args\n ascii = not opts.pdf\n peaks = not opts.nopeaks\n N = 
int(N)\n\n if histfile.rsplit(\".\", 1)[-1] in (\"mcdat\", \"mcidx\"):\n logging.debug(\"CA kmer index found\")\n histfile = merylhistogram(histfile)\n\n ks = KmerSpectrum(histfile)\n ks.analyze(K=N)\n\n Total_Kmers = int(ks.totalKmers)\n coverage = opts.coverage\n Kmer_coverage = ks.max2 if not coverage else coverage\n Genome_size = int(round(Total_Kmers * 1.0 / Kmer_coverage))\n\n Total_Kmers_msg = \"Total {0}-mers: {1}\".format(N, thousands(Total_Kmers))\n Kmer_coverage_msg = \"{0}-mer coverage: {1}\".format(N, Kmer_coverage)\n Genome_size_msg = \"Estimated genome size: {0:.1f}Mb\".format(Genome_size / 1e6)\n Repetitive_msg = ks.repetitive\n SNPrate_msg = ks.snprate\n\n for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):\n print(msg, file=sys.stderr)\n\n x, y = ks.get_xy(opts.vmin, opts.vmax)\n title = \"{0} {1}-mer histogram\".format(species, N)\n\n if ascii:\n asciiplot(x, y, title=title)\n return Genome_size\n\n plt.figure(1, (6, 6))\n plt.plot(x, y, \"g-\", lw=2, alpha=0.5)\n ax = plt.gca()\n\n if peaks:\n t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)\n tcounts = [(x, y) for x, y in ks.counts if x in t]\n if tcounts:\n x, y = zip(*tcounts)\n tcounts = dict(tcounts)\n plt.plot(x, y, \"ko\", lw=2, mec=\"k\", mfc=\"w\")\n ax.text(ks.max1, tcounts[ks.max1], \"SNP peak\", va=\"top\")\n ax.text(ks.max2, tcounts[ks.max2], \"Main peak\")\n\n messages = [\n Total_Kmers_msg,\n Kmer_coverage_msg,\n Genome_size_msg,\n Repetitive_msg,\n SNPrate_msg,\n ]\n write_messages(ax, messages)\n\n ymin, ymax = ax.get_ylim()\n ymax = ymax * 7 / 6\n\n ax.set_title(markup(title))\n ax.set_ylim((ymin, ymax))\n xlabel, ylabel = \"Coverage (X)\", \"Counts\"\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n set_human_axis(ax)\n\n imagename = histfile.split(\".\")[0] + \".pdf\"\n savefig(imagename, dpi=100)\n\n return Genome_size\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "scipy.stats.binom.pmf", "numpy.linspace", "scipy.stats.poisson.pmf", "numpy.polyfit" ] ]
ricardoprins/pennylane
[ "e0928abb1e413356848499cf0799037fc2082518" ]
[ "pennylane/devices/tests/test_gates.py" ]
[ "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTests that application of gates and state preparations\nworks correctly an a device.\n\"\"\"\n# pylint: disable=no-self-use\n# pylint: disable=too-many-arguments\n# pylint: disable=pointless-statement\nfrom cmath import exp\nfrom math import cos, sin, sqrt\n\nimport pytest\nimport numpy as np\nimport pennylane as qml\n\nfrom scipy.linalg import block_diag\nfrom flaky import flaky\n\npytestmark = pytest.mark.skip_unsupported\n\nnp.random.seed(42)\n\n# ==========================================================\n# Some useful global variables\n\n# gates for which device support is tested\nops = {\n \"BasisState\": qml.BasisState(np.array([0]), wires=[0]),\n \"CNOT\": qml.CNOT(wires=[0, 1]),\n \"CRX\": qml.CRX(0, wires=[0, 1]),\n \"CRY\": qml.CRY(0, wires=[0, 1]),\n \"CRZ\": qml.CRZ(0, wires=[0, 1]),\n \"CRot\": qml.CRot(0, 0, 0, wires=[0, 1]),\n \"CSWAP\": qml.CSWAP(wires=[0, 1, 2]),\n \"CZ\": qml.CZ(wires=[0, 1]),\n \"CY\": qml.CY(wires=[0, 1]),\n \"DiagonalQubitUnitary\": qml.DiagonalQubitUnitary(np.array([1, 1]), wires=[0]),\n \"Hadamard\": qml.Hadamard(wires=[0]),\n \"MultiRZ\": qml.MultiRZ(0, wires=[0]),\n \"PauliX\": qml.PauliX(wires=[0]),\n \"PauliY\": qml.PauliY(wires=[0]),\n \"PauliZ\": qml.PauliZ(wires=[0]),\n \"PhaseShift\": qml.PhaseShift(0, wires=[0]),\n \"ControlledPhaseShift\": qml.ControlledPhaseShift(0, wires=[0, 1]),\n \"QubitStateVector\": qml.QubitStateVector(np.array([1.0, 0.0]), wires=[0]),\n \"QubitUnitary\": qml.QubitUnitary(np.eye(2), wires=[0]),\n \"ControlledQubitUnitary\": qml.ControlledQubitUnitary(np.eye(2), control_wires=[1], wires=[0]),\n \"MultiControlledX\": qml.MultiControlledX(control_wires=[1, 2], wires=[0]),\n \"RX\": qml.RX(0, wires=[0]),\n \"RY\": qml.RY(0, wires=[0]),\n \"RZ\": qml.RZ(0, wires=[0]),\n \"Rot\": qml.Rot(0, 0, 0, wires=[0]),\n \"S\": qml.S(wires=[0]),\n \"SWAP\": qml.SWAP(wires=[0, 1]),\n \"ISWAP\": qml.ISWAP(wires=[0, 1]),\n \"T\": qml.T(wires=[0]),\n \"SX\": qml.SX(wires=[0]),\n \"Toffoli\": qml.Toffoli(wires=[0, 1, 2]),\n \"QFT\": qml.QFT(wires=[0, 1, 2]),\n \"IsingXX\": qml.IsingXX(0, wires=[0, 1]),\n \"IsingZZ\": qml.IsingZZ(0, wires=[0, 1]),\n \"SingleExcitation\": qml.SingleExcitation(0, wires=[0, 1]),\n \"SingleExcitationPlus\": qml.SingleExcitationPlus(0, wires=[0, 1]),\n \"SingleExcitationMinus\": qml.SingleExcitationMinus(0, wires=[0, 1]),\n \"DoubleExcitation\": qml.DoubleExcitation(0, wires=[0, 1, 2, 3]),\n \"DoubleExcitationPlus\": qml.DoubleExcitationPlus(0, wires=[0, 1, 2, 3]),\n \"DoubleExcitationMinus\": qml.DoubleExcitationMinus(0, wires=[0, 1, 2, 3]),\n \"QubitCarry\": qml.QubitCarry(wires=[0, 1, 2, 3]),\n \"QubitSum:\": qml.QubitSum(wires=[0, 1, 2]),\n}\n\nall_ops = ops.keys()\n\n# non-parametrized qubit gates\nI = np.identity(2)\nX = np.array([[0, 1], [1, 0]])\nY = np.array([[0, -1j], [1j, 0]])\nZ = np.array([[1, 0], [0, -1]])\nH = np.array([[1, 1], [1, -1]]) / sqrt(2)\nS = np.diag([1, 1j])\nT = np.diag([1, 
np.exp(1j * np.pi / 4)])\nSX = 0.5 * np.array([[1 + 1j, 1 - 1j], [1 - 1j, 1 + 1j]])\nSWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\nISWAP = np.array([[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]])\nCNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\nCZ = np.diag([1, 1, 1, -1])\nCY = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]])\ntoffoli = np.diag([1 for i in range(8)])\ntoffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])\nCSWAP = block_diag(I, I, SWAP)\n\n# parametrized qubit gates\nphase_shift = lambda phi: np.array([[1, 0], [0, np.exp(1j * phi)]])\nrx = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * X\nry = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * Y\nrz = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * Z\nrot = lambda a, b, c: rz(c) @ (ry(b) @ rz(a))\ncrz = lambda theta: np.array(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, np.exp(-1j * theta / 2), 0],\n [0, 0, 0, np.exp(1j * theta / 2)],\n ]\n)\ncry = lambda theta: np.array(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, cos(theta / 2), -sin(theta / 2)],\n [0, 0, sin(theta / 2), cos(theta / 2)],\n ]\n)\ncrx = lambda theta: np.array(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, cos(theta / 2), 1j * sin(-theta / 2)],\n [0, 0, 1j * sin(-theta / 2), cos(theta / 2)],\n ]\n)\ncrot = lambda phi, theta, omega: np.array(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [\n 0,\n 0,\n exp(-0.5j * (phi + omega)) * cos(theta / 2),\n -exp(0.5j * (phi - omega)) * sin(theta / 2),\n ],\n [\n 0,\n 0,\n exp(-0.5j * (phi - omega)) * sin(theta / 2),\n exp(0.5j * (phi + omega)) * cos(theta / 2),\n ],\n ]\n)\nIsingXX = lambda phi: np.array(\n [\n [cos(phi / 2), 0, 0, -1j * sin(phi / 2)],\n [0, cos(phi / 2), -1j * sin(phi / 2), 0],\n [0, -1j * sin(phi / 2), cos(phi / 2), 0],\n [-1j * sin(phi / 2), 0, 0, cos(phi / 2)],\n ]\n)\n\nIsingZZ = lambda phi: np.array(\n [\n [exp(-1.0j * phi / 2), 0, 0, 0],\n [0, exp(1.0j * phi / 2), 0, 0],\n [0, 0, exp(1.0j * phi / 2), 0],\n [0, 0, 0, exp(-1.0j * phi / 2)],\n ]\n)\n\n# list of all non-parametrized single-qubit gates,\n# along with the PennyLane operation name\nsingle_qubit = [\n (qml.PauliX, X),\n (qml.PauliY, Y),\n (qml.PauliZ, Z),\n (qml.Hadamard, H),\n (qml.S, S),\n (qml.T, T),\n (qml.SX, SX),\n]\n\n# list of all parametrized single-qubit gates\n# taking a single parameter\nsingle_qubit_param = [\n (qml.PhaseShift, phase_shift),\n (qml.RX, rx),\n (qml.RY, ry),\n (qml.RZ, rz),\n]\n# list of all non-parametrized two-qubit gates\ntwo_qubit = [(qml.CNOT, CNOT), (qml.SWAP, SWAP), (qml.ISWAP, ISWAP), (qml.CZ, CZ), (qml.CY, CY)]\n# list of all parametrized two-qubit gates\ntwo_qubit_param = [\n (qml.CRX, crx),\n (qml.CRY, cry),\n (qml.CRZ, crz),\n (qml.IsingXX, IsingXX),\n (qml.IsingZZ, IsingZZ),\n]\ntwo_qubit_multi_param = [(qml.CRot, crot)]\n# list of all three-qubit gates\nthree_qubit = [(qml.Toffoli, toffoli), (qml.CSWAP, CSWAP)]\n\n# single qubit unitary matrix\ntheta = 0.8364\nphi = -0.1234\nU = np.array(\n [\n [\n np.cos(theta / 2) * np.exp(np.complex(0, -phi / 2)),\n -np.sin(theta / 2) * np.exp(np.complex(0, phi / 2)),\n ],\n [\n np.sin(theta / 2) * np.exp(np.complex(0, -phi / 2)),\n np.cos(theta / 2) * np.exp(np.complex(0, phi / 2)),\n ],\n ]\n)\n\n# two qubit unitary matrix\nU2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / sqrt(3)\n\n# single qubit Hermitian observable\nA = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])\n\n\n# 
===============================================================\n\n\nclass TestSupportedGates:\n \"\"\"Test that the device can implement all gates that it claims to support.\"\"\"\n\n @pytest.mark.parametrize(\"operation\", all_ops)\n def test_supported_gates_can_be_implemented(self, device_kwargs, operation):\n \"\"\"Test that the device can implement all its supported gates.\"\"\"\n device_kwargs[\"wires\"] = 4 # maximum size of current gates\n dev = qml.device(**device_kwargs)\n\n assert hasattr(dev, \"operations\")\n if operation in dev.operations:\n\n @qml.qnode(dev)\n def circuit():\n ops[operation]\n return qml.expval(qml.Identity(wires=0))\n\n assert isinstance(circuit(), (float, np.ndarray))\n\n @pytest.mark.parametrize(\"operation\", all_ops)\n def test_inverse_gates_can_be_implemented(self, device_kwargs, operation):\n \"\"\"Test that the device can implement the inverse of all its supported gates.\n This test is skipped for devices that do not support inverse operations.\"\"\"\n device_kwargs[\"wires\"] = 4\n dev = qml.device(**device_kwargs)\n supports_inv = (\n \"supports_inverse_operations\" in dev.capabilities()\n and dev.capabilities()[\"supports_inverse_operations\"]\n )\n if not supports_inv:\n pytest.skip(\"Device does not support inverse operations.\")\n\n assert hasattr(dev, \"operations\")\n if operation in dev.operations:\n\n @qml.qnode(dev)\n def circuit():\n ops[operation].queue().inv()\n return qml.expval(qml.Identity(wires=0))\n\n assert isinstance(circuit(), (float, np.ndarray))\n\n\n@flaky(max_runs=10)\nclass TestGatesQubit:\n \"\"\"Test qubit-based devices' probability vector after application of gates.\"\"\"\n\n @pytest.mark.parametrize(\n \"basis_state\",\n [\n np.array([0, 0, 1, 0]),\n np.array([0, 0, 1, 0]),\n np.array([1, 0, 1, 0]),\n np.array([1, 1, 1, 1]),\n ],\n )\n def test_basis_state(self, device, basis_state, tol, skip_if):\n \"\"\"Test basis state initialization.\"\"\"\n n_wires = 4\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n @qml.qnode(dev)\n def circuit():\n qml.BasisState(basis_state, wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.zeros([2 ** n_wires])\n expected[np.ravel_multi_index(basis_state, [2] * n_wires)] = 1\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n def test_qubit_state_vector(self, device, init_state, tol, skip_if):\n \"\"\"Test QubitStateVector initialisation.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n return qml.probs(range(n_wires))\n\n res = circuit()\n expected = np.abs(rnd_state) ** 2\n\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"op,mat\", single_qubit)\n def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n \"\"\"Test PauliX application.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"gamma\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", single_qubit_param)\n def 
test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):\n \"\"\"Test single qubit gates taking a single scalar argument.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(gamma, wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(func(gamma) @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n def test_rotation(self, device, init_state, tol, skip_if):\n \"\"\"Test three axis rotation gate.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n a = 0.542\n b = 1.3432\n c = -0.654\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n qml.Rot(a, b, c, wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(rot(a, b, c) @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"op,mat\", two_qubit)\n def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n \"\"\"Test two qubit gates.\"\"\"\n n_wires = 2\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"param\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", two_qubit_param)\n def test_two_qubit_parameters(self, device, init_state, op, func, param, tol, skip_if):\n \"\"\"Test parametrized two qubit gates taking a single scalar argument.\"\"\"\n n_wires = 2\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(param, wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(func(param) @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"mat\", [U, U2])\n def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):\n \"\"\"Test QubitUnitary gate.\"\"\"\n n_wires = int(np.log2(len(mat)))\n dev = device(n_wires)\n\n if \"QubitUnitary\" not in dev.operations:\n pytest.skip(\"Skipped because device does not support QubitUnitary.\")\n\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n qml.QubitUnitary(mat, wires=list(range(n_wires)))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"op, mat\", three_qubit)\n def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n \"\"\"Test three qubit gates without parameters.\"\"\"\n n_wires = 3\n dev = device(n_wires)\n\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n 
op(wires=[0, 1, 2])\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n\n@flaky(max_runs=10)\nclass TestInverseGatesQubit:\n \"\"\"Test the device's probability vector after application of inverse of gates.\"\"\"\n\n @pytest.mark.parametrize(\"op,mat\", single_qubit)\n def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n \"\"\"Test inverse single qubit gate application.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(1)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"gamma\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", single_qubit_param)\n def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):\n \"\"\"Test inverse single qubit gates taking one scalar parameter.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(gamma, wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = func(gamma)\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n def test_rotation(self, device, init_state, tol, skip_if):\n \"\"\"Test inverse three axis rotation gate.\"\"\"\n n_wires = 1\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(1)\n a = 0.542\n b = 1.3432\n c = -0.654\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n qml.Rot(a, b, c, wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = rot(a, b, c)\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"op,mat\", two_qubit)\n def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n \"\"\"Test inverse two qubit gates.\"\"\"\n n_wires = 2\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"gamma\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", two_qubit_param)\n def test_two_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):\n \"\"\"Test inverse of two qubit gates taking one parameter.\"\"\"\n n_wires = 2\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n 
rnd_state = init_state(2)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(gamma, wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = func(gamma)\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"mat\", [U, U2])\n def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):\n \"\"\"Test inverse QubitUnitary gate.\"\"\"\n n_wires = int(np.log2(len(mat)))\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n qml.QubitUnitary(mat, wires=list(range(n_wires))).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n\n @pytest.mark.parametrize(\"op, mat\", three_qubit)\n def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n \"\"\"Test inverse three qubit gates without parameters.\"\"\"\n n_wires = 3\n dev = device(n_wires)\n skip_if(dev, {\"supports_inverse_operations\": False})\n skip_if(dev, {\"returns_probs\": False})\n\n rnd_state = init_state(3)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires)).inv()\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n mat = mat.conj().T\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.zeros", "numpy.random.seed", "scipy.linalg.block_diag", "numpy.exp", "numpy.eye", "numpy.identity", "numpy.complex", "numpy.abs", "numpy.cos", "numpy.ravel_multi_index", "numpy.diag" ] ]
yhisaki/pfrl
[ "d89ddf66201bcfaaae6130bdee704d56ee4b7b76" ]
[ "examples/mujoco/reproduction/td3/train_td3.py" ]
[ "\"\"\"A training script of TD3 on OpenAI Gym Mujoco environments.\n\nThis script follows the settings of http://arxiv.org/abs/1802.09477 as much\nas possible.\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\n\nimport gym\nimport gym.wrappers\nimport numpy as np\nimport torch\nfrom torch import nn\n\nimport pfrl\nfrom pfrl import experiments, explorers, replay_buffers, utils\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--outdir\",\n type=str,\n default=\"results\",\n help=(\n \"Directory path to save output files.\"\n \" If it does not exist, it will be created.\"\n ),\n )\n parser.add_argument(\n \"--env\",\n type=str,\n default=\"Hopper-v2\",\n help=\"OpenAI Gym MuJoCo env to perform algorithm on.\",\n )\n parser.add_argument(\"--seed\", type=int, default=0, help=\"Random seed [0, 2 ** 32)\")\n parser.add_argument(\n \"--gpu\", type=int, default=0, help=\"GPU to use, set to -1 if no GPU.\"\n )\n parser.add_argument(\n \"--load\", type=str, default=\"\", help=\"Directory to load agent from.\"\n )\n parser.add_argument(\n \"--steps\",\n type=int,\n default=10**6,\n help=\"Total number of timesteps to train the agent.\",\n )\n parser.add_argument(\n \"--eval-n-runs\",\n type=int,\n default=10,\n help=\"Number of episodes run for each evaluation.\",\n )\n parser.add_argument(\n \"--eval-interval\",\n type=int,\n default=5000,\n help=\"Interval in timesteps between evaluations.\",\n )\n parser.add_argument(\n \"--replay-start-size\",\n type=int,\n default=10000,\n help=\"Minimum replay buffer size before \" + \"performing gradient updates.\",\n )\n parser.add_argument(\"--batch-size\", type=int, default=100, help=\"Minibatch size\")\n parser.add_argument(\n \"--render\", action=\"store_true\", help=\"Render env states in a GUI window.\"\n )\n parser.add_argument(\n \"--demo\", action=\"store_true\", help=\"Just run evaluation, not training.\"\n )\n parser.add_argument(\"--load-pretrained\", action=\"store_true\", default=False)\n parser.add_argument(\n \"--pretrained-type\", type=str, default=\"best\", choices=[\"best\", \"final\"]\n )\n parser.add_argument(\n \"--monitor\", action=\"store_true\", help=\"Wrap env with gym.wrappers.Monitor.\"\n )\n parser.add_argument(\n \"--log-level\", type=int, default=logging.INFO, help=\"Level of the root logger.\"\n )\n args = parser.parse_args()\n\n logging.basicConfig(level=args.log_level)\n\n args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)\n print(\"Output files are saved in {}\".format(args.outdir))\n\n # Set a random seed used in PFRL\n utils.set_random_seed(args.seed)\n\n def make_env(test):\n env = gym.make(args.env)\n # Unwrap TimeLimit wrapper\n assert isinstance(env, gym.wrappers.TimeLimit)\n env = env.env\n # Use different random seeds for train and test envs\n env_seed = 2**32 - 1 - args.seed if test else args.seed\n env.seed(env_seed)\n # Cast observations to float32 because our model uses float32\n env = pfrl.wrappers.CastObservationToFloat32(env)\n if args.monitor:\n env = pfrl.wrappers.Monitor(env, args.outdir)\n if args.render and not test:\n env = pfrl.wrappers.Render(env)\n return env\n\n env = make_env(test=False)\n timestep_limit = env.spec.max_episode_steps\n obs_space = env.observation_space\n action_space = env.action_space\n print(\"Observation space:\", obs_space)\n print(\"Action space:\", action_space)\n\n obs_size = obs_space.low.size\n action_size = action_space.low.size\n\n policy = nn.Sequential(\n nn.Linear(obs_size, 400),\n nn.ReLU(),\n 
nn.Linear(400, 300),\n nn.ReLU(),\n nn.Linear(300, action_size),\n nn.Tanh(),\n pfrl.policies.DeterministicHead(),\n )\n policy_optimizer = torch.optim.Adam(policy.parameters())\n\n def make_q_func_with_optimizer():\n q_func = nn.Sequential(\n pfrl.nn.ConcatObsAndAction(),\n nn.Linear(obs_size + action_size, 400),\n nn.ReLU(),\n nn.Linear(400, 300),\n nn.ReLU(),\n nn.Linear(300, 1),\n )\n q_func_optimizer = torch.optim.Adam(q_func.parameters())\n return q_func, q_func_optimizer\n\n q_func1, q_func1_optimizer = make_q_func_with_optimizer()\n q_func2, q_func2_optimizer = make_q_func_with_optimizer()\n\n rbuf = replay_buffers.ReplayBuffer(10**6)\n\n explorer = explorers.AdditiveGaussian(\n scale=0.1, low=action_space.low, high=action_space.high\n )\n\n def burnin_action_func():\n \"\"\"Select random actions until model is updated one or more times.\"\"\"\n return np.random.uniform(action_space.low, action_space.high).astype(np.float32)\n\n # Hyperparameters in http://arxiv.org/abs/1802.09477\n agent = pfrl.agents.TD3(\n policy,\n q_func1,\n q_func2,\n policy_optimizer,\n q_func1_optimizer,\n q_func2_optimizer,\n rbuf,\n gamma=0.99,\n soft_update_tau=5e-3,\n explorer=explorer,\n replay_start_size=args.replay_start_size,\n gpu=args.gpu,\n minibatch_size=args.batch_size,\n burnin_action_func=burnin_action_func,\n )\n\n if len(args.load) > 0 or args.load_pretrained:\n # either load or load_pretrained must be false\n assert not len(args.load) > 0 or not args.load_pretrained\n if len(args.load) > 0:\n agent.load(args.load)\n else:\n agent.load(\n utils.download_model(\"TD3\", args.env, model_type=args.pretrained_type)[\n 0\n ]\n )\n\n eval_env = make_env(test=True)\n if args.demo:\n eval_stats = experiments.eval_performance(\n env=eval_env,\n agent=agent,\n n_steps=None,\n n_episodes=args.eval_n_runs,\n max_episode_len=timestep_limit,\n )\n print(\n \"n_runs: {} mean: {} median: {} stdev {}\".format(\n args.eval_n_runs,\n eval_stats[\"mean\"],\n eval_stats[\"median\"],\n eval_stats[\"stdev\"],\n )\n )\n import json\n import os\n\n with open(os.path.join(args.outdir, \"demo_scores.json\"), \"w\") as f:\n json.dump(eval_stats, f)\n else:\n experiments.train_agent_with_evaluation(\n agent=agent,\n env=env,\n steps=args.steps,\n eval_env=eval_env,\n eval_n_steps=None,\n eval_n_episodes=args.eval_n_runs,\n eval_interval=args.eval_interval,\n outdir=args.outdir,\n train_max_episode_len=timestep_limit,\n )\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.Linear", "torch.nn.Tanh", "numpy.random.uniform", "torch.nn.ReLU" ] ]
cohenimhuji/HARK
[ "bb8549105ab979f853bd413d694f4a9b6572554e" ]
[ "HARK/interpolation.py" ]
[ "'''\nCustom interpolation methods for representing approximations to functions.\nIt also includes wrapper classes to enforce standard methods across classes.\nEach interpolation class must have a distance() method that compares itself to\nanother instance; this is used in HARK.core's solve() method to check for solution\nconvergence. The interpolator classes currently in this module inherit their\ndistance method from HARKobject.\n'''\nfrom __future__ import division, print_function\nfrom __future__ import absolute_import\nfrom builtins import range\nimport numpy as np\nfrom .core import HARKobject\nfrom copy import deepcopy\n\ndef _isscalar(x):\n '''\n Check whether x is if a scalar type, or 0-dim.\n\n Parameters\n ----------\n x : anything\n An input to be checked for scalar-ness.\n\n Returns\n -------\n is_scalar : boolean\n True if the input is a scalar, False otherwise.\n '''\n return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()\n\n\nclass HARKinterpolator1D(HARKobject):\n '''\n A wrapper class for 1D interpolation methods in HARK.\n '''\n distance_criteria = []\n\n def __call__(self,x):\n '''\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n\n Returns\n -------\n y : np.array or float\n The interpolated function evaluated at x: y = f(x), with the same\n shape as x.\n '''\n z = np.asarray(x)\n return (self._evaluate(z.flatten())).reshape(z.shape)\n\n def derivative(self,x):\n '''\n Evaluates the derivative of the interpolated function at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n\n Returns\n -------\n dydx : np.array or float\n The interpolated function's first derivative evaluated at x:\n dydx = f'(x), with the same shape as x.\n '''\n z = np.asarray(x)\n return (self._der(z.flatten())).reshape(z.shape)\n\n def eval_with_derivative(self,x):\n '''\n Evaluates the interpolated function and its derivative at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n\n Returns\n -------\n y : np.array or float\n The interpolated function evaluated at x: y = f(x), with the same\n shape as x.\n dydx : np.array or float\n The interpolated function's first derivative evaluated at x:\n dydx = f'(x), with the same shape as x.\n '''\n z = np.asarray(x)\n y, dydx = self._evalAndDer(z.flatten())\n return y.reshape(z.shape), dydx.reshape(z.shape)\n\n def _evaluate(self,x):\n '''\n Interpolated function evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _der(self,x):\n '''\n Interpolated function derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _evalAndDer(self,x):\n '''\n Interpolated function and derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n\nclass HARKinterpolator2D(HARKobject):\n '''\n A wrapper class for 2D interpolation methods in HARK.\n '''\n distance_criteria = []\n\n def __call__(self,x,y):\n '''\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n fxy : np.array or float\n The interpolated function evaluated at x,y: fxy = 
f(x,y), with the\n same shape as x and y.\n '''\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._evaluate(xa.flatten(),ya.flatten())).reshape(xa.shape)\n\n def derivativeX(self,x,y):\n '''\n Evaluates the partial derivative of interpolated function with respect\n to x (the first argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative of the interpolated function with respect to x, eval-\n uated at x,y: dfdx = f_x(x,y), with the same shape as x and y.\n '''\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._derX(xa.flatten(),ya.flatten())).reshape(xa.shape)\n\n def derivativeY(self,x,y):\n '''\n Evaluates the partial derivative of interpolated function with respect\n to y (the second argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative of the interpolated function with respect to y, eval-\n uated at x,y: dfdx = f_y(x,y), with the same shape as x and y.\n '''\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._derY(xa.flatten(),ya.flatten())).reshape(xa.shape)\n\n def _evaluate(self,x,y):\n '''\n Interpolated function evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derX(self,x,y):\n '''\n Interpolated function x-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derY(self,x,y):\n '''\n Interpolated function y-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n\nclass HARKinterpolator3D(HARKobject):\n '''\n A wrapper class for 3D interpolation methods in HARK.\n '''\n distance_criteria = []\n\n def __call__(self,x,y,z):\n '''\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n fxyz : np.array or float\n The interpolated function evaluated at x,y,z: fxyz = f(x,y,z), with\n the same shape as x, y, and z.\n '''\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._evaluate(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)\n\n def derivativeX(self,x,y,z):\n '''\n Evaluates the partial derivative of the interpolated function with respect\n to x (the first argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative with respect to x of the interpolated function evaluated\n at x,y,z: dfdx = f_x(x,y,z), with the same shape as x, y, and z.\n '''\n xa = np.asarray(x)\n ya 
= np.asarray(y)\n za = np.asarray(z)\n return (self._derX(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)\n\n def derivativeY(self,x,y,z):\n '''\n Evaluates the partial derivative of the interpolated function with respect\n to y (the second argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative with respect to y of the interpolated function evaluated\n at x,y,z: dfdy = f_y(x,y,z), with the same shape as x, y, and z.\n '''\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._derY(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)\n\n def derivativeZ(self,x,y,z):\n '''\n Evaluates the partial derivative of the interpolated function with respect\n to z (the third argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdz : np.array or float\n The derivative with respect to z of the interpolated function evaluated\n at x,y,z: dfdz = f_z(x,y,z), with the same shape as x, y, and z.\n '''\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._derZ(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)\n\n def _evaluate(self,x,y,z):\n '''\n Interpolated function evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derX(self,x,y,z):\n '''\n Interpolated function x-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derY(self,x,y,z):\n '''\n Interpolated function y-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derZ(self,x,y,z):\n '''\n Interpolated function y-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n\nclass HARKinterpolator4D(HARKobject):\n '''\n A wrapper class for 4D interpolation methods in HARK.\n '''\n distance_criteria = []\n\n def __call__(self,w,x,y,z):\n '''\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n fwxyz : np.array or float\n The interpolated function evaluated at w,x,y,z: fwxyz = f(w,x,y,z),\n with the same shape as w, x, y, and z.\n '''\n wa = np.asarray(w)\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._evaluate(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)\n\n def derivativeW(self,w,x,y,z):\n '''\n Evaluates the partial derivative with respect to w (the first argument)\n of the interpolated 
function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdw : np.array or float\n The derivative with respect to w of the interpolated function eval-\n uated at w,x,y,z: dfdw = f_w(w,x,y,z), with the same shape as inputs.\n '''\n wa = np.asarray(w)\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._derW(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)\n\n def derivativeX(self,w,x,y,z):\n '''\n Evaluates the partial derivative with respect to x (the second argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative with respect to x of the interpolated function eval-\n uated at w,x,y,z: dfdx = f_x(w,x,y,z), with the same shape as inputs.\n '''\n wa = np.asarray(w)\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._derX(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)\n\n def derivativeY(self,w,x,y,z):\n '''\n Evaluates the partial derivative with respect to y (the third argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative with respect to y of the interpolated function eval-\n uated at w,x,y,z: dfdy = f_y(w,x,y,z), with the same shape as inputs.\n '''\n wa = np.asarray(w)\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._derY(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)\n\n def derivativeZ(self,w,x,y,z):\n '''\n Evaluates the partial derivative with respect to z (the fourth argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdz : np.array or float\n The derivative with respect to z of 
the interpolated function eval-\n uated at w,x,y,z: dfdz = f_z(w,x,y,z), with the same shape as inputs.\n '''\n wa = np.asarray(w)\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (self._derZ(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)\n\n def _evaluate(self,w,x,y,z):\n '''\n Interpolated function evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derW(self,w,x,y,z):\n '''\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derX(self,w,x,y,z):\n '''\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derY(self,w,x,y,z):\n '''\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n def _derZ(self,w,x,y,z):\n '''\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '''\n raise NotImplementedError()\n\n\nclass IdentityFunction(HARKobject):\n '''\n A fairly trivial interpolator that simply returns one of its arguments. Useful for avoiding\n numeric error in extreme cases.\n '''\n distance_criteria = ['i_dim']\n\n def __init__(self,i_dim=0,n_dims=1):\n '''\n Constructor for a new IdentityFunction.\n\n Parameters\n ----------\n i_dim : int\n Index of the dimension on which the identity is defined. f(*x) = x[i]\n n_dims : int\n Total number of input dimensions for this function.\n\n Returns\n -------\n None\n '''\n self.i_dim = i_dim\n self.n_dims = n_dims\n\n def __call__(self,*args):\n '''\n Evaluate the identity function.\n '''\n return args[self.i_dim]\n\n def derivative(self,*args):\n '''\n Returns the derivative of the function with respect to the first dimension.\n '''\n if self.i_dim == 0:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])\n\n def derivativeX(self,*args):\n '''\n Returns the derivative of the function with respect to the X dimension.\n This is the first input whenever n_dims < 4 and the second input otherwise.\n '''\n if self.n_dims >= 4:\n j = 1\n else:\n j = 0\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])\n\n def derivativeY(self,*args):\n '''\n Returns the derivative of the function with respect to the Y dimension.\n This is the second input whenever n_dims < 4 and the third input otherwise.\n '''\n if self.n_dims >= 4:\n j = 2\n else:\n j = 1\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])\n\n def derivativeZ(self,*args):\n '''\n Returns the derivative of the function with respect to the Z dimension.\n This is the third input whenever n_dims < 4 and the fourth input otherwise.\n '''\n if self.n_dims >= 4:\n j = 3\n else:\n j = 2\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])\n\n def derivativeW(self,*args):\n '''\n Returns the derivative of the function with respect to the W dimension.\n This should only exist when n_dims >= 4.\n '''\n if self.n_dims >= 4:\n j = 0\n else:\n assert False, \"Derivative with respect to W can't be called when n_dims < 4!\"\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])\n\n\nclass ConstantFunction(HARKobject):\n '''\n A class for representing trivial functions that return the same real output for any input. 
This\n is convenient for models where an object might be a (non-trivial) function, but in some variations\n that object is just a constant number. Rather than needing to make a (Bi/Tri/Quad)-\n LinearInterpolation with trivial state grids and the same f_value in every entry, ConstantFunction\n allows the user to quickly make a constant/trivial function. This comes up, e.g., in models\n with endogenous pricing of insurance contracts; a contract's premium might depend on some state\n variables of the individual, but in some variations the premium of a contract is just a number.\n '''\n convergence_criteria = ['value']\n\n def __init__(self,value):\n '''\n Make a new ConstantFunction object.\n\n Parameters\n ----------\n value : float\n The constant value that the function returns.\n\n Returns\n -------\n None\n '''\n self.value = float(value)\n\n def __call__(self,*args):\n '''\n Evaluate the constant function. The first input must exist and should be an array.\n Returns an array of identical shape to args[0] (if it exists).\n '''\n if len(args) > 0: # If there is at least one argument, return appropriately sized array\n if _isscalar(args[0]):\n return self.value\n else:\n shape = args[0].shape\n return self.value*np.ones(shape)\n else: # Otherwise, return a single instance of the constant value\n return self.value\n\n def _der(self,*args):\n '''\n Evaluate the derivative of the function. The first input must exist and should be an array.\n Returns an array of identical shape to args[0] (if it exists). This is an array of zeros.\n '''\n if len(args) > 0:\n if _isscalar(args[0]):\n return 0.0\n else:\n shape = args[0].shape\n return np.zeros(shape)\n else:\n return 0.0\n\n # All other derivatives are also zero everywhere, so these methods just point to derivative\n derivative = _der\n derivativeX = derivative\n derivativeY = derivative\n derivativeZ = derivative\n derivativeW = derivative\n derivativeXX= derivative\n\n\nclass LinearInterp(HARKinterpolator1D):\n '''\n A \"from scratch\" 1D linear interpolation class. Allows for linear or decay\n extrapolation (approaching a limiting linear function from below).\n '''\n distance_criteria = ['x_list','y_list']\n\n def __init__(self,x_list,y_list,intercept_limit=None,slope_limit=None,lower_extrap=False):\n '''\n The interpolation constructor to make a new linear spline interpolation.\n\n Parameters\n ----------\n x_list : np.array\n List of x values composing the grid.\n y_list : np.array\n List of y values, representing f(x) at the points in x_list.\n intercept_limit : float\n Intercept of limiting linear function.\n slope_limit : float\n Slope of limiting linear function.\n lower_extrap : boolean\n Indicator for whether lower extrapolation is allowed. 
False means\n f(x) = NaN for x < min(x_list); True means linear extrapolation.\n\n Returns\n -------\n new instance of LinearInterp\n\n NOTE: When no input is given for the limiting linear function, linear\n extrapolation is used above the highest gridpoint.\n '''\n # Make the basic linear spline interpolation\n self.x_list = np.array(x_list)\n self.y_list = np.array(y_list)\n self.lower_extrap = lower_extrap\n self.x_n = self.x_list.size\n\n # Make a decay extrapolation\n if intercept_limit is not None and slope_limit is not None:\n slope_at_top = (y_list[-1] - y_list[-2])/(x_list[-1] - x_list[-2])\n level_diff = intercept_limit + slope_limit*x_list[-1] - y_list[-1]\n slope_diff = slope_limit - slope_at_top\n\n self.decay_extrap_A = level_diff\n self.decay_extrap_B = -slope_diff/level_diff\n self.intercept_limit = intercept_limit\n self.slope_limit = slope_limit\n self.decay_extrap = True\n else:\n self.decay_extrap = False\n\n\n def _evalOrDer(self,x,_eval,_Der):\n '''\n Returns the level and/or first derivative of the function at each value in\n x. Only called internally by HARKinterpolator1D.eval_and_der (etc).\n\n Parameters\n ----------\n x_list : scalar or np.array\n Set of points where we want to evlauate the interpolated function and/or its derivative..\n _eval : boolean\n Indicator for whether to evalute the level of the interpolated function.\n _Der : boolean\n Indicator for whether to evaluate the derivative of the interpolated function.\n\n Returns\n -------\n A list including the level and/or derivative of the interpolated function where requested.\n '''\n\n\n\n i = np.maximum(np.searchsorted(self.x_list[:-1],x),1)\n alpha = (x-self.x_list[i-1])/(self.x_list[i]-self.x_list[i-1])\n\n if _eval:\n y = (1.-alpha)*self.y_list[i-1] + alpha*self.y_list[i]\n if _Der:\n dydx = (self.y_list[i] - self.y_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n\n if not self.lower_extrap:\n below_lower_bound = x < self.x_list[0]\n\n if _eval:\n y[below_lower_bound] = np.nan\n if _Der:\n dydx[below_lower_bound] = np.nan\n\n if self.decay_extrap:\n above_upper_bound = x > self.x_list[-1]\n x_temp = x[above_upper_bound] - self.x_list[-1]\n\n if _eval:\n y[above_upper_bound] = self.intercept_limit + \\\n self.slope_limit*x[above_upper_bound] - \\\n self.decay_extrap_A*np.exp(-self.decay_extrap_B*x_temp)\n\n if _Der:\n dydx[above_upper_bound] = self.slope_limit + \\\n self.decay_extrap_B*self.decay_extrap_A*\\\n np.exp(-self.decay_extrap_B*x_temp)\n\n output = []\n if _eval:\n output += [y,]\n if _Der:\n output += [dydx,]\n\n return output\n\n def _evaluate(self,x,return_indices = False):\n '''\n Returns the level of the interpolated function at each value in x. Only\n called internally by HARKinterpolator1D.__call__ (etc).\n '''\n return self._evalOrDer(x,True,False)[0]\n\n def _der(self,x):\n '''\n Returns the first derivative of the interpolated function at each value\n in x. Only called internally by HARKinterpolator1D.derivative (etc).\n '''\n return self._evalOrDer(x,False,True)[0]\n\n def _evalAndDer(self,x):\n '''\n Returns the level and first derivative of the function at each value in\n x. Only called internally by HARKinterpolator1D.eval_and_der (etc).\n '''\n y,dydx = self._evalOrDer(x,True,True)\n\n return y,dydx\n\n\n\n\nclass CubicInterp(HARKinterpolator1D):\n '''\n An interpolating function using piecewise cubic splines. 
Matches level and\n slope of 1D function at gridpoints, smoothly interpolating in between.\n Extrapolation above highest gridpoint approaches a limiting linear function\n if desired (linear extrapolation also enabled.)\n '''\n distance_criteria = ['x_list','y_list','dydx_list']\n\n def __init__(self,x_list,y_list,dydx_list,intercept_limit=None,slope_limit=None,lower_extrap=False):\n '''\n The interpolation constructor to make a new cubic spline interpolation.\n\n Parameters\n ----------\n x_list : np.array\n List of x values composing the grid.\n y_list : np.array\n List of y values, representing f(x) at the points in x_list.\n dydx_list : np.array\n List of dydx values, representing f'(x) at the points in x_list\n intercept_limit : float\n Intercept of limiting linear function.\n slope_limit : float\n Slope of limiting linear function.\n lower_extrap : boolean\n Indicator for whether lower extrapolation is allowed. False means\n f(x) = NaN for x < min(x_list); True means linear extrapolation.\n\n Returns\n -------\n new instance of CubicInterp\n\n NOTE: When no input is given for the limiting linear function, linear\n extrapolation is used above the highest gridpoint.\n '''\n self.x_list = np.asarray(x_list)\n self.y_list = np.asarray(y_list)\n self.dydx_list = np.asarray(dydx_list)\n self.n = len(x_list)\n\n # Define lower extrapolation as linear function (or just NaN)\n if lower_extrap:\n self.coeffs = [[y_list[0],dydx_list[0],0,0]]\n else:\n self.coeffs = [[np.nan,np.nan,np.nan,np.nan]]\n\n # Calculate interpolation coefficients on segments mapped to [0,1]\n for i in range(self.n-1):\n x0 = x_list[i]\n y0 = y_list[i]\n x1 = x_list[i+1]\n y1 = y_list[i+1]\n Span = x1 - x0\n dydx0 = dydx_list[i]*Span\n dydx1 = dydx_list[i+1]*Span\n\n temp = [y0, dydx0, 3*(y1 - y0) - 2*dydx0 - dydx1, 2*(y0 - y1) + dydx0 + dydx1];\n self.coeffs.append(temp)\n\n # Calculate extrapolation coefficients as a decay toward limiting function y = mx+b\n if slope_limit is None and intercept_limit is None:\n slope_limit = dydx_list[-1]\n intercept_limit = y_list[-1] - slope_limit*x_list[-1]\n gap = slope_limit*x1 + intercept_limit - y1\n slope = slope_limit - dydx_list[self.n-1]\n if (gap != 0) and (slope <= 0):\n temp = [intercept_limit, slope_limit, gap, slope/gap]\n elif slope > 0:\n temp = [intercept_limit, slope_limit, 0, 0] # fixing a problem when slope is positive\n else:\n temp = [intercept_limit, slope_limit, gap, 0]\n self.coeffs.append(temp)\n self.coeffs = np.array(self.coeffs)\n\n def _evaluate(self,x):\n '''\n Returns the level of the interpolated function at each value in x. 
Only\n called internally by HARKinterpolator1D.__call__ (etc).\n '''\n if _isscalar(x):\n pos = np.searchsorted(self.x_list,x)\n if pos == 0:\n y = self.coeffs[0,0] + self.coeffs[0,1]*(x - self.x_list[0])\n elif (pos < self.n):\n alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])\n y = self.coeffs[pos,0] + alpha*(self.coeffs[pos,1] + alpha*(self.coeffs[pos,2] + alpha*self.coeffs[pos,3]))\n else:\n alpha = x - self.x_list[self.n-1]\n y = self.coeffs[pos,0] + x*self.coeffs[pos,1] - self.coeffs[pos,2]*np.exp(alpha*self.coeffs[pos,3])\n else:\n m = len(x)\n pos = np.searchsorted(self.x_list,x)\n y = np.zeros(m)\n if y.size > 0:\n out_bot = pos == 0\n out_top = pos == self.n\n in_bnds = np.logical_not(np.logical_or(out_bot, out_top))\n\n # Do the \"in bounds\" evaluation points\n i = pos[in_bnds]\n coeffs_in = self.coeffs[i,:]\n alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n y[in_bnds] = coeffs_in[:,0] + alpha*(coeffs_in[:,1] + alpha*(coeffs_in[:,2] + alpha*coeffs_in[:,3]))\n\n # Do the \"out of bounds\" evaluation points\n y[out_bot] = self.coeffs[0,0] + self.coeffs[0,1]*(x[out_bot] - self.x_list[0])\n alpha = x[out_top] - self.x_list[self.n-1]\n y[out_top] = self.coeffs[self.n,0] + x[out_top]*self.coeffs[self.n,1] - self.coeffs[self.n,2]*np.exp(alpha*self.coeffs[self.n,3])\n return y\n\n def _der(self,x):\n '''\n Returns the first derivative of the interpolated function at each value\n in x. Only called internally by HARKinterpolator1D.derivative (etc).\n '''\n if _isscalar(x):\n pos = np.searchsorted(self.x_list,x)\n if pos == 0:\n dydx = self.coeffs[0,1]\n elif (pos < self.n):\n alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])\n dydx = (self.coeffs[pos,1] + alpha*(2*self.coeffs[pos,2] + alpha*3*self.coeffs[pos,3]))/(self.x_list[pos] - self.x_list[pos-1])\n else:\n alpha = x - self.x_list[self.n-1]\n dydx = self.coeffs[pos,1] - self.coeffs[pos,2]*self.coeffs[pos,3]*np.exp(alpha*self.coeffs[pos,3])\n else:\n m = len(x)\n pos = np.searchsorted(self.x_list,x)\n dydx = np.zeros(m)\n if dydx.size > 0:\n out_bot = pos == 0\n out_top = pos == self.n\n in_bnds = np.logical_not(np.logical_or(out_bot, out_top))\n\n # Do the \"in bounds\" evaluation points\n i = pos[in_bnds]\n coeffs_in = self.coeffs[i,:]\n alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n dydx[in_bnds] = (coeffs_in[:,1] + alpha*(2*coeffs_in[:,2] + alpha*3*coeffs_in[:,3]))/(self.x_list[i] - self.x_list[i-1])\n\n # Do the \"out of bounds\" evaluation points\n dydx[out_bot] = self.coeffs[0,1]\n alpha = x[out_top] - self.x_list[self.n-1]\n dydx[out_top] = self.coeffs[self.n,1] - self.coeffs[self.n,2]*self.coeffs[self.n,3]*np.exp(alpha*self.coeffs[self.n,3])\n return dydx\n\n\n def _evalAndDer(self,x):\n '''\n Returns the level and first derivative of the function at each value in\n x. 
Only called internally by HARKinterpolator1D.eval_and_der (etc).\n '''\n if _isscalar(x):\n pos = np.searchsorted(self.x_list,x)\n if pos == 0:\n y = self.coeffs[0,0] + self.coeffs[0,1]*(x - self.x_list[0])\n dydx = self.coeffs[0,1]\n elif (pos < self.n):\n alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])\n y = self.coeffs[pos,0] + alpha*(self.coeffs[pos,1] + alpha*(self.coeffs[pos,2] + alpha*self.coeffs[pos,3]))\n dydx = (self.coeffs[pos,1] + alpha*(2*self.coeffs[pos,2] + alpha*3*self.coeffs[pos,3]))/(self.x_list[pos] - self.x_list[pos-1])\n else:\n alpha = x - self.x_list[self.n-1]\n y = self.coeffs[pos,0] + x*self.coeffs[pos,1] - self.coeffs[pos,2]*np.exp(alpha*self.coeffs[pos,3])\n dydx = self.coeffs[pos,1] - self.coeffs[pos,2]*self.coeffs[pos,3]*np.exp(alpha*self.coeffs[pos,3])\n else:\n m = len(x)\n pos = np.searchsorted(self.x_list,x)\n y = np.zeros(m)\n dydx = np.zeros(m)\n if y.size > 0:\n out_bot = pos == 0\n out_top = pos == self.n\n in_bnds = np.logical_not(np.logical_or(out_bot, out_top))\n\n # Do the \"in bounds\" evaluation points\n i = pos[in_bnds]\n coeffs_in = self.coeffs[i,:]\n alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n y[in_bnds] = coeffs_in[:,0] + alpha*(coeffs_in[:,1] + alpha*(coeffs_in[:,2] + alpha*coeffs_in[:,3]))\n dydx[in_bnds] = (coeffs_in[:,1] + alpha*(2*coeffs_in[:,2] + alpha*3*coeffs_in[:,3]))/(self.x_list[i] - self.x_list[i-1])\n\n # Do the \"out of bounds\" evaluation points\n y[out_bot] = self.coeffs[0,0] + self.coeffs[0,1]*(x[out_bot] - self.x_list[0])\n dydx[out_bot] = self.coeffs[0,1]\n alpha = x[out_top] - self.x_list[self.n-1]\n y[out_top] = self.coeffs[self.n,0] + x[out_top]*self.coeffs[self.n,1] - self.coeffs[self.n,2]*np.exp(alpha*self.coeffs[self.n,3])\n dydx[out_top] = self.coeffs[self.n,1] - self.coeffs[self.n,2]*self.coeffs[self.n,3]*np.exp(alpha*self.coeffs[self.n,3])\n return y, dydx\n\n\n\nclass BilinearInterp(HARKinterpolator2D):\n '''\n Bilinear full (or tensor) grid interpolation of a function f(x,y).\n '''\n distance_criteria = ['x_list','y_list','f_values']\n\n def __init__(self,f_values,x_list,y_list,xSearchFunc=None,ySearchFunc=None):\n '''\n Constructor to make a new bilinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (x_n,y_n) such that f_values[i,j] = f(x_list[i],y_list[j])\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). 
Default is np.searchsorted\n\n Returns\n -------\n new instance of BilinearInterp\n '''\n self.f_values = f_values\n self.x_list = x_list\n self.y_list = y_list\n self.x_n = x_list.size\n self.y_n = y_list.size\n if xSearchFunc is None:\n xSearchFunc = np.searchsorted\n if ySearchFunc is None:\n ySearchFunc = np.searchsorted\n self.xSearchFunc = xSearchFunc\n self.ySearchFunc = ySearchFunc\n\n def _evaluate(self,x,y):\n '''\n Returns the level of the interpolated function at each value in x,y.\n Only called internally by HARKinterpolator2D.__call__ (etc).\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n f = (\n (1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1]\n + (1-alpha)*beta*self.f_values[x_pos-1,y_pos]\n + alpha*(1-beta)*self.f_values[x_pos,y_pos-1]\n + alpha*beta*self.f_values[x_pos,y_pos])\n return f\n\n def _derX(self,x,y):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n dfdx = (\n ((1-beta)*self.f_values[x_pos,y_pos-1]\n + beta*self.f_values[x_pos,y_pos]) -\n ((1-beta)*self.f_values[x_pos-1,y_pos-1]\n + beta*self.f_values[x_pos-1,y_pos]))/(self.x_list[x_pos] - self.x_list[x_pos-1])\n return dfdx\n\n def _derY(self,x,y):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in x,y. 
Only called internally by HARKinterpolator2D.derivativeY.\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n dfdy = (\n ((1-alpha)*self.f_values[x_pos-1,y_pos]\n + alpha*self.f_values[x_pos,y_pos]) -\n ((1-alpha)*self.f_values[x_pos-1,y_pos-1]\n + alpha*self.f_values[x_pos,y_pos-1]))/(self.y_list[y_pos] - self.y_list[y_pos-1])\n return dfdy\n\n\nclass TrilinearInterp(HARKinterpolator3D):\n '''\n Trilinear full (or tensor) grid interpolation of a function f(x,y,z).\n '''\n distance_criteria = ['f_values','x_list','y_list','z_list']\n\n def __init__(self,f_values,x_list,y_list,z_list,xSearchFunc=None,ySearchFunc=None,zSearchFunc=None):\n '''\n Constructor to make a new trilinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (x_n,y_n,z_n) such that f_values[i,j,k] =\n f(x_list[i],y_list[j],z_list[k])\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n z_list : numpy.array\n An array of z values, with length designated z_n.\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). Default is np.searchsorted\n zSearchFunc : function\n An optional function that returns the reference location for z values:\n indices = zSearchFunc(z_list,z). 
Default is np.searchsorted\n\n Returns\n -------\n new instance of TrilinearInterp\n '''\n self.f_values = f_values\n self.x_list = x_list\n self.y_list = y_list\n self.z_list = z_list\n self.x_n = x_list.size\n self.y_n = y_list.size\n self.z_n = z_list.size\n if xSearchFunc is None:\n xSearchFunc = np.searchsorted\n if ySearchFunc is None:\n ySearchFunc = np.searchsorted\n if zSearchFunc is None:\n zSearchFunc = np.searchsorted\n self.xSearchFunc = xSearchFunc\n self.ySearchFunc = ySearchFunc\n self.zSearchFunc = zSearchFunc\n\n def _evaluate(self,x,y,z):\n '''\n Returns the level of the interpolated function at each value in x,y,z.\n Only called internally by HARKinterpolator3D.__call__ (etc).\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n f = (\n (1-alpha)*(1-beta)*(1-gamma)*self.f_values[x_pos-1,y_pos-1,z_pos-1]\n + (1-alpha)*(1-beta)*gamma*self.f_values[x_pos-1,y_pos-1,z_pos]\n + (1-alpha)*beta*(1-gamma)*self.f_values[x_pos-1,y_pos,z_pos-1]\n + (1-alpha)*beta*gamma*self.f_values[x_pos-1,y_pos,z_pos]\n + alpha*(1-beta)*(1-gamma)*self.f_values[x_pos,y_pos-1,z_pos-1]\n + alpha*(1-beta)*gamma*self.f_values[x_pos,y_pos-1,z_pos]\n + alpha*beta*(1-gamma)*self.f_values[x_pos,y_pos,z_pos-1]\n + alpha*beta*gamma*self.f_values[x_pos,y_pos,z_pos])\n return f\n\n def _derX(self,x,y,z):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in x,y,z. 
Only called internally by HARKinterpolator3D.derivativeX.\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdx = (\n ( (1-beta)*(1-gamma)*self.f_values[x_pos,y_pos-1,z_pos-1]\n + (1-beta)*gamma*self.f_values[x_pos,y_pos-1,z_pos]\n + beta*(1-gamma)*self.f_values[x_pos,y_pos,z_pos-1]\n + beta*gamma*self.f_values[x_pos,y_pos,z_pos]) -\n ( (1-beta)*(1-gamma)*self.f_values[x_pos-1,y_pos-1,z_pos-1]\n + (1-beta)*gamma*self.f_values[x_pos-1,y_pos-1,z_pos]\n + beta*(1-gamma)*self.f_values[x_pos-1,y_pos,z_pos-1]\n + beta*gamma*self.f_values[x_pos-1,y_pos,z_pos]))/(self.x_list[x_pos] - self.x_list[x_pos-1])\n return dfdx\n\n def _derY(self,x,y,z):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdy = (\n ( (1-alpha)*(1-gamma)*self.f_values[x_pos-1,y_pos,z_pos-1]\n + (1-alpha)*gamma*self.f_values[x_pos-1,y_pos,z_pos]\n + alpha*(1-gamma)*self.f_values[x_pos,y_pos,z_pos-1]\n + alpha*gamma*self.f_values[x_pos,y_pos,z_pos]) -\n ( (1-alpha)*(1-gamma)*self.f_values[x_pos-1,y_pos-1,z_pos-1]\n + (1-alpha)*gamma*self.f_values[x_pos-1,y_pos-1,z_pos]\n + alpha*(1-gamma)*self.f_values[x_pos,y_pos-1,z_pos-1]\n + alpha*gamma*self.f_values[x_pos,y_pos-1,z_pos]))/(self.y_list[y_pos] - self.y_list[y_pos-1])\n return dfdy\n\n def _derZ(self,x,y,z):\n '''\n Returns the derivative with respect to z of the interpolated function\n at each value in x,y,z. 
Only called internally by HARKinterpolator3D.derivativeZ.\n '''\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n dfdz = (\n ( (1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1,z_pos]\n + (1-alpha)*beta*self.f_values[x_pos-1,y_pos,z_pos]\n + alpha*(1-beta)*self.f_values[x_pos,y_pos-1,z_pos]\n + alpha*beta*self.f_values[x_pos,y_pos,z_pos]) -\n ( (1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1,z_pos-1]\n + (1-alpha)*beta*self.f_values[x_pos-1,y_pos,z_pos-1]\n + alpha*(1-beta)*self.f_values[x_pos,y_pos-1,z_pos-1]\n + alpha*beta*self.f_values[x_pos,y_pos,z_pos-1]))/(self.z_list[z_pos] - self.z_list[z_pos-1])\n return dfdz\n\n\nclass QuadlinearInterp(HARKinterpolator4D):\n '''\n Quadlinear full (or tensor) grid interpolation of a function f(w,x,y,z).\n '''\n distance_criteria = ['f_values','w_list','x_list','y_list','z_list']\n\n def __init__(self,f_values,w_list,x_list,y_list,z_list,wSearchFunc=None,xSearchFunc=None,ySearchFunc=None,zSearchFunc=None):\n '''\n Constructor to make a new quadlinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (w_n,x_n,y_n,z_n) such that f_values[i,j,k,l] =\n f(w_list[i],x_list[j],y_list[k],z_list[l])\n w_list : numpy.array\n An array of x values, with length designated w_n.\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n z_list : numpy.array\n An array of z values, with length designated z_n.\n wSearchFunc : function\n An optional function that returns the reference location for w values:\n indices = wSearchFunc(w_list,w). Default is np.searchsorted\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). Default is np.searchsorted\n zSearchFunc : function\n An optional function that returns the reference location for z values:\n indices = zSearchFunc(z_list,z). 
Default is np.searchsorted\n\n Returns\n -------\n new instance of QuadlinearInterp\n '''\n self.f_values = f_values\n self.w_list = w_list\n self.x_list = x_list\n self.y_list = y_list\n self.z_list = z_list\n self.w_n = w_list.size\n self.x_n = x_list.size\n self.y_n = y_list.size\n self.z_n = z_list.size\n if wSearchFunc is None:\n wSearchFunc = np.searchsorted\n if xSearchFunc is None:\n xSearchFunc = np.searchsorted\n if ySearchFunc is None:\n ySearchFunc = np.searchsorted\n if zSearchFunc is None:\n zSearchFunc = np.searchsorted\n self.wSearchFunc = wSearchFunc\n self.xSearchFunc = xSearchFunc\n self.ySearchFunc = ySearchFunc\n self.zSearchFunc = zSearchFunc\n\n def _evaluate(self,w,x,y,z):\n '''\n Returns the level of the interpolated function at each value in x,y,z.\n Only called internally by HARKinterpolator4D.__call__ (etc).\n '''\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n w_pos = self.wSearchFunc(self.w_list,w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n-1] = self.w_n-1\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])\n beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])\n gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])\n delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])\n f = (\n (1-alpha)*((1-beta)*((1-gamma)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]\n + (1-gamma)*delta*self.f_values[i-1,j-1,k-1,l]\n + gamma*(1-delta)*self.f_values[i-1,j-1,k,l-1]\n + gamma*delta*self.f_values[i-1,j-1,k,l])\n + beta*((1-gamma)*(1-delta)*self.f_values[i-1,j,k-1,l-1]\n + (1-gamma)*delta*self.f_values[i-1,j,k-1,l]\n + gamma*(1-delta)*self.f_values[i-1,j,k,l-1]\n + gamma*delta*self.f_values[i-1,j,k,l]))\n + alpha*((1-beta)*((1-gamma)*(1-delta)*self.f_values[i,j-1,k-1,l-1]\n + (1-gamma)*delta*self.f_values[i,j-1,k-1,l]\n + gamma*(1-delta)*self.f_values[i,j-1,k,l-1]\n + gamma*delta*self.f_values[i,j-1,k,l])\n + beta*((1-gamma)*(1-delta)*self.f_values[i,j,k-1,l-1]\n + (1-gamma)*delta*self.f_values[i,j,k-1,l]\n + gamma*(1-delta)*self.f_values[i,j,k,l-1]\n + gamma*delta*self.f_values[i,j,k,l])))\n return f\n\n def _derW(self,w,x,y,z):\n '''\n Returns the derivative with respect to w of the interpolated function\n at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeW.\n '''\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n w_pos = self.wSearchFunc(self.w_list,w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n-1] = self.w_n-1\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])\n gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])\n delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])\n dfdw = (\n ( (1-beta)*(1-gamma)*(1-delta)*self.f_values[i,j-1,k-1,l-1]\n + (1-beta)*(1-gamma)*delta*self.f_values[i,j-1,k-1,l]\n + (1-beta)*gamma*(1-delta)*self.f_values[i,j-1,k,l-1]\n + (1-beta)*gamma*delta*self.f_values[i,j-1,k,l]\n + beta*(1-gamma)*(1-delta)*self.f_values[i,j,k-1,l-1]\n + beta*(1-gamma)*delta*self.f_values[i,j,k-1,l]\n + beta*gamma*(1-delta)*self.f_values[i,j,k,l-1]\n + beta*gamma*delta*self.f_values[i,j,k,l] ) -\n ( (1-beta)*(1-gamma)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]\n + (1-beta)*(1-gamma)*delta*self.f_values[i-1,j-1,k-1,l]\n + (1-beta)*gamma*(1-delta)*self.f_values[i-1,j-1,k,l-1]\n + (1-beta)*gamma*delta*self.f_values[i-1,j-1,k,l]\n + beta*(1-gamma)*(1-delta)*self.f_values[i-1,j,k-1,l-1]\n + beta*(1-gamma)*delta*self.f_values[i-1,j,k-1,l]\n + beta*gamma*(1-delta)*self.f_values[i-1,j,k,l-1]\n + beta*gamma*delta*self.f_values[i-1,j,k,l] )\n )/(self.w_list[i] - self.w_list[i-1])\n return dfdw\n\n def _derX(self,w,x,y,z):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeX.\n '''\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n w_pos = self.wSearchFunc(self.w_list,w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n-1] = self.w_n-1\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])\n gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])\n delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])\n dfdx = (\n ( (1-alpha)*(1-gamma)*(1-delta)*self.f_values[i-1,j,k-1,l-1]\n + (1-alpha)*(1-gamma)*delta*self.f_values[i-1,j,k-1,l]\n + (1-alpha)*gamma*(1-delta)*self.f_values[i-1,j,k,l-1]\n + (1-alpha)*gamma*delta*self.f_values[i-1,j,k,l]\n + alpha*(1-gamma)*(1-delta)*self.f_values[i,j,k-1,l-1]\n + alpha*(1-gamma)*delta*self.f_values[i,j,k-1,l]\n + alpha*gamma*(1-delta)*self.f_values[i,j,k,l-1]\n + alpha*gamma*delta*self.f_values[i,j,k,l] ) -\n ( (1-alpha)*(1-gamma)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]\n + (1-alpha)*(1-gamma)*delta*self.f_values[i-1,j-1,k-1,l]\n + (1-alpha)*gamma*(1-delta)*self.f_values[i-1,j-1,k,l-1]\n + (1-alpha)*gamma*delta*self.f_values[i-1,j-1,k,l]\n + alpha*(1-gamma)*(1-delta)*self.f_values[i,j-1,k-1,l-1]\n + alpha*(1-gamma)*delta*self.f_values[i,j-1,k-1,l]\n + alpha*gamma*(1-delta)*self.f_values[i,j-1,k,l-1]\n + alpha*gamma*delta*self.f_values[i,j-1,k,l] )\n )/(self.x_list[j] - self.x_list[j-1])\n return dfdx\n\n def _derY(self,w,x,y,z):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeY.\n '''\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n w_pos = self.wSearchFunc(self.w_list,w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n-1] = self.w_n-1\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])\n beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])\n delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])\n dfdy = (\n ( (1-alpha)*(1-beta)*(1-delta)*self.f_values[i-1,j-1,k,l-1]\n + (1-alpha)*(1-beta)*delta*self.f_values[i-1,j-1,k,l]\n + (1-alpha)*beta*(1-delta)*self.f_values[i-1,j,k,l-1]\n + (1-alpha)*beta*delta*self.f_values[i-1,j,k,l]\n + alpha*(1-beta)*(1-delta)*self.f_values[i,j-1,k,l-1]\n + alpha*(1-beta)*delta*self.f_values[i,j-1,k,l]\n + alpha*beta*(1-delta)*self.f_values[i,j,k,l-1]\n + alpha*beta*delta*self.f_values[i,j,k,l] ) -\n ( (1-alpha)*(1-beta)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]\n + (1-alpha)*(1-beta)*delta*self.f_values[i-1,j-1,k-1,l]\n + (1-alpha)*beta*(1-delta)*self.f_values[i-1,j,k-1,l-1]\n + (1-alpha)*beta*delta*self.f_values[i-1,j,k-1,l]\n + alpha*(1-beta)*(1-delta)*self.f_values[i,j-1,k-1,l-1]\n + alpha*(1-beta)*delta*self.f_values[i,j-1,k-1,l]\n + alpha*beta*(1-delta)*self.f_values[i,j,k-1,l-1]\n + alpha*beta*delta*self.f_values[i,j,k-1,l] )\n )/(self.y_list[k] - self.y_list[k-1])\n return dfdy\n\n def _derZ(self,w,x,y,z):\n '''\n Returns the derivative with respect to z of the interpolated function\n at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeZ.\n '''\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)\n x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)\n else:\n w_pos = self.wSearchFunc(self.w_list,w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n-1] = self.w_n-1\n x_pos = self.xSearchFunc(self.x_list,x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = self.ySearchFunc(self.y_list,y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n z_pos = self.zSearchFunc(self.z_list,z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])\n beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])\n gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])\n dfdz = (\n ( (1-alpha)*(1-beta)*(1-gamma)*self.f_values[i-1,j-1,k-1,l]\n + (1-alpha)*(1-beta)*gamma*self.f_values[i-1,j-1,k,l]\n + (1-alpha)*beta*(1-gamma)*self.f_values[i-1,j,k-1,l]\n + (1-alpha)*beta*gamma*self.f_values[i-1,j,k,l]\n + alpha*(1-beta)*(1-gamma)*self.f_values[i,j-1,k-1,l]\n + alpha*(1-beta)*gamma*self.f_values[i,j-1,k,l]\n + alpha*beta*(1-gamma)*self.f_values[i,j,k-1,l]\n + alpha*beta*gamma*self.f_values[i,j,k,l] ) -\n ( (1-alpha)*(1-beta)*(1-gamma)*self.f_values[i-1,j-1,k-1,l-1]\n + (1-alpha)*(1-beta)*gamma*self.f_values[i-1,j-1,k,l-1]\n + (1-alpha)*beta*(1-gamma)*self.f_values[i-1,j,k-1,l-1]\n + (1-alpha)*beta*gamma*self.f_values[i-1,j,k,l-1]\n + alpha*(1-beta)*(1-gamma)*self.f_values[i,j-1,k-1,l-1]\n + alpha*(1-beta)*gamma*self.f_values[i,j-1,k,l-1]\n + alpha*beta*(1-gamma)*self.f_values[i,j,k-1,l-1]\n + alpha*beta*gamma*self.f_values[i,j,k,l-1] )\n )/(self.z_list[l] - self.z_list[l-1])\n return dfdz\n\n\nclass LowerEnvelope(HARKinterpolator1D):\n '''\n The lower envelope of a finite set of 1D functions, each of which can be of\n any class that has the methods __call__, derivative, and eval_with_derivative.\n Generally: it combines HARKinterpolator1Ds.\n '''\n distance_criteria = ['functions']\n\n def __init__(self,*functions):\n '''\n Constructor to make a new lower envelope iterpolation.\n\n Parameters\n ----------\n *functions : function\n Any number of real functions; often instances of HARKinterpolator1D\n\n Returns\n -------\n new instance of LowerEnvelope\n '''\n self.functions = []\n for function in functions:\n self.functions.append(function)\n self.funcCount = len(self.functions)\n\n def _evaluate(self,x):\n '''\n Returns the level of the function at each value in x as the minimum among\n all of the functions. Only called internally by HARKinterpolator1D.__call__.\n '''\n if _isscalar(x):\n y = np.nanmin([f(x) for f in self.functions])\n else:\n m = len(x)\n fx = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n fx[:,j] = self.functions[j](x)\n y = np.nanmin(fx,axis=1)\n return y\n\n def _der(self,x):\n '''\n Returns the first derivative of the function at each value in x. Only\n called internally by HARKinterpolator1D.derivative.\n '''\n y,dydx = self.eval_with_derivative(x)\n return dydx # Sadly, this is the fastest / most convenient way...\n\n def _evalAndDer(self,x):\n '''\n Returns the level and first derivative of the function at each value in\n x. 
Only called internally by HARKinterpolator1D.eval_and_der.\n '''\n m = len(x)\n fx = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n fx[:,j] = self.functions[j](x)\n fx[np.isnan(fx)] = np.inf\n i = np.argmin(fx,axis=1)\n y = fx[np.arange(m),i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y,dydx\n\n\nclass UpperEnvelope(HARKinterpolator1D):\n '''\n The upper envelope of a finite set of 1D functions, each of which can be of\n any class that has the methods __call__, derivative, and eval_with_derivative.\n Generally: it combines HARKinterpolator1Ds.\n '''\n distance_criteria = ['functions']\n\n def __init__(self,*functions):\n '''\n Constructor to make a new upper envelope iterpolation.\n\n Parameters\n ----------\n *functions : function\n Any number of real functions; often instances of HARKinterpolator1D\n\n Returns\n -------\n new instance of UpperEnvelope\n '''\n self.functions = []\n for function in functions:\n self.functions.append(function)\n self.funcCount = len(self.functions)\n\n def _evaluate(self,x):\n '''\n Returns the level of the function at each value in x as the maximum among\n all of the functions. Only called internally by HARKinterpolator1D.__call__.\n '''\n if _isscalar(x):\n y = np.nanmax([f(x) for f in self.functions])\n else:\n m = len(x)\n fx = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n fx[:,j] = self.functions[j](x)\n y = np.nanmax(fx,axis=1)\n return y\n\n def _der(self,x):\n '''\n Returns the first derivative of the function at each value in x. Only\n called internally by HARKinterpolator1D.derivative.\n '''\n y,dydx = self.eval_with_derivative(x)\n return dydx # Sadly, this is the fastest / most convenient way...\n\n def _evalAndDer(self,x):\n '''\n Returns the level and first derivative of the function at each value in\n x. Only called internally by HARKinterpolator1D.eval_and_der.\n '''\n m = len(x)\n fx = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n fx[:,j] = self.functions[j](x)\n fx[np.isnan(fx)] = np.inf\n i = np.argmax(fx,axis=1)\n y = fx[np.arange(m),i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y,dydx\n\n\nclass LowerEnvelope2D(HARKinterpolator2D):\n '''\n The lower envelope of a finite set of 2D functions, each of which can be of\n any class that has the methods __call__, derivativeX, and derivativeY.\n Generally: it combines HARKinterpolator2Ds.\n '''\n distance_criteria = ['functions']\n\n def __init__(self,*functions):\n '''\n Constructor to make a new lower envelope iterpolation.\n\n Parameters\n ----------\n *functions : function\n Any number of real functions; often instances of HARKinterpolator2D\n\n Returns\n -------\n new instance of LowerEnvelope2D\n '''\n self.functions = []\n for function in functions:\n self.functions.append(function)\n self.funcCount = len(self.functions)\n\n def _evaluate(self,x,y):\n '''\n Returns the level of the function at each value in (x,y) as the minimum\n among all of the functions. 
Only called internally by\n HARKinterpolator2D.__call__.\n '''\n if _isscalar(x):\n f = np.nanmin([f(x,y) for f in self.functions])\n else:\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y)\n f = np.nanmin(temp,axis=1)\n return f\n\n def _derX(self,x,y):\n '''\n Returns the first derivative of the function with respect to X at each\n value in (x,y). Only called internally by HARKinterpolator2D._derX.\n '''\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y)\n temp[np.isnan(temp)] = np.inf\n i = np.argmin(temp,axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c],y[c])\n return dfdx\n\n def _derY(self,x,y):\n '''\n Returns the first derivative of the function with respect to Y at each\n value in (x,y). Only called internally by HARKinterpolator2D._derY.\n '''\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y)\n temp[np.isnan(temp)] = np.inf\n i = np.argmin(temp,axis=1)\n y = temp[np.arange(m),i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c],y[c])\n return dfdy\n\n\nclass LowerEnvelope3D(HARKinterpolator3D):\n '''\n The lower envelope of a finite set of 3D functions, each of which can be of\n any class that has the methods __call__, derivativeX, derivativeY, and\n derivativeZ. Generally: it combines HARKinterpolator2Ds.\n '''\n distance_criteria = ['functions']\n\n def __init__(self,*functions):\n '''\n Constructor to make a new lower envelope iterpolation.\n\n Parameters\n ----------\n *functions : function\n Any number of real functions; often instances of HARKinterpolator3D\n\n Returns\n -------\n None\n '''\n self.functions = []\n for function in functions:\n self.functions.append(function)\n self.funcCount = len(self.functions)\n\n def _evaluate(self,x,y,z):\n '''\n Returns the level of the function at each value in (x,y,z) as the minimum\n among all of the functions. Only called internally by\n HARKinterpolator3D.__call__.\n '''\n if _isscalar(x):\n f = np.nanmin([f(x,y,z) for f in self.functions])\n else:\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y,z)\n f = np.nanmin(temp,axis=1)\n return f\n\n def _derX(self,x,y,z):\n '''\n Returns the first derivative of the function with respect to X at each\n value in (x,y,z). Only called internally by HARKinterpolator3D._derX.\n '''\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y,z)\n temp[np.isnan(temp)] = np.inf\n i = np.argmin(temp,axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c],y[c],z[c])\n return dfdx\n\n def _derY(self,x,y,z):\n '''\n Returns the first derivative of the function with respect to Y at each\n value in (x,y,z). 
Only called internally by HARKinterpolator3D._derY.\n '''\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y,z)\n temp[np.isnan(temp)] = np.inf\n i = np.argmin(temp,axis=1)\n y = temp[np.arange(m),i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c],y[c],z[c])\n return dfdy\n\n def _derZ(self,x,y,z):\n '''\n Returns the first derivative of the function with respect to Z at each\n value in (x,y,z). Only called internally by HARKinterpolator3D._derZ.\n '''\n m = len(x)\n temp = np.zeros((m,self.funcCount))\n for j in range(self.funcCount):\n temp[:,j] = self.functions[j](x,y,z)\n temp[np.isnan(temp)] = np.inf\n i = np.argmin(temp,axis=1)\n y = temp[np.arange(m),i]\n dfdz = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdz[c] = self.functions[j].derivativeZ(x[c],y[c],z[c])\n return dfdz\n\n\nclass VariableLowerBoundFunc2D(HARKobject):\n '''\n A class for representing a function with two real inputs whose lower bound\n in the first input depends on the second input. Useful for managing curved\n natural borrowing constraints, as occurs in the persistent shocks model.\n '''\n distance_criteria = ['func','lowerBound']\n\n def __init__(self,func,lowerBound):\n '''\n Make a new instance of VariableLowerBoundFunc2D.\n\n Parameters\n ----------\n func : function\n A function f: (R_+ x R) --> R representing the function of interest\n shifted by its lower bound in the first input.\n lowerBound : function\n The lower bound in the first input of the function of interest, as\n a function of the second input.\n\n Returns\n -------\n None\n '''\n self.func = func\n self.lowerBound = lowerBound\n\n def __call__(self,x,y):\n '''\n Evaluate the function at given state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n\n Returns\n -------\n f_out : np.array\n Function evaluated at (x,y), of same shape as inputs.\n '''\n xShift = self.lowerBound(y)\n f_out = self.func(x-xShift,y)\n return f_out\n\n def derivativeX(self,x,y):\n '''\n Evaluate the first derivative with respect to x of the function at given\n state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n\n Returns\n -------\n dfdx_out : np.array\n First derivative of function with respect to the first input,\n evaluated at (x,y), of same shape as inputs.\n '''\n xShift = self.lowerBound(y)\n dfdx_out = self.func.derivativeX(x-xShift,y)\n return dfdx_out\n\n def derivativeY(self,x,y):\n '''\n Evaluate the first derivative with respect to y of the function at given\n state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n\n Returns\n -------\n dfdy_out : np.array\n First derivative of function with respect to the second input,\n evaluated at (x,y), of same shape as inputs.\n '''\n xShift,xShiftDer = self.lowerBound.eval_with_derivative(y)\n dfdy_out = self.func.derivativeY(x-xShift,y) - xShiftDer*self.func.derivativeX(x-xShift,y)\n return dfdy_out\n\n\nclass VariableLowerBoundFunc3D(HARKobject):\n '''\n A class for representing a function with three real inputs whose lower bound\n in the first input depends on the second input. 
Useful for managing curved\n natural borrowing constraints.\n '''\n distance_criteria = ['func','lowerBound']\n\n def __init__(self,func,lowerBound):\n '''\n Make a new instance of VariableLowerBoundFunc3D.\n\n Parameters\n ----------\n func : function\n A function f: (R_+ x R^2) --> R representing the function of interest\n shifted by its lower bound in the first input.\n lowerBound : function\n The lower bound in the first input of the function of interest, as\n a function of the second input.\n\n Returns\n -------\n None\n '''\n self.func = func\n self.lowerBound = lowerBound\n\n def __call__(self,x,y,z):\n '''\n Evaluate the function at given state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n z : np.array\n Third input values; should be of same shape as x.\n\n Returns\n -------\n f_out : np.array\n Function evaluated at (x,y,z), of same shape as inputs.\n '''\n xShift = self.lowerBound(y)\n f_out = self.func(x-xShift,y,z)\n return f_out\n\n def derivativeX(self,x,y,z):\n '''\n Evaluate the first derivative with respect to x of the function at given\n state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n z : np.array\n Third input values; should be of same shape as x.\n\n Returns\n -------\n dfdx_out : np.array\n First derivative of function with respect to the first input,\n evaluated at (x,y,z), of same shape as inputs.\n '''\n xShift = self.lowerBound(y)\n dfdx_out = self.func.derivativeX(x-xShift,y,z)\n return dfdx_out\n\n def derivativeY(self,x,y,z):\n '''\n Evaluate the first derivative with respect to y of the function at given\n state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n z : np.array\n Third input values; should be of same shape as x.\n\n Returns\n -------\n dfdy_out : np.array\n First derivative of function with respect to the second input,\n evaluated at (x,y,z), of same shape as inputs.\n '''\n xShift,xShiftDer = self.lowerBound.eval_with_derivative(y)\n dfdy_out = self.func.derivativeY(x-xShift,y,z) - \\\n xShiftDer*self.func.derivativeX(x-xShift,y,z)\n return dfdy_out\n\n def derivativeZ(self,x,y,z):\n '''\n Evaluate the first derivative with respect to z of the function at given\n state space points.\n\n Parameters\n ----------\n x : np.array\n First input values.\n y : np.array\n Second input values; should be of same shape as x.\n z : np.array\n Third input values; should be of same shape as x.\n\n Returns\n -------\n dfdz_out : np.array\n First derivative of function with respect to the third input,\n evaluated at (x,y,z), of same shape as inputs.\n '''\n xShift = self.lowerBound(y)\n dfdz_out = self.func.derivativeZ(x-xShift,y,z)\n return dfdz_out\n\n\nclass LinearInterpOnInterp1D(HARKinterpolator2D):\n '''\n A 2D interpolator that linearly interpolates among a list of 1D interpolators.\n '''\n distance_criteria = ['xInterpolators','y_list']\n def __init__(self,xInterpolators,y_values):\n '''\n Constructor for the class, generating an approximation to a function of\n the form f(x,y) using interpolations over f(x,y_0) for a fixed grid of\n y_0 values.\n\n Parameters\n ----------\n xInterpolators : [HARKinterpolator1D]\n A list of 1D interpolations over the x variable. 
The nth element of\n xInterpolators represents f(x,y_values[n]).\n y_values: numpy.array\n An array of y values equal in length to xInterpolators.\n\n Returns\n -------\n new instance of LinearInterpOnInterp1D\n '''\n self.xInterpolators = xInterpolators\n self.y_list = y_values\n self.y_n = y_values.size\n\n def _evaluate(self,x,y):\n '''\n Returns the level of the interpolated function at each value in x,y.\n Only called internally by HARKinterpolator2D.__call__ (etc).\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n f = (1-alpha)*self.xInterpolators[y_pos-1](x) + alpha*self.xInterpolators[y_pos](x)\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n f = np.zeros(m) + np.nan\n if y.size > 0:\n for i in range(1,self.y_n):\n c = y_pos == i\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n f[c] = (1-alpha)*self.xInterpolators[i-1](x[c]) + alpha*self.xInterpolators[i](x[c])\n return f\n\n def _derX(self,x,y):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n dfdx = (1-alpha)*self.xInterpolators[y_pos-1]._der(x) + alpha*self.xInterpolators[y_pos]._der(x)\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n dfdx = np.zeros(m) + np.nan\n if y.size > 0:\n for i in range(1,self.y_n):\n c = y_pos == i\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n dfdx[c] = (1-alpha)*self.xInterpolators[i-1]._der(x[c]) + alpha*self.xInterpolators[i]._der(x[c])\n return dfdx\n\n def _derY(self,x,y):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n dfdy = (self.xInterpolators[y_pos](x) - self.xInterpolators[y_pos-1](x))/(self.y_list[y_pos] - self.y_list[y_pos-1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n dfdy = np.zeros(m) + np.nan\n if y.size > 0:\n for i in range(1,self.y_n):\n c = y_pos == i\n if np.any(c):\n dfdy[c] = (self.xInterpolators[i](x[c]) - self.xInterpolators[i-1](x[c]))/(self.y_list[i] - self.y_list[i-1])\n return dfdy\n\n\nclass BilinearInterpOnInterp1D(HARKinterpolator3D):\n '''\n A 3D interpolator that bilinearly interpolates among a list of lists of 1D\n interpolators.\n '''\n distance_criteria = ['xInterpolators','y_list','z_list']\n\n def __init__(self,xInterpolators,y_values,z_values):\n '''\n Constructor for the class, generating an approximation to a function of\n the form f(x,y,z) using interpolations over f(x,y_0,z_0) for a fixed grid\n of y_0 and z_0 values.\n\n Parameters\n ----------\n xInterpolators : [[HARKinterpolator1D]]\n A list of lists of 1D interpolations over the x variable. 
The i,j-th\n element of xInterpolators represents f(x,y_values[i],z_values[j]).\n y_values: numpy.array\n An array of y values equal in length to xInterpolators.\n z_values: numpy.array\n An array of z values equal in length to xInterpolators[0].\n\n Returns\n -------\n new instance of BilinearInterpOnInterp1D\n '''\n self.xInterpolators = xInterpolators\n self.y_list = y_values\n self.y_n = y_values.size\n self.z_list = z_values\n self.z_n = z_values.size\n\n def _evaluate(self,x,y,z):\n '''\n Returns the level of the interpolated function at each value in x,y,z.\n Only called internally by HARKinterpolator3D.__call__ (etc).\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n f = ((1-alpha)*(1-beta)*self.xInterpolators[y_pos-1][z_pos-1](x)\n + (1-alpha)*beta*self.xInterpolators[y_pos-1][z_pos](x)\n + alpha*(1-beta)*self.xInterpolators[y_pos][z_pos-1](x)\n + alpha*beta*self.xInterpolators[y_pos][z_pos](x))\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n f[c] = (\n (1-alpha)*(1-beta)*self.xInterpolators[i-1][j-1](x[c])\n + (1-alpha)*beta*self.xInterpolators[i-1][j](x[c])\n + alpha*(1-beta)*self.xInterpolators[i][j-1](x[c])\n + alpha*beta*self.xInterpolators[i][j](x[c]))\n return f\n\n def _derX(self,x,y,z):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in x,y,z. 
Only called internally by HARKinterpolator3D.derivativeX.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdx = ((1-alpha)*(1-beta)*self.xInterpolators[y_pos-1][z_pos-1]._der(x)\n + (1-alpha)*beta*self.xInterpolators[y_pos-1][z_pos]._der(x)\n + alpha*(1-beta)*self.xInterpolators[y_pos][z_pos-1]._der(x)\n + alpha*beta*self.xInterpolators[y_pos][z_pos]._der(x))\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdx = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n dfdx[c] = (\n (1-alpha)*(1-beta)*self.xInterpolators[i-1][j-1]._der(x[c])\n + (1-alpha)*beta*self.xInterpolators[i-1][j]._der(x[c])\n + alpha*(1-beta)*self.xInterpolators[i][j-1]._der(x[c])\n + alpha*beta*self.xInterpolators[i][j]._der(x[c]))\n return dfdx\n\n def _derY(self,x,y,z):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdy = (((1-beta)*self.xInterpolators[y_pos][z_pos-1](x) + beta*self.xInterpolators[y_pos][z_pos](x))\n - ((1-beta)*self.xInterpolators[y_pos-1][z_pos-1](x) + beta*self.xInterpolators[y_pos-1][z_pos](x)))/(self.y_list[y_pos] - self.y_list[y_pos-1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdy = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n dfdy[c] = (((1-beta)*self.xInterpolators[i][j-1](x[c]) + beta*self.xInterpolators[i][j](x[c]))\n - ((1-beta)*self.xInterpolators[i-1][j-1](x[c]) + beta*self.xInterpolators[i-1][j](x[c])))/(self.y_list[i] - self.y_list[i-1])\n return dfdy\n\n def _derZ(self,x,y,z):\n '''\n Returns the derivative with respect to z of the interpolated function\n at each value in x,y,z. 
Only called internally by HARKinterpolator3D.derivativeZ.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n dfdz = (((1-alpha)*self.xInterpolators[y_pos-1][z_pos](x) + alpha*self.xInterpolators[y_pos][z_pos](x))\n - ((1-alpha)*self.xInterpolators[y_pos-1][z_pos-1](x) + alpha*self.xInterpolators[y_pos][z_pos-1](x)))/(self.z_list[z_pos] - self.z_list[z_pos-1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n dfdz[c] = (((1-alpha)*self.xInterpolators[i-1][j](x[c]) + alpha*self.xInterpolators[i][j](x[c]))\n - ((1-alpha)*self.xInterpolators[i-1][j-1](x[c]) + alpha*self.xInterpolators[i][j-1](x[c])))/(self.z_list[j] - self.z_list[j-1])\n return dfdz\n\n\n\nclass TrilinearInterpOnInterp1D(HARKinterpolator4D):\n '''\n A 4D interpolator that trilinearly interpolates among a list of lists of 1D interpolators.\n '''\n distance_criteria = ['wInterpolators','x_list','y_list','z_list']\n\n def __init__(self,wInterpolators,x_values,y_values,z_values):\n '''\n Constructor for the class, generating an approximation to a function of\n the form f(w,x,y,z) using interpolations over f(w,x_0,y_0,z_0) for a fixed\n grid of y_0 and z_0 values.\n\n Parameters\n ----------\n wInterpolators : [[[HARKinterpolator1D]]]\n A list of lists of lists of 1D interpolations over the x variable.\n The i,j,k-th element of wInterpolators represents f(w,x_values[i],y_values[j],z_values[k]).\n x_values: numpy.array\n An array of x values equal in length to wInterpolators.\n y_values: numpy.array\n An array of y values equal in length to wInterpolators[0].\n z_values: numpy.array\n An array of z values equal in length to wInterpolators[0][0]\n\n Returns\n -------\n new instance of TrilinearInterpOnInterp1D\n '''\n self.wInterpolators = wInterpolators\n self.x_list = x_values\n self.x_n = x_values.size\n self.y_list = y_values\n self.y_n = y_values.size\n self.z_list = z_values\n self.z_n = z_values.size\n\n def _evaluate(self,w,x,y,z):\n '''\n Returns the level of the interpolated function at each value in w,x,y,z.\n Only called internally by HARKinterpolator4D.__call__ (etc).\n '''\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n f = (\n (1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)\n + (1-alpha)*(1-beta)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)\n + (1-alpha)*beta*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)\n + (1-alpha)*beta*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos](w)\n + alpha*(1-beta)*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)\n + 
alpha*(1-beta)*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos](w)\n + alpha*beta*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1](w)\n + alpha*beta*gamma*self.wInterpolators[x_pos][y_pos][z_pos](w))\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list,x)\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1,self.x_n):\n for j in range(1,self.y_n):\n for k in range(1,self.z_n):\n c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)\n if np.any(c):\n alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])\n gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])\n f[c] = (\n (1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1](w[c])\n + (1-alpha)*(1-beta)*gamma*self.wInterpolators[i-1][j-1][k](w[c])\n + (1-alpha)*beta*(1-gamma)*self.wInterpolators[i-1][j][k-1](w[c])\n + (1-alpha)*beta*gamma*self.wInterpolators[i-1][j][k](w[c])\n + alpha*(1-beta)*(1-gamma)*self.wInterpolators[i][j-1][k-1](w[c])\n + alpha*(1-beta)*gamma*self.wInterpolators[i][j-1][k](w[c])\n + alpha*beta*(1-gamma)*self.wInterpolators[i][j][k-1](w[c])\n + alpha*beta*gamma*self.wInterpolators[i][j][k](w[c]))\n return f\n\n def _derW(self,w,x,y,z):\n '''\n Returns the derivative with respect to w of the interpolated function\n at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeW.\n '''\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdw = (\n (1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1]._der(w)\n + (1-alpha)*(1-beta)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos]._der(w)\n + (1-alpha)*beta*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1]._der(w)\n + (1-alpha)*beta*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos]._der(w)\n + alpha*(1-beta)*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1]._der(w)\n + alpha*(1-beta)*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos]._der(w)\n + alpha*beta*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1]._der(w)\n + alpha*beta*gamma*self.wInterpolators[x_pos][y_pos][z_pos]._der(w))\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list,x)\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1,self.x_n):\n for j in range(1,self.y_n):\n for k in range(1,self.z_n):\n c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)\n if np.any(c):\n alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])\n gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])\n dfdw[c] = (\n 
(1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1]._der(w[c])\n + (1-alpha)*(1-beta)*gamma*self.wInterpolators[i-1][j-1][k]._der(w[c])\n + (1-alpha)*beta*(1-gamma)*self.wInterpolators[i-1][j][k-1]._der(w[c])\n + (1-alpha)*beta*gamma*self.wInterpolators[i-1][j][k]._der(w[c])\n + alpha*(1-beta)*(1-gamma)*self.wInterpolators[i][j-1][k-1]._der(w[c])\n + alpha*(1-beta)*gamma*self.wInterpolators[i][j-1][k]._der(w[c])\n + alpha*beta*(1-gamma)*self.wInterpolators[i][j][k-1]._der(w[c])\n + alpha*beta*gamma*self.wInterpolators[i][j][k]._der(w[c]))\n return dfdw\n\n def _derX(self,w,x,y,z):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeX.\n '''\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdx = (\n ((1-beta)*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)\n + (1-beta)*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos](w)\n + beta*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1](w)\n + beta*gamma*self.wInterpolators[x_pos][y_pos][z_pos](w)) -\n ((1-beta)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)\n + (1-beta)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)\n + beta*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)\n + beta*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos](w)))/(self.x_list[x_pos] - self.x_list[x_pos-1])\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list,x)\n x_pos[x_pos > self.x_n-1] = self.x_n-1\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdx = np.zeros(m) + np.nan\n for i in range(1,self.x_n):\n for j in range(1,self.y_n):\n for k in range(1,self.z_n):\n c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)\n if np.any(c):\n beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])\n gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])\n dfdx[c] = (\n ((1-beta)*(1-gamma)*self.wInterpolators[i][j-1][k-1](w[c])\n + (1-beta)*gamma*self.wInterpolators[i][j-1][k](w[c])\n + beta*(1-gamma)*self.wInterpolators[i][j][k-1](w[c])\n + beta*gamma*self.wInterpolators[i][j][k](w[c])) -\n ((1-beta)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1](w[c])\n + (1-beta)*gamma*self.wInterpolators[i-1][j-1][k](w[c])\n + beta*(1-gamma)*self.wInterpolators[i-1][j][k-1](w[c])\n + beta*gamma*self.wInterpolators[i-1][j][k](w[c])))/(self.x_list[i] - self.x_list[i-1])\n return dfdx\n\n def _derY(self,w,x,y,z):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeY.\n        '''\n        if _isscalar(w):\n            x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)\n            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n            alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n            gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n            dfdy = (\n                  ((1-alpha)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)\n                + (1-alpha)*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos](w)\n                + alpha*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1](w)\n                + alpha*gamma*self.wInterpolators[x_pos][y_pos][z_pos](w)) -\n                  ((1-alpha)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)\n                + (1-alpha)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)\n                + alpha*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)\n                + alpha*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos](w)))/(self.y_list[y_pos] - self.y_list[y_pos-1])\n        else:\n            m = len(x)\n            x_pos = np.searchsorted(self.x_list,x)\n            x_pos[x_pos > self.x_n-1] = self.x_n-1\n            x_pos[x_pos < 1] = 1\n            y_pos = np.searchsorted(self.y_list,y)\n            y_pos[y_pos > self.y_n-1] = self.y_n-1\n            y_pos[y_pos < 1] = 1\n            z_pos = np.searchsorted(self.z_list,z)\n            z_pos[z_pos > self.z_n-1] = self.z_n-1\n            z_pos[z_pos < 1] = 1\n            dfdy = np.zeros(m) + np.nan\n            for i in range(1,self.x_n):\n                for j in range(1,self.y_n):\n                    for k in range(1,self.z_n):\n                        c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)\n                        if np.any(c):\n                            alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n                            gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])\n                            dfdy[c] = (\n                                  ((1-alpha)*(1-gamma)*self.wInterpolators[i-1][j][k-1](w[c])\n                                + (1-alpha)*gamma*self.wInterpolators[i-1][j][k](w[c])\n                                + alpha*(1-gamma)*self.wInterpolators[i][j][k-1](w[c])\n                                + alpha*gamma*self.wInterpolators[i][j][k](w[c])) -\n                                  ((1-alpha)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1](w[c])\n                                + (1-alpha)*gamma*self.wInterpolators[i-1][j-1][k](w[c])\n                                + alpha*(1-gamma)*self.wInterpolators[i][j-1][k-1](w[c])\n                                + alpha*gamma*self.wInterpolators[i][j-1][k](w[c])))/(self.y_list[j] - self.y_list[j-1])\n        return dfdy\n\n    def _derZ(self,w,x,y,z):\n        '''\n        Returns the derivative with respect to z of the interpolated function\n        at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeZ.\n        '''\n        if _isscalar(w):\n            x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)\n            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n            alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])\n            beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n            dfdz = (\n                  ((1-alpha)*(1-beta)*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)\n                + (1-alpha)*beta*self.wInterpolators[x_pos-1][y_pos][z_pos](w)\n                + alpha*(1-beta)*self.wInterpolators[x_pos][y_pos-1][z_pos](w)\n                + alpha*beta*self.wInterpolators[x_pos][y_pos][z_pos](w)) -\n                  ((1-alpha)*(1-beta)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)\n                + (1-alpha)*beta*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)\n                + alpha*(1-beta)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)\n                + alpha*beta*self.wInterpolators[x_pos][y_pos][z_pos-1](w)))/(self.z_list[z_pos] - self.z_list[z_pos-1])\n        else:\n            m = len(x)\n            x_pos = np.searchsorted(self.x_list,x)\n            x_pos[x_pos > self.x_n-1] = self.x_n-1\n            x_pos[x_pos < 1] = 1\n            y_pos = np.searchsorted(self.y_list,y)\n            y_pos[y_pos > self.y_n-1] = self.y_n-1\n            y_pos[y_pos < 1] = 1\n            z_pos = np.searchsorted(self.z_list,z)\n            z_pos[z_pos > self.z_n-1] = self.z_n-1\n            z_pos[z_pos < 1] = 1\n            dfdz = np.zeros(m) + np.nan\n            for i in range(1,self.x_n):\n                for j in range(1,self.y_n):\n                    for k in range(1,self.z_n):\n                        c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)\n                        if np.any(c):\n                            alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])\n                            beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])\n                            dfdz[c] = (\n                                  ((1-alpha)*(1-beta)*self.wInterpolators[i-1][j-1][k](w[c])\n                                + (1-alpha)*beta*self.wInterpolators[i-1][j][k](w[c])\n                                + alpha*(1-beta)*self.wInterpolators[i][j-1][k](w[c])\n                                + alpha*beta*self.wInterpolators[i][j][k](w[c])) -\n                                  ((1-alpha)*(1-beta)*self.wInterpolators[i-1][j-1][k-1](w[c])\n                                + (1-alpha)*beta*self.wInterpolators[i-1][j][k-1](w[c])\n                                + alpha*(1-beta)*self.wInterpolators[i][j-1][k-1](w[c])\n                                + alpha*beta*self.wInterpolators[i][j][k-1](w[c])))/(self.z_list[k] - self.z_list[k-1])\n        return dfdz\n\n\nclass LinearInterpOnInterp2D(HARKinterpolator3D):\n    '''\n    A 3D interpolation method that linearly interpolates between \"layers\" of\n    arbitrary 2D interpolations. Useful for models with two endogenous state\n    variables and one exogenous state variable when solving with the endogenous\n    grid method. NOTE: should not be used if an exogenous 3D grid is used, as it\n    will be significantly slower than TrilinearInterp.\n    '''\n    distance_criteria = ['xyInterpolators','z_list']\n\n    def __init__(self,xyInterpolators,z_values):\n        '''\n        Constructor for the class, generating an approximation to a function of\n        the form f(x,y,z) using interpolations over f(x,y,z_0) for a fixed grid\n        of z_0 values.\n\n        Parameters\n        ----------\n        xyInterpolators : [HARKinterpolator2D]\n            A list of 2D interpolations over the x and y variables. 
The nth\n element of xyInterpolators represents f(x,y,z_values[n]).\n z_values: numpy.array\n An array of z values equal in length to xyInterpolators.\n\n Returns\n -------\n new instance of LinearInterpOnInterp2D\n '''\n self.xyInterpolators = xyInterpolators\n self.z_list = z_values\n self.z_n = z_values.size\n\n def _evaluate(self,x,y,z):\n '''\n Returns the level of the interpolated function at each value in x,y,z.\n Only called internally by HARKinterpolator3D.__call__ (etc).\n '''\n if _isscalar(x):\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n f = (1-alpha)*self.xyInterpolators[z_pos-1](x,y) + alpha*self.xyInterpolators[z_pos](x,y)\n else:\n m = len(x)\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n if x.size > 0:\n for i in range(1,self.z_n):\n c = z_pos == i\n if np.any(c):\n alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])\n f[c] = (1-alpha)*self.xyInterpolators[i-1](x[c],y[c]) + alpha*self.xyInterpolators[i](x[c],y[c])\n return f\n\n def _derX(self,x,y,z):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.\n '''\n if _isscalar(x):\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdx = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeX(x,y) + alpha*self.xyInterpolators[z_pos].derivativeX(x,y)\n else:\n m = len(x)\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdx = np.zeros(m) + np.nan\n if x.size > 0:\n for i in range(1,self.z_n):\n c = z_pos == i\n if np.any(c):\n alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])\n dfdx[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeX(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeX(x[c],y[c])\n return dfdx\n\n def _derY(self,x,y,z):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.\n '''\n if _isscalar(x):\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdy = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeY(x,y) + alpha*self.xyInterpolators[z_pos].derivativeY(x,y)\n else:\n m = len(x)\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdy = np.zeros(m) + np.nan\n if x.size > 0:\n for i in range(1,self.z_n):\n c = z_pos == i\n if np.any(c):\n alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])\n dfdy[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeY(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeY(x[c],y[c])\n return dfdy\n\n def _derZ(self,x,y,z):\n '''\n Returns the derivative with respect to z of the interpolated function\n at each value in x,y,z. 
Only called internally by HARKinterpolator3D.derivativeZ.\n        '''\n        if _isscalar(x):\n            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n            dfdz = (self.xyInterpolators[z_pos](x,y) - self.xyInterpolators[z_pos-1](x,y))/(self.z_list[z_pos] - self.z_list[z_pos-1])\n        else:\n            m = len(x)\n            z_pos = np.searchsorted(self.z_list,z)\n            z_pos[z_pos > self.z_n-1] = self.z_n-1\n            z_pos[z_pos < 1] = 1\n            dfdz = np.zeros(m) + np.nan\n            if x.size > 0:\n                for i in range(1,self.z_n):\n                    c = z_pos == i\n                    if np.any(c):\n                        dfdz[c] = (self.xyInterpolators[i](x[c],y[c]) - self.xyInterpolators[i-1](x[c],y[c]))/(self.z_list[i] - self.z_list[i-1])\n        return dfdz\n\nclass BilinearInterpOnInterp2D(HARKinterpolator4D):\n    '''\n    A 4D interpolation method that bilinearly interpolates among \"layers\" of\n    arbitrary 2D interpolations. Useful for models with two endogenous state\n    variables and two exogenous state variables when solving with the endogenous\n    grid method. NOTE: should not be used if an exogenous 4D grid is used, as it\n    will be significantly slower than QuadlinearInterp.\n    '''\n    distance_criteria = ['wxInterpolators','y_list','z_list']\n\n    def __init__(self,wxInterpolators,y_values,z_values):\n        '''\n        Constructor for the class, generating an approximation to a function of\n        the form f(w,x,y,z) using interpolations over f(w,x,y_0,z_0) for a fixed\n        grid of y_0 and z_0 values.\n\n        Parameters\n        ----------\n        wxInterpolators : [[HARKinterpolator2D]]\n            A list of lists of 2D interpolations over the w and x variables.\n            The i,j-th element of wxInterpolators represents\n            f(w,x,y_values[i],z_values[j]).\n        y_values: numpy.array\n            An array of y values equal in length to wxInterpolators.\n        z_values: numpy.array\n            An array of z values equal in length to wxInterpolators[0].\n\n        Returns\n        -------\n        new instance of BilinearInterpOnInterp2D\n        '''\n        self.wxInterpolators = wxInterpolators\n        self.y_list = y_values\n        self.y_n = y_values.size\n        self.z_list = z_values\n        self.z_n = z_values.size\n\n    def _evaluate(self,w,x,y,z):\n        '''\n        Returns the level of the interpolated function at each value in w,x,y,z.\n        Only called internally by HARKinterpolator4D.__call__ (etc).\n        '''\n        if _isscalar(x):\n            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n            alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n            beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n            f = ((1-alpha)*(1-beta)*self.wxInterpolators[y_pos-1][z_pos-1](w,x)\n                + (1-alpha)*beta*self.wxInterpolators[y_pos-1][z_pos](w,x)\n                + alpha*(1-beta)*self.wxInterpolators[y_pos][z_pos-1](w,x)\n                + alpha*beta*self.wxInterpolators[y_pos][z_pos](w,x))\n        else:\n            m = len(x)\n            y_pos = np.searchsorted(self.y_list,y)\n            y_pos[y_pos > self.y_n-1] = self.y_n-1\n            y_pos[y_pos < 1] = 1\n            z_pos = np.searchsorted(self.z_list,z)\n            z_pos[z_pos > self.z_n-1] = self.z_n-1\n            z_pos[z_pos < 1] = 1\n            f = np.zeros(m) + np.nan\n            for i in range(1,self.y_n):\n                for j in range(1,self.z_n):\n                    c = np.logical_and(i == y_pos, j == z_pos)\n                    if np.any(c):\n                        alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n                        beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n                        f[c] = (\n                              (1-alpha)*(1-beta)*self.wxInterpolators[i-1][j-1](w[c],x[c])\n                            + (1-alpha)*beta*self.wxInterpolators[i-1][j](w[c],x[c])\n                            + alpha*(1-beta)*self.wxInterpolators[i][j-1](w[c],x[c])\n                            + alpha*beta*self.wxInterpolators[i][j](w[c],x[c]))\n        return f\n\n    def _derW(self,w,x,y,z):\n        
'''\n Returns the derivative with respect to w of the interpolated function\n at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeW.\n '''\n # This may look strange, as we call the derivativeX() method to get the\n # derivative with respect to w, but that's just a quirk of 4D interpolations\n # beginning with w rather than x. The derivative wrt the first dimension\n # of an element of wxInterpolators is the w-derivative of the main function.\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdw = ((1-alpha)*(1-beta)*self.wxInterpolators[y_pos-1][z_pos-1].derivativeX(w,x)\n + (1-alpha)*beta*self.wxInterpolators[y_pos-1][z_pos].derivativeX(w,x)\n + alpha*(1-beta)*self.wxInterpolators[y_pos][z_pos-1].derivativeX(w,x)\n + alpha*beta*self.wxInterpolators[y_pos][z_pos].derivativeX(w,x))\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n dfdw[c] = (\n (1-alpha)*(1-beta)*self.wxInterpolators[i-1][j-1].derivativeX(w[c],x[c])\n + (1-alpha)*beta*self.wxInterpolators[i-1][j].derivativeX(w[c],x[c])\n + alpha*(1-beta)*self.wxInterpolators[i][j-1].derivativeX(w[c],x[c])\n + alpha*beta*self.wxInterpolators[i][j].derivativeX(w[c],x[c]))\n return dfdw\n\n def _derX(self,w,x,y,z):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeX.\n '''\n # This may look strange, as we call the derivativeY() method to get the\n # derivative with respect to x, but that's just a quirk of 4D interpolations\n # beginning with w rather than x. 
The derivative wrt the second dimension\n # of an element of wxInterpolators is the x-derivative of the main function.\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdx = ((1-alpha)*(1-beta)*self.wxInterpolators[y_pos-1][z_pos-1].derivativeY(w,x)\n + (1-alpha)*beta*self.wxInterpolators[y_pos-1][z_pos].derivativeY(w,x)\n + alpha*(1-beta)*self.wxInterpolators[y_pos][z_pos-1].derivativeY(w,x)\n + alpha*beta*self.wxInterpolators[y_pos][z_pos].derivativeY(w,x))\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdx = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n dfdx[c] = (\n (1-alpha)*(1-beta)*self.wxInterpolators[i-1][j-1].derivativeY(w[c],x[c])\n + (1-alpha)*beta*self.wxInterpolators[i-1][j].derivativeY(w[c],x[c])\n + alpha*(1-beta)*self.wxInterpolators[i][j-1].derivativeY(w[c],x[c])\n + alpha*beta*self.wxInterpolators[i][j].derivativeY(w[c],x[c]))\n return dfdx\n\n def _derY(self,w,x,y,z):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeY.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])\n dfdy = (((1-beta)*self.wxInterpolators[y_pos][z_pos-1](w,x) + beta*self.wxInterpolators[y_pos][z_pos](w,x))\n - ((1-beta)*self.wxInterpolators[y_pos-1][z_pos-1](w,x) + beta*self.wxInterpolators[y_pos-1][z_pos](w,x)))/(self.y_list[y_pos] - self.y_list[y_pos-1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdy = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])\n dfdy[c] = (((1-beta)*self.wxInterpolators[i][j-1](w[c],x[c]) + beta*self.wxInterpolators[i][j](w[c],x[c]))\n - ((1-beta)*self.wxInterpolators[i-1][j-1](w[c],x[c]) + beta*self.wxInterpolators[i-1][j](w[c],x[c])))/(self.y_list[i] - self.y_list[i-1])\n return dfdy\n\n def _derZ(self,w,x,y,z):\n '''\n Returns the derivative with respect to z of the interpolated function\n at each value in w,x,y,z. 
Only called internally by HARKinterpolator4D.derivativeZ.\n '''\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)\n z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)\n alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])\n dfdz = (((1-alpha)*self.wxInterpolators[y_pos-1][z_pos](w,x) + alpha*self.wxInterpolators[y_pos][z_pos](w,x))\n - ((1-alpha)*self.wxInterpolators[y_pos-1][z_pos-1](w,x) + alpha*self.wxInterpolators[y_pos][z_pos-1](w,x)))/(self.z_list[z_pos] - self.z_list[z_pos-1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list,y)\n y_pos[y_pos > self.y_n-1] = self.y_n-1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list,z)\n z_pos[z_pos > self.z_n-1] = self.z_n-1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1,self.y_n):\n for j in range(1,self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])\n dfdz[c] = (((1-alpha)*self.wxInterpolators[i-1][j](w[c],x[c]) + alpha*self.wxInterpolators[i][j](w[c],x[c]))\n - ((1-alpha)*self.wxInterpolators[i-1][j-1](w[c],x[c]) + alpha*self.wxInterpolators[i][j-1](w[c],x[c])))/(self.z_list[j] - self.z_list[j-1])\n return dfdz\n\n\nclass Curvilinear2DInterp(HARKinterpolator2D):\n '''\n A 2D interpolation method for curvilinear or \"warped grid\" interpolation, as\n in White (2015). Used for models with two endogenous states that are solved\n with the endogenous grid method.\n '''\n distance_criteria = ['f_values','x_values','y_values']\n\n def __init__(self,f_values,x_values,y_values):\n '''\n Constructor for 2D curvilinear interpolation for a function f(x,y)\n\n Parameters\n ----------\n f_values: numpy.array\n A 2D array of function values such that f_values[i,j] =\n f(x_values[i,j],y_values[i,j]).\n x_values: numpy.array\n A 2D array of x values of the same size as f_values.\n y_values: numpy.array\n A 2D array of y values of the same size as f_values.\n\n Returns\n -------\n new instance of Curvilinear2DInterp\n '''\n self.f_values = f_values\n self.x_values = x_values\n self.y_values = y_values\n my_shape = f_values.shape\n self.x_n = my_shape[0]\n self.y_n = my_shape[1]\n self.updatePolarity()\n\n def updatePolarity(self):\n '''\n Fills in the polarity attribute of the interpolation, determining whether\n the \"plus\" (True) or \"minus\" (False) solution of the system of equations\n should be used for each sector. 
Needs to be called in __init__.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n none\n '''\n # Grab a point known to be inside each sector: the midway point between\n # the lower left and upper right vertex of each sector\n x_temp = 0.5*(self.x_values[0:(self.x_n-1),0:(self.y_n-1)] + self.x_values[1:self.x_n,1:self.y_n])\n y_temp = 0.5*(self.y_values[0:(self.x_n-1),0:(self.y_n-1)] + self.y_values[1:self.x_n,1:self.y_n])\n size = (self.x_n-1)*(self.y_n-1)\n x_temp = np.reshape(x_temp,size)\n y_temp = np.reshape(y_temp,size)\n y_pos = np.tile(np.arange(0,self.y_n-1),self.x_n-1)\n x_pos = np.reshape(np.tile(np.arange(0,self.x_n-1),(self.y_n-1,1)).transpose(),size)\n\n # Set the polarity of all sectors to \"plus\", then test each sector\n self.polarity = np.ones((self.x_n-1,self.y_n-1),dtype=bool)\n alpha, beta = self.findCoords(x_temp,y_temp,x_pos,y_pos)\n polarity = np.logical_and(\n np.logical_and(alpha > 0, alpha < 1),\n np.logical_and(beta > 0, beta < 1))\n\n # Update polarity: if (alpha,beta) not in the unit square, then that\n # sector must use the \"minus\" solution instead\n self.polarity = np.reshape(polarity,(self.x_n-1,self.y_n-1))\n\n def findSector(self,x,y):\n '''\n Finds the quadrilateral \"sector\" for each (x,y) point in the input.\n Only called as a subroutine of _evaluate().\n\n Parameters\n ----------\n x : np.array\n Values whose sector should be found.\n y : np.array\n Values whose sector should be found. Should be same size as x.\n\n Returns\n -------\n x_pos : np.array\n Sector x-coordinates for each point of the input, of the same size.\n y_pos : np.array\n Sector y-coordinates for each point of the input, of the same size.\n '''\n # Initialize the sector guess\n m = x.size\n x_pos_guess = (np.ones(m)*self.x_n/2).astype(int)\n y_pos_guess = (np.ones(m)*self.y_n/2).astype(int)\n\n # Define a function that checks whether a set of points violates a linear\n # boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),\n # where the latter is *COUNTER CLOCKWISE* from the former. 
Returns\n # 1 if the point is outside the boundary and 0 otherwise.\n violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : (\n (y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0\n\n # Identify the correct sector for each point to be evaluated\n these = np.ones(m,dtype=bool)\n max_loops = self.x_n + self.y_n\n loops = 0\n while np.any(these) and loops < max_loops:\n # Get coordinates for the four vertices: (xA,yA),...,(xD,yD)\n x_temp = x[these]\n y_temp = y[these]\n xA = self.x_values[x_pos_guess[these],y_pos_guess[these]]\n xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]]\n xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1]\n xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1]\n yA = self.y_values[x_pos_guess[these],y_pos_guess[these]]\n yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]]\n yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1]\n yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1]\n\n # Check the \"bounding box\" for the sector: is this guess plausible?\n move_down = (y_temp < np.minimum(yA,yB)) + 0\n move_right = (x_temp > np.maximum(xB,xD)) + 0\n move_up = (y_temp > np.maximum(yC,yD)) + 0\n move_left = (x_temp < np.minimum(xA,xC)) + 0\n\n # Check which boundaries are violated (and thus where to look next)\n c = (move_down + move_right + move_up + move_left) == 0\n move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c])\n move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c])\n move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c])\n move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c])\n\n # Update the sector guess based on the violations\n x_pos_next = x_pos_guess[these] - move_left + move_right\n x_pos_next[x_pos_next < 0] = 0\n x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2\n y_pos_next = y_pos_guess[these] - move_down + move_up\n y_pos_next[y_pos_next < 0] = 0\n y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2\n\n # Check which sectors have not changed, and mark them as complete\n no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next))\n x_pos_guess[these] = x_pos_next\n y_pos_guess[these] = y_pos_next\n temp = these.nonzero()\n these[temp[0][no_move]] = False\n\n # Move to the next iteration of the search\n loops += 1\n\n # Return the output\n x_pos = x_pos_guess\n y_pos = y_pos_guess\n return x_pos, y_pos\n\n def findCoords(self,x,y,x_pos,y_pos):\n '''\n Calculates the relative coordinates (alpha,beta) for each point (x,y),\n given the sectors (x_pos,y_pos) in which they reside. Only called as\n a subroutine of __call__().\n\n Parameters\n ----------\n x : np.array\n Values whose sector should be found.\n y : np.array\n Values whose sector should be found. 
Should be same size as x.\n x_pos : np.array\n Sector x-coordinates for each point in (x,y), of the same size.\n y_pos : np.array\n Sector y-coordinates for each point in (x,y), of the same size.\n\n Returns\n -------\n alpha : np.array\n Relative \"horizontal\" position of the input in their respective sectors.\n beta : np.array\n Relative \"vertical\" position of the input in their respective sectors.\n '''\n # Calculate relative coordinates in the sector for each point\n xA = self.x_values[x_pos,y_pos]\n xB = self.x_values[x_pos+1,y_pos]\n xC = self.x_values[x_pos,y_pos+1]\n xD = self.x_values[x_pos+1,y_pos+1]\n yA = self.y_values[x_pos,y_pos]\n yB = self.y_values[x_pos+1,y_pos]\n yC = self.y_values[x_pos,y_pos+1]\n yD = self.y_values[x_pos+1,y_pos+1]\n polarity = 2.0*self.polarity[x_pos,y_pos] - 1.0\n a = xA\n b = (xB-xA)\n c = (xC-xA)\n d = (xA-xB-xC+xD)\n e = yA\n f = (yB-yA)\n g = (yC-yA)\n h = (yA-yB-yC+yD)\n denom = (d*g-h*c)\n mu = (h*b-d*f)/denom\n tau = (h*(a-x) - d*(e-y))/denom\n zeta = a - x + c*tau\n eta = b + c*mu + d*tau\n theta = d*mu\n alpha = (-eta + polarity*np.sqrt(eta**2.0 - 4.0*zeta*theta))/(2.0*theta)\n beta = mu*alpha + tau\n\n # Alternate method if there are sectors that are \"too regular\"\n z = np.logical_or(np.isnan(alpha),np.isnan(beta)) # These points weren't able to identify coordinates\n if np.any(z):\n these = np.isclose(f/b,(yD-yC)/(xD-xC)) # iso-beta lines have equal slope\n if np.any(these):\n kappa = f[these]/b[these]\n int_bot = yA[these] - kappa*xA[these]\n int_top = yC[these] - kappa*xC[these]\n int_these = y[these] - kappa*x[these]\n beta_temp = (int_these-int_bot)/(int_top-int_bot)\n x_left = beta_temp*xC[these] + (1.0-beta_temp)*xA[these]\n x_right = beta_temp*xD[these] + (1.0-beta_temp)*xB[these]\n alpha_temp= (x[these]-x_left)/(x_right-x_left)\n beta[these] = beta_temp\n alpha[these] = alpha_temp\n\n #print(np.sum(np.isclose(g/c,(yD-yB)/(xD-xB))))\n\n return alpha, beta\n\n def _evaluate(self,x,y):\n '''\n Returns the level of the interpolated function at each value in x,y.\n Only called internally by HARKinterpolator2D.__call__ (etc).\n '''\n x_pos, y_pos = self.findSector(x,y)\n alpha, beta = self.findCoords(x,y,x_pos,y_pos)\n\n # Calculate the function at each point using bilinear interpolation\n f = (\n (1-alpha)*(1-beta)*self.f_values[x_pos,y_pos]\n + (1-alpha)*beta*self.f_values[x_pos,y_pos+1]\n + alpha*(1-beta)*self.f_values[x_pos+1,y_pos]\n + alpha*beta*self.f_values[x_pos+1,y_pos+1])\n return f\n\n def _derX(self,x,y):\n '''\n Returns the derivative with respect to x of the interpolated function\n at each value in x,y. 
Only called internally by HARKinterpolator2D.derivativeX.\n '''\n x_pos, y_pos = self.findSector(x,y)\n alpha, beta = self.findCoords(x,y,x_pos,y_pos)\n\n # Get four corners data for each point\n xA = self.x_values[x_pos,y_pos]\n xB = self.x_values[x_pos+1,y_pos]\n xC = self.x_values[x_pos,y_pos+1]\n xD = self.x_values[x_pos+1,y_pos+1]\n yA = self.y_values[x_pos,y_pos]\n yB = self.y_values[x_pos+1,y_pos]\n yC = self.y_values[x_pos,y_pos+1]\n yD = self.y_values[x_pos+1,y_pos+1]\n fA = self.f_values[x_pos,y_pos]\n fB = self.f_values[x_pos+1,y_pos]\n fC = self.f_values[x_pos,y_pos+1]\n fD = self.f_values[x_pos+1,y_pos+1]\n\n # Calculate components of the alpha,beta --> x,y delta translation matrix\n alpha_x = (1-beta)*(xB-xA) + beta*(xD-xC)\n alpha_y = (1-beta)*(yB-yA) + beta*(yD-yC)\n beta_x = (1-alpha)*(xC-xA) + alpha*(xD-xB)\n beta_y = (1-alpha)*(yC-yA) + alpha*(yD-yB)\n\n # Invert the delta translation matrix into x,y --> alpha,beta\n det = alpha_x*beta_y - beta_x*alpha_y\n x_alpha = beta_y/det\n x_beta = -alpha_y/det\n\n # Calculate the derivative of f w.r.t. alpha and beta\n dfda = (1-beta)*(fB-fA) + beta*(fD-fC)\n dfdb = (1-alpha)*(fC-fA) + alpha*(fD-fB)\n\n # Calculate the derivative with respect to x (and return it)\n dfdx = x_alpha*dfda + x_beta*dfdb\n return dfdx\n\n def _derY(self,x,y):\n '''\n Returns the derivative with respect to y of the interpolated function\n at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.\n '''\n x_pos, y_pos = self.findSector(x,y)\n alpha, beta = self.findCoords(x,y,x_pos,y_pos)\n\n # Get four corners data for each point\n xA = self.x_values[x_pos,y_pos]\n xB = self.x_values[x_pos+1,y_pos]\n xC = self.x_values[x_pos,y_pos+1]\n xD = self.x_values[x_pos+1,y_pos+1]\n yA = self.y_values[x_pos,y_pos]\n yB = self.y_values[x_pos+1,y_pos]\n yC = self.y_values[x_pos,y_pos+1]\n yD = self.y_values[x_pos+1,y_pos+1]\n fA = self.f_values[x_pos,y_pos]\n fB = self.f_values[x_pos+1,y_pos]\n fC = self.f_values[x_pos,y_pos+1]\n fD = self.f_values[x_pos+1,y_pos+1]\n\n # Calculate components of the alpha,beta --> x,y delta translation matrix\n alpha_x = (1-beta)*(xB-xA) + beta*(xD-xC)\n alpha_y = (1-beta)*(yB-yA) + beta*(yD-yC)\n beta_x = (1-alpha)*(xC-xA) + alpha*(xD-xB)\n beta_y = (1-alpha)*(yC-yA) + alpha*(yD-yB)\n\n # Invert the delta translation matrix into x,y --> alpha,beta\n det = alpha_x*beta_y - beta_x*alpha_y\n y_alpha = -beta_x/det\n y_beta = alpha_x/det\n\n # Calculate the derivative of f w.r.t. alpha and beta\n dfda = (1-beta)*(fB-fA) + beta*(fD-fC)\n dfdb = (1-alpha)*(fC-fA) + alpha*(fD-fB)\n\n # Calculate the derivative with respect to x (and return it)\n dfdy = y_alpha*dfda + y_beta*dfdb\n return dfdy\n\n###############################################################################\n## Functions used in discrete choice models with T1EV taste shocks ############\n###############################################################################\n\n\ndef calcLogSumChoiceProbs(Vals, sigma):\n '''\n Returns the final optimal value and choice probabilities given the choice\n specific value functions `Vals`. 
Probabilities are degenerate if sigma == 0.0.\n Parameters\n ----------\n Vals : [numpy.array]\n A numpy.array that holds choice specific values at common grid points.\n sigma : float\n A number that controls the variance of the taste shocks\n Returns\n -------\n V : [numpy.array]\n A numpy.array that holds the integrated value function.\n P : [numpy.array]\n A numpy.array that holds the discrete choice probabilities\n '''\n # Assumes that NaNs have been replaced by -numpy.inf or similar\n if sigma == 0.0:\n # We could construct a linear index here and use unravel_index.\n Pflat = np.argmax(Vals, axis=0)\n\n V = np.zeros(Vals[0].shape)\n Probs = np.zeros(Vals.shape)\n for i in range(Vals.shape[0]):\n optimalIndices = Pflat == i\n V[optimalIndices] = Vals[i][optimalIndices]\n Probs[i][optimalIndices] = 1\n return V, Probs\n\n # else we have a taste shock\n maxV = np.max(Vals, axis=0)\n\n # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma)\n sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0)\n LogSumV = np.log(sumexp)\n LogSumV = maxV + sigma*LogSumV\n\n Probs = np.exp((Vals-LogSumV)/sigma)\n return LogSumV, Probs\n\ndef calcChoiceProbs(Vals, sigma):\n '''\n Returns the choice probabilities given the choice specific value functions\n `Vals`. Probabilities are degenerate if sigma == 0.0.\n Parameters\n ----------\n Vals : [numpy.array]\n A numpy.array that holds choice specific values at common grid points.\n sigma : float\n A number that controls the variance of the taste shocks\n Returns\n -------\n Probs : [numpy.array]\n A numpy.array that holds the discrete choice probabilities\n '''\n\n # Assumes that NaNs have been replaced by -numpy.inf or similar\n if sigma == 0.0:\n # We could construct a linear index here and use unravel_index.\n Pflat = np.argmax(Vals, axis=0)\n Probs = np.zeros(Vals.shape)\n for i in range(Vals.shape[0]):\n Probs[i][Pflat==i] = 1\n return Probs\n\n maxV = np.max(Vals, axis=0)\n Probs = np.divide(np.exp((Vals-maxV)/sigma), np.sum(np.exp((Vals-maxV)/sigma), axis=0))\n return Probs\n\n\ndef calcLogSum(Vals, sigma):\n '''\n Returns the optimal value given the choice specific value functions Vals.\n Parameters\n ----------\n Vals : [numpy.array]\n A numpy.array that holds choice specific values at common grid points.\n sigma : float\n A number that controls the variance of the taste shocks\n Returns\n -------\n V : [numpy.array]\n A numpy.array that holds the integrated value function.\n '''\n\n # Assumes that NaNs have been replaced by -numpy.inf or similar\n if sigma == 0.0:\n # We could construct a linear index here and use unravel_index.\n V = np.amax(Vals, axis=0)\n return V\n\n # else we have a taste shock\n maxV = np.max(Vals, axis=0)\n\n # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma)\n sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0)\n LogSumV = np.log(sumexp)\n LogSumV = maxV + sigma*LogSumV\n return LogSumV\n\ndef main():\n print(\"Sorry, HARK.interpolation doesn't actually do much on its own.\")\n print(\"To see some examples of its interpolation methods in action, look at any\")\n print(\"of the model modules in /ConsumptionSavingModel. 
In the future, running\")\n print(\"this module will show examples of each interpolation class.\")\n\n from time import clock\n import matplotlib.pyplot as plt\n\n RNG = np.random.RandomState(123)\n\n if False:\n x = np.linspace(1,20,39)\n y = np.log(x)\n dydx = 1.0/x\n f = CubicInterp(x,y,dydx)\n x_test = np.linspace(0,30,200)\n y_test = f(x_test)\n plt.plot(x_test,y_test)\n plt.show()\n\n if False:\n f = lambda x,y : 3.0*x**2.0 + x*y + 4.0*y**2.0\n dfdx = lambda x,y : 6.0*x + y\n dfdy = lambda x,y : x + 8.0*y\n\n y_list = np.linspace(0,5,100,dtype=float)\n xInterpolators = []\n xInterpolators_alt = []\n for y in y_list:\n this_x_list = np.sort((RNG.rand(100)*5.0))\n this_interpolation = LinearInterp(this_x_list,f(this_x_list,y*np.ones(this_x_list.size)))\n that_interpolation = CubicInterp(this_x_list,f(this_x_list,y*np.ones(this_x_list.size)),dfdx(this_x_list,y*np.ones(this_x_list.size)))\n xInterpolators.append(this_interpolation)\n xInterpolators_alt.append(that_interpolation)\n g = LinearInterpOnInterp1D(xInterpolators,y_list)\n h = LinearInterpOnInterp1D(xInterpolators_alt,y_list)\n\n rand_x = RNG.rand(100)*5.0\n rand_y = RNG.rand(100)*5.0\n z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)\n q = (dfdx(rand_x,rand_y) - g.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y)\n r = (dfdy(rand_x,rand_y) - g.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y)\n #print(z)\n #print(q)\n #print(r)\n\n z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)\n q = (dfdx(rand_x,rand_y) - g.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y)\n r = (dfdy(rand_x,rand_y) - g.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y)\n print(z)\n #print(q)\n #print(r)\n\n\n if False:\n f = lambda x,y,z : 3.0*x**2.0 + x*y + 4.0*y**2.0 - 5*z**2.0 + 1.5*x*z\n dfdx = lambda x,y,z : 6.0*x + y + 1.5*z\n dfdy = lambda x,y,z : x + 8.0*y\n dfdz = lambda x,y,z : -10.0*z + 1.5*x\n\n y_list = np.linspace(0,5,51,dtype=float)\n z_list = np.linspace(0,5,51,dtype=float)\n xInterpolators = []\n for y in y_list:\n temp = []\n for z in z_list:\n this_x_list = np.sort((RNG.rand(100)*5.0))\n this_interpolation = LinearInterp(this_x_list,f(this_x_list,y*np.ones(this_x_list.size),z*np.ones(this_x_list.size)))\n temp.append(this_interpolation)\n xInterpolators.append(deepcopy(temp))\n g = BilinearInterpOnInterp1D(xInterpolators,y_list,z_list)\n\n rand_x = RNG.rand(1000)*5.0\n rand_y = RNG.rand(1000)*5.0\n rand_z = RNG.rand(1000)*5.0\n z = (f(rand_x,rand_y,rand_z) - g(rand_x,rand_y,rand_z))/f(rand_x,rand_y,rand_z)\n q = (dfdx(rand_x,rand_y,rand_z) - g.derivativeX(rand_x,rand_y,rand_z))/dfdx(rand_x,rand_y,rand_z)\n r = (dfdy(rand_x,rand_y,rand_z) - g.derivativeY(rand_x,rand_y,rand_z))/dfdy(rand_x,rand_y,rand_z)\n p = (dfdz(rand_x,rand_y,rand_z) - g.derivativeZ(rand_x,rand_y,rand_z))/dfdz(rand_x,rand_y,rand_z)\n z.sort()\n\n\n\n if False:\n f = lambda w,x,y,z : 4.0*w*z - 2.5*w*x + w*y + 6.0*x*y - 10.0*x*z + 3.0*y*z - 7.0*z + 4.0*x + 2.0*y - 5.0*w\n dfdw = lambda w,x,y,z : 4.0*z - 2.5*x + y - 5.0\n dfdx = lambda w,x,y,z : -2.5*w + 6.0*y - 10.0*z + 4.0\n dfdy = lambda w,x,y,z : w + 6.0*x + 3.0*z + 2.0\n dfdz = lambda w,x,y,z : 4.0*w - 10.0*x + 3.0*y - 7\n\n x_list = np.linspace(0,5,16,dtype=float)\n y_list = np.linspace(0,5,16,dtype=float)\n z_list = np.linspace(0,5,16,dtype=float)\n wInterpolators = []\n for x in x_list:\n temp = []\n for y in y_list:\n temptemp = []\n for z in z_list:\n this_w_list = np.sort((RNG.rand(16)*5.0))\n this_interpolation = 
LinearInterp(this_w_list,f(this_w_list,x*np.ones(this_w_list.size),y*np.ones(this_w_list.size),z*np.ones(this_w_list.size)))\n temptemp.append(this_interpolation)\n temp.append(deepcopy(temptemp))\n wInterpolators.append(deepcopy(temp))\n g = TrilinearInterpOnInterp1D(wInterpolators,x_list,y_list,z_list)\n\n N = 20000\n rand_w = RNG.rand(N)*5.0\n rand_x = RNG.rand(N)*5.0\n rand_y = RNG.rand(N)*5.0\n rand_z = RNG.rand(N)*5.0\n t_start = clock()\n z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z)\n q = (dfdw(rand_w,rand_x,rand_y,rand_z) - g.derivativeW(rand_w,rand_x,rand_y,rand_z))/dfdw(rand_w,rand_x,rand_y,rand_z)\n r = (dfdx(rand_w,rand_x,rand_y,rand_z) - g.derivativeX(rand_w,rand_x,rand_y,rand_z))/dfdx(rand_w,rand_x,rand_y,rand_z)\n p = (dfdy(rand_w,rand_x,rand_y,rand_z) - g.derivativeY(rand_w,rand_x,rand_y,rand_z))/dfdy(rand_w,rand_x,rand_y,rand_z)\n s = (dfdz(rand_w,rand_x,rand_y,rand_z) - g.derivativeZ(rand_w,rand_x,rand_y,rand_z))/dfdz(rand_w,rand_x,rand_y,rand_z)\n t_end = clock()\n\n z.sort()\n print(z)\n print(t_end-t_start)\n\n if False:\n f = lambda x,y : 3.0*x**2.0 + x*y + 4.0*y**2.0\n dfdx = lambda x,y : 6.0*x + y\n dfdy = lambda x,y : x + 8.0*y\n\n x_list = np.linspace(0,5,101,dtype=float)\n y_list = np.linspace(0,5,101,dtype=float)\n x_temp,y_temp = np.meshgrid(x_list,y_list,indexing='ij')\n g = BilinearInterp(f(x_temp,y_temp),x_list,y_list)\n\n rand_x = RNG.rand(100)*5.0\n rand_y = RNG.rand(100)*5.0\n z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)\n q = (f(x_temp,y_temp) - g(x_temp,y_temp))/f(x_temp,y_temp)\n #print(z)\n #print(q)\n\n\n if False:\n f = lambda x,y,z : 3.0*x**2.0 + x*y + 4.0*y**2.0 - 5*z**2.0 + 1.5*x*z\n dfdx = lambda x,y,z : 6.0*x + y + 1.5*z\n dfdy = lambda x,y,z : x + 8.0*y\n dfdz = lambda x,y,z : -10.0*z + 1.5*x\n\n x_list = np.linspace(0,5,11,dtype=float)\n y_list = np.linspace(0,5,11,dtype=float)\n z_list = np.linspace(0,5,101,dtype=float)\n x_temp,y_temp,z_temp = np.meshgrid(x_list,y_list,z_list,indexing='ij')\n g = TrilinearInterp(f(x_temp,y_temp,z_temp),x_list,y_list,z_list)\n\n rand_x = RNG.rand(1000)*5.0\n rand_y = RNG.rand(1000)*5.0\n rand_z = RNG.rand(1000)*5.0\n z = (f(rand_x,rand_y,rand_z) - g(rand_x,rand_y,rand_z))/f(rand_x,rand_y,rand_z)\n q = (dfdx(rand_x,rand_y,rand_z) - g.derivativeX(rand_x,rand_y,rand_z))/dfdx(rand_x,rand_y,rand_z)\n r = (dfdy(rand_x,rand_y,rand_z) - g.derivativeY(rand_x,rand_y,rand_z))/dfdy(rand_x,rand_y,rand_z)\n p = (dfdz(rand_x,rand_y,rand_z) - g.derivativeZ(rand_x,rand_y,rand_z))/dfdz(rand_x,rand_y,rand_z)\n p.sort()\n plt.plot(p)\n\n\n if False:\n f = lambda w,x,y,z : 4.0*w*z - 2.5*w*x + w*y + 6.0*x*y - 10.0*x*z + 3.0*y*z - 7.0*z + 4.0*x + 2.0*y - 5.0*w\n dfdw = lambda w,x,y,z : 4.0*z - 2.5*x + y - 5.0\n dfdx = lambda w,x,y,z : -2.5*w + 6.0*y - 10.0*z + 4.0\n dfdy = lambda w,x,y,z : w + 6.0*x + 3.0*z + 2.0\n dfdz = lambda w,x,y,z : 4.0*w - 10.0*x + 3.0*y - 7\n\n w_list = np.linspace(0,5,16,dtype=float)\n x_list = np.linspace(0,5,16,dtype=float)\n y_list = np.linspace(0,5,16,dtype=float)\n z_list = np.linspace(0,5,16,dtype=float)\n w_temp,x_temp,y_temp,z_temp = np.meshgrid(w_list,x_list,y_list,z_list,indexing='ij')\n mySearch = lambda trash,x : np.floor(x/5*32).astype(int)\n g = QuadlinearInterp(f(w_temp,x_temp,y_temp,z_temp),w_list,x_list,y_list,z_list)\n\n N = 1000000\n rand_w = RNG.rand(N)*5.0\n rand_x = RNG.rand(N)*5.0\n rand_y = RNG.rand(N)*5.0\n rand_z = RNG.rand(N)*5.0\n t_start = clock()\n z = (f(rand_w,rand_x,rand_y,rand_z) - 
g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z)\n t_end = clock()\n #print(z)\n print(t_end-t_start)\n\n\n if False:\n f = lambda x,y : 3.0*x**2.0 + x*y + 4.0*y**2.0\n dfdx = lambda x,y : 6.0*x + y\n dfdy = lambda x,y : x + 8.0*y\n\n warp_factor = 0.01\n x_list = np.linspace(0,5,71,dtype=float)\n y_list = np.linspace(0,5,51,dtype=float)\n x_temp,y_temp = np.meshgrid(x_list,y_list,indexing='ij')\n x_adj = x_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)\n y_adj = y_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)\n g = Curvilinear2DInterp(f(x_adj,y_adj),x_adj,y_adj)\n\n rand_x = RNG.rand(1000)*5.0\n rand_y = RNG.rand(1000)*5.0\n t_start = clock()\n z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)\n q = (dfdx(rand_x,rand_y) - g.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y)\n r = (dfdy(rand_x,rand_y) - g.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y)\n t_end = clock()\n z.sort()\n q.sort()\n r.sort()\n #print(z)\n print(t_end-t_start)\n\n\n if False:\n f = lambda x,y,z : 3.0*x**2.0 + x*y + 4.0*y**2.0 - 5*z**2.0 + 1.5*x*z\n dfdx = lambda x,y,z : 6.0*x + y + 1.5*z\n dfdy = lambda x,y,z : x + 8.0*y\n dfdz = lambda x,y,z : -10.0*z + 1.5*x\n\n warp_factor = 0.01\n x_list = np.linspace(0,5,11,dtype=float)\n y_list = np.linspace(0,5,11,dtype=float)\n z_list = np.linspace(0,5,101,dtype=float)\n x_temp,y_temp = np.meshgrid(x_list,y_list,indexing='ij')\n xyInterpolators = []\n for j in range(z_list.size):\n x_adj = x_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)\n y_adj = y_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)\n z_temp = z_list[j]*np.ones(x_adj.shape)\n thisInterp = Curvilinear2DInterp(f(x_adj,y_adj,z_temp),x_adj,y_adj)\n xyInterpolators.append(thisInterp)\n g = LinearInterpOnInterp2D(xyInterpolators,z_list)\n\n N = 1000\n rand_x = RNG.rand(N)*5.0\n rand_y = RNG.rand(N)*5.0\n rand_z = RNG.rand(N)*5.0\n z = (f(rand_x,rand_y,rand_z) - g(rand_x,rand_y,rand_z))/f(rand_x,rand_y,rand_z)\n p = (dfdz(rand_x,rand_y,rand_z) - g.derivativeZ(rand_x,rand_y,rand_z))/dfdz(rand_x,rand_y,rand_z)\n p.sort()\n plt.plot(p)\n\n\n if False:\n f = lambda w,x,y,z : 4.0*w*z - 2.5*w*x + w*y + 6.0*x*y - 10.0*x*z + 3.0*y*z - 7.0*z + 4.0*x + 2.0*y - 5.0*w\n dfdw = lambda w,x,y,z : 4.0*z - 2.5*x + y - 5.0\n dfdx = lambda w,x,y,z : -2.5*w + 6.0*y - 10.0*z + 4.0\n dfdy = lambda w,x,y,z : w + 6.0*x + 3.0*z + 2.0\n dfdz = lambda w,x,y,z : 4.0*w - 10.0*x + 3.0*y - 7\n\n warp_factor = 0.1\n w_list = np.linspace(0,5,16,dtype=float)\n x_list = np.linspace(0,5,16,dtype=float)\n y_list = np.linspace(0,5,16,dtype=float)\n z_list = np.linspace(0,5,16,dtype=float)\n w_temp,x_temp = np.meshgrid(w_list,x_list,indexing='ij')\n wxInterpolators = []\n for i in range(y_list.size):\n temp = []\n for j in range(z_list.size):\n w_adj = w_temp + warp_factor*(RNG.rand(w_list.size,x_list.size) - 0.5)\n x_adj = x_temp + warp_factor*(RNG.rand(w_list.size,x_list.size) - 0.5)\n y_temp = y_list[i]*np.ones(w_adj.shape)\n z_temp = z_list[j]*np.ones(w_adj.shape)\n thisInterp = Curvilinear2DInterp(f(w_adj,x_adj,y_temp,z_temp),w_adj,x_adj)\n temp.append(thisInterp)\n wxInterpolators.append(temp)\n g = BilinearInterpOnInterp2D(wxInterpolators,y_list,z_list)\n\n N = 1000000\n rand_w = RNG.rand(N)*5.0\n rand_x = RNG.rand(N)*5.0\n rand_y = RNG.rand(N)*5.0\n rand_z = RNG.rand(N)*5.0\n\n t_start = clock()\n z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z)\n t_end = clock()\n z.sort()\n print(z)\n print(t_end-t_start)\n\n\nif 
__name__ == '__main__':\n main()\n" ]
[ [ "numpy.ones_like", "numpy.isclose", "numpy.argmin", "numpy.minimum", "numpy.exp", "numpy.max", "numpy.zeros_like", "numpy.log", "numpy.nanmin", "numpy.logical_and", "numpy.argmax", "numpy.arange", "numpy.sqrt", "numpy.nanmax", "numpy.logical_or", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.isscalar", "numpy.amax", "matplotlib.pyplot.show", "numpy.searchsorted", "numpy.floor", "numpy.isnan", "numpy.asarray", "numpy.random.RandomState", "numpy.ones", "matplotlib.pyplot.plot", "numpy.any", "numpy.linspace", "numpy.meshgrid", "numpy.maximum" ] ]
xwyzsn/solo-learn
[ "16d021d8053439a3de205337ab2a11d191500b09", "16d021d8053439a3de205337ab2a11d191500b09" ]
[ "tests/utils/test_gather.py", "solo/methods/deepclusterv2.py" ]
[ "# Copyright 2021 solo-learn development team.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\n# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport torch\nfrom solo.utils.misc import gather\n\n\ndef test_gather_layer():\n X = torch.randn(10, 30, requires_grad=True)\n X_gathered = gather(X)\n assert isinstance(X, torch.Tensor)\n\n dummy_loss = torch.mm(X_gathered, X_gathered.T).sum()\n dummy_loss.backward()\n assert X.grad is not None\n", "# Copyright 2021 solo-learn development team.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\n# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport argparse\nfrom typing import Any, Dict, List, Sequence\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom solo.losses.deepclusterv2 import deepclusterv2_loss_func\nfrom solo.methods.base import BaseMethod\nfrom solo.utils.kmeans import KMeans\n\n\nclass DeepClusterV2(BaseMethod):\n def __init__(\n self,\n proj_output_dim: int,\n proj_hidden_dim: int,\n num_prototypes: Sequence[int],\n temperature: float,\n kmeans_iters: int,\n **kwargs,\n ):\n \"\"\"Implements DeepCluster V2 (https://arxiv.org/abs/2006.09882).\n\n Args:\n proj_output_dim (int): number of dimensions of the projected features.\n proj_hidden_dim (int): number of neurons in the hidden layers of the projector.\n num_prototypes (Sequence[int]): number of prototypes.\n temperature (float): temperature for the softmax.\n kmeans_iters (int): number of iterations for k-means clustering.\n \"\"\"\n\n super().__init__(**kwargs)\n\n self.proj_output_dim = proj_output_dim\n self.temperature = temperature\n self.num_prototypes = num_prototypes\n self.kmeans_iters = kmeans_iters\n\n # projector\n self.projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_output_dim),\n )\n\n # prototypes\n self.prototypes = nn.ModuleList(\n [nn.Linear(proj_output_dim, np, bias=False) for np in num_prototypes]\n )\n # normalize and set requires grad to false\n for proto in self.prototypes:\n for params in proto.parameters():\n params.requires_grad = False\n proto.weight.copy_(F.normalize(proto.weight.data.clone(), dim=-1))\n\n @staticmethod\n def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parent_parser = super(DeepClusterV2, DeepClusterV2).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"deepclusterv2\")\n\n # projector\n parser.add_argument(\"--proj_output_dim\", type=int, default=128)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n\n # parameters\n parser.add_argument(\"--temperature\", type=float, default=0.1)\n parser.add_argument(\"--num_prototypes\", type=int, nargs=\"+\", default=[3000, 3000, 3000])\n parser.add_argument(\"--kmeans_iters\", type=int, default=10)\n\n return parent_parser\n\n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector and prototypes parameters to the parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params = [{\"params\": self.projector.parameters()}]\n return super().learnable_params + extra_learnable_params\n\n def on_train_start(self):\n \"\"\"Gets the world size and initializes the memory banks.\"\"\"\n # k-means needs the world size and the dataset size\n self.world_size = self.trainer.world_size if self.trainer else 1\n self.dataset_size = getattr(self, \"dali_epoch_size\", None) or len(\n self.trainer.train_dataloader.dataset\n )\n\n # build k-means helper object\n self.kmeans = KMeans(\n world_size=self.world_size,\n rank=self.global_rank,\n num_large_crops=self.num_large_crops,\n dataset_size=self.dataset_size,\n proj_features_dim=self.proj_output_dim,\n num_prototypes=self.num_prototypes,\n 
kmeans_iters=self.kmeans_iters,\n )\n\n # initialize memory banks\n size_memory_per_process = len(self.trainer.train_dataloader) * self.batch_size\n self.register_buffer(\n \"local_memory_index\",\n torch.zeros(size_memory_per_process).long().to(self.device, non_blocking=True),\n )\n self.register_buffer(\n \"local_memory_embeddings\",\n F.normalize(\n torch.randn(self.num_large_crops, size_memory_per_process, self.proj_output_dim),\n dim=-1,\n ).to(self.device, non_blocking=True),\n )\n\n def on_train_epoch_start(self) -> None:\n \"\"\"Prepares assigments and prototype centroids for the next epoch.\"\"\"\n\n if self.current_epoch == 0:\n self.assignments = -torch.ones(\n len(self.num_prototypes), self.dataset_size, device=self.device\n ).long()\n else:\n self.assignments, centroids = self.kmeans.cluster_memory(\n self.local_memory_index, self.local_memory_embeddings\n )\n for proto, centro in zip(self.prototypes, centroids):\n proto.weight.copy_(centro)\n\n def update_memory_banks(self, idxs: torch.Tensor, z: torch.Tensor, batch_idx: int) -> None:\n \"\"\"Updates DeepClusterV2's memory banks of indices and features.\n\n Args:\n idxs (torch.Tensor): set of indices of the samples of the current batch.\n z (torch.Tensor): projected features of the samples of the current batch.\n batch_idx (int): batch index relative to the current epoch.\n \"\"\"\n\n start_idx, end_idx = batch_idx * self.batch_size, (batch_idx + 1) * self.batch_size\n self.local_memory_index[start_idx:end_idx] = idxs\n for c, z_c in enumerate(z):\n self.local_memory_embeddings[c][start_idx:end_idx] = z_c.detach()\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:\n \"\"\"Performs the forward pass of the backbone, the projector and the prototypes.\n\n Args:\n X (torch.Tensor): a batch of images in the tensor format.\n\n Returns:\n Dict[str, Any]:\n a dict containing the outputs of the parent,\n the projected features and the logits.\n \"\"\"\n\n out = super().forward(X, *args, **kwargs)\n z = F.normalize(self.projector(out[\"feats\"]))\n p = torch.stack([p(z) for p in self.prototypes])\n return {**out, \"z\": z, \"p\": p}\n\n def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:\n \"\"\"Training step for DeepClusterV2 reusing BaseMethod training step.\n\n Args:\n batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where\n [X] is a list of size num_crops containing batches of images.\n batch_idx (int): index of the batch.\n\n Returns:\n torch.Tensor: total loss composed of DeepClusterV2 loss and classification loss.\n \"\"\"\n\n idxs = batch[0]\n\n out = super().training_step(batch, batch_idx)\n class_loss = out[\"loss\"]\n feats1, feats2 = out[\"feats\"]\n\n z1 = F.normalize(self.projector(feats1))\n z2 = F.normalize(self.projector(feats2))\n\n p1 = torch.stack([proto(z1) for proto in self.prototypes])\n p2 = torch.stack([proto(z2) for proto in self.prototypes])\n\n # ------- deepclusterv2 loss -------\n preds = torch.stack([p1.unsqueeze(1), p2.unsqueeze(1)], dim=1)\n assignments = self.assignments[:, idxs]\n deepcluster_loss = deepclusterv2_loss_func(preds, assignments, self.temperature)\n\n # ------- update memory banks -------\n self.update_memory_banks(idxs, [z1, z2], batch_idx)\n\n self.log(\"train_deepcluster_loss\", deepcluster_loss, on_epoch=True, sync_dist=True)\n\n return deepcluster_loss + class_loss\n" ]
[ [ "torch.randn", "torch.mm" ], [ "torch.nn.Linear", "torch.zeros", "torch.nn.ReLU", "torch.nn.BatchNorm1d", "torch.randn" ] ]
yunndlalala/MCS-project
[ "3fa066fd8e3a3c9578560e11c10b1516f4a23430" ]
[ "nn_model.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.nn.functional as F\n\nclass MLP(nn.Module):\n def __init__(self, input_size, hidden_layers, out_size):\n super(MLP, self).__init__()\n self.sizes = [input_size] + hidden_layers + [out_size]\n self.linears = [nn.Linear(in_dim, out_dim, True) for in_dim, out_dim in zip(self.sizes[: -1], self.sizes[1:])]\n self.linears = nn.ModuleList(self.linears)\n self.weight_init()\n\n def forward(self, x):\n for layer in self.linears[:-1]:\n x = F.relu(layer(x))\n x = self.linears[-1](x)\n return x\n\n def weight_init(self):\n for layer in self.linears:\n torch.nn.init.xavier_uniform(layer.weight)\n torch.nn.init.zeros_(layer.bias)\n\nclass MLP_bn(nn.Module):\n def __init__(self, input_size, hidden_layers, out_size):\n super(MLP_bn, self).__init__()\n self.sizes = [input_size] + hidden_layers + [out_size]\n self.linears = [nn.Sequential(nn.Linear(in_dim, out_dim, True), nn.BatchNorm1d(out_dim)) for in_dim, out_dim in zip(self.sizes[: -1], self.sizes[1:])]\n self.linears = nn.ModuleList(self.linears)\n self.weight_init()\n\n def forward(self, x):\n for layer in self.linears[:-1]:\n x = F.relu(layer(x))\n x = self.linears[-1][0](x)\n return x\n\n def weight_init(self):\n for layer in self.linears:\n torch.nn.init.xavier_uniform(layer[0].weight)\n torch.nn.init.zeros_(layer[0].bias)\n\nclass MLP_drop(nn.Module):\n def __init__(self, input_size, hidden_layers, out_size):\n super(MLP_drop, self).__init__()\n self.sizes = [input_size] + hidden_layers + [out_size]\n self.linears = [nn.Sequential(nn.Linear(in_dim, out_dim, True), nn.Dropout(0.5)) for in_dim, out_dim in zip(self.sizes[: -1], self.sizes[1:])]\n self.linears = nn.ModuleList(self.linears)\n self.weight_init()\n\n def forward(self, x):\n for layer in self.linears[:-1]:\n x = F.relu(layer(x))\n x = self.linears[-1][0](x)\n return x\n\n def weight_init(self):\n for layer in self.linears:\n torch.nn.init.xavier_uniform(layer[0].weight)\n torch.nn.init.zeros_(layer[0].bias)\n\ndef train_nn(model, data, num_epoch=5000):\n train_dataset = TensorDataset(torch.Tensor(data.Xtrain), torch.Tensor(data.Ytrain))\n train_dataloader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=True)\n\n test_dataset = TensorDataset(torch.Tensor(data.Xtest), torch.Tensor(data.Ytest))\n test_dataloader = DataLoader(dataset=test_dataset, batch_size=128)\n\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n losses = []\n for epoch in range(num_epoch):\n for inputs, targets in train_dataloader:\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n model.eval()\n loss = 0.\n for inputs, targets in test_dataloader:\n outputs = model(inputs)\n loss += criterion(outputs, targets).data\n losses.append(loss.data // len(test_dataloader))\n model.train()\n\n return losses\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MSELoss", "torch.nn.ModuleList", "torch.nn.init.xavier_uniform", "torch.nn.BatchNorm1d", "torch.utils.data.DataLoader", "torch.nn.init.zeros_", "torch.Tensor" ] ]
Z7Gao/InverseRenderingOfIndoorScene
[ "f245d20dcbe05b1de766c2e53af79fd489f58d74" ]
[ "nyuDataLoader.py" ]
[ "import glob\nimport numpy as np\nimport os.path as osp\nfrom PIL import Image\nimport random\nimport struct\nfrom torch.utils.data import Dataset\nimport scipy.ndimage as ndimage\nimport cv2\nfrom skimage.measure import block_reduce\nimport json\nimport scipy.ndimage as ndimage\n\n\nclass ConcatDataset(Dataset ):\n def __init__(self, *datasets):\n self.datasets = datasets\n\n def __getitem__(self, i):\n return tuple(d[i] for d in self.datasets )\n\n def __len__(self ):\n return max(len(d) for d in self.datasets )\n\n\n\nclass NYULoader(Dataset ):\n def __init__(self, imRoot, normalRoot, depthRoot, segRoot,\n imHeight = 480, imWidth = 640,\n imWidthMax = 600, imWidthMin = 560,\n phase='TRAIN', rseed = None ):\n\n self.imRoot = imRoot\n self.imHeight = imHeight\n self.imWidth = imWidth\n self.phase = phase.upper()\n\n self.imWidthMax = imWidthMax\n self.imWidthMin = imWidthMin\n\n\n if phase == 'TRAIN':\n with open('NYUTrain.txt', 'r') as fIn:\n imList = fIn.readlines()\n self.imList = [osp.join(self.imRoot, x.strip() ) for x in imList ]\n elif phase == 'TEST':\n with open('NYUTest.txt', 'r') as fIn:\n imList = fIn.readlines()\n self.imList = [osp.join(self.imRoot, x.strip() ) for x in imList ]\n\n\n self.normalList = [x.replace(imRoot, normalRoot) for x in self.imList ]\n self.segList = [x.replace(imRoot, segRoot) for x in self.imList ]\n self.depthList = [x.replace(imRoot, depthRoot).replace('.png', '.tiff') for x in self.imList]\n\n print('Image Num: %d' % len(self.imList) )\n\n # Permute the image list\n self.count = len(self.imList )\n self.perm = list(range(self.count ) )\n\n if rseed is not None:\n random.seed(0)\n random.shuffle(self.perm )\n\n def __len__(self):\n return len(self.perm )\n\n def __getitem__(self, ind):\n\n ind = (ind % len(self.perm) )\n if ind == 0:\n random.shuffle(self.perm )\n\n if self.phase == 'TRAIN':\n scale = np.random.random();\n imCropWidth = int( np.round( (self.imWidthMax - self.imWidthMin ) * scale + self.imWidthMin ) )\n imCropHeight = int( float(self.imHeight) / float(self.imWidth ) * imCropWidth )\n rs = int(np.round( (480 - imCropHeight) * np.random.random() ) )\n re = rs + imCropHeight\n cs = int(np.round( (640 - imCropWidth) * np.random.random() ) )\n ce = cs + imCropWidth\n elif self.phase == 'TEST':\n imCropWidth = self.imWidth\n imCropHeight = self.imHeight\n rs, re, cs, ce = 0, 480, 0, 640\n\n segNormal = 0.5 * ( self.loadImage(self.segList[self.perm[ind] ], rs, re, cs, ce) + 1)[0:1, :, :]\n\n # Read Image\n im = 0.5 * (self.loadImage(self.imList[self.perm[ind] ], rs, re, cs, ce, isGama = True ) + 1)\n\n # normalize the normal vector so that it will be unit length\n normal = self.loadImage( self.normalList[self.perm[ind] ], rs, re, cs, ce )\n normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-5) )[np.newaxis, :]\n\n # Read depth\n depth = self.loadDepth(self.depthList[self.perm[ind] ], rs, re, cs, ce )\n if imCropHeight != self.imHeight or imCropWidth != self.imWidth:\n depth = np.squeeze(depth, axis=0)\n depth = cv2.resize(depth, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR)\n depth = depth[np.newaxis, :, :]\n segDepth = np.logical_and(depth > 1, depth < 10).astype(np.float32 )\n\n if imCropHeight != self.imHeight or imCropWidth != self.imWidth:\n normal = normal.transpose([1, 2, 0] )\n normal = cv2.resize(normal, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR)\n normal = normal.transpose([2, 0, 1] )\n normal = normal / np.maximum(np.sqrt(np.sum(normal * normal, axis=0 )[np.newaxis, 
:, :] ), 1e-5)\n\n if imCropHeight != self.imHeight or imCropWidth != self.imWidth:\n segNormal = np.squeeze(segNormal, axis=0)\n segNormal = cv2.resize(segNormal, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR)\n segNormal = segNormal[np.newaxis, :, :]\n\n im = im.transpose([1, 2, 0] )\n im = cv2.resize(im, (self.imWidth, self.imHeight), interpolation = cv2.INTER_LINEAR )\n im = im.transpose([2, 0, 1] )\n\n if self.phase == 'TRAIN':\n if np.random.random() > 0.5:\n normal = np.ascontiguousarray(normal[:, :, ::-1] )\n normal[0, :, :] = -normal[0, :, :]\n depth = np.ascontiguousarray(depth[:, :, ::-1] )\n segNormal = np.ascontiguousarray(segNormal[:, :, ::-1] )\n segDepth = np.ascontiguousarray(segDepth[:, :, ::-1] )\n im = np.ascontiguousarray(im[:, :, ::-1] )\n scale = 1 + ( np.random.random(3) * 0.4 - 0.2 )\n scale = scale.reshape([3, 1, 1] )\n im = im * scale\n\n\n batchDict = {'normal': normal,\n 'depth': depth,\n 'segNormal': segNormal,\n 'segDepth': segDepth,\n 'im': im.astype(np.float32 ),\n 'name': self.imList[self.perm[ind] ]\n } \n\n return batchDict \n\n def loadImage(self, imName, rs, re, cs, ce, isGama = False):\n if not(osp.isfile(imName ) ):\n print(imName )\n assert(False )\n\n im = cv2.imread(imName)\n if len(im.shape ) == 3:\n im = im[:, :, ::-1]\n\n im = im[rs:re, cs:ce, :]\n im = np.ascontiguousarray(im.astype(np.float32 ) )\n if isGama:\n im = (im / 255.0) ** 2.2\n im = 2 * im - 1\n else:\n im = (im - 127.5) / 127.5\n if len(im.shape) == 2:\n im = im[:, np.newaxis]\n im = np.transpose(im, [2, 0, 1] )\n\n return im\n\n def loadDepth(self, imName, rs, re, cs, ce ):\n if not osp.isfile(imName):\n print(imName )\n assert(False )\n\n im = cv2.imread(imName, -1)\n im = im[rs:re, cs:ce]\n im = im[np.newaxis, :, :]\n return im\n" ]
[ [ "numpy.round", "numpy.ascontiguousarray", "numpy.sum", "numpy.logical_and", "numpy.transpose", "numpy.random.random", "numpy.squeeze" ] ]
sdwfrost/covid19uk
[ "ffd59342d9daee2d819d2f7211afbe9713880612" ]
[ "covid19uk/data/util.py" ]
[ "\"\"\"Utility functions for COVID19 UK data\"\"\"\n\nimport os\nimport re\nimport datetime\nimport numpy as np\nimport pandas as pd\n\n\ndef prependDate(filename):\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%Y-%m-%d\")\n return date_time + \"_\" + filename\n\n\ndef prependID(filename, config):\n return config[\"Global\"][\"prependID_Str\"] + \"_\" + filename\n\n\ndef format_input_filename(filename, config):\n # prepend with a set string\n # to load a specific date, this should be in the string\n p, f = os.path.split(filename)\n if config[\"Global\"][\"prependID\"]:\n f = prependID(f, config)\n filename = p + \"/\" + f\n return filename\n\n\ndef format_output_filename(filename, config):\n p, f = os.path.split(filename)\n if config[\"Global\"][\"prependID\"]:\n f = prependID(f, config)\n if config[\"Global\"][\"prependDate\"]:\n f = prependDate(f)\n filename = p + \"/\" + f\n return filename\n\n\ndef merge_lad_codes(lad19cd):\n merging = {\n \"E06000052\": \"E06000052,E06000053\", # City of London & Westminster\n \"E06000053\": \"E06000052,E06000053\", # City of London & Westminster\n \"E09000001\": \"E09000001,E09000033\", # Cornwall & Isles of Scilly\n \"E09000033\": \"E09000001,E09000033\", # Cornwall & Isles of Scilly\n }\n lad19cd = lad19cd.apply(lambda x: merging[x] if x in merging.keys() else x)\n\n return lad19cd\n\n\ndef merge_lad_values(df):\n df = df.groupby(\"lad19cd\").sum().reset_index()\n return df\n\n\ndef get_date_low_high(config):\n date_range = [np.datetime64(x) for x in config[\"date_range\"]]\n return tuple(date_range)\n\n\ndef check_date_format(df):\n df = df.reset_index()\n\n if (\n not pd.to_datetime(df[\"date\"], format=\"%Y-%m-%d\", errors=\"coerce\")\n .notnull()\n .all()\n ):\n raise ValueError(\"Invalid date format\")\n\n return True\n\n\ndef check_date_bounds(df, date_low, date_high):\n if not ((date_low <= df[\"date\"]) & (df[\"date\"] < date_high)).all():\n raise ValueError(\"Date out of bounds\")\n return True\n\n\ndef check_lad19cd_format(df):\n df = df.reset_index()\n\n # Must contain 9 characters, 1 region letter followed by 8 numbers\n split_code = df[\"lad19cd\"].apply(lambda x: re.split(\"(\\d+)\", x))\n if not split_code.apply(\n lambda x: (len(x[0]) == 1) & (x[0] in \"ENSW\") & (len(x[1]) == 8)\n ).all():\n raise ValueError(\"Invalid lad19cd format\")\n\n return True\n\n\ndef invalidInput(input):\n raise NotImplementedError(f'Input type \"{input}\" mode not implemented')\n" ]
[ [ "pandas.to_datetime", "numpy.datetime64" ] ]
gengala/Random-Probabilistic-Circuits
[ "8871a9f1e6ace9d8ea7604b69abcc270c7792620" ]
[ "utils.py" ]
[ "from spn.structure.Base import Product, Sum, get_nodes_by_type\nfrom spn.structure.leaves.cltree.CLTree import CLTree\nfrom spn.algorithms.Validity import is_consistent\n\nfrom scipy.sparse.csgraph import minimum_spanning_tree\nfrom scipy.sparse.csgraph import depth_first_order\n\nfrom error import RootVarError\n\nimport numpy as np\n\nROOT = -1\n\n\nclass VTreeNode:\n \"\"\" used to model a Vtree \"\"\"\n\n def __init__(self, var_id=None, children=[]):\n self.var_id = var_id\n self.parent = None\n self.innerNode = True if self.var_id is None else False\n self.set_children(children)\n self.scopes = None\n\n def get_var_id(self):\n return self.var_id\n\n def get_parent(self):\n return self.parent\n\n def get_children(self):\n return self.children\n\n def set_children(self, children):\n self.children = children\n for child in children:\n child.parent = self\n\n def set_scopes(self, scopes):\n self.scopes = scopes\n\n def get_scopes(self):\n return self.scopes\n\n def is_leaf(self):\n return len(self.children) == 0\n\n def is_inner(self):\n return len(self.children) != 0\n\n\nclass DTreeNode:\n \"\"\" used to model a dependency tree \"\"\"\n\n def __init__(self, var_id, parent=None):\n self.var_id = var_id\n self.set_parent(parent)\n self.children = []\n self.tree = None\n\n def get_var_id(self):\n return self.var_id\n\n def get_parent(self):\n return self.parent\n\n def get_children(self):\n return self.children\n\n def set_parent(self, parent):\n if parent is not None:\n self.parent = parent\n self.parent.children.append(self)\n\n def set_tree(self, scope, tree):\n self.tree = [scope, tree]\n\n def get_tree(self):\n return self.tree\n\n def is_leaf(self):\n return len(self.children) == 0\n\n\ndef compute_probs(data, alpha):\n\n #\n # for fast np dot\n if data.dtype != np.float32:\n data = data.astype(np.float32)\n\n n_features = data.shape[1]\n n_samples = data.shape[0]\n\n j_ones = np.dot(data.T, data)\n #\n # to avoid normalization errors (weights of sum nodes have to sum up to 1)\n j_ones = j_ones.astype(np.float64)\n j_ones_diag = np.diag(j_ones)\n\n cols_diag = j_ones_diag * np.ones((n_features, n_features))\n rows_diag = cols_diag.transpose()\n\n probs = np.zeros((n_features, 2))\n j_probs = np.zeros((n_features, n_features, 2, 2))\n\n probs[:, 1] = (j_ones_diag + 2 * alpha) / (n_samples + 4 * alpha)\n probs[:, 0] = 1 - probs[:, 1]\n\n j_probs[:, :, 0, 0] = n_samples - cols_diag - rows_diag + j_ones + alpha\n j_probs[:, :, 0, 1] = cols_diag - j_ones + alpha\n j_probs[:, :, 1, 0] = rows_diag - j_ones + alpha\n j_probs[:, :, 1, 1] = j_ones + alpha\n\n j_probs = j_probs / (n_samples + 4 * alpha)\n\n return probs, j_probs\n\n\ndef compute_factors(probs, j_probs, tree):\n\n factors = np.zeros((probs.shape[0], 2, 2))\n\n root_id = tree.index(ROOT)\n\n features = np.arange(probs.shape[0]).tolist()\n features.remove(root_id)\n\n parents = tree.copy()\n parents.pop(root_id)\n\n factors[root_id, 0, 0] = factors[root_id, 1, 0] = probs[root_id, 0]\n factors[root_id, 0, 1] = factors[root_id, 1, 1] = probs[root_id, 1]\n\n factors[features, 0, 0] = j_probs[features, parents, 0, 0] / probs[parents, 0]\n factors[features, 1, 0] = j_probs[features, parents, 0, 1] / probs[parents, 1]\n factors[features, 0, 1] = j_probs[features, parents, 1, 0] / probs[parents, 0]\n factors[features, 1, 1] = j_probs[features, parents, 1, 1] / probs[parents, 1]\n\n return factors\n\n\ndef compute_mi_by_probs(probs, j_probs, log_j_probs):\n\n prod_probs = np.zeros((probs.shape[0], probs.shape[0], 2, 2))\n\n 
prod_probs[:, :, 0, 0] = np.outer(probs[:, 0], probs[:, 0])\n prod_probs[:, :, 0, 1] = np.outer(probs[:, 0], probs[:, 1])\n prod_probs[:, :, 1, 0] = np.outer(probs[:, 1], probs[:, 0])\n prod_probs[:, :, 1, 1] = np.outer(probs[:, 1], probs[:, 1])\n\n mut_info = np.sum(j_probs * (log_j_probs - np.log(prod_probs)), axis=(2, 3))\n np.fill_diagonal(mut_info, 0)\n\n return mut_info\n\n\ndef compute_mi(data, alpha):\n\n probs, j_probs = compute_probs(data, alpha)\n\n return compute_mi_by_probs(probs, j_probs, np.log(j_probs))\n\n\ndef build_dependency_tree_from_mi(mut_info, scope, root_var=None):\n\n if root_var is None:\n root_var = np.random.choice(scope)\n else:\n if root_var not in scope:\n raise RootVarError()\n\n root_id = scope.index(root_var)\n\n mst = minimum_spanning_tree(-(mut_info + 1))\n dfs_tree = depth_first_order(mst, directed=False, i_start=root_id)\n tree = dfs_tree[1].tolist()\n tree[root_id] = ROOT\n\n dtree_nodes = {var_id: DTreeNode(var_id) for var_id in scope}\n\n parents = np.arange(mut_info.shape[0]).tolist()\n parents.remove(root_id)\n\n for p in parents:\n dtree_nodes[scope[p]].set_parent(dtree_nodes[scope[tree[p]]])\n\n dtree_nodes[scope[root_id]].set_tree(scope, tree)\n\n return dtree_nodes[scope[root_id]]\n\n\ndef learn_cltree(data, scope, root_var, alpha):\n\n if root_var not in scope:\n raise RootVarError()\n\n probs, j_probs = compute_probs(data, alpha)\n\n mut_info = compute_mi_by_probs(probs, j_probs, np.log(j_probs))\n\n dtree_root = build_dependency_tree_from_mi(mut_info, scope, root_var)\n\n factors = compute_factors(probs, j_probs, dtree_root.get_tree()[1])\n\n factors_dict = {var: factors[scope.index(var)] for var in scope}\n\n return dtree_root, factors_dict\n\n\ndef create_factors_dict(data, dtree_root, alpha):\n\n probs, j_probs = compute_probs(data, alpha)\n\n scope = dtree_root.get_tree()[0]\n tree = dtree_root.get_tree()[1]\n\n factors = compute_factors(probs, j_probs, tree)\n factors_dict = {var: factors[scope.index(var)] for var in scope}\n\n return factors_dict\n\n\ndef create_dtree_dict(data_l, cl_parts_l, conj_vars_l, alpha):\n\n n_vars = data_l[0].shape[1]\n\n sum_mut_info = np.zeros((n_vars, n_vars))\n for i in range(len(data_l)):\n for part in cl_parts_l[i]:\n mi = compute_mi(part.get_slice(data_l[i]), alpha)\n sum_mut_info[part.col_ids[:, None], part.col_ids] += mi\n\n #\n # create a dependency tree for each scope in scopes\n free_vars = list(set(np.arange(n_vars))-set([var for conj_vars in conj_vars_l for var in conj_vars]))\n\n if free_vars:\n scopes = conj_vars_l + [free_vars]\n else:\n scopes = conj_vars_l\n\n dtrees = []\n for scope in scopes:\n dtrees.append(build_dependency_tree_from_mi(sum_mut_info[scope][:, scope], scope))\n\n scope = dtrees[-1].get_tree()[0].copy()\n tree = dtrees[-1].get_tree()[1].copy()\n\n #\n # concatenate dtrees\n for k in reversed(range(0, len(dtrees) - 1)):\n\n tree += [t + len(scope) if t != ROOT else t for t in dtrees[k].get_tree()[1]]\n tree[tree.index(ROOT)] = tree.index(ROOT, len(scope))\n scope += dtrees[k].get_tree()[0]\n\n dtrees[k].set_tree(scope.copy(), tree.copy())\n dtrees[k + 1].set_parent(dtrees[k])\n\n #\n # return a dictionary of dtrees where keys are scope lengths\n return {len(dtree.get_tree()[0]): dtree for dtree in dtrees}\n\n\ndef is_structured_decomposable(spn, verbose=False):\n\n if not is_consistent(spn):\n return False\n\n nodes = get_nodes_by_type(spn)\n\n scope_set = set()\n for n in nodes:\n if isinstance(n, Product):\n scope_set.add(tuple(n.scope))\n elif isinstance(n, CLTree):\n 
vtree = from_dtree_to_vtree(n.dtree_root)\n scope_set.update([tuple(s) for s in vtree.scopes])\n\n scopes = list(scope_set)\n scopes = [set(t) for t in scopes]\n #\n # ordering is not needed, but useful for printing\n if verbose:\n scopes.sort(key=len)\n for s in scopes:\n print(s)\n\n for i in range(len(scopes)):\n for j in range(len(scopes)):\n int_len = len(scopes[i].intersection(scopes[j]))\n if int_len != 0 and int_len != min(len(scopes[i]), len(scopes[j])):\n return False\n\n return True\n\n\ndef circuit_size(spn):\n\n nodes = get_nodes_by_type(spn)\n size = 0\n for n in nodes:\n if isinstance(n, Product) or isinstance(n, Sum):\n size += len(n.children)\n elif isinstance(n, CLTree):\n queue = [n.dtree_root]\n clt_size = 0\n while queue:\n peek_node = queue.pop(0)\n queue.extend(peek_node.children)\n if not peek_node.is_leaf():\n clt_size += (1 + len(peek_node.children)) * 2\n clt_size += 4\n size += clt_size - 2\n\n return size\n\n\ndef from_dtree_to_vtree(dtree_root):\n\n if len(dtree_root.get_tree()[0]) == 1:\n vtree_root = VTreeNode(var_id=dtree_root.get_tree()[0][0])\n vtree_root.set_scopes([dtree_root.get_tree()[0]])\n return vtree_root\n\n scopes = []\n scopes_t = []\n\n last_node_visited = None\n dtree_stack = [dtree_root]\n buffer = []\n\n while dtree_stack:\n\n peek_dnode = dtree_stack[-1]\n\n if not peek_dnode.children or (last_node_visited in peek_dnode.children):\n\n if not peek_dnode.is_leaf():\n\n leaves = []\n\n n_parents = 0\n for child in peek_dnode.children:\n if child.is_leaf():\n leaves.append(VTreeNode(child.var_id))\n else:\n n_parents += 1\n\n if n_parents:\n temp_buffer = buffer[-n_parents:]\n del buffer[-n_parents:]\n else:\n temp_buffer = []\n\n vtree_root = VTreeNode(children=leaves + temp_buffer + [VTreeNode(peek_dnode.var_id)])\n\n #\n # this piece of code store all the scopes of the vtree\n # ------\n scope = []\n n_inner_children = 0\n for n in vtree_root.children:\n if not n.is_inner():\n scope.append(n.var_id)\n else:\n n_inner_children += 1\n if n_inner_children:\n prev_scopes = scopes_t[-n_inner_children:]\n del scopes_t[-n_inner_children:]\n scope.extend([v for s in prev_scopes for v in s])\n scopes_t.append(scope)\n scopes.append(scope)\n # ------\n\n buffer.append(vtree_root)\n\n dtree_stack.pop()\n last_node_visited = peek_dnode\n\n else:\n\n dtree_stack.extend(peek_dnode.children)\n\n vtree_root.set_scopes(scopes)\n\n return vtree_root\n" ]
[ [ "scipy.sparse.csgraph.depth_first_order", "numpy.dot", "numpy.random.choice", "numpy.fill_diagonal", "numpy.zeros", "numpy.log", "numpy.ones", "numpy.arange", "scipy.sparse.csgraph.minimum_spanning_tree", "numpy.outer", "numpy.diag" ] ]
bt3gl/Resources-Numerical_Methods_for_Physics
[ "8668215f107230fafd9bdeb0061d353328cf03e8" ]
[ "homework5_elliptic_PDES/part_c.py" ]
[ "\"\"\"\n We recover the original divergence-free velocity field via \n Ud,new = Ustar - Gphi\n\"\"\" \n\nimport numpy\nimport pylab\nimport operator\n\n\ndef do_plots_c(Ud, Unew):\n \"\"\" plot Ud,new and Ud with zoom on the bug \"\"\" \n pylab.clf()\n pylab.cla()\n \n f = pylab.figure() \n f.text(.5, .95, r\"$U_{\\rm d}$ (left) and $U_{\\rm d, new}$ (right) \", horizontalalignment='center')\n\n\n pylab.subplot(221)\n pylab.imshow(Ud[0])\n pylab.ylabel(\"# of cells\", size =8)\n\n \n pylab.subplot(223)\n pylab.imshow(Ud[1])\n pylab.xlim(1,32)\n pylab.xlabel(\"# of cells\", size =8)\n pylab.ylabel(\"# of cells\", size =8)\n\n pylab.subplot(222)\n pylab.imshow(Unew[0])\n pylab.ylabel(\"# of cells\", size =8)\n \n pylab.subplot(224) \n pylab.imshow(Unew[1])\n pylab.xlim(1,32)\n pylab.xlabel(\"# of cells\", size =8)\n pylab.ylabel(\"# of cells\", size =8)\n\n pylab.savefig(\"plots/item_c_Udnew.png\")\n\n\ndef doPartC(Ustar, phi_num, Ud, nx, ny, xmin, xmax, ymin, ymax, DO_PLOTS):\n \"\"\" coordinates of centers \"\"\"\n dx = (xmax - xmin)/nx\n dy = (ymax - ymin)/ny\n \n \"\"\" calcuates the new gradient\"\"\"\n Gphi = numpy.gradient(phi_num, dx, dy)\n\n\n \"\"\" recover Ud, new \"\"\"\n Unew = map(operator.sub, Ustar,Gphi)\n \n \n if (DO_PLOTS == 1):\n do_plots_c(Ud, Unew)\n \n return 0\n" ]
[ [ "numpy.gradient" ] ]
ShAlireza/ML-Tries
[ "4516be7a3275c9bdedd7bd258800be384b6b34f0" ]
[ "O3/_14_kernel_methods_linearly_inseparable_data/rbf_kernel_iris_dataset.py" ]
[ "import numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.svm import SVC\n\nfrom O3 import prepare_data\n\nfrom utils import plot_decision_regions\n\nX_train, X_test, y_train, y_test = prepare_data(standardize=True,\n split=True)\n\nsvm = SVC(kernel='rbf', random_state=1, gamma=0.2, C=1.0)\n\nsvm.fit(X_train, y_train)\n\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.hstack((y_train, y_test))\n\nplot_decision_regions(X_combined, y_combined, classifier=svm,\n test_idx=range(105, 150))\n\nplt.xlabel('petal length [standardize]')\nplt.ylabel('petal width [standardize]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()\n\nprint(f'Accuracy: {svm.score(X_test, y_test) * 100}%')\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.legend", "sklearn.svm.SVC", "matplotlib.pyplot.show", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "numpy.hstack", "numpy.vstack" ] ]
yaramohajerani/dynamic_mascons
[ "e893ca16df76fdaacf5b312650d6c9516870395b" ]
[ "combine_kernels.py" ]
[ "#!/usr/bin/env python\nu\"\"\"\ncombine_kernels.py\nby Yara Mohajerani\n\nCombine the sensitivity kernels of the sum of the 'fixed points'\nand produce netcdf and png outputs\n\nLast Update 12/2020\n\"\"\"\n#-- load required modules\nimport os\nimport sys\nimport numpy as np\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n#-- also import gravity toolkit modules\nfrom gravity_toolkit.ncdf_write import ncdf_write\nfrom gravity_toolkit.ncdf_read import ncdf_read\n\n#------------------------------------------------------------------------------\n#-- create sensitivity kernels for given voronoi harmonics\n#------------------------------------------------------------------------------\ndef combine_kernels(parameters):\n\tDDEG_RASTER = float(parameters['DDEG_RASTER'])\n\t#-- read harmonic parameters\n\tLMAX = int(parameters['LMAX'])\n\t#-- get output directory\n\tddir = os.path.expanduser(parameters['DIRECTORY'])\n\t#-- smoothing radius\n\tRAD = int(parameters['RAD'])\n\t#-- ocn redistribution label\n\tOCN = '_OCN' if parameters['MASCON_OCEAN'] in ['Y','y'] else ''\n\n\t#-- load mascon configuration of interest\n\tmascon_nums = np.array(parameters['MSCN_NUMS'].split(','),dtype=int)\n\tmascon_name = parameters['MSCN_NAME']\n\tout_lbl = '{0}_{1}'.format(mascon_name,parameters['MSCN_NUMS'].replace(',','+'))\n\n\t#----------------------------------------------------------------------\n\t#-- Read and sum up kernels corresponding to fixed points\n\t#----------------------------------------------------------------------\n\tkerns = {}\n\tfor i in mascon_nums:\n\t\t#- read the netcdf files\n\t\tkern_file = os.path.join(ddir,'MASCON_{0:d}_YLMS_{1:.2f}DEG_SKERNEL{2}_L{3:02d}_r{4:d}km.nc'.format(i,DDEG_RASTER,OCN,LMAX,RAD))\n\t\tkerns[i] = ncdf_read(kern_file,DATE=False)\n\t#-- sum up the kernels\n\tkern_sum = kerns[mascon_nums[0]]['data']\n\tfor i in mascon_nums[1:]:\n\t\tkern_sum += kerns[i]['data']\n\t#-- get grid for saving combined sensitivity kernel\n\tglat = kerns[mascon_nums[0]]['lat']\n\tglon = kerns[mascon_nums[0]]['lon']\n\n\t#----------------------------------------------------------------------\n\t#-- write kernel sum to file\n\t#----------------------------------------------------------------------\n\toutfile = os.path.join(ddir,'MASCON_{0}_YLMS_{1:.2f}DEG_SKERNEL_OCN_L{2:02d}_r{3:d}km.nc'.format(out_lbl,DDEG_RASTER,LMAX,RAD))\n\tncdf_write(kern_sum,glon,glat,0,FILENAME=outfile,DATE=False,UNITS='unitless',LONGNAME='Sensitivity_Kernel')\n\n\t#----------------------------------------------------------------------\n\t#-- plot summed kernel\n\t#----------------------------------------------------------------------\n\t#-- load in world map for plotting in background\n\tworld = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n\t#-- plot summed kernel\n\tfig, ax = plt.subplots(1,figsize = (10,6),dpi=100)\n\tklim = np.max(np.abs(kern_sum))*0.95\n\tc = ax.contourf(glon,glat,kern_sum,cmap='bwr',levels=np.linspace(-klim,klim,16))\n\t#-- use an axis divider for the colorbar\n\tdrx = make_axes_locatable(ax)\n\tcax = drx.append_axes(\"right\", size=\"5%\", pad=0.1)\n\tcbar = fig.colorbar(c,cax=cax)\n\tcbar.set_label('Sensitivity Kernel (min:{0:.1f}, 
max:{1:.1f})'.format(np.min(kern_sum),np.max(kern_sum)))\n\tworld.plot(ax=ax,alpha=0.3,fc='none',ec='k',linewidth=1.2,rasterized=True)\n\tplt.tight_layout()\n\tplt.savefig(outfile.replace('.nc','.png'),format='PNG')\n\tplt.close(fig=fig)\n\n#------------------------------------------------------------------------------\n#-- main function\n#------------------------------------------------------------------------------\ndef main():\n\tif len(sys.argv) == 1:\n\t\tsys.exit('No paramter file given')\n\telse:\n\t\t#-- read input files\n\t\tinput_files = sys.argv[1:]\n\t\tparameters = {}\n\t\tfor infile in input_files:\n\t\t\t#-- for each paramter file, extract parameters\n\t\t\tfid = open(infile, 'r')\n\t\t\tfor fileline in fid:\n\t\t\t\tpart = fileline.split()\n\t\t\t\tparameters[part[0]] = part[1]\n\t\t\tfid.close()\n\t\t\t#-- feed parameters to function to combine and plot kernels\n\t\t\tcombine_kernels(parameters)\n\n#------------------------------------------------------------------------------\n#-- run main program\n#------------------------------------------------------------------------------\nif __name__ == '__main__':\n\tmain()" ]
[ [ "numpy.max", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "numpy.min", "matplotlib.pyplot.tight_layout", "numpy.abs", "numpy.linspace" ] ]
zhuxinqimac/DisentanglementICML19
[ "703a8ae2e263b387d49ddae122054d5e77a52832", "703a8ae2e263b387d49ddae122054d5e77a52832" ]
[ "Dsprites_exp/VAE-VC/local_nets.py", "Dsprites_exp/GroupVAE/main.py" ]
[ "import os\nimport sys\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\ndef disc_net_64(img1, img2, target_dim, scope=\"DISC\", reuse=False):\n nets_dict = dict()\n nets_dict['input1'] = img1\n nets_dict['input2'] = img2\n with tf.variable_scope(scope, reuse=reuse):\n with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(0.00004)):\n with slim.arg_scope([slim.conv2d], weights_initializer=tf.contrib.slim.variance_scaling_initializer(), stride=2, padding='SAME', activation_fn=tf.nn.relu) :\n with slim.arg_scope([slim.fully_connected], biases_initializer=tf.zeros_initializer()):\n nets_dict['concat'] = tf.concat([nets_dict['input1'], nets_dict['input2']], axis=3)\n nets_dict['conv2d0'] = slim.conv2d(nets_dict['concat'], 32, [4, 4], scope='conv2d_0')\n nets_dict['conv2d1'] = slim.conv2d(nets_dict['conv2d0'], 32, [4, 4], scope='conv2d_1')\n nets_dict['conv2d2'] = slim.conv2d(nets_dict['conv2d1'], 64, [4, 4], scope='conv2d_2')\n nets_dict['conv2d3'] = slim.conv2d(nets_dict['conv2d2'], 64, [4, 4], scope='conv2d_3')\n n = tf.reshape(nets_dict['conv2d3'], [-1, 4*4*64])\n nets_dict['fc0'] = slim.fully_connected(n, 256, activation_fn=tf.nn.relu, scope = \"output_fc0\")\n nets_dict['output'] = slim.fully_connected(nets_dict['fc0'], target_dim, activation_fn=None, scope = \"output_fc1\")\n return nets_dict\n", "import os\nimport sys\n# sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../..'))\nsys.path.insert(\n 0,\n os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))\n\n# from model_with_split import ModelSplit\nfrom model import Model\n\nfrom config.path import subdirs5resultdir, muldir2mulsubdir\n\nfrom utils.datasetmanager import dsprites_manager\nfrom utils.format_op import FileIdManager\n\nfrom local_config import local_dsprites_parser, RESULT_DIR, ID_STRUCTURE\n\nimport tensorflow as tf\nimport numpy as np\n\nif __name__ == '__main__':\n # NITER = 300000\n # PITER = 20000\n # SITER = 10000\n\n parser = local_dsprites_parser()\n args = parser.parse_args() # parameter required for model\n NITER = args.niter\n PITER = args.piter\n SITER = args.siter\n\n fim = FileIdManager(ID_STRUCTURE)\n\n np.random.seed(args.rseed)\n FILE_ID = fim.get_id_from_args(args)\n SAVE_DIR, LOG_DIR, ASSET_DIR = subdirs5resultdir(RESULT_DIR, True)\n SAVE_SUBDIR, ASSET_SUBDIR = muldir2mulsubdir([SAVE_DIR, ASSET_DIR], FILE_ID, True)\n\n dm = dsprites_manager()\n dm.print_shape()\n\n model = Model(dm, LOG_DIR+FILE_ID+'.log', args)\n\n # print('SAVE_SUBDIR:', SAVE_SUBDIR)\n # if os.path.exists(os.path.join(SAVE_SUBDIR, 'checkpoint')):\n # print(\"Restoring %s\"%SAVE_SUBDIR)\n # model.restore(save_dir=SAVE_SUBDIR)\n # else:\n # print(\"Not restoring.\")\n\n model.set_up_train()\n model.initialize()\n model.train(niter=NITER, siter=SITER, piter=PITER, save_dir=SAVE_SUBDIR, asset_dir=ASSET_SUBDIR)\n # model.restore(save_dir=SAVE_SUBDIR)\n # train_idx = model.start_iter//PITER\n # include_discrete = False if train_idx < args.ntime else True\n # print('include_discrete:', include_discrete)\n # accuracy = model.evaluate(include_discrete=include_discrete)\n # print('accuracy:', accuracy)\n\n" ]
[ [ "tensorflow.zeros_initializer", "tensorflow.contrib.slim.variance_scaling_initializer", "tensorflow.concat", "tensorflow.reshape", "tensorflow.variable_scope" ], [ "numpy.random.seed" ] ]
zlxy9892/chatbot-retrieval-based-smn
[ "65ae5391f0a01b84f998cbbec1e04eda30ddd569" ]
[ "retrieval_model.py" ]
[ "# coding:utf-8\n\nimport time\nimport datetime\nimport os\nimport tensorflow as tf\nimport pickle\nimport utils\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nimport evaluate\nfrom utils import Utils\n\n\nclass SMN():\n def __init__(self,\n device_name='/cpu:0',\n lr=0.001,\n max_num_utterance=5,\n negative_samples=1,\n max_sentence_len=20,\n word_embedding_size=100,\n rnn_units=100,\n total_words=66958,\n batch_size=32,\n max_epoch=100,\n num_checkpoints=10,\n evaluate_every=100,\n checkpoint_every=100):\n self.utils = Utils()\n self.device_name = device_name\n self.lr = lr\n self.max_num_utterance = max_num_utterance\n self.negative_samples = negative_samples\n self.max_sentence_len = max_sentence_len\n self.word_embedding_size = word_embedding_size\n self.rnn_units = rnn_units\n self.total_words = total_words\n self.batch_size = batch_size\n self.max_epoch = max_epoch\n self.num_checkpoints = num_checkpoints\n self.evaluate_every = evaluate_every\n self.checkpoint_every = checkpoint_every\n\n def LoadModel(self):\n #init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n sess = tf.Session()\n #with tf.Session() as sess:\n #sess.run(init)\n saver.restore(sess,\"neg5model\\\\model.5\")\n return sess\n # Later, launch the model, use the saver to restore variables from disk, and\n # do some work with the model.\n # with tf.Session() as sess:\n # # Restore variables from disk.\n # saver.restore(sess, \"/model/model.5\")\n # print(\"Model restored.\")\n\n def build_model(self):\n # placeholders\n self.utterance_ph = tf.placeholder(tf.int32, shape=(None, self.max_num_utterance, self.max_sentence_len), name='utterances')\n self.response_ph = tf.placeholder(tf.int32, shape=(None, self.max_sentence_len), name='responses')\n self.y_true = tf.placeholder(tf.int32, shape=(None,), name='y_true')\n # self.embedding_ph = tf.placeholder(tf.float32, shape=(self.total_words, self.word_embedding_size))\n self.response_len = tf.placeholder(tf.int32, shape=(None,), name='responses_len')\n self.all_utterance_len_ph = tf.placeholder(tf.int32, shape=(None, self.max_num_utterance), name='utterances_len')\n\n with tf.device(self.device_name):\n # word_embedding vector\n word_embeddings = tf.get_variable('word_embeddings_v', initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1), shape=(self.total_words, self.word_embedding_size), dtype=tf.float32, trainable=True)\n # word_embeddings = tf.get_variable('word_embeddings_v', shape=(self.total_words, self.word_embedding_size), dtype=tf.float32, trainable=False)\n # self.embedding_init = word_embeddings.assign(self.embedding_ph)\n\n # utterance embedding\n all_utterance_embeddings = tf.nn.embedding_lookup(word_embeddings, self.utterance_ph)\n all_utterance_embeddings = tf.unstack(all_utterance_embeddings, num=self.max_num_utterance, axis=1)\n all_utterance_len = tf.unstack(self.all_utterance_len_ph, num=self.max_num_utterance, axis=1)\n\n # response embedding\n response_embeddings = tf.nn.embedding_lookup(word_embeddings, self.response_ph)\n \n # GRU initialize\n sentence_GRU = tf.nn.rnn_cell.GRUCell(self.rnn_units, kernel_initializer=tf.orthogonal_initializer())\n final_GRU = tf.nn.rnn_cell.GRUCell(self.rnn_units, kernel_initializer=tf.orthogonal_initializer())\n\n # matrix 1\n A_matrix = tf.get_variable('A_matrix_v', shape=(self.rnn_units, self.rnn_units), initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n reuse = None\n\n response_GRU_embeddings, _ = tf.nn.dynamic_rnn(sentence_GRU, 
response_embeddings, sequence_length=self.response_len, dtype=tf.float32,\n scope='sentence_GRU')\n self.response_embedding_save = response_GRU_embeddings\n response_embeddings = tf.transpose(response_embeddings, perm=[0, 2, 1])\n response_GRU_embeddings = tf.transpose(response_GRU_embeddings, perm=[0, 2, 1])\n\n # generate matching vectors\n matching_vectors = []\n for utterance_embeddings, utterance_len in zip(all_utterance_embeddings, all_utterance_len):\n matrix1 = tf.matmul(utterance_embeddings, response_embeddings)\n utterance_GRU_embeddings, _ = tf.nn.dynamic_rnn(sentence_GRU, utterance_embeddings, sequence_length=utterance_len, dtype=tf.float32,\n scope='sentence_GRU')\n matrix2 = tf.einsum('aij,jk->aik', utterance_GRU_embeddings, A_matrix) # TODO:check this\n matrix2 = tf.matmul(matrix2, response_GRU_embeddings)\n matrix = tf.stack([matrix1, matrix2], axis=3, name='matrix_stack')\n conv_layer = tf.layers.conv2d(matrix, filters=8, kernel_size=(3, 3), padding='VALID',\n kernel_initializer=tf.contrib.keras.initializers.he_normal(),\n activation=tf.nn.relu, reuse=reuse, name='conv') # TODO: check other params\n pooling_layer = tf.layers.max_pooling2d(conv_layer, (3, 3), strides=(3, 3),\n padding='VALID', name='max_pooling') # TODO: check other params\n matching_vector = tf.layers.dense(tf.contrib.layers.flatten(pooling_layer), 50,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n activation=tf.tanh, reuse=reuse, name='matching_v') # TODO: check wthether this is correct\n if not reuse:\n reuse = True\n matching_vectors.append(matching_vector)\n \n # last hidden layer\n _, last_hidden = tf.nn.dynamic_rnn(final_GRU, tf.stack(matching_vectors, axis=0, name='matching_stack'), dtype=tf.float32,\n time_major=True, scope='final_GRU') # TODO: check time_major\n \n # output layer\n output = tf.layers.dense(last_hidden, 2, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='final_v')\n self.logits = tf.nn.softmax(output, name='y_logits')\n self.y_pred = tf.cast(tf.argmax(input=output, axis=1), 'int32', name='y_pred')\n\n # loss\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y_true, logits=output), name='loss')\n\n # accuracy\n correct_predictions = tf.equal(self.y_pred, self.y_true)\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')\n\n # optimize\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step, name='train_op')\n \n\n def Evaluate(self, sess):\n pass\n '''\n with open(evaluate_file, 'rb') as f:\n history, true_utt, labels = pickle.load(f)\n self.all_candidate_scores = []\n history, history_len = utils.multi_sequences_padding(history, self.max_sentence_len)\n history, history_len = np.array(history), np.array(history_len)\n true_utt_len = np.array(utils.get_sequences_length(true_utt, maxlen=self.max_sentence_len))\n true_utt = np.array(pad_sequences(true_utt, padding='post', maxlen=self.max_sentence_len))\n low = 0\n while True:\n feed_dict = {\n self.utterance_ph: np.concatenate([history[low:low + 200]], axis=0),\n self.all_utterance_len_ph: np.concatenate([history_len[low:low + 200]], axis=0),\n self.response_ph: np.concatenate([true_utt[low:low + 200]], axis=0),\n self.response_len: np.concatenate([true_utt_len[low:low + 200]], axis=0),\n }\n candidate_scores = 
sess.run(self.y_pred, feed_dict=feed_dict)\n self.all_candidate_scores.append(candidate_scores[:, 1])\n low = low + 200\n if low >= history.shape[0]:\n break\n all_candidate_scores = np.concatenate(self.all_candidate_scores, axis=0)\n evaluate.ComputeR10_1(all_candidate_scores,labels)\n evaluate.ComputeR2_1(all_candidate_scores,labels)\n '''\n \n def train_model(self, all_sequences, all_responses_true, use_pre_trained=False, pre_trained_modelpath='./model/pre-trained-model'):\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)\n with tf.Session(config=config) as sess:\n # output directory for models and summaries\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(os.curdir, 'log', timestamp))\n print('Writing log to {}\\n'.format(out_dir))\n\n # summary all the trainable variables\n for var in tf.trainable_variables():\n tf.summary.histogram(name=var.name, values=var)\n\n # summaries for loss and accuracy\n loss_summary = tf.summary.scalar('summary_loss', self.loss)\n acc_summary = tf.summary.scalar('summary_accuracy', self.accuracy)\n\n # train summaries\n train_summary_op = tf.summary.merge_all()\n train_summary_dir = os.path.join(out_dir, 'summaries', 'train')\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, tf.get_default_graph())\n\n # dev summaries\n dev_summary_op = tf.summary.merge_all()\n dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev')\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, tf.get_default_graph())\n\n # checkpointing, tensorflow assumes this directory already existed, so we need to create it\n checkpoint_dir = os.path.join(out_dir, 'checkpoints')\n checkpoint_prefix = os.path.join(checkpoint_dir, 'model')\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints)\n\n # initialize all variables\n sess.run(tf.global_variables_initializer())\n\n # use pre-trained model to continue\n if use_pre_trained:\n print('reloading model parameters...')\n saver.restore(sess, pre_trained_modelpath)\n \n # get input data\n actions = all_responses_true[:]\n\n history, history_len = self.utils.multi_sequences_padding(all_sequences, self.max_sentence_len)\n true_utt_len = np.array(self.utils.get_sequences_length(all_responses_true, maxlen=self.max_sentence_len))\n true_utt = np.array(pad_sequences(all_responses_true, padding='post', maxlen=self.max_sentence_len))\n actions_len = np.array(self.utils.get_sequences_length(actions, maxlen=self.max_sentence_len))\n actions = np.array(pad_sequences(actions, padding='post', maxlen=self.max_sentence_len))\n history, history_len = np.array(history), np.array(history_len)\n \n low = 0\n epoch = 1\n while epoch <= self.max_epoch:\n n_sample = min(low + self.batch_size, history.shape[0]) - low\n negative_indices = [np.random.randint(0, actions.shape[0], n_sample) for _ in range(self.negative_samples)]\n negs = [actions[negative_indices[i], :] for i in range(self.negative_samples)]\n negs_len = [actions_len[negative_indices[i]] for i in range(self.negative_samples)]\n feed_dict = {\n self.utterance_ph: np.concatenate([history[low:low + n_sample]] * (self.negative_samples + 1), axis=0),\n self.all_utterance_len_ph: np.concatenate([history_len[low:low + n_sample]] * (self.negative_samples + 1), axis=0),\n self.response_ph: np.concatenate([true_utt[low:low + n_sample]] + negs, axis=0),\n self.response_len: np.concatenate([true_utt_len[low:low + n_sample]] + 
negs_len, axis=0),\n self.y_true: np.concatenate([np.ones(n_sample)] + [np.zeros(n_sample)] * self.negative_samples, axis=0)\n }\n _, step, summaries, loss, accuracy, y_logits, y_pred, y_true = sess.run(\n [self.train_op, self.global_step, train_summary_op, self.loss, self.accuracy, self.logits, self.y_pred, self.y_true],\n feed_dict)\n y_pred_proba = y_logits[:,1]\n timestr = datetime.datetime.now().isoformat()\n print('{}: => epoch {} | step {} | loss {:.6f} | acc {:.6f}'.format(timestr, epoch, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n \n current_step = tf.train.global_step(sess, self.global_step)\n low += n_sample\n if current_step % self.evaluate_every == 0:\n pass\n # print(\"loss\", sess.run(self.loss, feed_dict=feed_dict))\n # self.Evaluate(sess)\n if current_step % self.checkpoint_every == 0:\n path = saver.save(sess=sess, save_path=checkpoint_prefix, global_step=self.global_step)\n print('\\nSaved model checkpoint to {}\\n'.format(path))\n if low >= history.shape[0]:\n low = 0\n epoch += 1\n \n def predict(self, model_file, dev_utterances, dev_responses, dev_utterances_len, dev_responses_len):\n # self.build_model()\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph('{}.meta'.format(model_file))\n saver.restore(sess, model_file)\n\n # Access and create placeholders variables and create feed-dict to feed new data\n graph = tf.get_default_graph()\n ph_utterances = graph.get_tensor_by_name('utterances:0')\n ph_responses = graph.get_tensor_by_name('responses:0')\n ph_utterances_len = graph.get_tensor_by_name('utterances_len:0')\n ph_responses_len = graph.get_tensor_by_name('responses_len:0')\n ph_y_true = graph.get_tensor_by_name('y_true:0')\n feed_dict = {\n ph_utterances: dev_utterances,\n ph_responses: dev_responses,\n ph_utterances_len: dev_utterances_len,\n ph_responses_len: dev_responses_len\n }\n\n op_y_logits = graph.get_tensor_by_name('y_logits:0')\n op_y_pred = graph.get_tensor_by_name('y_pred:0')\n\n y_logits, y_pred = sess.run([op_y_logits, op_y_pred], feed_dict)\n y_pred_proba = y_logits[:,1]\n # print(y_logits)\n # print(y_pred)\n return y_pred_proba, y_pred\n\n\nif __name__ == \"__main__\":\n smn = SMN()\n smn.build_model()\n # smn.train_model()\n #sess = scn.LoadModel()\n #scn.Evaluate(sess)\n #results = scn.BuildIndex(sess)\n #print(len(results))\n\n #scn.TrainModel()\n" ]
[ [ "tensorflow.contrib.layers.xavier_initializer", "tensorflow.matmul", "tensorflow.nn.embedding_lookup", "tensorflow.stack", "tensorflow.nn.softmax", "tensorflow.global_variables_initializer", "tensorflow.contrib.layers.flatten", "tensorflow.einsum", "tensorflow.cast", "tensorflow.random_normal_initializer", "tensorflow.trainable_variables", "numpy.concatenate", "tensorflow.get_default_graph", "tensorflow.argmax", "tensorflow.summary.histogram", "tensorflow.orthogonal_initializer", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.transpose", "tensorflow.global_variables", "tensorflow.train.global_step", "tensorflow.ConfigProto", "numpy.random.randint", "tensorflow.nn.dynamic_rnn", "numpy.array", "tensorflow.train.AdamOptimizer", "numpy.zeros", "tensorflow.summary.scalar", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.unstack", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.layers.max_pooling2d", "tensorflow.Graph", "tensorflow.equal", "tensorflow.contrib.keras.initializers.he_normal", "numpy.ones", "tensorflow.device" ] ]
MD2Korg/CerebralCortex-2.0
[ "8dfcef1ba96fb8653980d1cd3eee7ed3d7f28b60" ]
[ "cerebralcortex/markers/brushing/features.py" ]
[ "from typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\nfrom pyspark.sql.types import *\n# from pyspark.sql.functions import pandas_udf,PandasUDFType\nfrom pyspark.sql.types import StructType\n\nfrom cerebralcortex.core.datatypes import DataStream\nfrom cerebralcortex.core.metadata_manager.stream.metadata import Metadata\n\n\ndef compute_corr_mse_accel_gyro(self, exclude_col_names: list = [],\n accel_column_names: list = ['accelerometer_x', 'accelerometer_y', 'accelerometer_z'],\n gyro_column_names: list = ['gyroscope_y', 'gyroscope_x', 'gyroscope_z'],\n windowDuration: int = None,\n slideDuration: int = None,\n groupByColumnName: List[str] = [], startTime=None):\n \"\"\"\n Compute correlation and mean standard error of accel and gyro sensors\n\n Args:\n exclude_col_names list(str): name of the columns on which features should not be computed\n accel_column_names list(str): name of accel data column\n gyro_column_names list(str): name of gyro data column\n windowDuration (int): duration of a window in seconds\n slideDuration (int): slide duration of a window\n groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2\n startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided\n\n\n Returns:\n DataStream object with all the existing data columns and FFT features\n \"\"\"\n feature_names = [\"ax_ay_corr\", 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr',\n 'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse', 'gx_gz_mse', 'gy_gz_mse']\n\n exclude_col_names.extend([\"timestamp\", \"localtime\", \"user\", \"version\"])\n\n data = self._data.drop(*exclude_col_names)\n\n basic_schema = StructType([\n StructField(\"timestamp\", TimestampType()),\n StructField(\"localtime\", TimestampType()),\n StructField(\"user\", StringType()),\n StructField(\"version\", IntegerType()),\n StructField(\"start_time\", TimestampType()),\n StructField(\"end_time\", TimestampType())\n ])\n\n features_list = []\n for fn in feature_names:\n features_list.append(StructField(fn, FloatType(), True))\n\n features_schema = StructType(basic_schema.fields + features_list)\n\n @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)\n def get_corr_mse_features_udf(df):\n timestamp = df['timestamp'].iloc[0]\n localtime = df['localtime'].iloc[0]\n user = df['user'].iloc[0]\n version = df['version'].iloc[0]\n start_time = timestamp\n end_time = df['timestamp'].iloc[-1]\n\n ax_ay_corr = df[accel_column_names[0]].corr(df[accel_column_names[1]])\n ax_az_corr = df[accel_column_names[0]].corr(df[accel_column_names[2]])\n ay_az_corr = df[accel_column_names[1]].corr(df[accel_column_names[2]])\n gx_gy_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[1]])\n gx_gz_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[2]])\n gy_gz_corr = df[gyro_column_names[1]].corr(df[gyro_column_names[2]])\n\n ax_ay_mse = ((df[accel_column_names[0]] - df[accel_column_names[1]]) ** 2).mean()\n ax_az_mse = ((df[accel_column_names[0]] - df[accel_column_names[2]]) ** 2).mean()\n ay_az_mse = ((df[accel_column_names[1]] - df[accel_column_names[2]]) ** 2).mean()\n gx_gy_mse = ((df[accel_column_names[0]] - df[accel_column_names[1]]) ** 
2).mean()\n gx_gz_mse = ((df[accel_column_names[0]] - df[accel_column_names[2]]) ** 2).mean()\n gy_gz_mse = ((df[accel_column_names[1]] - df[accel_column_names[2]]) ** 2).mean()\n\n basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time, ax_ay_corr,\n ax_az_corr, ay_az_corr, gx_gy_corr, gx_gz_corr, gy_gz_corr, ax_ay_mse, ax_az_mse,\n ay_az_mse, gx_gy_mse, gx_gz_mse, gy_gz_mse]],\n columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time',\n \"ax_ay_corr\", 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr',\n 'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse',\n 'gx_gz_mse', 'gy_gz_mse'])\n return basic_df\n\n data = self.compute(get_corr_mse_features_udf, windowDuration=windowDuration, slideDuration=slideDuration,\n groupByColumnName=groupByColumnName, startTime=startTime)\n return DataStream(data=data._data, metadata=Metadata())\n\ndef compute_fourier_features(self, exclude_col_names: list = [],\n feature_names=[\"fft_centroid\", 'fft_spread', 'spectral_entropy',\n 'spectral_entropy_old', 'fft_flux',\n 'spectral_falloff'], windowDuration: int = None,\n slideDuration: int = None,\n groupByColumnName: List[str] = [], startTime=None):\n \"\"\"\n Transforms data from time domain to frequency domain.\n\n Args:\n exclude_col_names list(str): name of the columns on which features should not be computed\n feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff\n windowDuration (int): duration of a window in seconds\n slideDuration (int): slide duration of a window\n groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2\n startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. 
First time of data will be used as startTime if none is provided\n\n\n Returns:\n DataStream object with all the existing data columns and FFT features\n \"\"\"\n eps = 0.00000001\n\n exclude_col_names.extend([\"timestamp\", \"localtime\", \"user\", \"version\"])\n\n data = self._data.drop(*exclude_col_names)\n\n df_column_names = data.columns\n\n basic_schema = StructType([\n StructField(\"timestamp\", TimestampType()),\n StructField(\"localtime\", TimestampType()),\n StructField(\"user\", StringType()),\n StructField(\"version\", IntegerType()),\n StructField(\"start_time\", TimestampType()),\n StructField(\"end_time\", TimestampType())\n ])\n\n features_list = []\n for cn in df_column_names:\n for sf in feature_names:\n features_list.append(StructField(cn + \"_\" + sf, FloatType(), True))\n\n features_schema = StructType(basic_schema.fields + features_list)\n\n def stSpectralCentroidAndSpread(X, fs):\n \"\"\"Computes spectral centroid of frame (given abs(FFT))\"\"\"\n ind = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))\n\n Xt = X.copy()\n Xt = Xt / Xt.max()\n NUM = np.sum(ind * Xt)\n DEN = np.sum(Xt) + eps\n\n # Centroid:\n C = (NUM / DEN)\n\n # Spread:\n S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)\n\n # Normalize:\n C = C / (fs / 2.0)\n S = S / (fs / 2.0)\n\n return (C, S)\n\n def stSpectralFlux(X, Xprev):\n \"\"\"\n Computes the spectral flux feature of the current frame\n ARGUMENTS:\n X: the abs(fft) of the current frame\n Xpre: the abs(fft) of the previous frame\n \"\"\"\n # compute the spectral flux as the sum of square distances:\n\n sumX = np.sum(X + eps)\n sumPrevX = np.sum(Xprev + eps)\n F = np.sum((X / sumX - Xprev / sumPrevX) ** 2)\n\n return F\n\n def stSpectralRollOff(X, c, fs):\n \"\"\"Computes spectral roll-off\"\"\"\n\n totalEnergy = np.sum(X ** 2)\n fftLength = len(X)\n Thres = c * totalEnergy\n # Ffind the spectral rolloff as the frequency position where the respective spectral energy is equal to c*totalEnergy\n CumSum = np.cumsum(X ** 2) + eps\n [a, ] = np.nonzero(CumSum > Thres)\n if len(a) > 0:\n mC = np.float64(a[0]) / (float(fftLength))\n else:\n mC = 0.0\n return (mC)\n\n def stSpectralEntropy(X, numOfShortBlocks=10):\n \"\"\"Computes the spectral entropy\"\"\"\n L = len(X) # number of frame samples\n Eol = np.sum(X ** 2) # total spectral energy\n\n subWinLength = int(np.floor(L / numOfShortBlocks)) # length of sub-frame\n if L != subWinLength * numOfShortBlocks:\n X = X[0:subWinLength * numOfShortBlocks]\n\n subWindows = X.reshape(subWinLength, numOfShortBlocks,\n order='F').copy() # define sub-frames (using matrix reshape)\n s = np.sum(subWindows ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies\n En = -np.sum(s * np.log2(s + eps)) # compute spectral entropy\n\n return En\n\n def spectral_entropy(data, sampling_freq, bands=None):\n\n psd = np.abs(np.fft.rfft(data)) ** 2\n psd /= np.sum(psd) # psd as a pdf (normalised to one)\n\n if bands is None:\n power_per_band = psd[psd > 0]\n else:\n freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))\n bands = np.asarray(bands)\n\n freq_limits_low = np.concatenate([[0.0], bands])\n freq_limits_up = np.concatenate([bands, [np.Inf]])\n\n power_per_band = [np.sum(psd[np.bitwise_and(freqs >= low, freqs < up)])\n for low, up in zip(freq_limits_low, freq_limits_up)]\n\n power_per_band = power_per_band[power_per_band > 0]\n\n return -np.sum(power_per_band * np.log2(power_per_band))\n\n def fourier_features_pandas_udf(data, frequency: float = 16.0):\n\n Fs = frequency # the sampling freq (in Hz)\n 
results = []\n # fourier transforms!\n # data_fft = abs(np.fft.rfft(data))\n\n X = abs(np.fft.fft(data))\n nFFT = int(len(X) / 2) + 1\n\n X = X[0:nFFT] # normalize fft\n X = X / len(X)\n\n if \"fft_centroid\" or \"fft_spread\" in feature_names:\n C, S = stSpectralCentroidAndSpread(X, Fs) # spectral centroid and spread\n if \"fft_centroid\" in feature_names:\n results.append(C)\n if \"fft_spread\" in feature_names:\n results.append(S)\n if \"spectral_entropy\" in feature_names:\n se = stSpectralEntropy(X) # spectral entropy\n results.append(se)\n if \"spectral_entropy_old\" in feature_names:\n se_old = spectral_entropy(X, frequency) # spectral flux\n results.append(se_old)\n if \"fft_flux\" in feature_names:\n flx = stSpectralFlux(X, X.copy()) # spectral flux\n results.append(flx)\n if \"spectral_folloff\" in feature_names:\n roff = stSpectralRollOff(X, 0.90, frequency) # spectral rolloff\n results.append(roff)\n return pd.Series(results)\n\n @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)\n def get_fft_features(df):\n timestamp = df['timestamp'].iloc[0]\n localtime = df['localtime'].iloc[0]\n user = df['user'].iloc[0]\n version = df['version'].iloc[0]\n start_time = timestamp\n end_time = df['timestamp'].iloc[-1]\n\n df.drop(exclude_col_names, axis=1, inplace=True)\n\n df_ff = df.apply(fourier_features_pandas_udf)\n df3 = df_ff.T\n pd.set_option('display.max_colwidth', -1)\n # split column into multiple columns\n # df3 = pd.DataFrame(df_ff.values.tolist(), index=df_ff.index)\n # print(\"**\"*50)\n # print(type(df), type(df_ff), type(df3))\n # print(df)\n # print(df_ff)\n # print(df_ff.values.tolist())\n # print(df3)\n # print(\"**\" * 50)\n # print(\"FEATURE-NAMES\", feature_names)\n df3.columns = feature_names\n\n # multiple rows to one row\n output = df3.unstack().to_frame().sort_index(level=1).T\n output.columns = [f'{j}_{i}' for i, j in output.columns]\n\n basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]],\n columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])\n # df.insert(loc=0, columns=, value=basic_cols)\n return basic_df.assign(**output)\n\n return self.compute(get_fft_features, windowDuration=windowDuration, slideDuration=slideDuration,\n groupByColumnName=groupByColumnName, startTime=startTime)" ]
[ [ "numpy.concatenate", "numpy.fft.rfft", "numpy.asarray", "pandas.set_option", "numpy.sum", "numpy.nonzero", "numpy.float64", "numpy.fft.fft", "numpy.bitwise_and", "numpy.cumsum", "pandas.Series", "numpy.log2", "numpy.floor" ] ]
ReginaPeralta/ReginaPeralta
[ "68ab1462083d40d7efb4af77d6fa915ad73de2b8" ]
[ "src/pygaps/modelling/temkinapprox.py" ]
[ "\"\"\"Temkin Approximation isotherm model.\"\"\"\n\nimport numpy\nimport scipy\n\nfrom ..utilities.exceptions import CalculationError\nfrom .base_model import IsothermBaseModel\n\n\nclass TemkinApprox(IsothermBaseModel):\n r\"\"\"\n Asymptotic approximation to the Temkin isotherm.\n\n .. math::\n\n n(p) = n_m \\frac{K p}{1 + K p} + n_m \\theta (\\frac{K p}{1 + K p})^2 (\\frac{K p}{1 + K p} -1)\n\n Notes\n -----\n The Temkin adsorption isotherm [#]_, like the Langmuir model, considers\n a surface with n_m identical adsorption sites, but takes into account adsorbate-\n adsorbate interactions by assuming that the enthalpy of adsorption is a linear\n function of the coverage. The Temkin isotherm is derived [#]_ using a\n mean-field argument and used an asymptotic approximation\n to obtain an explicit equation for the loading.\n\n Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model.\n The additional parameter :math:`\\theta` describes the strength of the adsorbate-adsorbate\n interactions (:math:`\\theta < 0` for attractions).\n\n References\n ----------\n .. [#] V. P. M.I. Tempkin, Kinetics of ammonia synthesis on promoted iron\n catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356.\n .. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513\n\n \"\"\"\n\n # Model parameters\n name = 'TemkinApprox'\n calculates = 'loading'\n param_names = [\"n_m\", \"K\", \"tht\"]\n param_bounds = {\n \"n_m\": [0, numpy.inf],\n \"K\": [0, numpy.inf],\n \"tht\": [0, numpy.inf],\n }\n\n def __init__(self):\n \"\"\"Instantiation function.\"\"\"\n\n self.params = {\"n_m\": numpy.nan, \"K\": numpy.nan, \"tht\": numpy.nan}\n\n def loading(self, pressure):\n \"\"\"\n Calculate loading at specified pressure.\n\n Parameters\n ----------\n pressure : float\n The pressure at which to calculate the loading.\n\n Returns\n -------\n float\n Loading at specified pressure.\n \"\"\"\n lang_load = self.params[\"K\"] * pressure / (1.0 + self.params[\"K\"] * pressure)\n return self.params[\"n_m\"] * (lang_load + self.params[\"tht\"] * lang_load ** 2 *\n (lang_load - 1))\n\n def pressure(self, loading):\n \"\"\"\n Calculate pressure at specified loading.\n\n For the TemkinApprox model, the pressure will\n be computed numerically as no analytical inversion is possible.\n\n Parameters\n ----------\n loading : float\n The loading at which to calculate the pressure.\n\n Returns\n -------\n float\n Pressure at specified loading.\n \"\"\"\n def fun(x):\n return self.loading(x) - loading\n\n opt_res = scipy.optimize.root(fun, 0, method='hybr')\n\n if not opt_res.success:\n raise CalculationError(\"\"\"\n Root finding for value {0} failed.\n \"\"\".format(loading))\n\n return opt_res.x\n\n def spreading_pressure(self, pressure):\n r\"\"\"\n Calculate spreading pressure at specified gas pressure.\n\n Function that calculates spreading pressure by solving the\n following integral at each point i.\n\n .. math::\n\n \\pi = \\int_{0}^{p_i} \\frac{n_i(p_i)}{p_i} dp_i\n\n The integral for the TemkinApprox model is solved analytically.\n\n .. 
math::\n\n \\pi = n_m \\Big( \\ln{(1 + K p)} + \\frac{\\theta (2 K p + 1)}{2(1 + K p)^2}\\Big)\n\n Parameters\n ----------\n pressure : float\n The pressure at which to calculate the spreading pressure.\n\n Returns\n -------\n float\n Spreading pressure at specified pressure.\n \"\"\"\n one_plus_kp = 1.0 + self.params[\"K\"] * pressure\n return self.params[\"n_m\"] * (numpy.log(one_plus_kp) +\n self.params[\"tht\"] * (2.0 * self.params[\"K\"] * pressure + 1.0) /\n (2.0 * one_plus_kp ** 2))\n\n def initial_guess(self, pressure, loading):\n \"\"\"\n Return initial guess for fitting.\n\n Parameters\n ----------\n pressure : ndarray\n Pressure data.\n loading : ndarray\n Loading data.\n\n Returns\n -------\n dict\n Dictionary of initial guesses for the parameters.\n \"\"\"\n saturation_loading, langmuir_k = super().initial_guess(pressure, loading)\n\n guess = {\"n_m\": saturation_loading, \"K\": langmuir_k, \"tht\": 0.0}\n\n for param in guess:\n if guess[param] < self.param_bounds[param][0]:\n guess[param] = self.param_bounds[param][0]\n if guess[param] > self.param_bounds[param][1]:\n guess[param] = self.param_bounds[param][1]\n\n return guess\n" ]
[ [ "scipy.optimize.root", "numpy.log" ] ]
SHSongs/ETF
[ "3bfdebc37e3956648e71094b54f5674e132a6aca" ]
[ "client/main.py" ]
[ "import urllib.request as ul\nimport json\nimport pandas as pd\n\n\ndef get_chart(ticker, period1, period2):\n url = f\"http://localhost:9000/chart/{ticker}?period1={period1}&period2={period2}\"\n\n request = ul.Request(url)\n response = ul.urlopen(request)\n\n rescode = response.getcode()\n if rescode != 200:\n return None\n\n responsedata = response.read()\n my_json = responsedata.decode('utf8').replace(\"'\", '\"')\n data = json.loads(my_json)\n\n return data[\"data\"][\"history\"]\n\n\ninfo = get_chart(\"aaa\", 20211015, 20211104)\ndf = pd.json_normalize(info)\ndf.to_csv(\"aaa_chart.csv\")\n\nprint(df)\n" ]
[ [ "pandas.json_normalize" ] ]
tudo-spect/plot_spect_images
[ "ccd472f463777703328a07c9f6b03dd3d58a8b5e" ]
[ "plot_spect_image.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\nfrom camera import add_pmts\n\nparser = argparse.ArgumentParser()\nparser.add_argument('inputfile', help='A datafile created by the old SPECT camera')\nparser.add_argument(\n '--outputfile', '-o', required=False,\n dest='outputfile',\n help='If given, save the image to outputfile'\n)\n\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n\n data = np.fromfile(args.inputfile, dtype='<u2')\n width = np.sqrt(data.size)\n assert width.is_integer()\n width = int(width)\n\n img = data.reshape((width, width))\n\n width = 60\n x_offset = 1\n y_offset = 0\n\n x0 = -width/2 - x_offset\n x1 = width/2 - x_offset\n y0 = -width/2 - y_offset\n y1 = width/2 - y_offset\n\n fig, ax = plt.subplots()\n\n ax.set_aspect(1)\n ax.set_axis_bgcolor('k')\n\n plot = ax.imshow(\n img,\n cmap='inferno',\n interpolation='nearest',\n extent=np.array([x0, x1, y0, y1]),\n )\n fig.colorbar(plot, label='Counts')\n\n add_pmts(ax=ax, linewidth=1.5)\n\n ax.set_xlim(-35, 35)\n ax.set_ylim(-26, 26)\n\n ax.set_xlabel('$x \\,/\\, \\mathrm{cm}$')\n ax.set_ylabel('$y \\,/\\, \\mathrm{cm}$')\n\n if args.outputfile:\n fig.savefig(args.outputfile, dpi=300)\n else:\n plt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.subplots", "numpy.sqrt", "numpy.fromfile", "matplotlib.pyplot.show" ] ]
MS44neuro/drtest
[ "9e17105f39338cef7d65e541996c70f2c2731ab4" ]
[ "examples/scripts/data_reduction_ex1.py" ]
[ "\"\"\"This code demonstrates how to perform the tested data reduction module.\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport glob\r\nimport pyabf\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\npathToHere = os.path.abspath(os.path.dirname(__file__))\r\npathToData = os.path.abspath(pathToHere + \"/../data/\")\r\npathToModule = os.path.abspath(pathToHere + \"/../../src/\")\r\n\r\nsys.path.insert(0, pathToModule)\r\n\r\nimport drtest as dr\r\n\r\n\r\nfor file in sorted(glob.glob(pathToData + \"/*.abf\")):\r\n \r\n abf = pyabf.ABF(file)\r\n abf.setSweep(4, 1)\r\n \r\n xdata = abf.sweepX\r\n ydata = abf.sweepY\r\n \r\n da = dr.DataAnalysis(xdata, ydata)\r\n xdec, ydec = da.data_reduction(method='decimate', reduction_factor=4)\r\n xavr, yavr = da.data_reduction(method='average', reduction_factor=4)\r\n xmin, ymin = da.data_reduction(method='min', reduction_factor=4)\r\n xmax, ymax = da.data_reduction(method='max', reduction_factor=4)\r\n xminmax, yminmax = da.data_reduction(method='min/max', reduction_factor=4)\r\n \r\n xxxx = [xdec, xavr, xmin, xmax, xminmax]\r\n yyyy = [ydec, yavr, ymin, ymax, yminmax] \r\n \r\n ## 2D plot\r\n # plt.plot(xdec, ydec)\r\n # plt.plot(xavr, yavr)\r\n # plt.plot(xmin, ymin)\r\n # plt.plot(xmax, ymax)\r\n # plt.show()\r\n \r\n ## 3D plot\r\n fig = plt.figure()\r\n ax = fig.gca(projection='3d')\r\n zs = [i for i in range(0, 6)]\r\n\r\n ax.plot(xdata, ydata, zs[0], zdir='y', color='black', linewidth=1.5)\r\n ax.plot(xdec, ydec, zs[1], zdir='y', color='red', linewidth=1.5)\r\n ax.plot(xavr, yavr, zs[2], zdir='y', color='green', linewidth=1.5)\r\n ax.plot(xmin, ymin, zs[3], zdir='y', color='orange', linewidth=1.5)\r\n ax.plot(xmax, ymax, zs[4], zdir='y', color='blue', linewidth=1.5)\r\n ax.plot(xminmax, yminmax, zs[5], zdir='y', color='brown', linewidth=1.5)\r\n \r\n zlabels = [' ', 'raw data', 'decimate', 'average', 'minimum', 'maximum', 'min/max']\r\n ax.set_xlabel('Time (s)', fontweight='bold', fontsize='medium')\r\n ax.set_zlabel('Voltage (mV)', fontweight='bold', fontsize='medium')\r\n ax.set_yticklabels(zlabels, rotation=-15, verticalalignment='baseline', horizontalalignment='left', fontweight='bold')\r\n \r\n for angle in range(0, 360):\r\n ax.view_init(25, angle)\r\n plt.draw()\r\n plt.pause(.0001)" ]
[ [ "matplotlib.pyplot.draw", "matplotlib.pyplot.pause", "matplotlib.pyplot.figure" ] ]